Diffstat (limited to 'deps/v8/src/wasm')
-rw-r--r--  deps/v8/src/wasm/DIR_METADATA  11
-rw-r--r--  deps/v8/src/wasm/OWNERS  2
-rw-r--r--  deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h  108
-rw-r--r--  deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h  123
-rw-r--r--  deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h  122
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-assembler.cc  23
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-assembler.h  59
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-compiler.cc  242
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-register.h  4
-rw-r--r--  deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h  76
-rw-r--r--  deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h  101
-rw-r--r--  deps/v8/src/wasm/baseline/ppc/OWNERS  1
-rw-r--r--  deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h  60
-rw-r--r--  deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h  60
-rw-r--r--  deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h  127
-rw-r--r--  deps/v8/src/wasm/c-api.cc  12
-rw-r--r--  deps/v8/src/wasm/decoder.h  189
-rw-r--r--  deps/v8/src/wasm/function-body-decoder-impl.h  1221
-rw-r--r--  deps/v8/src/wasm/function-body-decoder.cc  44
-rw-r--r--  deps/v8/src/wasm/function-body-decoder.h  4
-rw-r--r--  deps/v8/src/wasm/function-compiler.cc  22
-rw-r--r--  deps/v8/src/wasm/function-compiler.h  18
-rw-r--r--  deps/v8/src/wasm/graph-builder-interface.cc  53
-rw-r--r--  deps/v8/src/wasm/memory-tracing.cc  41
-rw-r--r--  deps/v8/src/wasm/memory-tracing.h  8
-rw-r--r--  deps/v8/src/wasm/module-compiler.cc  901
-rw-r--r--  deps/v8/src/wasm/module-compiler.h  3
-rw-r--r--  deps/v8/src/wasm/module-decoder.cc  81
-rw-r--r--  deps/v8/src/wasm/module-instantiate.cc  62
-rw-r--r--  deps/v8/src/wasm/streaming-decoder.cc  13
-rw-r--r--  deps/v8/src/wasm/value-type.h  133
-rw-r--r--  deps/v8/src/wasm/wasm-code-manager.cc  50
-rw-r--r--  deps/v8/src/wasm/wasm-code-manager.h  25
-rw-r--r--  deps/v8/src/wasm/wasm-constants.h  5
-rw-r--r--  deps/v8/src/wasm/wasm-debug-evaluate.cc  8
-rw-r--r--  deps/v8/src/wasm/wasm-debug-evaluate.h  6
-rw-r--r--  deps/v8/src/wasm/wasm-debug.cc  81
-rw-r--r--  deps/v8/src/wasm/wasm-debug.h  4
-rw-r--r--  deps/v8/src/wasm/wasm-engine.cc  5
-rw-r--r--  deps/v8/src/wasm/wasm-engine.h  4
-rw-r--r--  deps/v8/src/wasm/wasm-external-refs.cc  60
-rw-r--r--  deps/v8/src/wasm/wasm-js.cc  785
-rw-r--r--  deps/v8/src/wasm/wasm-js.h  4
-rw-r--r--  deps/v8/src/wasm/wasm-module-builder.cc  2
-rw-r--r--  deps/v8/src/wasm/wasm-module-builder.h  4
-rw-r--r--  deps/v8/src/wasm/wasm-module.cc  11
-rw-r--r--  deps/v8/src/wasm/wasm-module.h  47
-rw-r--r--  deps/v8/src/wasm/wasm-objects-inl.h  25
-rw-r--r--  deps/v8/src/wasm/wasm-objects.cc  73
-rw-r--r--  deps/v8/src/wasm/wasm-objects.h  32
-rw-r--r--  deps/v8/src/wasm/wasm-objects.tq  13
-rw-r--r--  deps/v8/src/wasm/wasm-opcodes-inl.h  49
-rw-r--r--  deps/v8/src/wasm/wasm-opcodes.h  126
-rw-r--r--  deps/v8/src/wasm/wasm-result.h  9
-rw-r--r--  deps/v8/src/wasm/wasm-serialization.cc  14
-rw-r--r--  deps/v8/src/wasm/wasm-value.h  11
56 files changed, 3259 insertions, 2118 deletions
diff --git a/deps/v8/src/wasm/DIR_METADATA b/deps/v8/src/wasm/DIR_METADATA
new file mode 100644
index 0000000000..3b428d9660
--- /dev/null
+++ b/deps/v8/src/wasm/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>WebAssembly"
+}
\ No newline at end of file
diff --git a/deps/v8/src/wasm/OWNERS b/deps/v8/src/wasm/OWNERS
index 801795058d..38224181e9 100644
--- a/deps/v8/src/wasm/OWNERS
+++ b/deps/v8/src/wasm/OWNERS
@@ -8,5 +8,3 @@ thibaudm@chromium.org
zhin@chromium.org
per-file wasm-js.*=adamk@chromium.org
-
-# COMPONENT: Blink>JavaScript>WebAssembly
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index b4966c012b..af969f387e 100644
--- a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -139,6 +139,8 @@ template <void (Assembler::*op)(Register, Register, const Operand&, SBit,
SBit, Condition)>
inline void I64BinopI(LiftoffAssembler* assm, LiftoffRegister dst,
LiftoffRegister lhs, int32_t imm) {
+ // The compiler allocated registers such that either {dst == lhs} or there is
+ // no overlap between the two.
DCHECK_NE(dst.low_gp(), lhs.high_gp());
(assm->*op)(dst.low_gp(), lhs.low_gp(), Operand(imm), SetCC, al);
// Top half of the immediate sign extended, either 0 or -1.
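As a reference for the pair lowering the comment above describes, here is a scalar sketch (helper name invented here, not part of the patch), taking addition as the example op:

    #include <cstdint>

    // Add a sign-extended 32-bit immediate to an i64 held as two 32-bit halves,
    // the way the Liftoff register-pair ops do: the low add produces a carry,
    // and the high half of the sign-extended immediate is either 0 or -1.
    void AddImmediateToI64Pair(uint32_t* low, uint32_t* high, int32_t imm) {
      uint64_t sum = uint64_t{*low} + static_cast<uint32_t>(imm);
      uint32_t carry = static_cast<uint32_t>(sum >> 32);
      uint32_t imm_high = imm < 0 ? ~uint32_t{0} : 0u;
      *low = static_cast<uint32_t>(sum);
      *high = *high + imm_high + carry;
    }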
@@ -532,16 +534,14 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
}
}
-void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
- int size) {
- DCHECK_LE(offset, kMaxInt);
+void LiftoffAssembler::LoadFromInstance(Register dst, int offset, int size) {
+ DCHECK_LE(0, offset);
DCHECK_EQ(4, size);
ldr(dst, liftoff::GetInstanceOperand());
ldr(dst, MemOperand(dst, offset));
}
-void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
- uint32_t offset) {
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst, int offset) {
LoadFromInstance(dst, offset, kTaggedSize);
}
@@ -1005,11 +1005,13 @@ void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
if (cache_state()->is_used(LiftoffRegister(dst_high))) {
SpillRegister(LiftoffRegister(dst_high));
}
- UseScratchRegisterScope temps(this);
- Register actual_addr = liftoff::CalculateActualAddress(
- this, &temps, src_addr, offset_reg, offset_imm);
- ldrexd(dst_low, dst_high, actual_addr);
- dmb(ISH);
+ {
+ UseScratchRegisterScope temps(this);
+ Register actual_addr = liftoff::CalculateActualAddress(
+ this, &temps, src_addr, offset_reg, offset_imm);
+ ldrexd(dst_low, dst_high, actual_addr);
+ dmb(ISH);
+ }
ParallelRegisterMove(
{{dst, LiftoffRegister::ForPair(dst_low, dst_high), kWasmI64}});
@@ -1323,12 +1325,10 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
}
void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
-#ifdef DEBUG
// The {str} instruction needs a temp register when the immediate in the
// provided MemOperand does not fit into 12 bits. This happens for large stack
// frames. This DCHECK checks that the temp register is available when needed.
DCHECK(UseScratchRegisterScope{this}.CanAcquire());
-#endif
DCHECK_LT(0, offset);
RecordUsedSpillOffset(offset);
MemOperand dst(fp, -offset);
@@ -2259,6 +2259,18 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
NeonMemOperand(actual_src_addr));
vmovl(NeonU32, liftoff::GetSimd128Register(dst), dst.low_fp());
}
+ } else if (transform == LoadTransformationKind::kZeroExtend) {
+ Simd128Register dest = liftoff::GetSimd128Register(dst);
+ if (memtype == MachineType::Int32()) {
+ vmov(dest, 0);
+ vld1s(Neon32, NeonListOperand(dst.low_fp()), 0,
+ NeonMemOperand(actual_src_addr));
+ } else {
+ DCHECK_EQ(MachineType::Int64(), memtype);
+ vmov(dest.high(), 0);
+ vld1(Neon64, NeonListOperand(dest.low()),
+ NeonMemOperand(actual_src_addr));
+ }
} else {
DCHECK_EQ(LoadTransformationKind::kSplat, transform);
if (memtype == MachineType::Int8()) {
@@ -2921,6 +2933,23 @@ void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
+void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ QwNeonRegister dest = liftoff::GetSimd128Register(dst);
+ QwNeonRegister left = liftoff::GetSimd128Register(lhs);
+ QwNeonRegister right = liftoff::GetSimd128Register(rhs);
+
+ UseScratchRegisterScope temps(this);
+ Simd128Register scratch = temps.AcquireQ();
+
+ vmull(NeonS16, scratch, left.low(), right.low());
+ vpadd(Neon32, dest.low(), scratch.low(), scratch.high());
+
+ vmull(NeonS16, scratch, left.high(), right.high());
+ vpadd(Neon32, dest.high(), scratch.low(), scratch.high());
+}
+
void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
LiftoffRegister src) {
vdup(Neon16, liftoff::GetSimd128Register(dst), src.gp());
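The new emit_i32x4_dot_i16x8_s above lowers the wasm i32x4.dot_i16x8_s instruction with vmull (widening multiply) followed by vpadd (pairwise add). As a reference for the intended lane semantics, a scalar sketch (illustrative only, not part of the patch):

    #include <cstdint>

    // dst[i] = lhs[2*i] * rhs[2*i] + lhs[2*i+1] * rhs[2*i+1], with the i16
    // products widened to i32 before the addition.
    void I32x4DotI16x8S(int32_t dst[4], const int16_t lhs[8], const int16_t rhs[8]) {
      for (int i = 0; i < 4; ++i) {
        dst[i] = int32_t{lhs[2 * i]} * rhs[2 * i] +
                 int32_t{lhs[2 * i + 1]} * rhs[2 * i + 1];
      }
    }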
@@ -3015,9 +3044,9 @@ void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
-void LiftoffAssembler::emit_i16x8_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
vqadd(NeonS16, liftoff::GetSimd128Register(dst),
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
@@ -3028,16 +3057,16 @@ void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
vqsub(NeonS16, liftoff::GetSimd128Register(dst),
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
vqsub(NeonU16, liftoff::GetSimd128Register(dst),
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
@@ -3048,9 +3077,9 @@ void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
-void LiftoffAssembler::emit_i16x8_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
vqadd(NeonU16, liftoff::GetSimd128Register(dst),
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
@@ -3133,7 +3162,6 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
}
int table_size = src1 == src2 ? 2 : 4;
- uint32_t mask = table_size == 2 ? 0x0F0F0F0F : 0x1F1F1F1F;
int scratch_s_base = scratch.code() * 4;
for (int j = 0; j < 4; j++) {
@@ -3141,11 +3169,9 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
for (int i = 3; i >= 0; i--) {
imm = (imm << 8) | shuffle[j * 4 + i];
}
- uint32_t four_lanes = imm;
+ DCHECK_EQ(0, imm & (table_size == 2 ? 0xF0F0F0F0 : 0xE0E0E0E0));
// Ensure indices are in [0,15] if table_size is 2, or [0,31] if 4.
- four_lanes &= mask;
- vmov(SwVfpRegister::from_code(scratch_s_base + j),
- Float32::FromBits(four_lanes));
+ vmov(SwVfpRegister::from_code(scratch_s_base + j), Float32::FromBits(imm));
}
DwVfpRegister table_base = src1.low();
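The change above replaces runtime masking of the shuffle indices with a DCHECK: callers are now expected to hand over in-range indices. With a two-register table an index must lie in [0, 15] (no bits in 0xF0), with a four-register table in [0, 31] (no bits in 0xE0); packing four indices per 32-bit word yields the 0xF0F0F0F0 / 0xE0E0E0E0 masks in the DCHECK. The invariant as a sketch (illustrative helper, not part of the patch):

    #include <cstdint>

    bool ShuffleIndicesInRange(const uint8_t shuffle[16], int table_size) {
      uint8_t disallowed = table_size == 2 ? 0xF0 : 0xE0;
      for (int i = 0; i < 16; ++i) {
        if (shuffle[i] & disallowed) return false;
      }
      return true;
    }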
@@ -3277,9 +3303,9 @@ void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
-void LiftoffAssembler::emit_i8x16_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
vqadd(NeonS8, liftoff::GetSimd128Register(dst),
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
@@ -3290,16 +3316,16 @@ void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
vqsub(NeonS8, liftoff::GetSimd128Register(dst),
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
vqsub(NeonU8, liftoff::GetSimd128Register(dst),
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
@@ -3310,9 +3336,9 @@ void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
-void LiftoffAssembler::emit_i8x16_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
vqadd(NeonU8, liftoff::GetSimd128Register(dst),
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index 4fe3abc544..402f0d2e84 100644
--- a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -186,25 +186,36 @@ int LiftoffAssembler::PrepareStackFrame() {
void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
int stack_param_delta) {
UseScratchRegisterScope temps(this);
- Register scratch = temps.AcquireX();
+ temps.Exclude(x16, x17);
+
+ // This is the previous stack pointer value (before we push the lr and the
+ // fp). We need to keep it to authenticate the lr and adjust the new stack
+ // pointer afterwards.
+ Add(x16, fp, 16);
+
+ // Load the fp and lr of the old frame, they will be pushed in the new frame
+ // during the actual call.
+#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
+ Ldp(fp, x17, MemOperand(fp));
+ Autib1716();
+ Mov(lr, x17);
+#else
+ Ldp(fp, lr, MemOperand(fp));
+#endif
- // Push the return address and frame pointer to complete the stack frame.
- sub(sp, sp, 16);
- ldr(scratch, MemOperand(fp, 8));
- Poke(scratch, 8);
- ldr(scratch, MemOperand(fp, 0));
- Poke(scratch, 0);
+ temps.Include(x17);
+
+ Register scratch = temps.AcquireX();
- // Shift the whole frame upwards.
- int slot_count = num_callee_stack_params + 2;
+ // Shift the whole frame upwards, except for fp and lr.
+ int slot_count = num_callee_stack_params;
for (int i = slot_count - 1; i >= 0; --i) {
ldr(scratch, MemOperand(sp, i * 8));
- str(scratch, MemOperand(fp, (i - stack_param_delta) * 8));
+ str(scratch, MemOperand(x16, (i - stack_param_delta) * 8));
}
- // Set the new stack and frame pointer.
- Sub(sp, fp, stack_param_delta * 8);
- Pop<kAuthLR>(fp, lr);
+ // Set the new stack pointer.
+ Sub(sp, x16, stack_param_delta * 8);
}
void LiftoffAssembler::PatchPrepareStackFrame(int offset, int frame_size) {
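In the rewritten prologue above, x16 holds the caller's stack pointer value (old fp + 16); the caller's fp and lr are reloaded (lr is authenticated when control-flow integrity is enabled), and only the callee stack parameters are shifted. The slot arithmetic as a plain sketch with illustrative helper names (not part of the patch):

    #include <cstdint>

    // Callee stack parameter i moves from sp + 8*i to old_sp + 8*(i - delta),
    // where old_sp = fp + 16 is kept in x16 and delta is stack_param_delta.
    uintptr_t ParamTargetAddress(uintptr_t old_sp, int i, int delta) {
      return old_sp + intptr_t{8} * (i - delta);
    }

    // The new stack pointer after the shift: Sub(sp, x16, delta * 8).
    uintptr_t NewStackPointer(uintptr_t old_sp, int delta) {
      return old_sp - intptr_t{8} * delta;
    }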
@@ -302,9 +313,8 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
}
}
-void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
- int size) {
- DCHECK_LE(offset, kMaxInt);
+void LiftoffAssembler::LoadFromInstance(Register dst, int offset, int size) {
+ DCHECK_LE(0, offset);
Ldr(dst, liftoff::GetInstanceOperand());
DCHECK(size == 4 || size == 8);
if (size == 4) {
@@ -314,9 +324,8 @@ void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
}
}
-void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
- uint32_t offset) {
- DCHECK_LE(offset, kMaxInt);
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst, int offset) {
+ DCHECK_LE(0, offset);
Ldr(dst, liftoff::GetInstanceOperand());
LoadTaggedPointerField(dst, MemOperand(dst, offset));
}
@@ -676,11 +685,12 @@ void LiftoffAssembler::AtomicCompareExchange(
}
UseScratchRegisterScope temps(this);
- Register store_result = temps.AcquireW();
Register actual_addr = liftoff::CalculateActualAddress(
this, dst_addr, offset_reg, offset_imm, temps.AcquireX());
+ Register store_result = temps.AcquireW();
+
Label retry;
Label done;
Bind(&retry);
@@ -1495,6 +1505,13 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
Ldr(dst.fp().D(), src_op);
Uxtl(dst.fp().V2D(), dst.fp().V2S());
}
+ } else if (transform == LoadTransformationKind::kZeroExtend) {
+ if (memtype == MachineType::Int32()) {
+ Ldr(dst.fp().S(), src_op);
+ } else {
+ DCHECK_EQ(MachineType::Int64(), memtype);
+ Ldr(dst.fp().D(), src_op);
+ }
} else {
// ld1r only allows no offset or post-index, so emit an add.
DCHECK_EQ(LoadTransformationKind::kSplat, transform);
@@ -2003,6 +2020,17 @@ void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
Umax(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
}
+void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ UseScratchRegisterScope scope(this);
+ VRegister tmp1 = scope.AcquireV(kFormat4S);
+ VRegister tmp2 = scope.AcquireV(kFormat4S);
+ Smull(tmp1, lhs.fp().V4H(), rhs.fp().V4H());
+ Smull2(tmp2, lhs.fp().V8H(), rhs.fp().V8H());
+ Addp(dst.fp().V4S(), tmp1, tmp2);
+}
+
void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
LiftoffRegister src) {
Dup(dst.fp().V8H(), src.gp().W());
@@ -2105,9 +2133,9 @@ void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
Add(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
}
-void LiftoffAssembler::emit_i16x8_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
Sqadd(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
}
@@ -2116,15 +2144,15 @@ void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
Sub(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
Sqsub(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
Uqsub(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
}
@@ -2133,9 +2161,9 @@ void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
Mul(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
}
-void LiftoffAssembler::emit_i16x8_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
Uqadd(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
}
@@ -2187,12 +2215,13 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
Mov(src2.Q(), rhs.fp().Q());
}
- uint8_t mask = lhs == rhs ? 0x0F : 0x1F;
int64_t imms[2] = {0, 0};
for (int i = 7; i >= 0; i--) {
- imms[0] = (imms[0] << 8) | (shuffle[i] & mask);
- imms[1] = (imms[1] << 8) | (shuffle[i + 8] & mask);
+ imms[0] = (imms[0] << 8) | (shuffle[i]);
+ imms[1] = (imms[1] << 8) | (shuffle[i + 8]);
}
+ DCHECK_EQ(0, (imms[0] | imms[1]) &
+ (lhs == rhs ? 0xF0F0F0F0F0F0F0F0 : 0xE0E0E0E0E0E0E0E0));
Movi(temp.V16B(), imms[1], imms[0]);
@@ -2307,9 +2336,9 @@ void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
Add(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
}
-void LiftoffAssembler::emit_i8x16_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
Sqadd(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
}
@@ -2318,15 +2347,15 @@ void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
Sub(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
Sqsub(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
Uqsub(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
}
@@ -2335,9 +2364,9 @@ void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
Mul(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
}
-void LiftoffAssembler::emit_i8x16_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
Uqadd(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
}
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index 3c2fccc997..5e640093c4 100644
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -261,16 +261,14 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
}
}
-void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
- int size) {
- DCHECK_LE(offset, kMaxInt);
+void LiftoffAssembler::LoadFromInstance(Register dst, int offset, int size) {
+ DCHECK_LE(0, offset);
mov(dst, liftoff::GetInstanceOperand());
DCHECK_EQ(4, size);
mov(dst, Operand(dst, offset));
}
-void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
- uint32_t offset) {
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst, int offset) {
LoadFromInstance(dst, offset, kTaggedSize);
}
@@ -1456,31 +1454,19 @@ template <void (Assembler::*op)(Register, const Immediate&),
void (Assembler::*op_with_carry)(Register, int32_t)>
inline void OpWithCarryI(LiftoffAssembler* assm, LiftoffRegister dst,
LiftoffRegister lhs, int32_t imm) {
- // First, compute the low half of the result, potentially into a temporary dst
- // register if {dst.low_gp()} equals any register we need to
- // keep alive for computing the upper half.
- LiftoffRegList keep_alive = LiftoffRegList::ForRegs(lhs.high_gp());
- Register dst_low = keep_alive.has(dst.low_gp())
- ? assm->GetUnusedRegister(kGpReg, keep_alive).gp()
- : dst.low_gp();
-
- if (dst_low != lhs.low_gp()) assm->mov(dst_low, lhs.low_gp());
- (assm->*op)(dst_low, Immediate(imm));
+ // The compiler allocated registers such that either {dst == lhs} or there is
+ // no overlap between the two.
+ DCHECK_NE(dst.low_gp(), lhs.high_gp());
- // Now compute the upper half, while keeping alive the previous result.
- keep_alive = LiftoffRegList::ForRegs(dst_low);
- Register dst_high = keep_alive.has(dst.high_gp())
- ? assm->GetUnusedRegister(kGpReg, keep_alive).gp()
- : dst.high_gp();
+ // First, compute the low half of the result.
+ if (dst.low_gp() != lhs.low_gp()) assm->mov(dst.low_gp(), lhs.low_gp());
+ (assm->*op)(dst.low_gp(), Immediate(imm));
- if (dst_high != lhs.high_gp()) assm->mov(dst_high, lhs.high_gp());
+ // Now compute the upper half.
+ if (dst.high_gp() != lhs.high_gp()) assm->mov(dst.high_gp(), lhs.high_gp());
// Top half of the immediate sign extended, either 0 or -1.
int32_t sign_extend = imm < 0 ? -1 : 0;
- (assm->*op_with_carry)(dst_high, sign_extend);
-
- // If necessary, move result into the right registers.
- LiftoffRegister tmp_result = LiftoffRegister::ForPair(dst_low, dst_high);
- if (tmp_result != dst) assm->Move(dst, tmp_result, kWasmI64);
+ (assm->*op_with_carry)(dst.high_gp(), sign_extend);
}
} // namespace liftoff
@@ -2665,6 +2651,13 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
} else if (memtype == MachineType::Uint32()) {
Pmovzxdq(dst.fp(), src_op);
}
+ } else if (transform == LoadTransformationKind::kZeroExtend) {
+ if (memtype == MachineType::Int32()) {
+ movss(dst.fp(), src_op);
+ } else {
+ DCHECK_EQ(MachineType::Int64(), memtype);
+ movsd(dst.fp(), src_op);
+ }
} else {
DCHECK_EQ(LoadTransformationKind::kSplat, transform);
if (memtype == MachineType::Int8()) {
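The kZeroExtend branch above implements the v128.load32_zero / v128.load64_zero transforms: 4 or 8 bytes are loaded into the low lanes and the rest of the 128-bit result is zeroed (the memory forms of movss/movsd already clear the upper bits). A scalar sketch of the semantics (illustrative only, not part of the patch):

    #include <cstdint>
    #include <cstring>

    void Load32Zero(uint8_t dst[16], const void* src) {
      std::memset(dst, 0, 16);  // upper 96 bits end up zero
      std::memcpy(dst, src, 4);
    }

    void Load64Zero(uint8_t dst[16], const void* src) {
      std::memset(dst, 0, 16);  // upper 64 bits end up zero
      std::memcpy(dst, src, 8);
    }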
@@ -2700,15 +2693,7 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
for (int i = 3; i >= 0; i--) {
push_imm32(imms[i]);
}
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpshufb(dst.fp(), lhs.fp(), Operand(esp, 0));
- } else {
- if (dst != lhs) {
- movups(dst.fp(), lhs.fp());
- }
- pshufb(dst.fp(), Operand(esp, 0));
- }
+ Pshufb(dst.fp(), lhs.fp(), Operand(esp, 0));
mov(esp, tmp.gp());
return;
}
@@ -2723,7 +2708,7 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
}
push(Immediate(mask));
}
- Pshufb(liftoff::kScratchDoubleReg, Operand(esp, 0));
+ Pshufb(liftoff::kScratchDoubleReg, lhs.fp(), Operand(esp, 0));
for (int i = 3; i >= 0; i--) {
uint32_t mask = 0;
@@ -2734,10 +2719,7 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
}
push(Immediate(mask));
}
- if (dst.fp() != rhs.fp()) {
- movups(dst.fp(), rhs.fp());
- }
- Pshufb(dst.fp(), Operand(esp, 0));
+ Pshufb(dst.fp(), rhs.fp(), Operand(esp, 0));
Por(dst.fp(), liftoff::kScratchDoubleReg);
mov(esp, tmp.gp());
}
@@ -2751,10 +2733,7 @@ void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
TurboAssembler::Move(mask, uint32_t{0x70707070});
Pshufd(mask, mask, uint8_t{0x0});
Paddusb(mask, rhs.fp());
- if (lhs != dst) {
- Movaps(dst.fp(), lhs.fp());
- }
- Pshufb(dst.fp(), mask);
+ Pshufb(dst.fp(), lhs.fp(), mask);
}
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
@@ -3211,16 +3190,16 @@ void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i8x16_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddsb, &Assembler::paddsb>(
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i8x16_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddusb, &Assembler::paddusb>(
this, dst, lhs, rhs);
}
@@ -3231,16 +3210,16 @@ void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpsubsb, &Assembler::psubsb>(
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpsubusb,
&Assembler::psubusb>(this, dst, lhs,
rhs);
@@ -3409,16 +3388,16 @@ void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i16x8_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddsw, &Assembler::paddsw>(
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i16x8_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddusw, &Assembler::paddusw>(
this, dst, lhs, rhs);
}
@@ -3429,16 +3408,16 @@ void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpsubsw, &Assembler::psubsw>(
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpsubusw,
&Assembler::psubusw>(this, dst, lhs,
rhs);
@@ -3588,6 +3567,13 @@ void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
this, dst, lhs, rhs, base::Optional<CpuFeature>(SSE4_1));
}
+void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmaddwd, &Assembler::pmaddwd>(
+ this, dst, lhs, rhs);
+}
+
void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
LiftoffRegister src) {
DoubleRegister reg =
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.cc b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
index e219025e53..dea5221ac6 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
@@ -37,6 +37,7 @@ class StackTransferRecipe {
struct RegisterLoad {
enum LoadKind : uint8_t {
+ kNop, // no-op, used for high fp of a fp pair.
kConstant, // load a constant value into a register.
kStack, // fill a register from a stack slot.
kLowHalfStack, // fill a register from the low half of a stack slot.
@@ -63,6 +64,10 @@ class StackTransferRecipe {
return {half == kLowWord ? kLowHalfStack : kHighHalfStack, kWasmI32,
offset};
}
+ static RegisterLoad Nop() {
+ // ValueType does not matter.
+ return {kNop, kWasmI32, 0};
+ }
private:
RegisterLoad(LoadKind kind, ValueType type, int32_t value)
@@ -71,6 +76,8 @@ class StackTransferRecipe {
public:
explicit StackTransferRecipe(LiftoffAssembler* wasm_asm) : asm_(wasm_asm) {}
+ StackTransferRecipe(const StackTransferRecipe&) = delete;
+ StackTransferRecipe& operator=(const StackTransferRecipe&) = delete;
~StackTransferRecipe() { Execute(); }
void Execute() {
@@ -217,11 +224,11 @@ class StackTransferRecipe {
RegisterLoad::HalfStack(stack_offset, kHighWord);
} else if (dst.is_fp_pair()) {
DCHECK_EQ(kWasmS128, type);
- // load_dst_regs_.set above will set both low and high fp regs.
- // But unlike gp_pair, we load a kWasm128 in one go in ExecuteLoads.
- // So unset the top fp register to skip loading it.
- load_dst_regs_.clear(dst.high());
+ // Only need register_load for low_gp since we load 128 bits at one go.
+ // Both low and high need to be set in load_dst_regs_ but when iterating
+ // over it, both low and high will be cleared, so we won't load twice.
*register_load(dst.low()) = RegisterLoad::Stack(stack_offset, type);
+ *register_load(dst.high()) = RegisterLoad::Nop();
} else {
*register_load(dst) = RegisterLoad::Stack(stack_offset, type);
}
@@ -318,6 +325,8 @@ class StackTransferRecipe {
for (LiftoffRegister dst : load_dst_regs_) {
RegisterLoad* load = register_load(dst);
switch (load->kind) {
+ case RegisterLoad::kNop:
+ break;
case RegisterLoad::kConstant:
asm_->LoadConstant(dst, load->type == kWasmI64
? WasmValue(int64_t{load->value})
@@ -343,8 +352,6 @@ class StackTransferRecipe {
}
load_dst_regs_ = {};
}
-
- DISALLOW_COPY_AND_ASSIGN(StackTransferRecipe);
};
class RegisterReuseMap {
@@ -519,9 +526,7 @@ int LiftoffAssembler::GetTotalFrameSlotCountForGC() const {
namespace {
-constexpr AssemblerOptions DefaultLiftoffOptions() {
- return AssemblerOptions{};
-}
+AssemblerOptions DefaultLiftoffOptions() { return AssemblerOptions{}; }
} // namespace
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h
index e2bd99841f..895abbbbb4 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h
@@ -478,8 +478,8 @@ class LiftoffAssembler : public TurboAssembler {
inline void LoadConstant(LiftoffRegister, WasmValue,
RelocInfo::Mode rmode = RelocInfo::NONE);
- inline void LoadFromInstance(Register dst, uint32_t offset, int size);
- inline void LoadTaggedPointerFromInstance(Register dst, uint32_t offset);
+ inline void LoadFromInstance(Register dst, int offset, int size);
+ inline void LoadTaggedPointerFromInstance(Register dst, int offset);
inline void SpillInstance(Register instance);
inline void FillInstanceInto(Register dst);
inline void LoadTaggedPointer(Register dst, Register src_addr,
@@ -675,6 +675,15 @@ class LiftoffAssembler : public TurboAssembler {
}
}
+ inline void emit_ptrsize_zeroextend_i32(Register dst, Register src) {
+ if (kSystemPointerSize == 8) {
+ emit_type_conversion(kExprI64UConvertI32, LiftoffRegister(dst),
+ LiftoffRegister(src));
+ } else if (dst != src) {
+ Move(dst, src, kWasmI32);
+ }
+ }
+
// f32 binops.
inline void emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs);
@@ -852,20 +861,16 @@ class LiftoffAssembler : public TurboAssembler {
int32_t rhs);
inline void emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
- inline void emit_i8x16_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs);
- inline void emit_i8x16_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs);
+ inline void emit_i8x16_add_sat_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i8x16_add_sat_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
inline void emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
- inline void emit_i8x16_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs);
- inline void emit_i8x16_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs);
+ inline void emit_i8x16_sub_sat_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i8x16_sub_sat_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
inline void emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i8x16_min_s(LiftoffRegister dst, LiftoffRegister lhs,
@@ -894,20 +899,16 @@ class LiftoffAssembler : public TurboAssembler {
int32_t rhs);
inline void emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
- inline void emit_i16x8_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs);
- inline void emit_i16x8_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs);
+ inline void emit_i16x8_add_sat_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i16x8_add_sat_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
inline void emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
- inline void emit_i16x8_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs);
- inline void emit_i16x8_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs);
+ inline void emit_i16x8_sub_sat_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i16x8_sub_sat_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
inline void emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i16x8_min_s(LiftoffRegister dst, LiftoffRegister lhs,
@@ -948,6 +949,8 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister rhs);
inline void emit_i32x4_max_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
+ inline void emit_i32x4_dot_i16x8_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
inline void emit_i64x2_neg(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
@@ -1302,6 +1305,8 @@ void LiftoffAssembler::emit_i64_xori(LiftoffRegister dst, LiftoffRegister lhs,
class LiftoffStackSlots {
public:
explicit LiftoffStackSlots(LiftoffAssembler* wasm_asm) : asm_(wasm_asm) {}
+ LiftoffStackSlots(const LiftoffStackSlots&) = delete;
+ LiftoffStackSlots& operator=(const LiftoffStackSlots&) = delete;
void Add(const LiftoffAssembler::VarState& src, uint32_t src_offset,
RegPairHalf half) {
@@ -1328,8 +1333,6 @@ class LiftoffStackSlots {
base::SmallVector<Slot, 8> slots_;
LiftoffAssembler* const asm_;
-
- DISALLOW_COPY_AND_ASSIGN(LiftoffStackSlots);
};
} // namespace wasm
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index 447be8cdae..1ead202ea0 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -258,9 +258,9 @@ class DebugSideTableBuilder {
class LiftoffCompiler {
public:
// TODO(clemensb): Make this a template parameter.
- static constexpr Decoder::ValidateFlag validate = Decoder::kValidate;
+ static constexpr Decoder::ValidateFlag validate = Decoder::kBooleanValidation;
- using Value = ValueBase;
+ using Value = ValueBase<validate>;
static constexpr auto kI32 = ValueType::kI32;
static constexpr auto kI64 = ValueType::kI64;
@@ -273,7 +273,7 @@ class LiftoffCompiler {
LiftoffAssembler::CacheState state;
};
- struct Control : public ControlBase<Value> {
+ struct Control : public ControlBase<Value, validate> {
std::unique_ptr<ElseState> else_state;
LiftoffAssembler::CacheState label_state;
MovableLabel label;
@@ -557,7 +557,7 @@ class LiftoffCompiler {
void StartFunctionBody(FullDecoder* decoder, Control* block) {
for (uint32_t i = 0; i < __ num_locals(); ++i) {
if (!CheckSupportedType(decoder,
- FLAG_liftoff_extern_ref
+ FLAG_experimental_liftoff_extern_ref
? kSupportedTypes
: kSupportedTypesWithoutRefs,
__ local_type(i), "param"))
@@ -621,7 +621,7 @@ class LiftoffCompiler {
}
}
- if (FLAG_liftoff_extern_ref) {
+ if (FLAG_experimental_liftoff_extern_ref) {
// Initialize all reference type locals with ref.null.
for (uint32_t param_idx = num_params; param_idx < __ num_locals();
++param_idx) {
@@ -690,30 +690,6 @@ class LiftoffCompiler {
}
if (FLAG_trace_wasm) TraceFunctionEntry(decoder);
-
- // If we are generating debug code, do check the "hook on function call"
- // flag. If set, trigger a break.
- if (V8_UNLIKELY(for_debugging_)) {
- // If there is a breakpoint set on the first instruction (== start of the
- // function), then skip the check for "hook on function call", since we
- // will unconditionally break there anyway.
- bool has_breakpoint = next_breakpoint_ptr_ != nullptr &&
- (*next_breakpoint_ptr_ == 0 ||
- *next_breakpoint_ptr_ == decoder->position());
- if (!has_breakpoint) {
- DEBUG_CODE_COMMENT("check hook on function call");
- Register flag = __ GetUnusedRegister(kGpReg, {}).gp();
- LOAD_INSTANCE_FIELD(flag, HookOnFunctionCallAddress,
- kSystemPointerSize);
- Label no_break;
- __ Load(LiftoffRegister{flag}, flag, no_reg, 0, LoadType::kI32Load8U,
- {});
- // Unary "equal" means "equals zero".
- __ emit_cond_jump(kEqual, &no_break, kWasmI32, flag);
- EmitBreakpoint(decoder);
- __ bind(&no_break);
- }
- }
}
void GenerateOutOfLineCode(OutOfLineCode* ool) {
@@ -799,14 +775,14 @@ class LiftoffCompiler {
}
V8_NOINLINE void EmitDebuggingInfo(FullDecoder* decoder, WasmOpcode opcode) {
- DCHECK(V8_UNLIKELY(for_debugging_));
+ DCHECK(for_debugging_);
+ if (!WasmOpcodes::IsBreakable(opcode)) return;
+ bool has_breakpoint = false;
if (next_breakpoint_ptr_) {
if (*next_breakpoint_ptr_ == 0) {
// A single breakpoint at offset 0 indicates stepping.
DCHECK_EQ(next_breakpoint_ptr_ + 1, next_breakpoint_end_);
- if (WasmOpcodes::IsBreakable(opcode)) {
- EmitBreakpoint(decoder);
- }
+ has_breakpoint = true;
} else {
while (next_breakpoint_ptr_ != next_breakpoint_end_ &&
*next_breakpoint_ptr_ < decoder->position()) {
@@ -816,18 +792,34 @@ class LiftoffCompiler {
if (next_breakpoint_ptr_ == next_breakpoint_end_) {
next_breakpoint_ptr_ = next_breakpoint_end_ = nullptr;
} else if (*next_breakpoint_ptr_ == decoder->position()) {
- DCHECK(WasmOpcodes::IsBreakable(opcode));
- EmitBreakpoint(decoder);
+ has_breakpoint = true;
}
}
}
- if (dead_breakpoint_ == decoder->position()) {
+ if (has_breakpoint) {
+ EmitBreakpoint(decoder);
+ // Once we emitted a breakpoint, we don't need to check the "hook on
+ // function call" any more.
+ checked_hook_on_function_call_ = true;
+ } else if (!checked_hook_on_function_call_) {
+ checked_hook_on_function_call_ = true;
+ // Check the "hook on function call" flag. If set, trigger a break.
+ DEBUG_CODE_COMMENT("check hook on function call");
+ Register flag = __ GetUnusedRegister(kGpReg, {}).gp();
+ LOAD_INSTANCE_FIELD(flag, HookOnFunctionCallAddress, kSystemPointerSize);
+ Label no_break;
+ __ Load(LiftoffRegister{flag}, flag, no_reg, 0, LoadType::kI32Load8U, {});
+ // Unary "equal" means "equals zero".
+ __ emit_cond_jump(kEqual, &no_break, kWasmI32, flag);
+ EmitBreakpoint(decoder);
+ __ bind(&no_break);
+ } else if (dead_breakpoint_ == decoder->position()) {
DCHECK(!next_breakpoint_ptr_ ||
*next_breakpoint_ptr_ != dead_breakpoint_);
// The top frame is paused at this position, but the breakpoint was
- // removed. Adding a dead breakpoint here ensures that the source position
- // exists, and that the offset to the return address is the same as in the
- // old code.
+ // removed. Adding a dead breakpoint here ensures that the source
+ // position exists, and that the offset to the return address is the
+ // same as in the old code.
Label cont;
__ emit_jump(&cont);
EmitBreakpoint(decoder);
@@ -843,7 +835,8 @@ class LiftoffCompiler {
#ifdef DEBUG
SLOW_DCHECK(__ ValidateCacheState());
if (WasmOpcodes::IsPrefixOpcode(opcode)) {
- opcode = decoder->read_prefixed_opcode<Decoder::kValidate>(decoder->pc());
+ opcode = decoder->read_prefixed_opcode<Decoder::kFullValidation>(
+ decoder->pc());
}
DEBUG_CODE_COMMENT(WasmOpcodes::OpcodeName(opcode));
#endif
@@ -1251,9 +1244,12 @@ class LiftoffCompiler {
int32_t imm = rhs_slot.i32_const();
LiftoffRegister lhs = __ PopToRegister();
+ // Either reuse {lhs} for {dst}, or choose a register (pair) which does
+ // not overlap, for easier code generation.
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(lhs);
LiftoffRegister dst = src_rc == result_rc
- ? __ GetUnusedRegister(result_rc, {lhs}, {})
- : __ GetUnusedRegister(result_rc, {});
+ ? __ GetUnusedRegister(result_rc, {lhs}, pinned)
+ : __ GetUnusedRegister(result_rc, pinned);
CallEmitFn(fnImm, dst, lhs, imm);
__ PushRegister(ValueType::Primitive(result_type), dst);
@@ -1632,7 +1628,7 @@ class LiftoffCompiler {
}
void RefNull(FullDecoder* decoder, ValueType type, Value*) {
- if (!FLAG_liftoff_extern_ref) {
+ if (!FLAG_experimental_liftoff_extern_ref) {
unsupported(decoder, kRefTypes, "ref_null");
return;
}
@@ -1815,7 +1811,7 @@ class LiftoffCompiler {
const GlobalIndexImmediate<validate>& imm) {
const auto* global = &env_->module->globals[imm.index];
if (!CheckSupportedType(decoder,
- FLAG_liftoff_extern_ref
+ FLAG_experimental_liftoff_extern_ref
? kSupportedTypes
: kSupportedTypesWithoutRefs,
global->type, "global")) {
@@ -1854,7 +1850,7 @@ class LiftoffCompiler {
const GlobalIndexImmediate<validate>& imm) {
auto* global = &env_->module->globals[imm.index];
if (!CheckSupportedType(decoder,
- FLAG_liftoff_extern_ref
+ FLAG_experimental_liftoff_extern_ref
? kSupportedTypes
: kSupportedTypesWithoutRefs,
global->type, "global")) {
@@ -2184,25 +2180,36 @@ class LiftoffCompiler {
__ SpillAllRegisters();
LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
- // Get one register for computing the address (offset + index).
- LiftoffRegister address = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- // Compute offset+index in address.
- __ LoadConstant(address, WasmValue(offset));
- __ emit_i32_add(address.gp(), address.gp(), index);
+ // Get one register for computing the effective offset (offset + index).
+ LiftoffRegister effective_offset =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ __ LoadConstant(effective_offset, WasmValue(offset));
+ __ emit_i32_add(effective_offset.gp(), effective_offset.gp(), index);
// Get a register to hold the stack slot for MemoryTracingInfo.
LiftoffRegister info = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
// Allocate stack slot for MemoryTracingInfo.
__ AllocateStackSlot(info.gp(), sizeof(MemoryTracingInfo));
+ // Reuse the {effective_offset} register for all information to be stored in
+ // the MemoryTracingInfo struct.
+ LiftoffRegister data = effective_offset;
+
// Now store all information into the MemoryTracingInfo struct.
- __ Store(info.gp(), no_reg, offsetof(MemoryTracingInfo, address), address,
- StoreType::kI32Store, pinned);
- __ LoadConstant(address, WasmValue(is_store ? 1 : 0));
- __ Store(info.gp(), no_reg, offsetof(MemoryTracingInfo, is_store), address,
+ if (kSystemPointerSize == 8) {
+ // Zero-extend the effective offset to u64.
+ CHECK(__ emit_type_conversion(kExprI64UConvertI32, data, effective_offset,
+ nullptr));
+ }
+ __ Store(
+ info.gp(), no_reg, offsetof(MemoryTracingInfo, offset), data,
+ kSystemPointerSize == 8 ? StoreType::kI64Store : StoreType::kI32Store,
+ pinned);
+ __ LoadConstant(data, WasmValue(is_store ? 1 : 0));
+ __ Store(info.gp(), no_reg, offsetof(MemoryTracingInfo, is_store), data,
StoreType::kI32Store8, pinned);
- __ LoadConstant(address, WasmValue(static_cast<int>(rep)));
- __ Store(info.gp(), no_reg, offsetof(MemoryTracingInfo, mem_rep), address,
+ __ LoadConstant(data, WasmValue(static_cast<int>(rep)));
+ __ Store(info.gp(), no_reg, offsetof(MemoryTracingInfo, mem_rep), data,
StoreType::kI32Store8, pinned);
WasmTraceMemoryDescriptor descriptor;
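The stores above fill a stack-allocated MemoryTracingInfo before calling the WasmTraceMemory builtin; the first field now carries the pointer-sized effective offset (static offset + dynamic index) instead of a 32-bit address. The layout implied by the offsetof/StoreType uses, as an inferred sketch (the authoritative definition lives in src/wasm/memory-tracing.h, which this patch also touches):

    #include <cstdint>

    struct MemoryTracingInfo {
      uintptr_t offset;  // effective offset, zero-extended on 64-bit targets
      uint8_t is_store;  // 1 for stores, 0 for loads
      uint8_t mem_rep;   // MachineRepresentation of the access, as a byte
    };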
@@ -2287,15 +2294,11 @@ class LiftoffCompiler {
return;
}
- if (transform == LoadTransformationKind::kZeroExtend) {
- unsupported(decoder, kSimd, "prototyping s128 load zero extend");
- return;
- }
-
LiftoffRegList pinned;
Register index = pinned.set(__ PopToRegister()).gp();
- // For load splats, LoadType is the size of the load, and for load
- // extends, LoadType is the size of the lane, and it always loads 8 bytes.
+ // For load splats and load zero, LoadType is the size of the load, and for
+ // load extends, LoadType is the size of the lane, and it always loads 8
+ // bytes.
uint32_t access_size =
transform == LoadTransformationKind::kExtend ? 8 : type.size();
if (BoundsCheckMem(decoder, access_size, imm.offset, index, pinned,
@@ -2330,6 +2333,12 @@ class LiftoffCompiler {
}
}
+ void LoadLane(FullDecoder* decoder, LoadType type, const Value& value,
+ const Value& index, const MemoryAccessImmediate<validate>& imm,
+ const uint8_t laneidx, Value* result) {
+ unsupported(decoder, kSimd, "simd load lane");
+ }
+
void StoreMem(FullDecoder* decoder, StoreType type,
const MemoryAccessImmediate<validate>& imm,
const Value& index_val, const Value& value_val) {
@@ -2364,6 +2373,12 @@ class LiftoffCompiler {
}
}
+ void StoreLane(FullDecoder* decoder, StoreType type,
+ const MemoryAccessImmediate<validate>& imm, const Value& index,
+ const Value& value, const uint8_t laneidx) {
+ unsupported(decoder, kSimd, "simd store lane");
+ }
+
void CurrentMemoryPages(FullDecoder* decoder, Value* result) {
Register mem_size = __ GetUnusedRegister(kGpReg, {}).gp();
LOAD_INSTANCE_FIELD(mem_size, MemorySize, kSystemPointerSize);
@@ -2658,20 +2673,16 @@ class LiftoffCompiler {
&LiftoffAssembler::emit_i8x16_shri_u);
case wasm::kExprI8x16Add:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_add);
- case wasm::kExprI8x16AddSaturateS:
- return EmitBinOp<kS128, kS128>(
- &LiftoffAssembler::emit_i8x16_add_saturate_s);
- case wasm::kExprI8x16AddSaturateU:
- return EmitBinOp<kS128, kS128>(
- &LiftoffAssembler::emit_i8x16_add_saturate_u);
+ case wasm::kExprI8x16AddSatS:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_add_sat_s);
+ case wasm::kExprI8x16AddSatU:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_add_sat_u);
case wasm::kExprI8x16Sub:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_sub);
- case wasm::kExprI8x16SubSaturateS:
- return EmitBinOp<kS128, kS128>(
- &LiftoffAssembler::emit_i8x16_sub_saturate_s);
- case wasm::kExprI8x16SubSaturateU:
- return EmitBinOp<kS128, kS128>(
- &LiftoffAssembler::emit_i8x16_sub_saturate_u);
+ case wasm::kExprI8x16SubSatS:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_sub_sat_s);
+ case wasm::kExprI8x16SubSatU:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_sub_sat_u);
case wasm::kExprI8x16Mul:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_mul);
case wasm::kExprI8x16MinS:
@@ -2701,20 +2712,16 @@ class LiftoffCompiler {
&LiftoffAssembler::emit_i16x8_shri_u);
case wasm::kExprI16x8Add:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_add);
- case wasm::kExprI16x8AddSaturateS:
- return EmitBinOp<kS128, kS128>(
- &LiftoffAssembler::emit_i16x8_add_saturate_s);
- case wasm::kExprI16x8AddSaturateU:
- return EmitBinOp<kS128, kS128>(
- &LiftoffAssembler::emit_i16x8_add_saturate_u);
+ case wasm::kExprI16x8AddSatS:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_add_sat_s);
+ case wasm::kExprI16x8AddSatU:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_add_sat_u);
case wasm::kExprI16x8Sub:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_sub);
- case wasm::kExprI16x8SubSaturateS:
- return EmitBinOp<kS128, kS128>(
- &LiftoffAssembler::emit_i16x8_sub_saturate_s);
- case wasm::kExprI16x8SubSaturateU:
- return EmitBinOp<kS128, kS128>(
- &LiftoffAssembler::emit_i16x8_sub_saturate_u);
+ case wasm::kExprI16x8SubSatS:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_sub_sat_s);
+ case wasm::kExprI16x8SubSatU:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_sub_sat_u);
case wasm::kExprI16x8Mul:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_mul);
case wasm::kExprI16x8MinS:
@@ -2756,6 +2763,9 @@ class LiftoffCompiler {
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_max_s);
case wasm::kExprI32x4MaxU:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_max_u);
+ case wasm::kExprI32x4DotI16x8S:
+ return EmitBinOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i32x4_dot_i16x8_s);
case wasm::kExprI64x2Neg:
return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_neg);
case wasm::kExprI64x2Shl:
@@ -3238,13 +3248,15 @@ class LiftoffCompiler {
uint32_t offset = imm.offset;
index_reg = AddMemoryMasking(index_reg, &offset, &pinned);
- Register index_plus_offset = index_reg;
+ Register index_plus_offset =
+ __ cache_state()->is_used(LiftoffRegister(index_reg))
+ ? pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp()
+ : index_reg;
if (offset) {
- if (__ cache_state()->is_used(LiftoffRegister(index_reg))) {
- index_plus_offset =
- pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- }
__ emit_i32_addi(index_plus_offset, index_reg, offset);
+ __ emit_ptrsize_zeroextend_i32(index_plus_offset, index_plus_offset);
+ } else {
+ __ emit_ptrsize_zeroextend_i32(index_plus_offset, index_reg);
}
LiftoffAssembler::VarState timeout =
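The atomic wait and notify paths now zero-extend the 32-bit index-plus-offset to pointer size before passing it to the builtin (the first signature slot becomes kPointerValueType in the hunks below). Why an unsigned widening matters, as a small sketch (illustrative helper, not part of the patch):

    #include <cstdint>

    // A wasm memory index is unsigned; sign-extending an index >= 2^31 would
    // produce a bogus negative pointer-sized offset, so widen it unsigned.
    uintptr_t EffectiveOffset(uint32_t index, uint32_t offset) {
      return uintptr_t{index} + uintptr_t{offset};
    }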
@@ -3285,7 +3297,7 @@ class LiftoffCompiler {
}
}
- ValueType sig_reps[] = {kWasmI32, type, kWasmI64};
+ ValueType sig_reps[] = {kPointerValueType, type, kWasmI64};
FunctionSig sig(0, 3, sig_reps);
__ PrepareBuiltinCall(&sig, call_descriptor,
@@ -3313,16 +3325,18 @@ class LiftoffCompiler {
uint32_t offset = imm.offset;
index_reg = AddMemoryMasking(index_reg, &offset, &pinned);
- Register index_plus_offset = index_reg;
+ Register index_plus_offset =
+ __ cache_state()->is_used(LiftoffRegister(index_reg))
+ ? pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp()
+ : index_reg;
if (offset) {
- if (__ cache_state()->is_used(LiftoffRegister(index_reg))) {
- index_plus_offset =
- pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- }
__ emit_i32_addi(index_plus_offset, index_reg, offset);
+ __ emit_ptrsize_zeroextend_i32(index_plus_offset, index_plus_offset);
+ } else {
+ __ emit_ptrsize_zeroextend_i32(index_plus_offset, index_reg);
}
- ValueType sig_reps[] = {kWasmI32, kWasmI32, kWasmI32};
+ ValueType sig_reps[] = {kWasmI32, kPointerValueType, kWasmI32};
FunctionSig sig(1, 2, sig_reps);
auto call_descriptor =
GetBuiltinCallDescriptor<WasmAtomicNotifyDescriptor>(compilation_zone_);
@@ -3806,7 +3820,7 @@ class LiftoffCompiler {
const Value args[], Value returns[], CallKind call_kind) {
for (ValueType ret : imm.sig->returns()) {
if (!CheckSupportedType(decoder,
- FLAG_liftoff_extern_ref
+ FLAG_experimental_liftoff_extern_ref
? kSupportedTypes
: kSupportedTypesWithoutRefs,
ret, "return")) {
@@ -3888,7 +3902,7 @@ class LiftoffCompiler {
}
for (ValueType ret : imm.sig->returns()) {
if (!CheckSupportedType(decoder,
- FLAG_liftoff_extern_ref
+ FLAG_experimental_liftoff_extern_ref
? kSupportedTypes
: kSupportedTypesWithoutRefs,
ret, "return")) {
@@ -3915,9 +3929,10 @@ class LiftoffCompiler {
// Bounds check against the table size.
Label* invalid_func_label = AddOutOfLineTrap(
- decoder->position(), WasmCode::kThrowWasmTrapFuncInvalid);
+ decoder->position(), WasmCode::kThrowWasmTrapTableOutOfBounds);
- uint32_t canonical_sig_num = env_->module->signature_ids[imm.sig_index];
+ uint32_t canonical_sig_num =
+ env_->module->canonicalized_type_ids[imm.sig_index];
DCHECK_GE(canonical_sig_num, 0);
DCHECK_GE(kMaxInt, canonical_sig_num);
@@ -4057,6 +4072,11 @@ class LiftoffCompiler {
// address in OSR is correct.
int dead_breakpoint_ = 0;
+ // Remember whether the "hook on function call" has already been checked.
+ // This happens at the first breakable opcode in the function (if compiling
+ // for debugging).
+ bool checked_hook_on_function_call_ = false;
+
bool has_outstanding_op() const {
return outstanding_op_ != kNoOutstandingOp;
}
@@ -4094,15 +4114,11 @@ WasmCompilationResult ExecuteLiftoffCompilation(
std::unique_ptr<DebugSideTable>* debug_sidetable, int dead_breakpoint) {
int func_body_size = static_cast<int>(func_body.end - func_body.start);
TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
- "wasm.CompileBaseline", "func_index", func_index, "body_size",
+ "wasm.CompileBaseline", "funcIndex", func_index, "bodySize",
func_body_size);
Zone zone(allocator, "LiftoffCompilationZone");
auto call_descriptor = compiler::GetWasmCallDescriptor(&zone, func_body.sig);
- base::Optional<TimedHistogramScope> liftoff_compile_time_scope;
- if (counters) {
- liftoff_compile_time_scope.emplace(counters->liftoff_compile_time());
- }
size_t code_size_estimate =
WasmCodeManager::EstimateLiftoffCodeSize(func_body_size);
// Allocate the initial buffer a bit bigger to avoid reallocation during code
@@ -4115,18 +4131,14 @@ WasmCompilationResult ExecuteLiftoffCompilation(
if (debug_sidetable) {
debug_sidetable_builder = std::make_unique<DebugSideTableBuilder>();
}
- WasmFullDecoder<Decoder::kValidate, LiftoffCompiler> decoder(
+ WasmFullDecoder<Decoder::kBooleanValidation, LiftoffCompiler> decoder(
&zone, env->module, env->enabled_features, detected, func_body,
call_descriptor, env, &zone, instruction_buffer->CreateView(),
debug_sidetable_builder.get(), for_debugging, func_index, breakpoints,
dead_breakpoint);
decoder.Decode();
- liftoff_compile_time_scope.reset();
LiftoffCompiler* compiler = &decoder.interface();
- if (decoder.failed()) {
- compiler->OnFirstError(&decoder);
- return WasmCompilationResult{};
- }
+ if (decoder.failed()) compiler->OnFirstError(&decoder);
if (counters) {
// Check that the histogram for the bailout reasons has the correct size.
@@ -4172,7 +4184,7 @@ std::unique_ptr<DebugSideTable> GenerateLiftoffDebugSideTable(
auto call_descriptor = compiler::GetWasmCallDescriptor(&zone, func_body.sig);
DebugSideTableBuilder debug_sidetable_builder;
WasmFeatures detected;
- WasmFullDecoder<Decoder::kValidate, LiftoffCompiler> decoder(
+ WasmFullDecoder<Decoder::kBooleanValidation, LiftoffCompiler> decoder(
&zone, env->module, env->enabled_features, &detected, func_body,
call_descriptor, env, &zone,
NewAssemblerBuffer(AssemblerBase::kDefaultBufferSize),
diff --git a/deps/v8/src/wasm/baseline/liftoff-register.h b/deps/v8/src/wasm/baseline/liftoff-register.h
index 49aac008f0..285af7dac0 100644
--- a/deps/v8/src/wasm/baseline/liftoff-register.h
+++ b/deps/v8/src/wasm/baseline/liftoff-register.h
@@ -137,8 +137,8 @@ static_assert(2 * kBitsPerGpRegCode >= kBitsPerFpRegCode,
class LiftoffRegister {
static constexpr int needed_bits =
- Max(kNeedI64RegPair || kNeedS128RegPair ? kBitsPerRegPair : 0,
- kBitsPerLiftoffRegCode);
+ std::max(kNeedI64RegPair || kNeedS128RegPair ? kBitsPerRegPair : 0,
+ kBitsPerLiftoffRegCode);
using storage_t = std::conditional<
needed_bits <= 8, uint8_t,
std::conditional<needed_bits <= 16, uint16_t, uint32_t>::type>::type;
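Note: needed_bits now uses std::max and feeds the std::conditional chain that picks LiftoffRegister's storage type. A self-contained sketch of that narrowest-storage-type pattern (bit counts invented for illustration):

    #include <cstdint>
    #include <type_traits>

    // Pick the narrowest unsigned type that can hold `bits` bits, mirroring
    // the std::conditional chain used for LiftoffRegister::storage_t.
    template <int bits>
    using SmallestStorage = typename std::conditional<
        bits <= 8, uint8_t,
        typename std::conditional<bits <= 16, uint16_t, uint32_t>::type>::type;

    static_assert(sizeof(SmallestStorage<6>) == 1, "6 bits fit in one byte");
    static_assert(sizeof(SmallestStorage<11>) == 2, "11 bits need two bytes");
    static_assert(sizeof(SmallestStorage<20>) == 4, "20 bits need four bytes");

    int main() { return 0; }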
diff --git a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index 97b8487848..5c78eca319 100644
--- a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -360,16 +360,16 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
}
}
-void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
+void LiftoffAssembler::LoadFromInstance(Register dst, int32_t offset,
int size) {
- DCHECK_LE(offset, kMaxInt);
+ DCHECK_LE(0, offset);
lw(dst, liftoff::GetInstanceOperand());
DCHECK_EQ(4, size);
lw(dst, MemOperand(dst, offset));
}
void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
- uint32_t offset) {
+ int32_t offset) {
LoadFromInstance(dst, offset, kTaggedSize);
}
@@ -1883,16 +1883,16 @@ void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "emit_i8x16_add");
}
-void LiftoffAssembler::emit_i8x16_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_add_saturate_s");
+void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_add_sat_s");
}
-void LiftoffAssembler::emit_i8x16_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_add_saturate_u");
+void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_add_sat_u");
}
void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
@@ -1900,16 +1900,16 @@ void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "emit_i8x16_sub");
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_sub_saturate_s");
+void LiftoffAssembler::emit_i8x16_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_sub_sat_s");
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_sub_saturate_u");
+void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_sub_sat_u");
}
void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
@@ -1998,16 +1998,16 @@ void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "emit_i16x8_add");
}
-void LiftoffAssembler::emit_i16x8_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_add_saturate_s");
+void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_add_sat_s");
}
-void LiftoffAssembler::emit_i16x8_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_add_saturate_u");
+void LiftoffAssembler::emit_i16x8_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_add_sat_u");
}
void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2015,16 +2015,16 @@ void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "emit_i16x8_sub");
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_sub_saturate_s");
+void LiftoffAssembler::emit_i16x8_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_sub_sat_s");
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_sub_saturate_u");
+void LiftoffAssembler::emit_i16x8_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_sub_sat_u");
}
void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2147,6 +2147,12 @@ void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
bailout(kSimd, "emit_i32x4_max_u");
}
+void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_dot_i16x8_s");
+}
+
void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "emit_i64x2_neg");
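Note: the MIPS32 port only renames these emitters (add_saturate/sub_saturate to add_sat/sub_sat, matching the SIMD proposal's i8x16.add_sat_s naming) and keeps bailing out. For reference, the lane semantics the renamed emitters stand for are ordinary saturating integer arithmetic; a scalar sketch for one signed 8-bit lane:

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    // Scalar reference for one lane of i8x16.add_sat_s: clamp to [-128, 127]
    // instead of wrapping on overflow.
    int8_t add_sat_s_i8(int8_t a, int8_t b) {
      int sum = int{a} + int{b};                 // cannot overflow in int
      sum = std::min(127, std::max(-128, sum));  // saturate
      return static_cast<int8_t>(sum);
    }

    int main() {
      assert(add_sat_s_i8(100, 100) == 127);    // saturates instead of wrapping
      assert(add_sat_s_i8(-100, -100) == -128);
      assert(add_sat_s_i8(3, 4) == 7);
      return 0;
    }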
diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index 4c6c1fe1ce..b97c49437f 100644
--- a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -5,6 +5,7 @@
#ifndef V8_WASM_BASELINE_MIPS64_LIFTOFF_ASSEMBLER_MIPS64_H_
#define V8_WASM_BASELINE_MIPS64_LIFTOFF_ASSEMBLER_MIPS64_H_
+#include "src/heap/memory-chunk.h"
#include "src/wasm/baseline/liftoff-assembler.h"
namespace v8 {
@@ -339,9 +340,9 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
}
}
-void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
+void LiftoffAssembler::LoadFromInstance(Register dst, int32_t offset,
int size) {
- DCHECK_LE(offset, kMaxInt);
+ DCHECK_LE(0, offset);
Ld(dst, liftoff::GetInstanceOperand());
DCHECK(size == 4 || size == 8);
if (size == 4) {
@@ -352,7 +353,7 @@ void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
}
void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
- uint32_t offset) {
+ int32_t offset) {
LoadFromInstance(dst, offset, kTaggedSize);
}
@@ -378,7 +379,27 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
int32_t offset_imm,
LiftoffRegister src,
LiftoffRegList pinned) {
- bailout(kRefTypes, "GlobalSet");
+ DCHECK_GE(offset_imm, 0);
+ DCHECK_LE(offset_imm, std::numeric_limits<int32_t>::max());
+ STATIC_ASSERT(kTaggedSize == kInt64Size);
+ Register scratch = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
+ Sd(src.gp(), MemOperand(dst_addr, offset_imm));
+
+ Label write_barrier;
+ Label exit;
+ CheckPageFlag(dst_addr, scratch,
+ MemoryChunk::kPointersFromHereAreInterestingMask, ne,
+ &write_barrier);
+ b(&exit);
+ bind(&write_barrier);
+ JumpIfSmi(src.gp(), &exit);
+ CheckPageFlag(src.gp(), scratch,
+ MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ &exit);
+ Daddu(scratch, dst_addr, offset_imm);
+ CallRecordWriteStub(dst_addr, scratch, EMIT_REMEMBERED_SET, kSaveFPRegs,
+ wasm::WasmCode::kRecordWrite);
+ bind(&exit);
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
@@ -1487,6 +1508,16 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
fill_d(dst_msa, scratch);
ilvr_w(dst_msa, kSimd128RegZero, dst_msa);
}
+ } else if (transform == LoadTransformationKind::kZeroExtend) {
+ xor_v(dst_msa, dst_msa, dst_msa);
+ if (memtype == MachineType::Int32()) {
+ Lwu(scratch, src_op);
+ insert_w(dst_msa, 0, scratch);
+ } else {
+ DCHECK_EQ(MachineType::Int64(), memtype);
+ Ld(scratch, src_op);
+ insert_d(dst_msa, 0, scratch);
+ }
} else {
DCHECK_EQ(LoadTransformationKind::kSplat, transform);
if (memtype == MachineType::Int8()) {
@@ -1841,15 +1872,15 @@ void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
addv_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
-void LiftoffAssembler::emit_i8x16_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
adds_s_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
-void LiftoffAssembler::emit_i8x16_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
adds_u_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
@@ -1858,15 +1889,15 @@ void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
subv_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
subs_s_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
subs_u_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
@@ -1970,15 +2001,15 @@ void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
addv_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
-void LiftoffAssembler::emit_i16x8_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
adds_s_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
-void LiftoffAssembler::emit_i16x8_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
adds_u_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
@@ -1987,15 +2018,15 @@ void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
subv_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
subs_s_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
subs_u_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
@@ -2131,6 +2162,12 @@ void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
max_u_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
+void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ dotp_s_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
+}
+
void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
LiftoffRegister src) {
xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
@@ -2264,6 +2301,8 @@ void LiftoffAssembler::emit_f32x4_min(LiftoffRegister dst, LiftoffRegister lhs,
// dst = (scratch1 <= scratch0) ? scratch1 : scratch0.
fsle_w(dst_msa, scratch1, scratch0);
bsel_v(dst_msa, scratch0, scratch1);
+ // Canonicalize the result.
+ fmin_w(dst_msa, dst_msa, dst_msa);
}
void LiftoffAssembler::emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2284,6 +2323,8 @@ void LiftoffAssembler::emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs,
// dst = (scratch0 <= scratch1) ? scratch1 : scratch0.
fsle_w(dst_msa, scratch0, scratch1);
bsel_v(dst_msa, scratch0, scratch1);
+ // Canonicalize the result.
+ fmax_w(dst_msa, dst_msa, dst_msa);
}
void LiftoffAssembler::emit_f32x4_pmin(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2383,6 +2424,8 @@ void LiftoffAssembler::emit_f64x2_min(LiftoffRegister dst, LiftoffRegister lhs,
// dst = (scratch1 <= scratch0) ? scratch1 : scratch0.
fsle_d(dst_msa, scratch1, scratch0);
bsel_v(dst_msa, scratch0, scratch1);
+ // Canonicalize the result.
+ fmin_d(dst_msa, dst_msa, dst_msa);
}
void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2403,6 +2446,8 @@ void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
// dst = (scratch0 <= scratch1) ? scratch1 : scratch0.
fsle_d(dst_msa, scratch0, scratch1);
bsel_v(dst_msa, scratch0, scratch1);
+ // Canonicalize the result.
+ fmax_d(dst_msa, dst_msa, dst_msa);
}
void LiftoffAssembler::emit_f64x2_pmin(LiftoffRegister dst, LiftoffRegister lhs,
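Note: the extra fmin_w/fmax_w (and fmin_d/fmax_d) of the result with itself, per the comments added in these hunks, canonicalizes NaNs left behind by the fsle/bsel lane selection: wasm's f32x4.min/max must produce a canonical quiet NaN whenever an input lane is NaN. A scalar model of the required per-lane behavior (not the MSA instruction sequence itself):

    #include <cassert>
    #include <cmath>
    #include <limits>

    // One lane of wasm f32x4.min: NaN in -> canonical quiet NaN out,
    // otherwise the smaller value, with -0.0 ordered below +0.0.
    float wasm_f32_min(float a, float b) {
      if (std::isnan(a) || std::isnan(b)) {
        return std::numeric_limits<float>::quiet_NaN();  // canonical NaN
      }
      if (a == b) return std::signbit(a) ? a : b;  // prefer -0.0 over +0.0
      return a < b ? a : b;
    }

    int main() {
      assert(std::isnan(wasm_f32_min(std::nanf(""), 1.0f)));
      float m = wasm_f32_min(-0.0f, 0.0f);
      assert(std::signbit(m));                 // -0.0 is the minimum
      assert(wasm_f32_min(1.0f, 2.0f) == 1.0f);
      return 0;
    }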
diff --git a/deps/v8/src/wasm/baseline/ppc/OWNERS b/deps/v8/src/wasm/baseline/ppc/OWNERS
index 6edd45a6ef..02c2cd757c 100644
--- a/deps/v8/src/wasm/baseline/ppc/OWNERS
+++ b/deps/v8/src/wasm/baseline/ppc/OWNERS
@@ -2,3 +2,4 @@ junyan@redhat.com
joransiu@ca.ibm.com
midawson@redhat.com
mfarazma@redhat.com
+vasili.skurydzin@ibm.com
diff --git a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index ef7b720ea9..f75e9db459 100644
--- a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -88,13 +88,11 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
bailout(kUnsupportedArchitecture, "LoadConstant");
}
-void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
- int size) {
+void LiftoffAssembler::LoadFromInstance(Register dst, int offset, int size) {
bailout(kUnsupportedArchitecture, "LoadFromInstance");
}
-void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
- uint32_t offset) {
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst, int offset) {
bailout(kUnsupportedArchitecture, "LoadTaggedPointerFromInstance");
}
@@ -944,6 +942,12 @@ void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i32x4_max_u");
}
+void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i32x4_dot_i16x8_s");
+}
+
void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kUnsupportedArchitecture, "emit_i16x8splat");
@@ -1006,9 +1010,9 @@ void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_i16x8add");
}
-void LiftoffAssembler::emit_i16x8_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8addsaturate_s");
}
@@ -1017,15 +1021,15 @@ void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_i16x8sub");
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8subsaturate_s");
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8subsaturate_u");
}
@@ -1034,9 +1038,9 @@ void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_i16x8mul");
}
-void LiftoffAssembler::emit_i16x8_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8addsaturate_u");
}
@@ -1172,9 +1176,9 @@ void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_i8x16add");
}
-void LiftoffAssembler::emit_i8x16_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i8x16addsaturate_s");
}
@@ -1485,15 +1489,15 @@ void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_i8x16sub");
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i8x16subsaturate_s");
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i8x16subsaturate_u");
}
@@ -1502,9 +1506,9 @@ void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_i8x16mul");
}
-void LiftoffAssembler::emit_i8x16_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i8x16addsaturate_u");
}
diff --git a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index dc6ce2f0b3..a88baa1146 100644
--- a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -87,13 +87,11 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
bailout(kUnsupportedArchitecture, "LoadConstant");
}
-void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
- int size) {
+void LiftoffAssembler::LoadFromInstance(Register dst, int offset, int size) {
bailout(kUnsupportedArchitecture, "LoadFromInstance");
}
-void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
- uint32_t offset) {
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst, int offset) {
bailout(kUnsupportedArchitecture, "LoadTaggedPointerFromInstance");
}
@@ -948,6 +946,12 @@ void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i32x4_max_u");
}
+void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i32x4_dot_i16x8_s");
+}
+
void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kUnsupportedArchitecture, "emit_i16x8splat");
@@ -1010,9 +1014,9 @@ void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_i16x8add");
}
-void LiftoffAssembler::emit_i16x8_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8addsaturate_s");
}
@@ -1021,15 +1025,15 @@ void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_i16x8sub");
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8subsaturate_s");
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8subsaturate_u");
}
@@ -1038,9 +1042,9 @@ void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_i16x8mul");
}
-void LiftoffAssembler::emit_i16x8_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8addsaturate_u");
}
@@ -1176,9 +1180,9 @@ void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_i8x16add");
}
-void LiftoffAssembler::emit_i8x16_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i8x16addsaturate_s");
}
@@ -1187,15 +1191,15 @@ void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_i8x16sub");
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i8x16subsaturate_s");
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i8x16subsaturate_u");
}
@@ -1204,9 +1208,9 @@ void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_i8x16mul");
}
-void LiftoffAssembler::emit_i8x16_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i8x16addsaturate_u");
}
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index 713a1ce72a..a64b0e2e37 100644
--- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -236,11 +236,10 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
}
}
-void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
- int size) {
- DCHECK_LE(offset, kMaxInt);
- movq(dst, liftoff::GetInstanceOperand());
+void LiftoffAssembler::LoadFromInstance(Register dst, int offset, int size) {
+ DCHECK_LE(0, offset);
DCHECK(size == 4 || size == 8);
+ movq(dst, liftoff::GetInstanceOperand());
if (size == 4) {
movl(dst, Operand(dst, offset));
} else {
@@ -248,9 +247,8 @@ void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
}
}
-void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
- uint32_t offset) {
- DCHECK_LE(offset, kMaxInt);
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst, int offset) {
+ DCHECK_LE(0, offset);
movq(dst, liftoff::GetInstanceOperand());
LoadTaggedPointerField(dst, Operand(dst, offset));
}
@@ -2232,11 +2230,11 @@ void EmitI64x2ShrS(LiftoffAssembler* assm, LiftoffRegister dst,
assm->Pextrq(tmp, lhs.fp(), int8_t{0x0});
assm->sarq_cl(tmp);
- assm->Pinsrq(dst.fp(), tmp, int8_t{0x0});
+ assm->Pinsrq(dst.fp(), tmp, uint8_t{0x0});
assm->Pextrq(tmp, lhs.fp(), int8_t{0x1});
assm->sarq_cl(tmp);
- assm->Pinsrq(dst.fp(), tmp, int8_t{0x1});
+ assm->Pinsrq(dst.fp(), tmp, uint8_t{0x1});
// restore rcx.
if (restore_rcx) {
@@ -2289,14 +2287,21 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
} else if (memtype == MachineType::Uint32()) {
Pmovzxdq(dst.fp(), src_op);
}
+ } else if (transform == LoadTransformationKind::kZeroExtend) {
+ if (memtype == MachineType::Int32()) {
+ Movss(dst.fp(), src_op);
+ } else {
+ DCHECK_EQ(MachineType::Int64(), memtype);
+ Movsd(dst.fp(), src_op);
+ }
} else {
DCHECK_EQ(LoadTransformationKind::kSplat, transform);
if (memtype == MachineType::Int8()) {
- Pinsrb(dst.fp(), src_op, 0);
+ Pinsrb(dst.fp(), dst.fp(), src_op, 0);
Pxor(kScratchDoubleReg, kScratchDoubleReg);
Pshufb(dst.fp(), kScratchDoubleReg);
} else if (memtype == MachineType::Int16()) {
- Pinsrw(dst.fp(), src_op, 0);
+ Pinsrw(dst.fp(), dst.fp(), src_op, 0);
Pshuflw(dst.fp(), dst.fp(), uint8_t{0});
Punpcklqdq(dst.fp(), dst.fp());
} else if (memtype == MachineType::Int32()) {
@@ -2304,8 +2309,8 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
CpuFeatureScope avx_scope(this, AVX);
vbroadcastss(dst.fp(), src_op);
} else {
- Movss(dst.fp(), src_op);
- Shufps(dst.fp(), dst.fp(), byte{0});
+ movss(dst.fp(), src_op);
+ shufps(dst.fp(), dst.fp(), byte{0});
}
} else if (memtype == MachineType::Int64()) {
Movddup(dst.fp(), src_op);
@@ -2324,22 +2329,10 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
wasm::SimdShuffle::Pack16Lanes(imms, shuffle);
TurboAssembler::Move(kScratchDoubleReg, make_uint64(imms[3], imms[2]),
make_uint64(imms[1], imms[0]));
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpshufb(dst.fp(), lhs.fp(), kScratchDoubleReg);
- } else {
- if (dst != lhs) {
- movups(dst.fp(), lhs.fp());
- }
- pshufb(dst.fp(), kScratchDoubleReg);
- }
+ Pshufb(dst.fp(), lhs.fp(), kScratchDoubleReg);
return;
}
- LiftoffRegister tmp_simd =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst, lhs, rhs));
- Movups(kScratchDoubleReg, lhs.fp());
-
uint64_t mask1[2] = {};
for (int i = 15; i >= 0; i--) {
uint8_t lane = shuffle[i];
@@ -2347,10 +2340,8 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
mask1[j] <<= 8;
mask1[j] |= lane < kSimd128Size ? lane : 0x80;
}
- TurboAssembler::Move(tmp_simd.fp(), mask1[0]);
- movq(kScratchRegister, mask1[1]);
- Pinsrq(tmp_simd.fp(), kScratchRegister, int8_t{1});
- Pshufb(kScratchDoubleReg, tmp_simd.fp());
+ TurboAssembler::Move(liftoff::kScratchDoubleReg2, mask1[1], mask1[0]);
+ Pshufb(kScratchDoubleReg, lhs.fp(), liftoff::kScratchDoubleReg2);
uint64_t mask2[2] = {};
for (int i = 15; i >= 0; i--) {
@@ -2359,14 +2350,9 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
mask2[j] <<= 8;
mask2[j] |= lane >= kSimd128Size ? (lane & 0x0F) : 0x80;
}
- TurboAssembler::Move(tmp_simd.fp(), mask2[0]);
- movq(kScratchRegister, mask2[1]);
- Pinsrq(tmp_simd.fp(), kScratchRegister, int8_t{1});
+ TurboAssembler::Move(liftoff::kScratchDoubleReg2, mask2[1], mask2[0]);
- if (dst.fp() != rhs.fp()) {
- Movups(dst.fp(), rhs.fp());
- }
- Pshufb(dst.fp(), tmp_simd.fp());
+ Pshufb(dst.fp(), rhs.fp(), liftoff::kScratchDoubleReg2);
Por(dst.fp(), kScratchDoubleReg);
}
@@ -2379,10 +2365,7 @@ void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
TurboAssembler::Move(mask, uint32_t{0x70707070});
Pshufd(mask, mask, uint8_t{0x0});
Paddusb(mask, rhs.fp());
- if (lhs != dst) {
- Movaps(dst.fp(), lhs.fp());
- }
- Pshufb(dst.fp(), mask);
+ Pshufb(dst.fp(), lhs.fp(), mask);
}
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
@@ -2413,10 +2396,7 @@ void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
LiftoffRegister src) {
- if (dst.fp() != src.fp()) {
- Movss(dst.fp(), src.fp());
- }
- Shufps(dst.fp(), src.fp(), static_cast<byte>(0));
+ Shufps(dst.fp(), src.fp(), 0);
}
void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
@@ -2659,7 +2639,7 @@ void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
memcpy(vals, imms, sizeof(vals));
TurboAssembler::Move(dst.fp(), vals[0]);
movq(kScratchRegister, vals[1]);
- Pinsrq(dst.fp(), kScratchRegister, int8_t{1});
+ Pinsrq(dst.fp(), kScratchRegister, uint8_t{1});
}
void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
@@ -2827,16 +2807,16 @@ void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i8x16_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddsb, &Assembler::paddsb>(
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i8x16_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddusb, &Assembler::paddusb>(
this, dst, lhs, rhs);
}
@@ -2847,16 +2827,16 @@ void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpsubsb, &Assembler::psubsb>(
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpsubusb,
&Assembler::psubusb>(this, dst, lhs,
rhs);
@@ -3025,16 +3005,16 @@ void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i16x8_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddsw, &Assembler::paddsw>(
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i16x8_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddusw, &Assembler::paddusw>(
this, dst, lhs, rhs);
}
@@ -3045,16 +3025,16 @@ void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpsubsw, &Assembler::psubsw>(
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpsubusw,
&Assembler::psubusw>(this, dst, lhs,
rhs);
@@ -3204,6 +3184,13 @@ void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
this, dst, lhs, rhs, base::Optional<CpuFeature>(SSE4_1));
}
+void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmaddwd, &Assembler::pmaddwd>(
+ this, dst, lhs, rhs);
+}
+
void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
LiftoffRegister src) {
DoubleRegister reg = dst.fp() == src.fp() ? kScratchDoubleReg : dst.fp();
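Note: on x64 the new i32x4.dot_i16x8_s maps directly onto (v)pmaddwd, which multiplies adjacent signed 16-bit lanes and adds each pair into one 32-bit lane. A scalar reference for a single output lane (ignoring the one overflowing corner case where all four inputs are -32768):

    #include <cassert>
    #include <cstdint>

    // One output lane of i32x4.dot_i16x8_s: multiply two adjacent signed
    // 16-bit lane pairs and sum the 32-bit products (what pmaddwd computes).
    int32_t dot_lane(int16_t a0, int16_t a1, int16_t b0, int16_t b1) {
      return int32_t{a0} * b0 + int32_t{a1} * b1;
    }

    int main() {
      assert(dot_lane(1, 2, 3, 4) == 11);           // 1*3 + 2*4
      assert(dot_lane(-5, 7, 1000, 1000) == 2000);  // (-5 + 7) * 1000
      return 0;
    }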
diff --git a/deps/v8/src/wasm/c-api.cc b/deps/v8/src/wasm/c-api.cc
index 0bb6552943..f79833464d 100644
--- a/deps/v8/src/wasm/c-api.cc
+++ b/deps/v8/src/wasm/c-api.cc
@@ -209,8 +209,7 @@ auto seal(const typename implement<C>::type* x) -> const C* {
// Configuration
-struct ConfigImpl {
-};
+struct ConfigImpl {};
template <>
struct implement<Config> {
@@ -888,8 +887,8 @@ own<Instance> GetInstance(StoreImpl* store,
own<Frame> CreateFrameFromInternal(i::Handle<i::FixedArray> frames, int index,
i::Isolate* isolate, StoreImpl* store) {
- i::Handle<i::StackTraceFrame> frame(i::StackTraceFrame::cast(frames->get(0)),
- isolate);
+ i::Handle<i::StackTraceFrame> frame(
+ i::StackTraceFrame::cast(frames->get(index)), isolate);
i::Handle<i::WasmInstanceObject> instance =
i::StackTraceFrame::GetWasmInstance(frame);
uint32_t func_index = i::StackTraceFrame::GetWasmFunctionIndex(frame);
@@ -1511,7 +1510,8 @@ auto Func::call(const Val args[], Val results[]) const -> own<Trap> {
auto store = func->store();
auto isolate = store->i_isolate();
i::HandleScope handle_scope(isolate);
- i::Object raw_function_data = func->v8_object()->shared().function_data();
+ i::Object raw_function_data =
+ func->v8_object()->shared().function_data(v8::kAcquireLoad);
// WasmCapiFunctions can be called directly.
if (raw_function_data.IsWasmCapiFunctionData()) {
@@ -1544,7 +1544,7 @@ auto Func::call(const Val args[], Val results[]) const -> own<Trap> {
if (object_ref->IsTuple2()) {
i::JSFunction jsfunc =
i::JSFunction::cast(i::Tuple2::cast(*object_ref).value2());
- i::Object data = jsfunc.shared().function_data();
+ i::Object data = jsfunc.shared().function_data(v8::kAcquireLoad);
if (data.IsWasmCapiFunctionData()) {
return CallWasmCapiFunction(i::WasmCapiFunctionData::cast(data), args,
results);
diff --git a/deps/v8/src/wasm/decoder.h b/deps/v8/src/wasm/decoder.h
index 86cec955b9..458b564313 100644
--- a/deps/v8/src/wasm/decoder.h
+++ b/deps/v8/src/wasm/decoder.h
@@ -13,7 +13,6 @@
#include "src/base/memory.h"
#include "src/codegen/signature.h"
#include "src/flags/flags.h"
-#include "src/utils/utils.h"
#include "src/utils/vector.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-result.h"
@@ -39,9 +38,12 @@ using DecodeResult = VoidResult;
// a buffer of bytes.
class Decoder {
public:
- enum ValidateFlag : bool { kValidate = true, kNoValidate = false };
-
- enum AdvancePCFlag : bool { kAdvancePc = true, kNoAdvancePc = false };
+ // {ValidateFlag} can be used in a boolean manner ({if (!validate) ...}).
+ enum ValidateFlag : int8_t {
+ kNoValidation = 0, // Don't run validation, assume valid input.
+ kBooleanValidation, // Run validation but only store a generic error.
+ kFullValidation // Run full validation with error message and location.
+ };
enum TraceFlag : bool { kTrace = true, kNoTrace = false };
@@ -59,7 +61,7 @@ class Decoder {
virtual ~Decoder() = default;
- inline bool validate_size(const byte* pc, uint32_t length, const char* msg) {
+ bool validate_size(const byte* pc, uint32_t length, const char* msg) {
DCHECK_LE(start_, pc);
if (V8_UNLIKELY(pc > end_ || length > static_cast<uint32_t>(end_ - pc))) {
error(pc, msg);
@@ -70,28 +72,25 @@ class Decoder {
// Reads an 8-bit unsigned integer.
template <ValidateFlag validate>
- inline uint8_t read_u8(const byte* pc, const char* msg = "expected 1 byte") {
+ uint8_t read_u8(const byte* pc, const char* msg = "expected 1 byte") {
return read_little_endian<uint8_t, validate>(pc, msg);
}
// Reads a 16-bit unsigned integer (little endian).
template <ValidateFlag validate>
- inline uint16_t read_u16(const byte* pc,
- const char* msg = "expected 2 bytes") {
+ uint16_t read_u16(const byte* pc, const char* msg = "expected 2 bytes") {
return read_little_endian<uint16_t, validate>(pc, msg);
}
// Reads a 32-bit unsigned integer (little endian).
template <ValidateFlag validate>
- inline uint32_t read_u32(const byte* pc,
- const char* msg = "expected 4 bytes") {
+ uint32_t read_u32(const byte* pc, const char* msg = "expected 4 bytes") {
return read_little_endian<uint32_t, validate>(pc, msg);
}
// Reads a 64-bit unsigned integer (little endian).
template <ValidateFlag validate>
- inline uint64_t read_u64(const byte* pc,
- const char* msg = "expected 8 bytes") {
+ uint64_t read_u64(const byte* pc, const char* msg = "expected 8 bytes") {
return read_little_endian<uint64_t, validate>(pc, msg);
}
@@ -99,72 +98,64 @@ class Decoder {
template <ValidateFlag validate>
uint32_t read_u32v(const byte* pc, uint32_t* length,
const char* name = "LEB32") {
- return read_leb<uint32_t, validate, kNoAdvancePc, kNoTrace>(pc, length,
- name);
+ return read_leb<uint32_t, validate, kNoTrace>(pc, length, name);
}
// Reads a variable-length signed integer (little endian).
template <ValidateFlag validate>
int32_t read_i32v(const byte* pc, uint32_t* length,
const char* name = "signed LEB32") {
- return read_leb<int32_t, validate, kNoAdvancePc, kNoTrace>(pc, length,
- name);
+ return read_leb<int32_t, validate, kNoTrace>(pc, length, name);
}
// Reads a variable-length unsigned integer (little endian).
template <ValidateFlag validate>
uint64_t read_u64v(const byte* pc, uint32_t* length,
const char* name = "LEB64") {
- return read_leb<uint64_t, validate, kNoAdvancePc, kNoTrace>(pc, length,
- name);
+ return read_leb<uint64_t, validate, kNoTrace>(pc, length, name);
}
// Reads a variable-length signed integer (little endian).
template <ValidateFlag validate>
int64_t read_i64v(const byte* pc, uint32_t* length,
const char* name = "signed LEB64") {
- return read_leb<int64_t, validate, kNoAdvancePc, kNoTrace>(pc, length,
- name);
+ return read_leb<int64_t, validate, kNoTrace>(pc, length, name);
}
// Reads a variable-length 33-bit signed integer (little endian).
template <ValidateFlag validate>
int64_t read_i33v(const byte* pc, uint32_t* length,
const char* name = "signed LEB33") {
- return read_leb<int64_t, validate, kNoAdvancePc, kNoTrace, 33>(pc, length,
- name);
+ return read_leb<int64_t, validate, kNoTrace, 33>(pc, length, name);
+ }
+
+ // Convenient overload for callers who don't care about length.
+ template <ValidateFlag validate>
+ WasmOpcode read_prefixed_opcode(const byte* pc) {
+ uint32_t len;
+ return read_prefixed_opcode<validate>(pc, &len);
}
// Reads a prefixed-opcode, possibly with variable-length index.
- // The length param is set to the number of bytes this index is encoded with.
- // For most cases (non variable-length), it will be 1.
+ // `length` is set to the number of bytes that make up this opcode,
+ // *including* the prefix byte. For most opcodes, it will be 2.
template <ValidateFlag validate>
- WasmOpcode read_prefixed_opcode(const byte* pc, uint32_t* length = nullptr,
+ WasmOpcode read_prefixed_opcode(const byte* pc, uint32_t* length,
const char* name = "prefixed opcode") {
- uint32_t unused_length;
- if (length == nullptr) {
- length = &unused_length;
- }
uint32_t index;
- if (*pc == WasmOpcode::kSimdPrefix) {
- // SIMD opcodes can be multiple bytes (when LEB128 encoded).
- index = read_u32v<validate>(pc + 1, length, "prefixed opcode index");
- // Only support SIMD opcodes that go up to 0xFF (when decoded). Anything
- // bigger will need 1 more byte, and the '<< 8' below will be wrong.
- if (validate && V8_UNLIKELY(index > 0xff)) {
- errorf(pc, "Invalid SIMD opcode %d", index);
- }
- } else {
- if (!validate || validate_size(pc, 2, "expected 2 bytes")) {
- DCHECK(validate_size(pc, 2, "expected 2 bytes"));
- index = *(pc + 1);
- *length = 1;
- } else {
- // If kValidate and size validation fails.
- index = 0;
- *length = 0;
- }
+
+ // Prefixed opcodes all use LEB128 encoding.
+ index = read_u32v<validate>(pc + 1, length, "prefixed opcode index");
+ *length += 1; // Prefix byte.
+ // Only support opcodes that go up to 0xFF (when decoded). Anything
+ // bigger will need 1 more byte, and the '<< 8' below will be wrong.
+ if (validate && V8_UNLIKELY(index > 0xff)) {
+ errorf(pc, "Invalid prefixed opcode %d", index);
+ // If size validation fails.
+ index = 0;
+ *length = 0;
}
+
return static_cast<WasmOpcode>((*pc) << 8 | index);
}
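Note: with the rewritten read_prefixed_opcode above, every prefixed opcode is the prefix byte shifted left by 8, or'ed with the LEB128-encoded index that follows, and length now counts the prefix byte too. A hypothetical mini-decoder for the common single-byte-index case:

    #include <cassert>
    #include <cstdint>

    // Minimal sketch: decode a prefixed opcode whose index fits in one LEB
    // byte (index < 0x80), as in (*pc) << 8 | index above.
    uint32_t decode_prefixed(const uint8_t* pc, uint32_t* length) {
      uint8_t prefix = pc[0];
      uint8_t index = pc[1];  // single-byte LEB128 payload assumed
      *length = 2;            // prefix byte + one index byte
      return (uint32_t{prefix} << 8) | index;
    }

    int main() {
      // 0xfd is the SIMD prefix; 0xfd 0x0e decodes to opcode 0xfd0e.
      const uint8_t bytes[] = {0xfd, 0x0e};
      uint32_t len = 0;
      assert(decode_prefixed(bytes, &len) == 0xfd0e);
      assert(len == 2);
      return 0;
    }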
@@ -186,21 +177,28 @@ class Decoder {
// Reads a LEB128 variable-length unsigned 32-bit integer and advances {pc_}.
uint32_t consume_u32v(const char* name = nullptr) {
uint32_t length = 0;
- return read_leb<uint32_t, kValidate, kAdvancePc, kTrace>(pc_, &length,
- name);
+ uint32_t result =
+ read_leb<uint32_t, kFullValidation, kTrace>(pc_, &length, name);
+ pc_ += length;
+ return result;
}
// Reads a LEB128 variable-length signed 32-bit integer and advances {pc_}.
int32_t consume_i32v(const char* name = nullptr) {
uint32_t length = 0;
- return read_leb<int32_t, kValidate, kAdvancePc, kTrace>(pc_, &length, name);
+ int32_t result =
+ read_leb<int32_t, kFullValidation, kTrace>(pc_, &length, name);
+ pc_ += length;
+ return result;
}
// Reads a LEB128 variable-length unsigned 64-bit integer and advances {pc_}.
uint64_t consume_u64v(const char* name = nullptr) {
uint32_t length = 0;
- return read_leb<uint64_t, kValidate, kAdvancePc, kTrace>(pc_, &length,
- name);
+ uint64_t result =
+ read_leb<uint64_t, kFullValidation, kTrace>(pc_, &length, name);
+ pc_ += length;
+ return result;
}
// Consume {size} bytes and send them to the bit bucket, advancing {pc_}.
@@ -224,6 +222,14 @@ class Decoder {
return true;
}
+ // Use this for "boolean validation", i.e. if the error message is not used
+ // anyway.
+ void V8_NOINLINE MarkError() {
+ if (!ok()) return;
+ error_ = {0, "validation failed"};
+ onFirstError();
+ }
+
// Do not inline error methods. This has measurable impact on validation time,
// see https://crbug.com/910432.
void V8_NOINLINE error(const char* msg) { errorf(pc_offset(), "%s", msg); }
@@ -234,6 +240,13 @@ class Decoder {
errorf(offset, "%s", msg);
}
+ void V8_NOINLINE PRINTF_FORMAT(2, 3) errorf(const char* format, ...) {
+ va_list args;
+ va_start(args, format);
+ verrorf(pc_offset(), format, args);
+ va_end(args);
+ }
+
void V8_NOINLINE PRINTF_FORMAT(3, 4)
errorf(uint32_t offset, const char* format, ...) {
va_list args;
@@ -343,8 +356,8 @@ class Decoder {
onFirstError();
}
- template <typename IntType, bool validate>
- inline IntType read_little_endian(const byte* pc, const char* msg) {
+ template <typename IntType, ValidateFlag validate>
+ IntType read_little_endian(const byte* pc, const char* msg) {
if (!validate) {
DCHECK(validate_size(pc, sizeof(IntType), msg));
} else if (!validate_size(pc, sizeof(IntType), msg)) {
@@ -354,36 +367,59 @@ class Decoder {
}
template <typename IntType>
- inline IntType consume_little_endian(const char* name) {
+ IntType consume_little_endian(const char* name) {
TRACE(" +%u %-20s: ", pc_offset(), name);
if (!checkAvailable(sizeof(IntType))) {
traceOffEnd();
pc_ = end_;
return IntType{0};
}
- IntType val = read_little_endian<IntType, false>(pc_, name);
+ IntType val = read_little_endian<IntType, kNoValidation>(pc_, name);
traceByteRange(pc_, pc_ + sizeof(IntType));
TRACE("= %d\n", val);
pc_ += sizeof(IntType);
return val;
}
- template <typename IntType, ValidateFlag validate, AdvancePCFlag advance_pc,
- TraceFlag trace, size_t size_in_bits = 8 * sizeof(IntType)>
- inline IntType read_leb(const byte* pc, uint32_t* length,
- const char* name = "varint") {
- DCHECK_IMPLIES(advance_pc, pc == pc_);
+ template <typename IntType, ValidateFlag validate, TraceFlag trace,
+ size_t size_in_bits = 8 * sizeof(IntType)>
+ V8_INLINE IntType read_leb(const byte* pc, uint32_t* length,
+ const char* name = "varint") {
static_assert(size_in_bits <= 8 * sizeof(IntType),
"leb does not fit in type");
TRACE_IF(trace, " +%u %-20s: ", pc_offset(), name);
- return read_leb_tail<IntType, validate, advance_pc, trace, size_in_bits, 0>(
- pc, length, name, 0);
+ // Fast path for single-byte integers.
+ if ((!validate || V8_LIKELY(pc < end_)) && !(*pc & 0x80)) {
+ TRACE_IF(trace, "%02x ", *pc);
+ *length = 1;
+ IntType result = *pc;
+ if (std::is_signed<IntType>::value) {
+ // Perform sign extension.
+ constexpr int sign_ext_shift = int{8 * sizeof(IntType)} - 7;
+ result = (result << sign_ext_shift) >> sign_ext_shift;
+ TRACE_IF(trace, "= %" PRIi64 "\n", static_cast<int64_t>(result));
+ } else {
+ TRACE_IF(trace, "= %" PRIu64 "\n", static_cast<uint64_t>(result));
+ }
+ return result;
+ }
+ return read_leb_slowpath<IntType, validate, trace, size_in_bits>(pc, length,
+ name);
+ }
+
+ template <typename IntType, ValidateFlag validate, TraceFlag trace,
+ size_t size_in_bits = 8 * sizeof(IntType)>
+ V8_NOINLINE IntType read_leb_slowpath(const byte* pc, uint32_t* length,
+ const char* name) {
+ // Create an unrolled LEB decoding function per integer type.
+ return read_leb_tail<IntType, validate, trace, size_in_bits, 0>(pc, length,
+ name, 0);
}
- template <typename IntType, ValidateFlag validate, AdvancePCFlag advance_pc,
- TraceFlag trace, size_t size_in_bits, int byte_index>
- IntType read_leb_tail(const byte* pc, uint32_t* length, const char* name,
- IntType result) {
+ template <typename IntType, ValidateFlag validate, TraceFlag trace,
+ size_t size_in_bits, int byte_index>
+ V8_INLINE IntType read_leb_tail(const byte* pc, uint32_t* length,
+ const char* name, IntType result) {
constexpr bool is_signed = std::is_signed<IntType>::value;
constexpr int kMaxLength = (size_in_bits + 6) / 7;
static_assert(byte_index < kMaxLength, "invalid template instantiation");
@@ -404,15 +440,19 @@ class Decoder {
// Compilers are not smart enough to figure out statically that the
// following call is unreachable if is_last_byte is false.
constexpr int next_byte_index = byte_index + (is_last_byte ? 0 : 1);
- return read_leb_tail<IntType, validate, advance_pc, trace, size_in_bits,
+ return read_leb_tail<IntType, validate, trace, size_in_bits,
next_byte_index>(pc + 1, length, name, result);
}
- if (advance_pc) pc_ = pc + (at_end ? 0 : 1);
*length = byte_index + (at_end ? 0 : 1);
if (validate && V8_UNLIKELY(at_end || (b & 0x80))) {
TRACE_IF(trace, at_end ? "<end> " : "<length overflow> ");
- errorf(pc, "expected %s", name);
+ if (validate == kFullValidation) {
+ errorf(pc, "expected %s", name);
+ } else {
+ MarkError();
+ }
result = 0;
+ *length = 0;
}
if (is_last_byte) {
// A signed-LEB128 must sign-extend the final byte, excluding its
@@ -431,12 +471,17 @@ class Decoder {
if (!validate) {
DCHECK(valid_extra_bits);
} else if (V8_UNLIKELY(!valid_extra_bits)) {
- error(pc, "extra bits in varint");
+ if (validate == kFullValidation) {
+ error(pc, "extra bits in varint");
+ } else {
+ MarkError();
+ }
result = 0;
+ *length = 0;
}
}
constexpr int sign_ext_shift =
- is_signed ? Max(0, int{8 * sizeof(IntType)} - shift - 7) : 0;
+ is_signed ? std::max(0, int{8 * sizeof(IntType)} - shift - 7) : 0;
// Perform sign extension.
result = (result << sign_ext_shift) >> sign_ext_shift;
if (trace && is_signed) {
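Note: the new read_leb fast path above handles the very common case of a single-byte LEB128 value inline and only falls back to the unrolled slow path otherwise. A standalone sketch of just that single-byte case, including the sign extension for signed types:

    #include <cassert>
    #include <cstdint>
    #include <type_traits>

    // Single-byte LEB128 fast path: if the continuation bit (0x80) is clear,
    // the value is the low 7 bits, sign-extended for signed integer types.
    template <typename IntType>
    IntType read_leb_one_byte(uint8_t b) {
      IntType result = b;
      if (std::is_signed<IntType>::value && (b & 0x40)) {
        result |= static_cast<IntType>(~IntType{0x7f});  // fill the high bits
      }
      return result;
    }

    int main() {
      assert(read_leb_one_byte<uint32_t>(0x3f) == 63);
      assert(read_leb_one_byte<int32_t>(0x3f) == 63);
      assert(read_leb_one_byte<int32_t>(0x7f) == -1);   // 0b0111'1111 -> -1
      assert(read_leb_one_byte<int32_t>(0x41) == -63);
      return 0;
    }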
diff --git a/deps/v8/src/wasm/function-body-decoder-impl.h b/deps/v8/src/wasm/function-body-decoder-impl.h
index 42b36f359b..3e07806d89 100644
--- a/deps/v8/src/wasm/function-body-decoder-impl.h
+++ b/deps/v8/src/wasm/function-body-decoder-impl.h
@@ -44,15 +44,14 @@ struct WasmException;
return true; \
}())
-#define CHECK_PROTOTYPE_OPCODE(feat) \
- DCHECK(this->module_->origin == kWasmOrigin); \
- if (!VALIDATE(this->enabled_.has_##feat())) { \
- this->errorf(this->pc(), \
- "Invalid opcode 0x%x (enable with --experimental-wasm-" #feat \
- ")", \
- opcode); \
- return 0; \
- } \
+#define CHECK_PROTOTYPE_OPCODE(feat) \
+ DCHECK(this->module_->origin == kWasmOrigin); \
+ if (!VALIDATE(this->enabled_.has_##feat())) { \
+ this->DecodeError( \
+ "Invalid opcode 0x%x (enable with --experimental-wasm-" #feat ")", \
+ opcode); \
+ return 0; \
+ } \
this->detected_->Add(kFeature_##feat);
#define ATOMIC_OP_LIST(V) \
@@ -125,6 +124,57 @@ struct WasmException;
V(I64AtomicStore16U, Uint16) \
V(I64AtomicStore32U, Uint32)
+// Decoder error with explicit PC and format arguments.
+template <Decoder::ValidateFlag validate, typename... Args>
+void DecodeError(Decoder* decoder, const byte* pc, const char* str,
+ Args&&... args) {
+ CHECK(validate == Decoder::kFullValidation ||
+ validate == Decoder::kBooleanValidation);
+ STATIC_ASSERT(sizeof...(Args) > 0);
+ if (validate == Decoder::kBooleanValidation) {
+ decoder->MarkError();
+ } else {
+ decoder->errorf(pc, str, std::forward<Args>(args)...);
+ }
+}
+
+// Decoder error with explicit PC and no format arguments.
+template <Decoder::ValidateFlag validate>
+void DecodeError(Decoder* decoder, const byte* pc, const char* str) {
+ CHECK(validate == Decoder::kFullValidation ||
+ validate == Decoder::kBooleanValidation);
+ if (validate == Decoder::kBooleanValidation) {
+ decoder->MarkError();
+ } else {
+ decoder->error(pc, str);
+ }
+}
+
+// Decoder error without explicit PC, but with format arguments.
+template <Decoder::ValidateFlag validate, typename... Args>
+void DecodeError(Decoder* decoder, const char* str, Args&&... args) {
+ CHECK(validate == Decoder::kFullValidation ||
+ validate == Decoder::kBooleanValidation);
+ STATIC_ASSERT(sizeof...(Args) > 0);
+ if (validate == Decoder::kBooleanValidation) {
+ decoder->MarkError();
+ } else {
+ decoder->errorf(str, std::forward<Args>(args)...);
+ }
+}
+
+// Decoder error without explicit PC and without format arguments.
+template <Decoder::ValidateFlag validate>
+void DecodeError(Decoder* decoder, const char* str) {
+ CHECK(validate == Decoder::kFullValidation ||
+ validate == Decoder::kBooleanValidation);
+ if (validate == Decoder::kBooleanValidation) {
+ decoder->MarkError();
+ } else {
+ decoder->error(str);
+ }
+}
+
namespace value_type_reader {
V8_INLINE WasmFeature feature_for_heap_type(HeapType heap_type) {
@@ -147,6 +197,12 @@ HeapType read_heap_type(Decoder* decoder, const byte* pc,
uint32_t* const length, const WasmFeatures& enabled) {
int64_t heap_index = decoder->read_i33v<validate>(pc, length, "heap type");
if (heap_index < 0) {
+ int64_t min_1_byte_leb128 = -64;
+ if (heap_index < min_1_byte_leb128) {
+ DecodeError<validate>(decoder, pc, "Unknown heap type %" PRId64,
+ heap_index);
+ return HeapType(HeapType::kBottom);
+ }
uint8_t uint_7_mask = 0x7F;
uint8_t code = static_cast<ValueTypeCode>(heap_index) & uint_7_mask;
switch (code) {
@@ -157,8 +213,9 @@ HeapType read_heap_type(Decoder* decoder, const byte* pc,
case kI31RefCode: {
HeapType result = HeapType::from_code(code);
if (!VALIDATE(enabled.contains(feature_for_heap_type(result)))) {
- decoder->errorf(
- pc, "invalid heap type '%s', enable with --experimental-wasm-%s",
+ DecodeError<validate>(
+ decoder, pc,
+ "invalid heap type '%s', enable with --experimental-wasm-%s",
result.name().c_str(),
WasmFeatures::name_for_feature(feature_for_heap_type(result)));
return HeapType(HeapType::kBottom);
@@ -166,25 +223,25 @@ HeapType read_heap_type(Decoder* decoder, const byte* pc,
return result;
}
default:
- if (validate) {
- decoder->errorf(pc, "Unknown heap type %" PRId64, heap_index);
- }
+ DecodeError<validate>(decoder, pc, "Unknown heap type %" PRId64,
+ heap_index);
return HeapType(HeapType::kBottom);
}
UNREACHABLE();
} else {
if (!VALIDATE(enabled.has_typed_funcref())) {
- decoder->error(pc,
- "Invalid indexed heap type, enable with "
- "--experimental-wasm-typed-funcref");
+ DecodeError<validate>(decoder, pc,
+ "Invalid indexed heap type, enable with "
+ "--experimental-wasm-typed-funcref");
return HeapType(HeapType::kBottom);
}
uint32_t type_index = static_cast<uint32_t>(heap_index);
if (!VALIDATE(type_index < kV8MaxWasmTypes)) {
- decoder->errorf(pc,
- "Type index %u is greater than the maximum number %zu "
- "of type definitions supported by V8",
- type_index, kV8MaxWasmTypes);
+ DecodeError<validate>(
+ decoder, pc,
+ "Type index %u is greater than the maximum number %zu "
+ "of type definitions supported by V8",
+ type_index, kV8MaxWasmTypes);
return HeapType(HeapType::kBottom);
}
return HeapType(type_index);
@@ -214,8 +271,9 @@ ValueType read_value_type(Decoder* decoder, const byte* pc,
ValueType result = ValueType::Ref(
heap_type, code == kI31RefCode ? kNonNullable : kNullable);
if (!VALIDATE(enabled.contains(feature_for_heap_type(heap_type)))) {
- decoder->errorf(
- pc, "invalid value type '%s', enable with --experimental-wasm-%s",
+ DecodeError<validate>(
+ decoder, pc,
+ "invalid value type '%s', enable with --experimental-wasm-%s",
result.name().c_str(),
WasmFeatures::name_for_feature(feature_for_heap_type(heap_type)));
return kWasmBottom;
@@ -234,10 +292,10 @@ ValueType read_value_type(Decoder* decoder, const byte* pc,
case kOptRefCode: {
Nullability nullability = code == kOptRefCode ? kNullable : kNonNullable;
if (!VALIDATE(enabled.has_typed_funcref())) {
- decoder->errorf(pc,
- "Invalid type '(ref%s <heaptype>)', enable with "
- "--experimental-wasm-typed-funcref",
- nullability == kNullable ? " null" : "");
+ DecodeError<validate>(decoder, pc,
+ "Invalid type '(ref%s <heaptype>)', enable with "
+ "--experimental-wasm-typed-funcref",
+ nullability == kNullable ? " null" : "");
return kWasmBottom;
}
HeapType heap_type =
@@ -248,18 +306,20 @@ ValueType read_value_type(Decoder* decoder, const byte* pc,
}
case kRttCode: {
if (!VALIDATE(enabled.has_gc())) {
- decoder->error(
- pc, "invalid value type 'rtt', enable with --experimental-wasm-gc");
+ DecodeError<validate>(
+ decoder, pc,
+ "invalid value type 'rtt', enable with --experimental-wasm-gc");
return kWasmBottom;
}
uint32_t depth_length;
uint32_t depth =
decoder->read_u32v<validate>(pc + 1, &depth_length, "depth");
if (!VALIDATE(depth <= kV8MaxRttSubtypingDepth)) {
- decoder->errorf(pc,
- "subtyping depth %u is greater than the maximum depth "
- "%u supported by V8",
- depth, kV8MaxRttSubtypingDepth);
+ DecodeError<validate>(
+ decoder, pc,
+ "subtyping depth %u is greater than the maximum depth "
+ "%u supported by V8",
+ depth, kV8MaxRttSubtypingDepth);
return kWasmBottom;
}
HeapType heap_type = read_heap_type<validate>(
@@ -270,9 +330,9 @@ ValueType read_value_type(Decoder* decoder, const byte* pc,
}
case kS128Code: {
if (!VALIDATE(enabled.has_simd())) {
- decoder->error(pc,
- "invalid value type 's128', enable with "
- "--experimental-wasm-simd");
+ DecodeError<validate>(
+ decoder, pc,
+ "invalid value type 's128', enable with --experimental-wasm-simd");
return kWasmBottom;
}
return kWasmS128;
@@ -376,8 +436,9 @@ struct SelectTypeImmediate {
uint8_t num_types =
decoder->read_u32v<validate>(pc, &length, "number of select types");
if (!VALIDATE(num_types == 1)) {
- decoder->error(
- pc + 1, "Invalid number of types. Select accepts exactly one type");
+ DecodeError<validate>(
+ decoder, pc + 1,
+ "Invalid number of types. Select accepts exactly one type");
return;
}
uint32_t type_length;
@@ -385,7 +446,7 @@ struct SelectTypeImmediate {
&type_length, enabled);
length += type_length;
if (!VALIDATE(type != kWasmBottom)) {
- decoder->error(pc + 1, "invalid select type");
+ DecodeError<validate>(decoder, pc + 1, "invalid select type");
}
}
};
@@ -402,18 +463,20 @@ struct BlockTypeImmediate {
int64_t block_type =
decoder->read_i33v<validate>(pc, &length, "block type");
if (block_type < 0) {
- if ((static_cast<uint8_t>(block_type) & byte{0x7f}) == kVoidCode) return;
+ constexpr int64_t kVoidCode_i64_extended = (~int64_t{0x7F}) | kVoidCode;
+ if (block_type == kVoidCode_i64_extended) return;
type = value_type_reader::read_value_type<validate>(decoder, pc, &length,
enabled);
if (!VALIDATE(type != kWasmBottom)) {
- decoder->errorf(pc, "Invalid block type %" PRId64, block_type);
+ DecodeError<validate>(decoder, pc, "Invalid block type %" PRId64,
+ block_type);
}
} else {
if (!VALIDATE(enabled.has_mv())) {
- decoder->errorf(pc,
- "invalid block type %" PRId64
- ", enable with --experimental-wasm-mv",
- block_type);
+ DecodeError<validate>(decoder, pc,
+ "invalid block type %" PRId64
+ ", enable with --experimental-wasm-mv",
+ block_type);
return;
}
type = kWasmBottom;
@@ -480,7 +543,8 @@ struct MemoryIndexImmediate {
inline MemoryIndexImmediate(Decoder* decoder, const byte* pc) {
index = decoder->read_u8<validate>(pc, "memory index");
if (!VALIDATE(index == 0)) {
- decoder->errorf(pc, "expected memory index 0, found %u", index);
+ DecodeError<validate>(decoder, pc, "expected memory index 0, found %u",
+ index);
}
}
};
@@ -543,8 +607,8 @@ struct CallIndirectImmediate {
TableIndexImmediate<validate> table(decoder, pc + len);
if (!VALIDATE((table.index == 0 && table.length == 1) ||
enabled.has_reftypes())) {
- decoder->errorf(pc + len, "expected table index 0, found %u",
- table.index);
+ DecodeError<validate>(decoder, pc + len,
+ "expected table index 0, found %u", table.index);
}
table_index = table.index;
length = len + table.length;
@@ -623,10 +687,11 @@ struct MemoryAccessImmediate {
alignment =
decoder->read_u32v<validate>(pc, &alignment_length, "alignment");
if (!VALIDATE(alignment <= max_alignment)) {
- decoder->errorf(pc,
- "invalid alignment; expected maximum alignment is %u, "
- "actual alignment is %u",
- max_alignment, alignment);
+ DecodeError<validate>(
+ decoder, pc,
+ "invalid alignment; expected maximum alignment is %u, "
+ "actual alignment is %u",
+ max_alignment, alignment);
}
uint32_t offset_length;
offset = decoder->read_u32v<validate>(pc + alignment_length, &offset_length,
@@ -746,12 +811,29 @@ struct HeapTypeImmediate {
}
};
+template <Decoder::ValidateFlag validate>
+struct PcForErrors {
+ PcForErrors(const byte* /* pc */) {}
+
+ const byte* pc() const { return nullptr; }
+};
+
+template <>
+struct PcForErrors<Decoder::kFullValidation> {
+ const byte* pc_for_errors = nullptr;
+
+ PcForErrors(const byte* pc) : pc_for_errors(pc) {}
+
+ const byte* pc() const { return pc_for_errors; }
+};
+
// An entry on the value stack.
-struct ValueBase {
- const byte* pc = nullptr;
+template <Decoder::ValidateFlag validate>
+struct ValueBase : public PcForErrors<validate> {
ValueType type = kWasmStmt;
- ValueBase(const byte* pc, ValueType type) : pc(pc), type(type) {}
+ ValueBase(const byte* pc, ValueType type)
+ : PcForErrors<validate>(pc), type(type) {}
};
template <typename Value>
@@ -794,12 +876,11 @@ enum Reachability : uint8_t {
};
// An entry on the control stack (i.e. if, block, loop, or try).
-template <typename Value>
-struct ControlBase {
+template <typename Value, Decoder::ValidateFlag validate>
+struct ControlBase : public PcForErrors<validate> {
ControlKind kind = kControlBlock;
uint32_t locals_count = 0;
uint32_t stack_depth = 0; // stack height at the beginning of the construct.
- const uint8_t* pc = nullptr;
Reachability reachability = kReachable;
// Values merged into the start or end of this control construct.
@@ -810,10 +891,10 @@ struct ControlBase {
ControlBase(ControlKind kind, uint32_t locals_count, uint32_t stack_depth,
const uint8_t* pc, Reachability reachability)
- : kind(kind),
+ : PcForErrors<validate>(pc),
+ kind(kind),
locals_count(locals_count),
stack_depth(stack_depth),
- pc(pc),
reachability(reachability),
start_merge(reachability == kReachable) {
DCHECK(kind == kControlLet || locals_count == 0);
@@ -904,8 +985,13 @@ struct ControlBase {
F(LoadTransform, LoadType type, LoadTransformationKind transform, \
const MemoryAccessImmediate<validate>& imm, const Value& index, \
Value* result) \
+ F(LoadLane, LoadType type, const Value& value, const Value& index, \
+ const MemoryAccessImmediate<validate>& imm, const uint8_t laneidx, \
+ Value* result) \
F(StoreMem, StoreType type, const MemoryAccessImmediate<validate>& imm, \
const Value& index, const Value& value) \
+ F(StoreLane, StoreType type, const MemoryAccessImmediate<validate>& imm, \
+ const Value& index, const Value& value, const uint8_t laneidx) \
F(CurrentMemoryPages, Value* result) \
F(MemoryGrow, const Value& value, Value* result) \
F(CallDirect, const CallFunctionImmediate<validate>& imm, \
@@ -1035,9 +1121,10 @@ class WasmDecoder : public Decoder {
: local_types_.begin();
// Decode local declarations, if any.
- uint32_t entries = read_u32v<kValidate>(pc, &length, "local decls count");
+ uint32_t entries =
+ read_u32v<kFullValidation>(pc, &length, "local decls count");
if (!VALIDATE(ok())) {
- error(pc + *total_length, "invalid local decls count");
+ DecodeError(pc + *total_length, "invalid local decls count");
return false;
}
@@ -1046,26 +1133,27 @@ class WasmDecoder : public Decoder {
while (entries-- > 0) {
if (!VALIDATE(more())) {
- error(end(), "expected more local decls but reached end of input");
+ DecodeError(end(),
+ "expected more local decls but reached end of input");
return false;
}
- uint32_t count =
- read_u32v<kValidate>(pc + *total_length, &length, "local count");
+ uint32_t count = read_u32v<kFullValidation>(pc + *total_length, &length,
+ "local count");
if (!VALIDATE(ok())) {
- error(pc + *total_length, "invalid local count");
+ DecodeError(pc + *total_length, "invalid local count");
return false;
}
DCHECK_LE(local_types_.size(), kV8MaxWasmFunctionLocals);
if (!VALIDATE(count <= kV8MaxWasmFunctionLocals - local_types_.size())) {
- error(pc + *total_length, "local count too large");
+ DecodeError(pc + *total_length, "local count too large");
return false;
}
*total_length += length;
- ValueType type = value_type_reader::read_value_type<kValidate>(
+ ValueType type = value_type_reader::read_value_type<kFullValidation>(
this, pc + *total_length, &length, enabled_);
if (!VALIDATE(type != kWasmBottom)) {
- error(pc + *total_length, "invalid local type");
+ DecodeError(pc + *total_length, "invalid local type");
return false;
}
*total_length += length;
@@ -1081,6 +1169,13 @@ class WasmDecoder : public Decoder {
return true;
}
+ // Shorthand that forwards to the {DecodeError} functions above, passing our
+ // {validate} flag.
+ template <typename... Args>
+ void DecodeError(Args... args) {
+ wasm::DecodeError<validate>(this, std::forward<Args>(args)...);
+ }
+
static BitVector* AnalyzeLoopAssignment(WasmDecoder* decoder, const byte* pc,
uint32_t locals_count, Zone* zone) {
if (pc >= decoder->end()) return nullptr;
@@ -1138,7 +1233,7 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, LocalIndexImmediate<validate>& imm) {
if (!VALIDATE(imm.index < num_locals())) {
- errorf(pc, "invalid local index: %u", imm.index);
+ DecodeError(pc, "invalid local index: %u", imm.index);
return false;
}
return true;
@@ -1152,7 +1247,7 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, ExceptionIndexImmediate<validate>& imm) {
if (!Complete(imm)) {
- errorf(pc, "Invalid exception index: %u", imm.index);
+ DecodeError(pc, "Invalid exception index: %u", imm.index);
return false;
}
return true;
@@ -1160,7 +1255,7 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, GlobalIndexImmediate<validate>& imm) {
if (!VALIDATE(imm.index < module_->globals.size())) {
- errorf(pc, "invalid global index: %u", imm.index);
+ DecodeError(pc, "invalid global index: %u", imm.index);
return false;
}
imm.global = &module_->globals[imm.index];
@@ -1176,15 +1271,15 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, StructIndexImmediate<validate>& imm) {
if (Complete(imm)) return true;
- errorf(pc, "invalid struct index: %u", imm.index);
+ DecodeError(pc, "invalid struct index: %u", imm.index);
return false;
}
inline bool Validate(const byte* pc, FieldIndexImmediate<validate>& imm) {
if (!Validate(pc, imm.struct_index)) return false;
if (!VALIDATE(imm.index < imm.struct_index.struct_type->field_count())) {
- errorf(pc + imm.struct_index.length, "invalid field index: %u",
- imm.index);
+ DecodeError(pc + imm.struct_index.length, "invalid field index: %u",
+ imm.index);
return false;
}
return true;
@@ -1198,7 +1293,7 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, ArrayIndexImmediate<validate>& imm) {
if (!Complete(imm)) {
- errorf(pc, "invalid array index: %u", imm.index);
+ DecodeError(pc, "invalid array index: %u", imm.index);
return false;
}
return true;
@@ -1225,7 +1320,7 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, CallFunctionImmediate<validate>& imm) {
if (!Complete(imm)) {
- errorf(pc, "invalid function index: %u", imm.index);
+ DecodeError(pc, "invalid function index: %u", imm.index);
return false;
}
return true;
@@ -1242,27 +1337,28 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, CallIndirectImmediate<validate>& imm) {
if (!VALIDATE(imm.table_index < module_->tables.size())) {
- error("call_indirect: table index immediate out of bounds");
+ DecodeError(pc, "call_indirect: table index immediate out of bounds");
return false;
}
ValueType table_type = module_->tables[imm.table_index].type;
if (!VALIDATE(IsSubtypeOf(table_type, kWasmFuncRef, module_))) {
- errorf(pc, "call_indirect: immediate table #%u is not of a function type",
- imm.table_index);
+ DecodeError(
+ pc, "call_indirect: immediate table #%u is not of a function type",
+ imm.table_index);
return false;
}
if (!Complete(imm)) {
- errorf(pc, "invalid signature index: #%u", imm.sig_index);
+ DecodeError(pc, "invalid signature index: #%u", imm.sig_index);
return false;
}
// Check that the dynamic signature for this call is a subtype of the static
// type of the table the function is defined in.
ValueType immediate_type = ValueType::Ref(imm.sig_index, kNonNullable);
if (!VALIDATE(IsSubtypeOf(immediate_type, table_type, module_))) {
- errorf(pc,
- "call_indirect: Immediate signature #%u is not a subtype of "
- "immediate table #%u",
- imm.sig_index, imm.table_index);
+ DecodeError(pc,
+ "call_indirect: Immediate signature #%u is not a subtype of "
+ "immediate table #%u",
+ imm.sig_index, imm.table_index);
}
return true;
}
@@ -1270,7 +1366,7 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, BranchDepthImmediate<validate>& imm,
size_t control_depth) {
if (!VALIDATE(imm.depth < control_depth)) {
- errorf(pc, "invalid branch depth: %u", imm.depth);
+ DecodeError(pc, "invalid branch depth: %u", imm.depth);
return false;
}
return true;
@@ -1279,8 +1375,8 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, BranchTableImmediate<validate>& imm,
size_t block_depth) {
if (!VALIDATE(imm.table_count <= kV8MaxWasmFunctionBrTableSize)) {
- errorf(pc, "invalid table count (> max br_table size): %u",
- imm.table_count);
+ DecodeError(pc, "invalid table count (> max br_table size): %u",
+ imm.table_count);
return false;
}
return checkAvailable(imm.table_count);
@@ -1324,7 +1420,7 @@ class WasmDecoder : public Decoder {
break;
}
if (!VALIDATE(imm.lane >= 0 && imm.lane < num_lanes)) {
- error(pc, "invalid lane index");
+ DecodeError(pc, "invalid lane index");
return false;
} else {
return true;
@@ -1338,7 +1434,7 @@ class WasmDecoder : public Decoder {
}
// Shuffle indices must be in [0..31] for a 16 lane shuffle.
if (!VALIDATE(max_lane < 2 * kSimd128Size)) {
- error(pc, "invalid shuffle mask");
+ DecodeError(pc, "invalid shuffle mask");
return false;
}
return true;
@@ -1356,8 +1452,8 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, BlockTypeImmediate<validate>& imm) {
if (!Complete(imm)) {
- errorf(pc, "block type index %u out of bounds (%zu types)", imm.sig_index,
- module_->types.size());
+ DecodeError(pc, "block type index %u out of bounds (%zu types)",
+ imm.sig_index, module_->types.size());
return false;
}
return true;
@@ -1365,11 +1461,11 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, FunctionIndexImmediate<validate>& imm) {
if (!VALIDATE(imm.index < module_->functions.size())) {
- errorf(pc, "invalid function index: %u", imm.index);
+ DecodeError(pc, "invalid function index: %u", imm.index);
return false;
}
if (!VALIDATE(module_->functions[imm.index].declared)) {
- this->errorf(pc, "undeclared reference to function #%u", imm.index);
+ DecodeError(pc, "undeclared reference to function #%u", imm.index);
return false;
}
return true;
@@ -1377,7 +1473,7 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, MemoryIndexImmediate<validate>& imm) {
if (!VALIDATE(module_->has_memory)) {
- errorf(pc, "memory instruction with no memory");
+ DecodeError(pc, "memory instruction with no memory");
return false;
}
return true;
@@ -1386,7 +1482,7 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, MemoryInitImmediate<validate>& imm) {
if (!VALIDATE(imm.data_segment_index <
module_->num_declared_data_segments)) {
- errorf(pc, "invalid data segment index: %u", imm.data_segment_index);
+ DecodeError(pc, "invalid data segment index: %u", imm.data_segment_index);
return false;
}
if (!Validate(pc + imm.length - imm.memory.length, imm.memory))
@@ -1396,7 +1492,7 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, DataDropImmediate<validate>& imm) {
if (!VALIDATE(imm.index < module_->num_declared_data_segments)) {
- errorf(pc, "invalid data segment index: %u", imm.index);
+ DecodeError(pc, "invalid data segment index: %u", imm.index);
return false;
}
return true;
@@ -1409,7 +1505,7 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, TableIndexImmediate<validate>& imm) {
if (!VALIDATE(imm.index < module_->tables.size())) {
- errorf(pc, "invalid table index: %u", imm.index);
+ DecodeError(pc, "invalid table index: %u", imm.index);
return false;
}
return true;
@@ -1417,7 +1513,8 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, TableInitImmediate<validate>& imm) {
if (!VALIDATE(imm.elem_segment_index < module_->elem_segments.size())) {
- errorf(pc, "invalid element segment index: %u", imm.elem_segment_index);
+ DecodeError(pc, "invalid element segment index: %u",
+ imm.elem_segment_index);
return false;
}
if (!Validate(pc + imm.length - imm.table.length, imm.table)) {
@@ -1426,8 +1523,8 @@ class WasmDecoder : public Decoder {
ValueType elem_type = module_->elem_segments[imm.elem_segment_index].type;
if (!VALIDATE(IsSubtypeOf(elem_type, module_->tables[imm.table.index].type,
module_))) {
- errorf(pc, "table %u is not a super-type of %s", imm.table.index,
- elem_type.name().c_str());
+ DecodeError(pc, "table %u is not a super-type of %s", imm.table.index,
+ elem_type.name().c_str());
return false;
}
return true;
@@ -1435,7 +1532,7 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, ElemDropImmediate<validate>& imm) {
if (!VALIDATE(imm.index < module_->elem_segments.size())) {
- errorf(pc, "invalid element segment index: %u", imm.index);
+ DecodeError(pc, "invalid element segment index: %u", imm.index);
return false;
}
return true;
@@ -1447,8 +1544,8 @@ class WasmDecoder : public Decoder {
ValueType src_type = module_->tables[imm.table_src.index].type;
if (!VALIDATE(IsSubtypeOf(
src_type, module_->tables[imm.table_dst.index].type, module_))) {
- errorf(pc, "table %u is not a super-type of %s", imm.table_dst.index,
- src_type.name().c_str());
+ DecodeError(pc, "table %u is not a super-type of %s", imm.table_dst.index,
+ src_type.name().c_str());
return false;
}
return true;
@@ -1456,12 +1553,12 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, HeapTypeImmediate<validate>& imm) {
if (!VALIDATE(!imm.type.is_bottom())) {
- error(pc, "invalid heap type");
+ DecodeError(pc, "invalid heap type");
return false;
}
if (!VALIDATE(imm.type.is_generic() ||
module_->has_type(imm.type.ref_index()))) {
- errorf(pc, "Type index %u is out of bounds", imm.type.ref_index());
+ DecodeError(pc, "Type index %u is out of bounds", imm.type.ref_index());
return false;
}
return true;
@@ -1581,10 +1678,8 @@ class WasmDecoder : public Decoder {
case kExprF64Const:
return 9;
case kNumericPrefix: {
- byte numeric_index =
- decoder->read_u8<validate>(pc + 1, "numeric_index");
- WasmOpcode opcode =
- static_cast<WasmOpcode>(kNumericPrefix << 8 | numeric_index);
+ uint32_t length = 0;
+ opcode = decoder->read_prefixed_opcode<validate>(pc, &length);
switch (opcode) {
case kExprI32SConvertSatF32:
case kExprI32UConvertSatF32:
@@ -1594,44 +1689,44 @@ class WasmDecoder : public Decoder {
case kExprI64UConvertSatF32:
case kExprI64SConvertSatF64:
case kExprI64UConvertSatF64:
- return 2;
+ return length;
case kExprMemoryInit: {
- MemoryInitImmediate<validate> imm(decoder, pc + 2);
- return 2 + imm.length;
+ MemoryInitImmediate<validate> imm(decoder, pc + length);
+ return length + imm.length;
}
case kExprDataDrop: {
- DataDropImmediate<validate> imm(decoder, pc + 2);
- return 2 + imm.length;
+ DataDropImmediate<validate> imm(decoder, pc + length);
+ return length + imm.length;
}
case kExprMemoryCopy: {
- MemoryCopyImmediate<validate> imm(decoder, pc + 2);
- return 2 + imm.length;
+ MemoryCopyImmediate<validate> imm(decoder, pc + length);
+ return length + imm.length;
}
case kExprMemoryFill: {
- MemoryIndexImmediate<validate> imm(decoder, pc + 2);
- return 2 + imm.length;
+ MemoryIndexImmediate<validate> imm(decoder, pc + length);
+ return length + imm.length;
}
case kExprTableInit: {
- TableInitImmediate<validate> imm(decoder, pc + 2);
- return 2 + imm.length;
+ TableInitImmediate<validate> imm(decoder, pc + length);
+ return length + imm.length;
}
case kExprElemDrop: {
- ElemDropImmediate<validate> imm(decoder, pc + 2);
- return 2 + imm.length;
+ ElemDropImmediate<validate> imm(decoder, pc + length);
+ return length + imm.length;
}
case kExprTableCopy: {
- TableCopyImmediate<validate> imm(decoder, pc + 2);
- return 2 + imm.length;
+ TableCopyImmediate<validate> imm(decoder, pc + length);
+ return length + imm.length;
}
case kExprTableGrow:
case kExprTableSize:
case kExprTableFill: {
- TableIndexImmediate<validate> imm(decoder, pc + 2);
- return 2 + imm.length;
+ TableIndexImmediate<validate> imm(decoder, pc + length);
+ return length + imm.length;
}
default:
- decoder->error(pc, "invalid numeric opcode");
- return 2;
+ decoder->DecodeError(pc, "invalid numeric opcode");
+ return length;
}
}
case kSimdPrefix: {
@@ -1641,67 +1736,81 @@ class WasmDecoder : public Decoder {
#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
FOREACH_SIMD_0_OPERAND_OPCODE(DECLARE_OPCODE_CASE)
#undef DECLARE_OPCODE_CASE
- return 1 + length;
+ return length;
#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
FOREACH_SIMD_1_OPERAND_OPCODE(DECLARE_OPCODE_CASE)
#undef DECLARE_OPCODE_CASE
- return 2 + length;
+ return length + 1;
#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
FOREACH_SIMD_MEM_OPCODE(DECLARE_OPCODE_CASE)
- FOREACH_SIMD_POST_MVP_MEM_OPCODE(DECLARE_OPCODE_CASE)
#undef DECLARE_OPCODE_CASE
{
- MemoryAccessImmediate<validate> imm(decoder, pc + length + 1,
+ MemoryAccessImmediate<validate> imm(decoder, pc + length,
+ UINT32_MAX);
+ return length + imm.length;
+ }
+ case kExprS128Load8Lane:
+ case kExprS128Load16Lane:
+ case kExprS128Load32Lane:
+ case kExprS128Load64Lane:
+ case kExprS128Store8Lane:
+ case kExprS128Store16Lane:
+ case kExprS128Store32Lane:
+ case kExprS128Store64Lane: {
+ MemoryAccessImmediate<validate> imm(decoder, pc + length,
UINT32_MAX);
- return 1 + length + imm.length;
+ // 1 more byte for lane index immediate.
+ return length + imm.length + 1;
}
// Shuffles require a byte per lane, or 16 immediate bytes.
case kExprS128Const:
case kExprI8x16Shuffle:
- return 1 + length + kSimd128Size;
+ return length + kSimd128Size;
default:
- decoder->error(pc, "invalid SIMD opcode");
- return 1 + length;
+ decoder->DecodeError(pc, "invalid SIMD opcode");
+ return length;
}
}
case kAtomicPrefix: {
- byte atomic_index = decoder->read_u8<validate>(pc + 1, "atomic_index");
- WasmOpcode opcode =
- static_cast<WasmOpcode>(kAtomicPrefix << 8 | atomic_index);
+ uint32_t length = 0;
+ opcode = decoder->read_prefixed_opcode<validate>(pc, &length,
+ "atomic_index");
switch (opcode) {
#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
FOREACH_ATOMIC_OPCODE(DECLARE_OPCODE_CASE)
#undef DECLARE_OPCODE_CASE
{
- MemoryAccessImmediate<validate> imm(decoder, pc + 2, UINT32_MAX);
- return 2 + imm.length;
+ MemoryAccessImmediate<validate> imm(decoder, pc + length,
+ UINT32_MAX);
+ return length + imm.length;
}
#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
FOREACH_ATOMIC_0_OPERAND_OPCODE(DECLARE_OPCODE_CASE)
#undef DECLARE_OPCODE_CASE
{
- return 2 + 1;
+ return length + 1;
}
default:
- decoder->error(pc, "invalid Atomics opcode");
- return 2;
+ decoder->DecodeError(pc, "invalid Atomics opcode");
+ return length;
}
}
case kGCPrefix: {
- byte gc_index = decoder->read_u8<validate>(pc + 1, "gc_index");
- WasmOpcode opcode = static_cast<WasmOpcode>(kGCPrefix << 8 | gc_index);
+ uint32_t length = 0;
+ opcode =
+ decoder->read_prefixed_opcode<validate>(pc, &length, "gc_index");
switch (opcode) {
case kExprStructNewWithRtt:
case kExprStructNewDefault: {
- StructIndexImmediate<validate> imm(decoder, pc + 2);
- return 2 + imm.length;
+ StructIndexImmediate<validate> imm(decoder, pc + length);
+ return length + imm.length;
}
case kExprStructGet:
case kExprStructGetS:
case kExprStructGetU:
case kExprStructSet: {
- FieldIndexImmediate<validate> imm(decoder, pc + 2);
- return 2 + imm.length;
+ FieldIndexImmediate<validate> imm(decoder, pc + length);
+ return length + imm.length;
}
case kExprArrayNewWithRtt:
case kExprArrayNewDefault:
@@ -1710,39 +1819,39 @@ class WasmDecoder : public Decoder {
case kExprArrayGetU:
case kExprArraySet:
case kExprArrayLen: {
- ArrayIndexImmediate<validate> imm(decoder, pc + 2);
- return 2 + imm.length;
+ ArrayIndexImmediate<validate> imm(decoder, pc + length);
+ return length + imm.length;
}
case kExprBrOnCast: {
- BranchDepthImmediate<validate> imm(decoder, pc + 2);
- return 2 + imm.length;
+ BranchDepthImmediate<validate> imm(decoder, pc + length);
+ return length + imm.length;
}
case kExprRttCanon:
case kExprRttSub: {
// TODO(7748): Account for rtt.sub's additional immediates if
// they stick.
HeapTypeImmediate<validate> imm(WasmFeatures::All(), decoder,
- pc + 2);
- return 2 + imm.length;
+ pc + length);
+ return length + imm.length;
}
case kExprI31New:
case kExprI31GetS:
case kExprI31GetU:
- return 2;
+ return length;
case kExprRefTest:
case kExprRefCast: {
HeapTypeImmediate<validate> ht1(WasmFeatures::All(), decoder,
- pc + 2);
+ pc + length);
HeapTypeImmediate<validate> ht2(WasmFeatures::All(), decoder,
- pc + 2 + ht1.length);
- return 2 + ht1.length + ht2.length;
+ pc + length + ht1.length);
+ return length + ht1.length + ht2.length;
}
default:
// This is unreachable except for malformed modules.
- decoder->error(pc, "invalid gc opcode");
- return 2;
+ decoder->DecodeError(pc, "invalid gc opcode");
+ return length;
}
}
default:
@@ -1966,8 +2075,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
this->consume_bytes(locals_length);
for (uint32_t index = params_count; index < this->num_locals(); index++) {
if (!VALIDATE(this->local_type(index).is_defaultable())) {
- this->errorf(
- this->pc(),
+ this->DecodeError(
"Cannot define function-level local of non-defaultable type %s",
this->local_type(index).name().c_str());
return this->TraceFailed();
@@ -1980,9 +2088,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (!VALIDATE(control_.empty())) {
if (control_.size() > 1) {
- this->error(control_.back().pc, "unterminated control structure");
+ this->DecodeError(control_.back().pc(),
+ "unterminated control structure");
} else {
- this->error("function body must end with \"end\" opcode");
+ this->DecodeError("function body must end with \"end\" opcode");
}
return TraceFailed();
}
@@ -1994,19 +2103,24 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
bool TraceFailed() {
- TRACE("wasm-error module+%-6d func+%d: %s\n\n", this->error_.offset(),
- this->GetBufferRelativeOffset(this->error_.offset()),
- this->error_.message().c_str());
+ if (this->error_.offset()) {
+ TRACE("wasm-error module+%-6d func+%d: %s\n\n", this->error_.offset(),
+ this->GetBufferRelativeOffset(this->error_.offset()),
+ this->error_.message().c_str());
+ } else {
+ TRACE("wasm-error: %s\n\n", this->error_.message().c_str());
+ }
return false;
}
const char* SafeOpcodeNameAt(const byte* pc) {
+ if (!pc) return "<null>";
if (pc >= this->end_) return "<end>";
WasmOpcode opcode = static_cast<WasmOpcode>(*pc);
if (!WasmOpcodes::IsPrefixOpcode(opcode)) {
return WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(opcode));
}
- opcode = this->template read_prefixed_opcode<Decoder::kValidate>(pc);
+ opcode = this->template read_prefixed_opcode<Decoder::kFullValidation>(pc);
return WasmOpcodes::OpcodeName(opcode);
}
@@ -2067,16 +2181,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
bool CheckHasMemory() {
if (!VALIDATE(this->module_->has_memory)) {
- this->error(this->pc_ - 1, "memory instruction with no memory");
- return false;
- }
- return true;
- }
-
- bool CheckHasMemoryForAtomics() {
- if (FLAG_wasm_atomics_on_non_shared_memory && CheckHasMemory()) return true;
- if (!VALIDATE(this->module_->has_shared_memory)) {
- this->error(this->pc_ - 1, "Atomic opcodes used without shared memory");
+ this->DecodeError(this->pc_ - 1, "memory instruction with no memory");
return false;
}
return true;
@@ -2084,7 +2189,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
bool CheckSimdPostMvp(WasmOpcode opcode) {
if (!FLAG_wasm_simd_post_mvp && WasmOpcodes::IsSimdPostMvpOpcode(opcode)) {
- this->error(
+ this->DecodeError(
"simd opcode not available, enable with --wasm-simd-post-mvp");
return false;
}
@@ -2154,41 +2259,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Append(" | ");
for (size_t i = 0; i < decoder_->stack_size(); ++i) {
Value& val = decoder_->stack_[i];
- WasmOpcode val_opcode = static_cast<WasmOpcode>(*val.pc);
- if (WasmOpcodes::IsPrefixOpcode(val_opcode)) {
- val_opcode =
- decoder_->template read_prefixed_opcode<Decoder::kNoValidate>(
- val.pc);
- }
- Append(" %c@%d:%s", val.type.short_name(),
- static_cast<int>(val.pc - decoder_->start_),
- WasmOpcodes::OpcodeName(val_opcode));
- // If the decoder failed, don't try to decode the immediates, as this
- // can trigger a DCHECK failure.
- if (decoder_->failed()) continue;
- switch (val_opcode) {
- case kExprI32Const: {
- ImmI32Immediate<Decoder::kNoValidate> imm(decoder_, val.pc + 1);
- Append("[%d]", imm.value);
- break;
- }
- case kExprLocalGet:
- case kExprLocalSet:
- case kExprLocalTee: {
- LocalIndexImmediate<Decoder::kNoValidate> imm(decoder_, val.pc + 1);
- Append("[%u]", imm.index);
- break;
- }
- case kExprGlobalGet:
- case kExprGlobalSet: {
- GlobalIndexImmediate<Decoder::kNoValidate> imm(decoder_,
- val.pc + 1);
- Append("[%u]", imm.index);
- break;
- }
- default:
- break;
- }
+ Append(" %c", val.type.short_name());
}
}
@@ -2268,16 +2339,16 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DECODE(Catch) {
CHECK_PROTOTYPE_OPCODE(eh);
if (!VALIDATE(!control_.empty())) {
- this->error("catch does not match any try");
+ this->DecodeError("catch does not match any try");
return 0;
}
Control* c = &control_.back();
if (!VALIDATE(c->is_try())) {
- this->error("catch does not match any try");
+ this->DecodeError("catch does not match any try");
return 0;
}
if (!VALIDATE(c->is_incomplete_try())) {
- this->error("catch already present for try");
+ this->DecodeError("catch already present for try");
return 0;
}
c->kind = kControlTryCatch;
@@ -2298,12 +2369,13 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Control* c = control_at(imm.depth.depth);
Value exception = Pop(0, kWasmExnRef);
const WasmExceptionSig* sig = imm.index.exception->sig;
- size_t value_count = sig->parameter_count();
+ int value_count = static_cast<int>(sig->parameter_count());
// TODO(wasm): This operand stack mutation is an ugly hack to make
// both type checking here as well as environment merging in the
// graph builder interface work out of the box. We should introduce
// special handling for both and do minimal/no stack mutation here.
- for (size_t i = 0; i < value_count; ++i) Push(sig->GetParam(i));
+ EnsureStackSpace(value_count);
+ for (int i = 0; i < value_count; ++i) Push(sig->GetParam(i));
Vector<Value> values(stack_ + c->stack_depth, value_count);
TypeCheckBranchResult check_result = TypeCheckBranch(c, true);
if (this->failed()) return 0;
@@ -2314,7 +2386,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
} else if (check_result == kInvalidStack) {
return 0;
}
- for (int i = static_cast<int>(value_count) - 1; i >= 0; i--) Pop(i);
+ for (int i = value_count - 1; i >= 0; i--) Pop(i);
Value* pexception = Push(kWasmExnRef);
*pexception = exception;
return 1 + imm.length;
@@ -2330,6 +2402,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
TypeCheckBranchResult check_result = TypeCheckBranch(c, true);
if (V8_LIKELY(check_result == kReachableBranch)) {
switch (ref_object.type.kind()) {
+ case ValueType::kBottom:
+ // We are in unreachable code, just forward the bottom value.
case ValueType::kRef: {
Value* result = Push(ref_object.type);
CALL_INTERFACE(PassThrough, ref_object, result);
@@ -2347,7 +2421,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
break;
}
default:
- this->error(this->pc_, "invalid argument type to br_on_null");
+ this->DecodeError("invalid argument type to br_on_null");
return 0;
}
} else if (check_result == kInvalidStack) {
@@ -2361,8 +2435,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_ + 1);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
uint32_t old_local_count = this->num_locals();
- // Temporarily add the let-defined values
- // to the beginning of the function locals.
+ // Temporarily add the let-defined values to the beginning of the function
+ // locals.
uint32_t locals_length;
if (!this->DecodeLocals(this->pc() + 1 + imm.length, &locals_length, 0)) {
return 0;
@@ -2406,16 +2480,16 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DECODE(Else) {
if (!VALIDATE(!control_.empty())) {
- this->error("else does not match any if");
+ this->DecodeError("else does not match any if");
return 0;
}
Control* c = &control_.back();
if (!VALIDATE(c->is_if())) {
- this->error(this->pc_, "else does not match an if");
+ this->DecodeError("else does not match an if");
return 0;
}
if (!VALIDATE(c->is_onearmed_if())) {
- this->error(this->pc_, "else already present for if");
+ this->DecodeError("else already present for if");
return 0;
}
if (!TypeCheckFallThru()) return 0;
@@ -2430,18 +2504,18 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DECODE(End) {
if (!VALIDATE(!control_.empty())) {
- this->error("end does not match any if, try, or block");
+ this->DecodeError("end does not match any if, try, or block");
return 0;
}
Control* c = &control_.back();
if (!VALIDATE(!c->is_incomplete_try())) {
- this->error(this->pc_, "missing catch or catch-all in try");
+ this->DecodeError("missing catch or catch-all in try");
return 0;
}
if (c->is_onearmed_if()) {
if (!VALIDATE(c->end_merge.arity == c->start_merge.arity)) {
- this->error(c->pc,
- "start-arity and end-arity of one-armed if must match");
+ this->DecodeError(
+ c->pc(), "start-arity and end-arity of one-armed if must match");
return 0;
}
if (!TypeCheckOneArmedIf(c)) return 0;
@@ -2457,7 +2531,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (control_.size() == 1) {
// If at the last (implicit) control, check we are at end.
if (!VALIDATE(this->pc_ + 1 == this->end_)) {
- this->error(this->pc_ + 1, "trailing code after function end");
+ this->DecodeError(this->pc_ + 1, "trailing code after function end");
return 0;
}
// The result of the block is the return value.
@@ -2477,7 +2551,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Value tval = Pop(0, fval.type);
ValueType type = tval.type == kWasmBottom ? fval.type : tval.type;
if (!VALIDATE(!type.is_reference_type())) {
- this->error("select without type is only valid for value type inputs");
+ this->DecodeError(
+ "select without type is only valid for value type inputs");
return 0;
}
Value* result = Push(type);
@@ -2654,16 +2729,18 @@ class WasmFullDecoder : public WasmDecoder<validate> {
case ValueType::kOptRef:
CALL_INTERFACE_IF_REACHABLE(UnOp, kExprRefIsNull, value, result);
return 1;
+ case ValueType::kBottom:
+ // We are in unreachable code, the return value does not matter.
case ValueType::kRef:
// For non-nullable references, the result is always false.
CALL_INTERFACE_IF_REACHABLE(I32Const, result, 0);
return 1;
default:
if (validate) {
- this->errorf(this->pc_,
- "invalid argument type to ref.is_null. Expected "
- "reference type, got %s",
- value.type.name().c_str());
+ this->DecodeError(
+ "invalid argument type to ref.is_null. Expected reference type, "
+ "got %s",
+ value.type.name().c_str());
return 0;
}
UNREACHABLE();
@@ -2686,6 +2763,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
CHECK_PROTOTYPE_OPCODE(typed_funcref);
Value value = Pop(0);
switch (value.type.kind()) {
+ case ValueType::kBottom:
+ // We are in unreachable code. Forward the bottom value.
case ValueType::kRef: {
Value* result = Push(value.type);
CALL_INTERFACE_IF_REACHABLE(PassThrough, value, result);
@@ -2699,10 +2778,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
default:
if (validate) {
- this->errorf(this->pc_,
- "invalid agrument type to ref.as_non_null: Expected "
- "reference type, got %s",
- value.type.name().c_str());
+ this->DecodeError(
+              "invalid argument type to ref.as_non_null: Expected reference "
+ "type, got %s",
+ value.type.name().c_str());
}
return 0;
}
@@ -2751,8 +2830,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
GlobalIndexImmediate<validate> imm(this, this->pc_ + 1);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
if (!VALIDATE(imm.global->mutability)) {
- this->errorf(this->pc_, "immutable global #%u cannot be assigned",
- imm.index);
+ this->DecodeError("immutable global #%u cannot be assigned", imm.index);
return 0;
}
Value value = Pop(0, imm.type);
@@ -2818,7 +2896,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (!CheckHasMemory()) return 0;
MemoryIndexImmediate<validate> imm(this, this->pc_ + 1);
if (!VALIDATE(this->module_->origin == kWasmOrigin)) {
- this->error("grow_memory is not supported for asmjs modules");
+ this->DecodeError("grow_memory is not supported for asmjs modules");
return 0;
}
Value value = Pop(0, kWasmI32);
@@ -2860,9 +2938,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
CallFunctionImmediate<validate> imm(this, this->pc_ + 1);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
if (!VALIDATE(this->CanReturnCall(imm.sig))) {
- this->errorf(this->pc_, "%s: %s",
- WasmOpcodes::OpcodeName(kExprReturnCall),
- "tail call return types mismatch");
+ this->DecodeError("%s: %s", WasmOpcodes::OpcodeName(kExprReturnCall),
+ "tail call return types mismatch");
return 0;
}
ArgVector args = PopArgs(imm.sig);
@@ -2876,9 +2953,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
CallIndirectImmediate<validate> imm(this->enabled_, this, this->pc_ + 1);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
if (!VALIDATE(this->CanReturnCall(imm.sig))) {
- this->errorf(this->pc_, "%s: %s",
- WasmOpcodes::OpcodeName(kExprReturnCallIndirect),
- "tail call return types mismatch");
+ this->DecodeError("%s: %s",
+ WasmOpcodes::OpcodeName(kExprReturnCallIndirect),
+ "tail call return types mismatch");
return 0;
}
Value index = Pop(0, kWasmI32);
@@ -2892,12 +2969,17 @@ class WasmFullDecoder : public WasmDecoder<validate> {
CHECK_PROTOTYPE_OPCODE(typed_funcref);
Value func_ref = Pop(0);
ValueType func_type = func_ref.type;
- if (!func_type.is_object_reference_type() || !func_type.has_index() ||
- !this->module_->has_signature(func_type.ref_index())) {
- this->errorf(this->pc_,
- "call_ref: Expected function reference on top of stack, "
- "found %s of type %s instead",
- SafeOpcodeNameAt(func_ref.pc), func_type.name().c_str());
+ if (func_type == kWasmBottom) {
+ // We are in unreachable code, maintain the polymorphic stack.
+ return 1;
+ }
+ if (!VALIDATE(func_type.is_object_reference_type() &&
+ func_type.has_index() &&
+ this->module_->has_signature(func_type.ref_index()))) {
+ this->DecodeError(
+ "call_ref: Expected function reference on top of stack, found %s of "
+ "type %s instead",
+ SafeOpcodeNameAt(func_ref.pc()), func_type.name().c_str());
return 0;
}
const FunctionSig* sig = this->module_->signature(func_type.ref_index());
@@ -2913,12 +2995,17 @@ class WasmFullDecoder : public WasmDecoder<validate> {
CHECK_PROTOTYPE_OPCODE(return_call);
Value func_ref = Pop(0);
ValueType func_type = func_ref.type;
- if (!func_type.is_object_reference_type() || !func_type.has_index() ||
- !this->module_->has_signature(func_type.ref_index())) {
- this->errorf(this->pc_,
- "return_call_ref: Expected function reference on top of "
- "found %s of type %s instead",
- SafeOpcodeNameAt(func_ref.pc), func_type.name().c_str());
+ if (func_type == kWasmBottom) {
+ // We are in unreachable code, maintain the polymorphic stack.
+ return 1;
+ }
+ if (!VALIDATE(func_type.is_object_reference_type() &&
+ func_type.has_index() &&
+ this->module_->has_signature(func_type.ref_index()))) {
+ this->DecodeError(
+ "return_call_ref: Expected function reference on top of stack, found "
+ "%s of type %s instead",
+ SafeOpcodeNameAt(func_ref.pc()), func_type.name().c_str());
return 0;
}
const FunctionSig* sig = this->module_->signature(func_type.ref_index());
@@ -2930,10 +3017,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
DECODE(Numeric) {
- byte numeric_index =
- this->template read_u8<validate>(this->pc_ + 1, "numeric index");
- WasmOpcode full_opcode =
- static_cast<WasmOpcode>(kNumericPrefix << 8 | numeric_index);
+ uint32_t opcode_length = 0;
+ WasmOpcode full_opcode = this->template read_prefixed_opcode<validate>(
+ this->pc_, &opcode_length, "numeric index");
if (full_opcode == kExprTableGrow || full_opcode == kExprTableSize ||
full_opcode == kExprTableFill) {
CHECK_PROTOTYPE_OPCODE(reftypes);
@@ -2941,7 +3027,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
CHECK_PROTOTYPE_OPCODE(bulk_memory);
}
trace_msg->AppendOpcode(full_opcode);
- return DecodeNumericOpcode(full_opcode);
+ return DecodeNumericOpcode(full_opcode, opcode_length);
}
DECODE(Simd) {
@@ -2951,25 +3037,25 @@ class WasmFullDecoder : public WasmDecoder<validate> {
this->pc_, &opcode_length);
if (!VALIDATE(this->ok())) return 0;
trace_msg->AppendOpcode(full_opcode);
- return DecodeSimdOpcode(full_opcode, 1 + opcode_length);
+ return DecodeSimdOpcode(full_opcode, opcode_length);
}
DECODE(Atomic) {
CHECK_PROTOTYPE_OPCODE(threads);
- byte atomic_index =
- this->template read_u8<validate>(this->pc_ + 1, "atomic index");
- WasmOpcode full_opcode =
- static_cast<WasmOpcode>(kAtomicPrefix << 8 | atomic_index);
+ uint32_t opcode_length = 0;
+ WasmOpcode full_opcode = this->template read_prefixed_opcode<validate>(
+ this->pc_, &opcode_length, "atomic index");
trace_msg->AppendOpcode(full_opcode);
- return DecodeAtomicOpcode(full_opcode);
+ return DecodeAtomicOpcode(full_opcode, opcode_length);
}
DECODE(GC) {
CHECK_PROTOTYPE_OPCODE(gc);
- byte gc_index = this->template read_u8<validate>(this->pc_ + 1, "gc index");
- WasmOpcode full_opcode = static_cast<WasmOpcode>(kGCPrefix << 8 | gc_index);
+ uint32_t opcode_length = 0;
+ WasmOpcode full_opcode = this->template read_prefixed_opcode<validate>(
+ this->pc_, &opcode_length, "gc index");
trace_msg->AppendOpcode(full_opcode);
- return DecodeGCOpcode(full_opcode);
+ return DecodeGCOpcode(full_opcode, opcode_length);
}
#define SIMPLE_PROTOTYPE_CASE(name, opc, sig) \
@@ -2980,7 +3066,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DECODE(UnknownOrAsmJs) {
// Deal with special asmjs opcodes.
if (!VALIDATE(is_asmjs_module(this->module_))) {
- this->errorf(this->pc(), "Invalid opcode 0x%x", opcode);
+ this->DecodeError("Invalid opcode 0x%x", opcode);
return 0;
}
const FunctionSig* sig = WasmOpcodes::AsmjsSignature(opcode);
@@ -3108,7 +3194,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
if (!VALIDATE(this->pc_ == this->end_)) {
- this->error("Beyond end of code");
+ this->DecodeError("Beyond end of code");
}
}
@@ -3207,7 +3293,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (!CheckHasMemory()) return 0;
MemoryAccessImmediate<validate> imm(this, this->pc_ + prefix_len,
type.size_log_2());
- Value index = Pop(0, kWasmI32);
+ ValueType index_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
+ Value index = Pop(0, index_type);
Value* result = Push(type.value_type());
CALL_INTERFACE_IF_REACHABLE(LoadMem, type, imm, index, result);
return prefix_len + imm.length;
@@ -3221,27 +3308,58 @@ class WasmFullDecoder : public WasmDecoder<validate> {
transform == LoadTransformationKind::kExtend ? 3 : type.size_log_2();
MemoryAccessImmediate<validate> imm(this, this->pc_ + opcode_length,
max_alignment);
- Value index = Pop(0, kWasmI32);
+ ValueType index_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
+ Value index = Pop(0, index_type);
Value* result = Push(kWasmS128);
CALL_INTERFACE_IF_REACHABLE(LoadTransform, type, transform, imm, index,
result);
return opcode_length + imm.length;
}
+ int DecodeLoadLane(LoadType type, uint32_t opcode_length) {
+ if (!CheckHasMemory()) return 0;
+ MemoryAccessImmediate<validate> mem_imm(this, this->pc_ + opcode_length,
+ type.size_log_2());
+ SimdLaneImmediate<validate> lane_imm(
+ this, this->pc_ + opcode_length + mem_imm.length);
+ Value v128 = Pop(1, kWasmS128);
+ Value index = Pop(0, kWasmI32);
+
+ Value* result = Push(kWasmS128);
+ CALL_INTERFACE_IF_REACHABLE(LoadLane, type, v128, index, mem_imm,
+ lane_imm.lane, result);
+ return opcode_length + mem_imm.length + lane_imm.length;
+ }
+
+ int DecodeStoreLane(StoreType type, uint32_t opcode_length) {
+ if (!CheckHasMemory()) return 0;
+ MemoryAccessImmediate<validate> mem_imm(this, this->pc_ + opcode_length,
+ type.size_log_2());
+ SimdLaneImmediate<validate> lane_imm(
+ this, this->pc_ + opcode_length + mem_imm.length);
+ Value v128 = Pop(1, kWasmS128);
+ Value index = Pop(0, kWasmI32);
+
+ CALL_INTERFACE_IF_REACHABLE(StoreLane, type, mem_imm, index, v128,
+ lane_imm.lane);
+ return opcode_length + mem_imm.length + lane_imm.length;
+ }
+
int DecodeStoreMem(StoreType store, int prefix_len = 1) {
if (!CheckHasMemory()) return 0;
MemoryAccessImmediate<validate> imm(this, this->pc_ + prefix_len,
store.size_log_2());
Value value = Pop(1, store.value_type());
- Value index = Pop(0, kWasmI32);
+ ValueType index_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
+ Value index = Pop(0, index_type);
CALL_INTERFACE_IF_REACHABLE(StoreMem, store, imm, index, value);
return prefix_len + imm.length;
}
bool ValidateBrTableTarget(uint32_t target, const byte* pos, int index) {
if (!VALIDATE(target < this->control_.size())) {
- this->errorf(pos, "improper branch in br_table target %u (depth %u)",
- index, target);
+ this->DecodeError(pos, "improper branch in br_table target %u (depth %u)",
+ index, target);
return false;
}
return true;
@@ -3263,10 +3381,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
int br_arity = merge->arity;
// First we check if the arities match.
if (!VALIDATE(br_arity == static_cast<int>(result_types->size()))) {
- this->errorf(pos,
- "inconsistent arity in br_table target %u (previous was "
- "%zu, this one is %u)",
- index, result_types->size(), br_arity);
+ this->DecodeError(pos,
+ "inconsistent arity in br_table target %u (previous "
+ "was %zu, this one is %u)",
+ index, result_types->size(), br_arity);
return false;
}
@@ -3277,21 +3395,21 @@ class WasmFullDecoder : public WasmDecoder<validate> {
(*result_types)[i] =
CommonSubtype((*result_types)[i], (*merge)[i].type, this->module_);
if (!VALIDATE((*result_types)[i] != kWasmBottom)) {
- this->errorf(pos,
- "inconsistent type in br_table target %u (previous "
- "was %s, this one is %s)",
- index, type.name().c_str(),
- (*merge)[i].type.name().c_str());
+ this->DecodeError(pos,
+ "inconsistent type in br_table target %u (previous "
+ "was %s, this one is %s)",
+ index, type.name().c_str(),
+ (*merge)[i].type.name().c_str());
return false;
}
} else {
        // All targets must have the same signature.
if (!VALIDATE((*result_types)[i] == (*merge)[i].type)) {
- this->errorf(pos,
- "inconsistent type in br_table target %u (previous "
- "was %s, this one is %s)",
- index, (*result_types)[i].name().c_str(),
- (*merge)[i].type.name().c_str());
+ this->DecodeError(pos,
+ "inconsistent type in br_table target %u (previous "
+ "was %s, this one is %s)",
+ index, (*result_types)[i].name().c_str(),
+ (*merge)[i].type.name().c_str());
return false;
}
}
@@ -3306,10 +3424,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
static_cast<int>(stack_size()) - control_.back().stack_depth;
// There have to be enough values on the stack.
if (!VALIDATE(available >= br_arity)) {
- this->errorf(this->pc_,
- "expected %u elements on the stack for branch to "
- "@%d, found %u",
- br_arity, startrel(control_.back().pc), available);
+ this->DecodeError(
+ "expected %u elements on the stack for branch to @%d, found %u",
+ br_arity, startrel(control_.back().pc()), available);
return false;
}
Value* stack_values = stack_end_ - br_arity;
@@ -3317,9 +3434,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
for (int i = 0; i < br_arity; ++i) {
Value& val = stack_values[i];
if (!VALIDATE(IsSubtypeOf(val.type, result_types[i], this->module_))) {
- this->errorf(this->pc_,
- "type error in merge[%u] (expected %s, got %s)", i,
- result_types[i].name().c_str(), val.type.name().c_str());
+ this->DecodeError("type error in merge[%u] (expected %s, got %s)", i,
+ result_types[i].name().c_str(),
+ val.type.name().c_str());
return false;
}
}
@@ -3408,17 +3525,11 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return DecodeLoadMem(LoadType::kS128Load, opcode_length);
case kExprS128StoreMem:
return DecodeStoreMem(StoreType::kS128Store, opcode_length);
- case kExprS128LoadMem32Zero:
- if (!CheckSimdPostMvp(opcode)) {
- return 0;
- }
+ case kExprS128Load32Zero:
return DecodeLoadTransformMem(LoadType::kI32Load,
LoadTransformationKind::kZeroExtend,
opcode_length);
- case kExprS128LoadMem64Zero:
- if (!CheckSimdPostMvp(opcode)) {
- return 0;
- }
+ case kExprS128Load64Zero:
return DecodeLoadTransformMem(LoadType::kI64Load,
LoadTransformationKind::kZeroExtend,
opcode_length);
@@ -3460,6 +3571,30 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return DecodeLoadTransformMem(LoadType::kI64Load32U,
LoadTransformationKind::kExtend,
opcode_length);
+ case kExprS128Load8Lane: {
+ return DecodeLoadLane(LoadType::kI32Load8S, opcode_length);
+ }
+ case kExprS128Load16Lane: {
+ return DecodeLoadLane(LoadType::kI32Load16S, opcode_length);
+ }
+ case kExprS128Load32Lane: {
+ return DecodeLoadLane(LoadType::kI32Load, opcode_length);
+ }
+ case kExprS128Load64Lane: {
+ return DecodeLoadLane(LoadType::kI64Load, opcode_length);
+ }
+ case kExprS128Store8Lane: {
+ return DecodeStoreLane(StoreType::kI32Store8, opcode_length);
+ }
+ case kExprS128Store16Lane: {
+ return DecodeStoreLane(StoreType::kI32Store16, opcode_length);
+ }
+ case kExprS128Store32Lane: {
+ return DecodeStoreLane(StoreType::kI32Store, opcode_length);
+ }
+ case kExprS128Store64Lane: {
+ return DecodeStoreLane(StoreType::kI64Store, opcode_length);
+ }
case kExprS128Const:
return SimdConstOp(opcode_length);
default: {
@@ -3468,7 +3603,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
const FunctionSig* sig = WasmOpcodes::Signature(opcode);
if (!VALIDATE(sig != nullptr)) {
- this->error("invalid simd opcode");
+ this->DecodeError("invalid simd opcode");
return 0;
}
ArgVector args = PopArgs(sig);
@@ -3480,98 +3615,98 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
}
- int DecodeGCOpcode(WasmOpcode opcode) {
+ int DecodeGCOpcode(WasmOpcode opcode, uint32_t opcode_length) {
switch (opcode) {
case kExprStructNewWithRtt: {
- StructIndexImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ StructIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
Value rtt = Pop(imm.struct_type->field_count());
- if (!VALIDATE(rtt.type.kind() == ValueType::kRtt)) {
- this->errorf(this->pc_,
- "struct.new_with_rtt expected rtt, found %s of type %s",
- SafeOpcodeNameAt(rtt.pc), rtt.type.name().c_str());
+ if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
+ this->DecodeError(
+ "struct.new_with_rtt expected rtt, found %s of type %s",
+ SafeOpcodeNameAt(rtt.pc()), rtt.type.name().c_str());
return 0;
}
// TODO(7748): Drop this check if {imm} is dropped from the proposal
// à la https://github.com/WebAssembly/function-references/pull/31.
- if (!VALIDATE(rtt.type.heap_representation() == imm.index)) {
- this->errorf(this->pc_,
- "struct.new_with_rtt expected rtt for type %d, found "
- "rtt for type %s",
- imm.index, rtt.type.heap_type().name().c_str());
+ if (!VALIDATE(rtt.type.is_bottom() ||
+ rtt.type.heap_representation() == imm.index)) {
+ this->DecodeError(
+ "struct.new_with_rtt expected rtt for type %d, found rtt for "
+ "type %s",
+ imm.index, rtt.type.heap_type().name().c_str());
return 0;
}
ArgVector args = PopArgs(imm.struct_type);
Value* value = Push(ValueType::Ref(imm.index, kNonNullable));
CALL_INTERFACE_IF_REACHABLE(StructNewWithRtt, imm, rtt, args.begin(),
value);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprStructNewDefault: {
- StructIndexImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ StructIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
if (validate) {
for (uint32_t i = 0; i < imm.struct_type->field_count(); i++) {
ValueType ftype = imm.struct_type->field(i);
if (!VALIDATE(ftype.is_defaultable())) {
- this->errorf(this->pc_,
- "struct.new_default_with_rtt: struct type %d has "
- "non-defaultable type %s for field %d",
- imm.index, ftype.name().c_str(), i);
+ this->DecodeError(
+ "struct.new_default_with_rtt: struct type %d has "
+ "non-defaultable type %s for field %d",
+ imm.index, ftype.name().c_str(), i);
return 0;
}
}
}
Value rtt = Pop(0);
- if (!VALIDATE(rtt.type.kind() == ValueType::kRtt)) {
- this->errorf(
- this->pc_,
+ if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
+ this->DecodeError(
"struct.new_default_with_rtt expected rtt, found %s of type %s",
- SafeOpcodeNameAt(rtt.pc), rtt.type.name().c_str());
+ SafeOpcodeNameAt(rtt.pc()), rtt.type.name().c_str());
return 0;
}
// TODO(7748): Drop this check if {imm} is dropped from the proposal
// à la https://github.com/WebAssembly/function-references/pull/31.
- if (!VALIDATE(rtt.type.heap_representation() == imm.index)) {
- this->errorf(
- this->pc_,
- "struct.new_default_with_rtt expected rtt for type %d, found "
- "rtt for type %s",
+ if (!VALIDATE(rtt.type.is_bottom() ||
+ rtt.type.heap_representation() == imm.index)) {
+ this->DecodeError(
+ "struct.new_default_with_rtt expected rtt for type %d, found rtt "
+ "for type %s",
imm.index, rtt.type.heap_type().name().c_str());
return 0;
}
Value* value = Push(ValueType::Ref(imm.index, kNonNullable));
CALL_INTERFACE_IF_REACHABLE(StructNewDefault, imm, rtt, value);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprStructGet: {
- FieldIndexImmediate<validate> field(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, field)) return 0;
+ FieldIndexImmediate<validate> field(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, field)) return 0;
ValueType field_type =
field.struct_index.struct_type->field(field.index);
if (!VALIDATE(!field_type.is_packed())) {
- this->error(this->pc_,
- "struct.get used with a field of packed type. "
- "Use struct.get_s or struct.get_u instead.");
+ this->DecodeError(
+ "struct.get used with a field of packed type. Use struct.get_s "
+ "or struct.get_u instead.");
return 0;
}
Value struct_obj =
Pop(0, ValueType::Ref(field.struct_index.index, kNullable));
Value* value = Push(field_type);
CALL_INTERFACE_IF_REACHABLE(StructGet, struct_obj, field, true, value);
- return 2 + field.length;
+ return opcode_length + field.length;
}
case kExprStructGetU:
case kExprStructGetS: {
- FieldIndexImmediate<validate> field(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, field)) return 0;
+ FieldIndexImmediate<validate> field(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, field)) return 0;
ValueType field_type =
field.struct_index.struct_type->field(field.index);
if (!VALIDATE(field_type.is_packed())) {
- this->errorf(this->pc_,
- "%s is only valid for packed struct fields. "
- "Use struct.get instead.",
- WasmOpcodes::OpcodeName(opcode));
+ this->DecodeError(
+ "%s is only valid for packed struct fields. Use struct.get "
+ "instead.",
+ WasmOpcodes::OpcodeName(opcode));
return 0;
}
Value struct_obj =
@@ -3579,39 +3714,42 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Value* value = Push(field_type.Unpacked());
CALL_INTERFACE_IF_REACHABLE(StructGet, struct_obj, field,
opcode == kExprStructGetS, value);
- return 2 + field.length;
+ return opcode_length + field.length;
}
case kExprStructSet: {
- FieldIndexImmediate<validate> field(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, field)) return 0;
+ FieldIndexImmediate<validate> field(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, field)) return 0;
const StructType* struct_type = field.struct_index.struct_type;
if (!VALIDATE(struct_type->mutability(field.index))) {
- this->error(this->pc_, "setting immutable struct field");
+ this->DecodeError("setting immutable struct field");
return 0;
}
Value field_value = Pop(1, struct_type->field(field.index).Unpacked());
Value struct_obj =
Pop(0, ValueType::Ref(field.struct_index.index, kNullable));
CALL_INTERFACE_IF_REACHABLE(StructSet, struct_obj, field, field_value);
- return 2 + field.length;
+ return opcode_length + field.length;
}
case kExprArrayNewWithRtt: {
- ArrayIndexImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ ArrayIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
Value rtt = Pop(2);
- if (!VALIDATE(rtt.type.kind() == ValueType::kRtt)) {
- this->errorf(this->pc_ + 2,
- "array.new_with_rtt expected rtt, found %s of type %s",
- SafeOpcodeNameAt(rtt.pc), rtt.type.name().c_str());
+ if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
+ this->DecodeError(
+ this->pc_ + opcode_length,
+ "array.new_with_rtt expected rtt, found %s of type %s",
+ SafeOpcodeNameAt(rtt.pc()), rtt.type.name().c_str());
return 0;
}
// TODO(7748): Drop this check if {imm} is dropped from the proposal
// à la https://github.com/WebAssembly/function-references/pull/31.
- if (!VALIDATE(rtt.type.heap_representation() == imm.index)) {
- this->errorf(this->pc_ + 2,
- "array.new_with_rtt expected rtt for type %d, found "
- "rtt for type %s",
- imm.index, rtt.type.heap_type().name().c_str());
+ if (!VALIDATE(rtt.type.is_bottom() ||
+ rtt.type.heap_representation() == imm.index)) {
+ this->DecodeError(
+ this->pc_ + opcode_length,
+ "array.new_with_rtt expected rtt for type %d, found "
+ "rtt for type %s",
+ imm.index, rtt.type.heap_type().name().c_str());
return 0;
}
Value length = Pop(1, kWasmI32);
@@ -3619,48 +3757,47 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Value* value = Push(ValueType::Ref(imm.index, kNonNullable));
CALL_INTERFACE_IF_REACHABLE(ArrayNewWithRtt, imm, length, initial_value,
rtt, value);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprArrayNewDefault: {
- ArrayIndexImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ ArrayIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
if (!VALIDATE(imm.array_type->element_type().is_defaultable())) {
- this->errorf(this->pc_,
- "array.new_default_with_rtt: array type %d has "
- "non-defaultable element type %s",
- imm.index,
- imm.array_type->element_type().name().c_str());
+ this->DecodeError(
+ "array.new_default_with_rtt: array type %d has "
+ "non-defaultable element type %s",
+ imm.index, imm.array_type->element_type().name().c_str());
return 0;
}
Value rtt = Pop(1);
- if (!VALIDATE(rtt.type.kind() == ValueType::kRtt)) {
- this->errorf(
- this->pc_ + 2,
+ if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
+ this->DecodeError(
+ this->pc_ + opcode_length,
"array.new_default_with_rtt expected rtt, found %s of type %s",
- SafeOpcodeNameAt(rtt.pc), rtt.type.name().c_str());
+ SafeOpcodeNameAt(rtt.pc()), rtt.type.name().c_str());
return 0;
}
// TODO(7748): Drop this check if {imm} is dropped from the proposal
// à la https://github.com/WebAssembly/function-references/pull/31.
- if (!VALIDATE(rtt.type.heap_representation() == imm.index)) {
- this->errorf(this->pc_ + 2,
- "array.new_default_with_rtt expected rtt for type %d, "
- "found rtt for type %s",
- imm.index, rtt.type.heap_type().name().c_str());
+ if (!VALIDATE(rtt.type.is_bottom() ||
+ rtt.type.heap_representation() == imm.index)) {
+ this->DecodeError(this->pc_ + opcode_length,
+ "array.new_default_with_rtt expected rtt for type "
+ "%d, found rtt for type %s",
+ imm.index, rtt.type.heap_type().name().c_str());
return 0;
}
Value length = Pop(0, kWasmI32);
Value* value = Push(ValueType::Ref(imm.index, kNonNullable));
CALL_INTERFACE_IF_REACHABLE(ArrayNewDefault, imm, length, rtt, value);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprArrayGetS:
case kExprArrayGetU: {
- ArrayIndexImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ ArrayIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
if (!VALIDATE(imm.array_type->element_type().is_packed())) {
- this->errorf(
- this->pc_,
+ this->DecodeError(
"%s is only valid for packed arrays. Use array.get instead.",
WasmOpcodes::OpcodeName(opcode));
return 0;
@@ -3670,15 +3807,15 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Value* value = Push(imm.array_type->element_type().Unpacked());
CALL_INTERFACE_IF_REACHABLE(ArrayGet, array_obj, imm, index,
opcode == kExprArrayGetS, value);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprArrayGet: {
- ArrayIndexImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ ArrayIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
if (!VALIDATE(!imm.array_type->element_type().is_packed())) {
- this->error(this->pc_,
- "array.get used with a field of packed type. "
- "Use array.get_s or array.get_u instead.");
+ this->DecodeError(
+ "array.get used with a field of packed type. Use array.get_s or "
+ "array.get_u instead.");
return 0;
}
Value index = Pop(1, kWasmI32);
@@ -3686,53 +3823,54 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Value* value = Push(imm.array_type->element_type());
CALL_INTERFACE_IF_REACHABLE(ArrayGet, array_obj, imm, index, true,
value);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprArraySet: {
- ArrayIndexImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ ArrayIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
if (!VALIDATE(imm.array_type->mutability())) {
- this->error(this->pc_, "setting element of immutable array");
+ this->DecodeError("setting element of immutable array");
return 0;
}
Value value = Pop(2, imm.array_type->element_type().Unpacked());
Value index = Pop(1, kWasmI32);
Value array_obj = Pop(0, ValueType::Ref(imm.index, kNullable));
CALL_INTERFACE_IF_REACHABLE(ArraySet, array_obj, imm, index, value);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprArrayLen: {
- ArrayIndexImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ ArrayIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
Value array_obj = Pop(0, ValueType::Ref(imm.index, kNullable));
Value* value = Push(kWasmI32);
CALL_INTERFACE_IF_REACHABLE(ArrayLen, array_obj, value);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprI31New: {
Value input = Pop(0, kWasmI32);
Value* value = Push(kWasmI31Ref);
CALL_INTERFACE_IF_REACHABLE(I31New, input, value);
- return 2;
+ return opcode_length;
}
case kExprI31GetS: {
Value i31 = Pop(0, kWasmI31Ref);
Value* value = Push(kWasmI32);
CALL_INTERFACE_IF_REACHABLE(I31GetS, i31, value);
- return 2;
+ return opcode_length;
}
case kExprI31GetU: {
Value i31 = Pop(0, kWasmI31Ref);
Value* value = Push(kWasmI32);
CALL_INTERFACE_IF_REACHABLE(I31GetU, i31, value);
- return 2;
+ return opcode_length;
}
case kExprRttCanon: {
- HeapTypeImmediate<validate> imm(this->enabled_, this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ HeapTypeImmediate<validate> imm(this->enabled_, this,
+ this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
Value* value = Push(ValueType::Rtt(imm.type, 1));
CALL_INTERFACE_IF_REACHABLE(RttCanon, imm, value);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprRttSub: {
// TODO(7748): The proposal currently includes additional immediates
@@ -3741,29 +3879,35 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// If these immediates don't get dropped (in the spirit of
// https://github.com/WebAssembly/function-references/pull/31 ),
// implement them here.
- HeapTypeImmediate<validate> imm(this->enabled_, this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ HeapTypeImmediate<validate> imm(this->enabled_, this,
+ this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
Value parent = Pop(0);
- // TODO(7748): Consider exposing "IsSubtypeOfHeap(HeapType t1, t2)" so
- // we can avoid creating (ref heaptype) wrappers here.
- if (!VALIDATE(parent.type.kind() == ValueType::kRtt &&
- IsSubtypeOf(
- ValueType::Ref(imm.type, kNonNullable),
- ValueType::Ref(parent.type.heap_type(), kNonNullable),
- this->module_))) {
- this->error(this->pc_, "rtt.sub requires a supertype rtt on stack");
- return 0;
+ if (parent.type.is_bottom()) {
+ Push(kWasmBottom);
+ } else {
+ // TODO(7748): Consider exposing "IsSubtypeOfHeap(HeapType t1, t2)" so
+ // we can avoid creating (ref heaptype) wrappers here.
+ if (!VALIDATE(parent.type.is_rtt() &&
+ IsSubtypeOf(ValueType::Ref(imm.type, kNonNullable),
+ ValueType::Ref(parent.type.heap_type(),
+ kNonNullable),
+ this->module_))) {
+ this->DecodeError("rtt.sub requires a supertype rtt on stack");
+ return 0;
+ }
+ Value* value =
+ Push(ValueType::Rtt(imm.type, parent.type.depth() + 1));
+ CALL_INTERFACE_IF_REACHABLE(RttSub, imm, parent, value);
}
- Value* value = Push(ValueType::Rtt(imm.type, parent.type.depth() + 1));
- CALL_INTERFACE_IF_REACHABLE(RttSub, imm, parent, value);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprRefTest: {
// "Tests whether {obj}'s runtime type is a runtime subtype of {rtt}."
HeapTypeImmediate<validate> obj_type(this->enabled_, this,
- this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, obj_type)) return 0;
- int len = 2 + obj_type.length;
+ this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, obj_type)) return 0;
+ int len = opcode_length + obj_type.length;
HeapTypeImmediate<validate> rtt_type(this->enabled_, this,
this->pc_ + len);
if (!this->Validate(this->pc_ + len, rtt_type)) return 0;
@@ -3772,16 +3916,17 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (!VALIDATE(IsSubtypeOf(ValueType::Ref(rtt_type.type, kNonNullable),
ValueType::Ref(obj_type.type, kNonNullable),
this->module_))) {
- this->errorf(this->pc_,
- "ref.test: rtt type must be subtype of object type");
+ this->DecodeError(
+ "ref.test: rtt type must be subtype of object type");
return 0;
}
Value rtt = Pop(1);
- if (!VALIDATE(rtt.type.kind() == ValueType::kRtt &&
- rtt.type.heap_type() == rtt_type.type)) {
- this->errorf(this->pc_,
- "ref.test: expected rtt for type %s but got %s",
- rtt_type.type.name().c_str(), rtt.type.name().c_str());
+ if (!VALIDATE(
+ (rtt.type.is_rtt() && rtt.type.heap_type() == rtt_type.type) ||
+ rtt.type == kWasmBottom)) {
+ this->DecodeError("ref.test: expected rtt for type %s but got %s",
+ rtt_type.type.name().c_str(),
+ rtt.type.name().c_str());
return 0;
}
Value obj = Pop(0, ValueType::Ref(obj_type.type, kNullable));
@@ -3791,9 +3936,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
case kExprRefCast: {
HeapTypeImmediate<validate> obj_type(this->enabled_, this,
- this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, obj_type)) return 0;
- int len = 2 + obj_type.length;
+ this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, obj_type)) return 0;
+ int len = opcode_length + obj_type.length;
HeapTypeImmediate<validate> rtt_type(this->enabled_, this,
this->pc_ + len);
if (!this->Validate(this->pc_ + len, rtt_type)) return 0;
@@ -3801,16 +3946,17 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (!VALIDATE(IsSubtypeOf(ValueType::Ref(rtt_type.type, kNonNullable),
ValueType::Ref(obj_type.type, kNonNullable),
this->module_))) {
- this->errorf(this->pc_,
- "ref.cast: rtt type must be subtype of object type");
+ this->DecodeError(
+ "ref.cast: rtt type must be subtype of object type");
return 0;
}
Value rtt = Pop(1);
- if (!VALIDATE(rtt.type.kind() == ValueType::kRtt &&
- rtt.type.heap_type() == rtt_type.type)) {
- this->errorf(this->pc_,
- "ref.cast: expected rtt for type %s but got %s",
- rtt_type.type.name().c_str(), rtt.type.name().c_str());
+ if (!VALIDATE(
+ (rtt.type.is_rtt() && rtt.type.heap_type() == rtt_type.type) ||
+ rtt.type == kWasmBottom)) {
+ this->DecodeError("ref.cast: expected rtt for type %s but got %s",
+ rtt_type.type.name().c_str(),
+ rtt.type.name().c_str());
return 0;
}
Value obj = Pop(0, ValueType::Ref(obj_type.type, kNullable));
@@ -3819,34 +3965,40 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return len;
}
case kExprBrOnCast: {
- BranchDepthImmediate<validate> branch_depth(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, branch_depth, control_.size())) {
+ BranchDepthImmediate<validate> branch_depth(this,
+ this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, branch_depth,
+ control_.size())) {
return 0;
}
// TODO(7748): If the heap type immediates remain in the spec, read
// them here.
Value rtt = Pop(1);
- if (!VALIDATE(rtt.type.kind() == ValueType::kRtt)) {
- this->error(this->pc_, "br_on_cast[1]: expected rtt on stack");
+ if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
+ this->DecodeError("br_on_cast[1]: expected rtt on stack");
return 0;
}
Value obj = Pop(0);
- if (!VALIDATE(obj.type.is_object_reference_type())) {
- this->error(this->pc_, "br_on_cast[0]: expected reference on stack");
+ if (!VALIDATE(obj.type.is_object_reference_type() ||
+ rtt.type.is_bottom())) {
+ this->DecodeError("br_on_cast[0]: expected reference on stack");
return 0;
}
// The static type of {obj} must be a supertype of {rtt}'s type.
if (!VALIDATE(
+ rtt.type.is_bottom() || obj.type.is_bottom() ||
IsSubtypeOf(ValueType::Ref(rtt.type.heap_type(), kNonNullable),
ValueType::Ref(obj.type.heap_type(), kNonNullable),
this->module_))) {
- this->error(this->pc_,
- "br_on_cast: rtt type must be a subtype of object type");
+ this->DecodeError(
+ "br_on_cast: rtt type must be a subtype of object type");
return 0;
}
Control* c = control_at(branch_depth.depth);
Value* result_on_branch =
- Push(ValueType::Ref(rtt.type.heap_type(), kNonNullable));
+ Push(rtt.type.is_bottom()
+ ? kWasmBottom
+ : ValueType::Ref(rtt.type.heap_type(), kNonNullable));
TypeCheckBranchResult check_result = TypeCheckBranch(c, true);
if (V8_LIKELY(check_result == kReachableBranch)) {
CALL_INTERFACE(BrOnCast, obj, rtt, result_on_branch,
@@ -3858,19 +4010,19 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Pop(0); // Drop {result_on_branch}, restore original value.
Value* result_on_fallthrough = Push(obj.type);
*result_on_fallthrough = obj;
- return 2 + branch_depth.length;
+ return opcode_length + branch_depth.length;
}
default:
- this->error("invalid gc opcode");
+ this->DecodeError("invalid gc opcode");
return 0;
}
}
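A recurring change in the GC-opcode cases above (array.new_with_rtt, rtt.sub, ref.test, ref.cast, br_on_cast) is that checks which previously required rtt.type.kind() == ValueType::kRtt now also accept a bottom type. Popping from the polymorphic stack of unreachable code yields bottom-typed values, and those should not trigger spurious validation errors. A minimal sketch of the idea, using simplified stand-in types rather than V8's ValueType API:

#include <cassert>

// Simplified stand-ins for illustration only (not V8's ValueType API).
enum class Kind { kRtt, kRef, kI32, kBottom };

struct SimpleType {
  Kind kind;
  bool is_rtt() const { return kind == Kind::kRtt; }
  bool is_bottom() const { return kind == Kind::kBottom; }
};

// In unreachable code the operand stack is polymorphic: popping past its end
// yields a bottom-typed value, so a check that insists on is_rtt() alone
// would reject valid (but unreachable) code. Bottom is therefore accepted.
bool ValidRttOperand(SimpleType t) { return t.is_rtt() || t.is_bottom(); }

int main() {
  assert(ValidRttOperand({Kind::kRtt}));
  assert(ValidRttOperand({Kind::kBottom}));  // unreachable code: accepted
  assert(!ValidRttOperand({Kind::kI32}));    // genuine type error
  return 0;
}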
- uint32_t DecodeAtomicOpcode(WasmOpcode opcode) {
+ uint32_t DecodeAtomicOpcode(WasmOpcode opcode, uint32_t opcode_length) {
ValueType ret_type;
const FunctionSig* sig = WasmOpcodes::Signature(opcode);
if (!VALIDATE(sig != nullptr)) {
- this->error("invalid atomic opcode");
+ this->DecodeError("invalid atomic opcode");
return 0;
}
MachineType memtype;
@@ -3892,31 +4044,37 @@ class WasmFullDecoder : public WasmDecoder<validate> {
ATOMIC_OP_LIST(CASE_ATOMIC_OP)
#undef CASE_ATOMIC_OP
case kExprAtomicFence: {
- byte zero = this->template read_u8<validate>(this->pc_ + 2, "zero");
+ byte zero =
+ this->template read_u8<validate>(this->pc_ + opcode_length, "zero");
if (!VALIDATE(zero == 0)) {
- this->error(this->pc_ + 2, "invalid atomic operand");
+ this->DecodeError(this->pc_ + opcode_length,
+ "invalid atomic operand");
return 0;
}
CALL_INTERFACE_IF_REACHABLE(AtomicFence);
- return 3;
+ return 1 + opcode_length;
}
default:
- this->error("invalid atomic opcode");
+ this->DecodeError("invalid atomic opcode");
return 0;
}
- if (!CheckHasMemoryForAtomics()) return 0;
+ if (!CheckHasMemory()) return 0;
MemoryAccessImmediate<validate> imm(
- this, this->pc_ + 2, ElementSizeLog2Of(memtype.representation()));
+ this, this->pc_ + opcode_length,
+ ElementSizeLog2Of(memtype.representation()));
+ // TODO(10949): Fix this for memory64 (index type should be kWasmI64
+ // then).
+ CHECK(!this->module_->is_memory64);
ArgVector args = PopArgs(sig);
Value* result = ret_type == kWasmStmt ? nullptr : Push(GetReturnType(sig));
CALL_INTERFACE_IF_REACHABLE(AtomicOp, opcode, VectorOf(args), imm, result);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
- unsigned DecodeNumericOpcode(WasmOpcode opcode) {
+ unsigned DecodeNumericOpcode(WasmOpcode opcode, uint32_t opcode_length) {
const FunctionSig* sig = WasmOpcodes::Signature(opcode);
if (!VALIDATE(sig != nullptr)) {
- this->error("invalid numeric opcode");
+ this->DecodeError("invalid numeric opcode");
return 0;
}
switch (opcode) {
@@ -3927,88 +4085,90 @@ class WasmFullDecoder : public WasmDecoder<validate> {
case kExprI64SConvertSatF32:
case kExprI64UConvertSatF32:
case kExprI64SConvertSatF64:
- case kExprI64UConvertSatF64:
- return 1 + BuildSimpleOperator(opcode, sig);
+ case kExprI64UConvertSatF64: {
+ BuildSimpleOperator(opcode, sig);
+ return opcode_length;
+ }
case kExprMemoryInit: {
- MemoryInitImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ MemoryInitImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
Value size = Pop(2, sig->GetParam(2));
Value src = Pop(1, sig->GetParam(1));
Value dst = Pop(0, sig->GetParam(0));
CALL_INTERFACE_IF_REACHABLE(MemoryInit, imm, dst, src, size);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprDataDrop: {
- DataDropImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ DataDropImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
CALL_INTERFACE_IF_REACHABLE(DataDrop, imm);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprMemoryCopy: {
- MemoryCopyImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ MemoryCopyImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
Value size = Pop(2, sig->GetParam(2));
Value src = Pop(1, sig->GetParam(1));
Value dst = Pop(0, sig->GetParam(0));
CALL_INTERFACE_IF_REACHABLE(MemoryCopy, imm, dst, src, size);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprMemoryFill: {
- MemoryIndexImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ MemoryIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
Value size = Pop(2, sig->GetParam(2));
Value value = Pop(1, sig->GetParam(1));
Value dst = Pop(0, sig->GetParam(0));
CALL_INTERFACE_IF_REACHABLE(MemoryFill, imm, dst, value, size);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprTableInit: {
- TableInitImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ TableInitImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
ArgVector args = PopArgs(sig);
CALL_INTERFACE_IF_REACHABLE(TableInit, imm, VectorOf(args));
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprElemDrop: {
- ElemDropImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ ElemDropImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
CALL_INTERFACE_IF_REACHABLE(ElemDrop, imm);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprTableCopy: {
- TableCopyImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ TableCopyImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
ArgVector args = PopArgs(sig);
CALL_INTERFACE_IF_REACHABLE(TableCopy, imm, VectorOf(args));
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprTableGrow: {
- TableIndexImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ TableIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
Value delta = Pop(1, sig->GetParam(1));
Value value = Pop(0, this->module_->tables[imm.index].type);
Value* result = Push(kWasmI32);
CALL_INTERFACE_IF_REACHABLE(TableGrow, imm, value, delta, result);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprTableSize: {
- TableIndexImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ TableIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
Value* result = Push(kWasmI32);
CALL_INTERFACE_IF_REACHABLE(TableSize, imm, result);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprTableFill: {
- TableIndexImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ TableIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
Value count = Pop(2, sig->GetParam(2));
Value value = Pop(1, this->module_->tables[imm.index].type);
Value start = Pop(0, sig->GetParam(0));
CALL_INTERFACE_IF_REACHABLE(TableFill, imm, start, value, count);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
default:
- this->error("invalid numeric opcode");
+ this->DecodeError("invalid numeric opcode");
return 0;
}
}
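The numeric- and atomic-opcode cases above now take an opcode_length parameter and return opcode_length + imm.length instead of a hardcoded 2, because for prefixed opcodes the sub-opcode following the prefix byte is LEB128-encoded and may occupy more than one byte. A rough stand-alone sketch of how such a length can be derived; read_leb_u32 and the 0xFB prefix byte below are illustrative stand-ins, not the V8 helpers:

#include <cstdint>
#include <utility>

// Hypothetical helper (not the V8 API): decode an unsigned LEB128 value at
// {pc}, returning the value and the number of bytes consumed. No bounds
// checking, since this is only a sketch.
std::pair<uint32_t, uint32_t> read_leb_u32(const uint8_t* pc) {
  uint32_t result = 0, shift = 0, length = 0;
  uint8_t byte;
  do {
    byte = pc[length++];
    result |= static_cast<uint32_t>(byte & 0x7f) << shift;
    shift += 7;
  } while (byte & 0x80);
  return {result, length};
}

// For a prefixed instruction, opcode_length covers the prefix byte plus the
// LEB128-encoded sub-opcode; immediates then start at pc + opcode_length.
uint32_t PrefixedOpcodeLength(const uint8_t* pc) {
  auto [sub_opcode, sub_length] = read_leb_u32(pc + 1);  // skip prefix byte
  (void)sub_opcode;
  return 1 + sub_length;
}

int main() {
  // 0xFB stands in for a prefix byte; 0x85 0x01 is the LEB128 encoding of 133.
  const uint8_t code[] = {0xFB, 0x85, 0x01};
  return PrefixedOpcodeLength(code) == 3 ? 0 : 1;
}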
@@ -4087,15 +4247,16 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// size increase. Not inlining them should not create a performance
// degradation, because their invocations are guarded by V8_LIKELY.
V8_NOINLINE void PopTypeError(int index, Value val, ValueType expected) {
- this->errorf(val.pc, "%s[%d] expected type %s, found %s of type %s",
- SafeOpcodeNameAt(this->pc_), index, expected.name().c_str(),
- SafeOpcodeNameAt(val.pc), val.type.name().c_str());
+ this->DecodeError(val.pc(), "%s[%d] expected type %s, found %s of type %s",
+ SafeOpcodeNameAt(this->pc_), index,
+ expected.name().c_str(), SafeOpcodeNameAt(val.pc()),
+ val.type.name().c_str());
}
V8_NOINLINE void NotEnoughArgumentsError(int index) {
- this->errorf(this->pc_,
- "not enough arguments on the stack for %s, expected %d more",
- SafeOpcodeNameAt(this->pc_), index + 1);
+ this->DecodeError(
+ "not enough arguments on the stack for %s, expected %d more",
+ SafeOpcodeNameAt(this->pc_), index + 1);
}
V8_INLINE Value Pop(int index, ValueType expected) {
@@ -4133,6 +4294,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
int index_offset = conditional_branch ? 1 : 0;
for (int i = arity - 1; i >= 0; --i) Pop(index_offset + i, merge[i].type);
// Push values of the correct type back on the stack.
+ EnsureStackSpace(arity);
for (int i = 0; i < arity; ++i) Push(merge[i].type);
return this->ok();
}
@@ -4162,8 +4324,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Value& val = stack_values[i];
Value& old = (*merge)[i];
if (!VALIDATE(IsSubtypeOf(val.type, old.type, this->module_))) {
- this->errorf(this->pc_, "type error in merge[%u] (expected %s, got %s)",
- i, old.type.name().c_str(), val.type.name().c_str());
+ this->DecodeError("type error in merge[%u] (expected %s, got %s)", i,
+ old.type.name().c_str(), val.type.name().c_str());
return false;
}
}
@@ -4179,8 +4341,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Value& start = c->start_merge[i];
Value& end = c->end_merge[i];
if (!VALIDATE(IsSubtypeOf(start.type, end.type, this->module_))) {
- this->errorf(this->pc_, "type error in merge[%u] (expected %s, got %s)",
- i, end.type.name().c_str(), start.type.name().c_str());
+ this->DecodeError("type error in merge[%u] (expected %s, got %s)", i,
+ end.type.name().c_str(), start.type.name().c_str());
return false;
}
}
@@ -4197,10 +4359,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
uint32_t actual = stack_size() - c.stack_depth;
// Fallthrus must match the arity of the control exactly.
if (!VALIDATE(actual == expected)) {
- this->errorf(
- this->pc_,
+ this->DecodeError(
"expected %u elements on the stack for fallthru to @%d, found %u",
- expected, startrel(c.pc), actual);
+ expected, startrel(c.pc()), actual);
return false;
}
if (expected == 0) return true; // Fast path.
@@ -4216,10 +4377,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
int available = static_cast<int>(stack_size()) - c.stack_depth;
// For fallthrus, not more than the needed values should be available.
if (!VALIDATE(available <= arity)) {
- this->errorf(
- this->pc_,
+ this->DecodeError(
"expected %u elements on the stack for fallthru to @%d, found %u",
- arity, startrel(c.pc), available);
+ arity, startrel(c.pc()), available);
return false;
}
// Pop all values from the stack for type checking of existing stack
@@ -4246,10 +4406,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
uint32_t actual =
static_cast<uint32_t>(stack_size()) - control_.back().stack_depth;
if (!VALIDATE(actual >= expected)) {
- this->errorf(
- this->pc_,
+ this->DecodeError(
"expected %u elements on the stack for br to @%d, found %u",
- expected, startrel(c->pc), actual);
+ expected, startrel(c->pc()), actual);
return kInvalidStack;
}
return TypeCheckMergeValues(c, c->br_merge()) ? kReachableBranch
@@ -4270,9 +4429,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
int num_available =
static_cast<int>(stack_size()) - control_.back().stack_depth;
if (!VALIDATE(num_available >= num_returns)) {
- this->errorf(this->pc_,
- "expected %u elements on the stack for return, found %u",
- num_returns, num_available);
+ this->DecodeError(
+ "expected %u elements on the stack for return, found %u", num_returns,
+ num_available);
return false;
}
@@ -4283,9 +4442,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Value& val = stack_values[i];
ValueType expected_type = this->sig_->GetReturn(i);
if (!VALIDATE(IsSubtypeOf(val.type, expected_type, this->module_))) {
- this->errorf(this->pc_,
- "type error in return[%u] (expected %s, got %s)", i,
- expected_type.name().c_str(), val.type.name().c_str());
+ this->DecodeError("type error in return[%u] (expected %s, got %s)", i,
+ expected_type.name().c_str(),
+ val.type.name().c_str());
return false;
}
}
@@ -4350,9 +4509,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
class EmptyInterface {
public:
- static constexpr Decoder::ValidateFlag validate = Decoder::kValidate;
- using Value = ValueBase;
- using Control = ControlBase<Value>;
+ static constexpr Decoder::ValidateFlag validate = Decoder::kFullValidation;
+ using Value = ValueBase<validate>;
+ using Control = ControlBase<Value, validate>;
using FullDecoder = WasmFullDecoder<validate, EmptyInterface>;
#define DEFINE_EMPTY_CALLBACK(name, ...) \
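The EmptyInterface above reflects two related refactorings visible throughout this file: Decoder::kValidate is renamed to Decoder::kFullValidation, and ValueBase/ControlBase are now templated on the validation flag. Because the flag is a compile-time constant, validation-only work can be discarded entirely in a kNoValidation instantiation. A small sketch of that pattern with illustrative names (SketchDecoder is not a V8 class):

#include <cstdio>

enum ValidateFlag { kNoValidation = 0, kFullValidation = 1 };

template <ValidateFlag validate>
struct SketchDecoder {
  bool ok = true;

  // With kFullValidation the check runs and records errors; with
  // kNoValidation the branch is discarded at compile time and the
  // condition is never evaluated.
  template <typename Cond>
  void Validate(Cond cond, const char* msg) {
    if constexpr (validate == kFullValidation) {
      if (!cond()) {
        ok = false;
        std::fprintf(stderr, "validation error: %s\n", msg);
      }
    }
  }
};

int main() {
  SketchDecoder<kFullValidation> checking;
  checking.Validate([] { return false; }, "example failure");  // reports error
  SketchDecoder<kNoValidation> fast;
  fast.Validate([] { return false; }, "discarded");            // no-op
  std::printf("checking.ok=%d fast.ok=%d\n", checking.ok, fast.ok);
  return 0;
}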
diff --git a/deps/v8/src/wasm/function-body-decoder.cc b/deps/v8/src/wasm/function-body-decoder.cc
index a7471c3a7b..77c84bd615 100644
--- a/deps/v8/src/wasm/function-body-decoder.cc
+++ b/deps/v8/src/wasm/function-body-decoder.cc
@@ -23,8 +23,8 @@ bool DecodeLocalDecls(const WasmFeatures& enabled, BodyLocalDecls* decls,
const byte* start, const byte* end) {
WasmFeatures no_features = WasmFeatures::None();
Zone* zone = decls->type_list.get_allocator().zone();
- WasmDecoder<Decoder::kValidate> decoder(zone, nullptr, enabled, &no_features,
- nullptr, start, end, 0);
+ WasmDecoder<Decoder::kFullValidation> decoder(
+ zone, nullptr, enabled, &no_features, nullptr, start, end, 0);
uint32_t length;
if (!decoder.DecodeLocals(decoder.pc(), &length, 0)) {
decls->encoded_size = 0;
@@ -54,7 +54,7 @@ DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
const WasmModule* module, WasmFeatures* detected,
const FunctionBody& body) {
Zone zone(allocator, ZONE_NAME);
- WasmFullDecoder<Decoder::kValidate, EmptyInterface> decoder(
+ WasmFullDecoder<Decoder::kFullValidation, EmptyInterface> decoder(
&zone, module, enabled, detected, body);
decoder.Decode();
return decoder.toResult(nullptr);
@@ -65,9 +65,9 @@ unsigned OpcodeLength(const byte* pc, const byte* end) {
Zone* no_zone = nullptr;
WasmModule* no_module = nullptr;
FunctionSig* no_sig = nullptr;
- WasmDecoder<Decoder::kNoValidate> decoder(no_zone, no_module, no_features,
- &no_features, no_sig, pc, end, 0);
- return WasmDecoder<Decoder::kNoValidate>::OpcodeLength(&decoder, pc);
+ WasmDecoder<Decoder::kNoValidation> decoder(no_zone, no_module, no_features,
+ &no_features, no_sig, pc, end, 0);
+ return WasmDecoder<Decoder::kNoValidation>::OpcodeLength(&decoder, pc);
}
std::pair<uint32_t, uint32_t> StackEffect(const WasmModule* module,
@@ -75,7 +75,7 @@ std::pair<uint32_t, uint32_t> StackEffect(const WasmModule* module,
const byte* pc, const byte* end) {
WasmFeatures unused_detected_features = WasmFeatures::None();
Zone* no_zone = nullptr;
- WasmDecoder<Decoder::kNoValidate> decoder(
+ WasmDecoder<Decoder::kNoValidation> decoder(
no_zone, module, WasmFeatures::All(), &unused_detected_features, sig, pc,
end);
return decoder.StackEffect(pc);
@@ -124,9 +124,9 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
std::ostream& os, std::vector<int>* line_numbers) {
Zone zone(allocator, ZONE_NAME);
WasmFeatures unused_detected_features = WasmFeatures::None();
- WasmDecoder<Decoder::kNoValidate> decoder(&zone, module, WasmFeatures::All(),
- &unused_detected_features, body.sig,
- body.start, body.end);
+ WasmDecoder<Decoder::kNoValidation> decoder(
+ &zone, module, WasmFeatures::All(), &unused_detected_features, body.sig,
+ body.start, body.end);
int line_nr = 0;
constexpr int kNoByteCode = -1;
@@ -174,7 +174,7 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
unsigned control_depth = 0;
for (; i.has_next(); i.next()) {
unsigned length =
- WasmDecoder<Decoder::kNoValidate>::OpcodeLength(&decoder, i.pc());
+ WasmDecoder<Decoder::kNoValidation>::OpcodeLength(&decoder, i.pc());
unsigned offset = 1;
WasmOpcode opcode = i.current();
@@ -243,8 +243,8 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
case kExprIf:
case kExprBlock:
case kExprTry: {
- BlockTypeImmediate<Decoder::kNoValidate> imm(WasmFeatures::All(), &i,
- i.pc() + 1);
+ BlockTypeImmediate<Decoder::kNoValidation> imm(WasmFeatures::All(), &i,
+ i.pc() + 1);
os << " @" << i.pc_offset();
if (decoder.Complete(imm)) {
for (uint32_t i = 0; i < imm.out_arity(); i++) {
@@ -259,23 +259,23 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
control_depth--;
break;
case kExprBr: {
- BranchDepthImmediate<Decoder::kNoValidate> imm(&i, i.pc() + 1);
+ BranchDepthImmediate<Decoder::kNoValidation> imm(&i, i.pc() + 1);
os << " depth=" << imm.depth;
break;
}
case kExprBrIf: {
- BranchDepthImmediate<Decoder::kNoValidate> imm(&i, i.pc() + 1);
+ BranchDepthImmediate<Decoder::kNoValidation> imm(&i, i.pc() + 1);
os << " depth=" << imm.depth;
break;
}
case kExprBrTable: {
- BranchTableImmediate<Decoder::kNoValidate> imm(&i, i.pc() + 1);
+ BranchTableImmediate<Decoder::kNoValidation> imm(&i, i.pc() + 1);
os << " entries=" << imm.table_count;
break;
}
case kExprCallIndirect: {
- CallIndirectImmediate<Decoder::kNoValidate> imm(WasmFeatures::All(), &i,
- i.pc() + 1);
+ CallIndirectImmediate<Decoder::kNoValidation> imm(WasmFeatures::All(),
+ &i, i.pc() + 1);
os << " sig #" << imm.sig_index;
if (decoder.Complete(imm)) {
os << ": " << *imm.sig;
@@ -283,7 +283,7 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
break;
}
case kExprCallFunction: {
- CallFunctionImmediate<Decoder::kNoValidate> imm(&i, i.pc() + 1);
+ CallFunctionImmediate<Decoder::kNoValidation> imm(&i, i.pc() + 1);
os << " function #" << imm.index;
if (decoder.Complete(imm)) {
os << ": " << *imm.sig;
@@ -304,9 +304,9 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
BitVector* AnalyzeLoopAssignmentForTesting(Zone* zone, size_t num_locals,
const byte* start, const byte* end) {
WasmFeatures no_features = WasmFeatures::None();
- WasmDecoder<Decoder::kValidate> decoder(zone, nullptr, no_features,
- &no_features, nullptr, start, end, 0);
- return WasmDecoder<Decoder::kValidate>::AnalyzeLoopAssignment(
+ WasmDecoder<Decoder::kFullValidation> decoder(
+ zone, nullptr, no_features, &no_features, nullptr, start, end, 0);
+ return WasmDecoder<Decoder::kFullValidation>::AnalyzeLoopAssignment(
&decoder, start, static_cast<uint32_t>(num_locals), zone);
}
diff --git a/deps/v8/src/wasm/function-body-decoder.h b/deps/v8/src/wasm/function-body-decoder.h
index 6bc626cb18..d3144c9e46 100644
--- a/deps/v8/src/wasm/function-body-decoder.h
+++ b/deps/v8/src/wasm/function-body-decoder.h
@@ -163,7 +163,7 @@ class V8_EXPORT_PRIVATE BytecodeIterator : public NON_EXPORTED_BASE(Decoder) {
WasmOpcode current() {
return static_cast<WasmOpcode>(
- read_u8<Decoder::kNoValidate>(pc_, "expected bytecode"));
+ read_u8<Decoder::kNoValidation>(pc_, "expected bytecode"));
}
void next() {
@@ -176,7 +176,7 @@ class V8_EXPORT_PRIVATE BytecodeIterator : public NON_EXPORTED_BASE(Decoder) {
bool has_next() { return pc_ < end_; }
WasmOpcode prefixed_opcode() {
- return read_prefixed_opcode<Decoder::kNoValidate>(pc_);
+ return read_prefixed_opcode<Decoder::kNoValidation>(pc_);
}
};
diff --git a/deps/v8/src/wasm/function-compiler.cc b/deps/v8/src/wasm/function-compiler.cc
index 8b41a90992..0e4135f03a 100644
--- a/deps/v8/src/wasm/function-compiler.cc
+++ b/deps/v8/src/wasm/function-compiler.cc
@@ -267,7 +267,6 @@ void WasmCompilationUnit::CompileWasmFunction(Isolate* isolate,
namespace {
bool UseGenericWrapper(const FunctionSig* sig) {
-// Work only for int32 parameters and 1 or 0 return value for now.
#if V8_TARGET_ARCH_X64
if (sig->returns().size() > 1) {
return false;
@@ -295,10 +294,11 @@ bool UseGenericWrapper(const FunctionSig* sig) {
JSToWasmWrapperCompilationUnit::JSToWasmWrapperCompilationUnit(
Isolate* isolate, WasmEngine* wasm_engine, const FunctionSig* sig,
const WasmModule* module, bool is_import,
- const WasmFeatures& enabled_features)
+ const WasmFeatures& enabled_features, AllowGeneric allow_generic)
: is_import_(is_import),
sig_(sig),
- use_generic_wrapper_(UseGenericWrapper(sig) && !is_import),
+ use_generic_wrapper_(allow_generic && UseGenericWrapper(sig) &&
+ !is_import),
job_(use_generic_wrapper_ ? nullptr
: compiler::NewJSToWasmCompilationJob(
isolate, wasm_engine, sig, module,
@@ -339,7 +339,21 @@ Handle<Code> JSToWasmWrapperCompilationUnit::CompileJSToWasmWrapper(
// Run the compilation unit synchronously.
WasmFeatures enabled_features = WasmFeatures::FromIsolate(isolate);
JSToWasmWrapperCompilationUnit unit(isolate, isolate->wasm_engine(), sig,
- module, is_import, enabled_features);
+ module, is_import, enabled_features,
+ kAllowGeneric);
+ unit.Execute();
+ return unit.Finalize(isolate);
+}
+
+// static
+Handle<Code> JSToWasmWrapperCompilationUnit::CompileSpecificJSToWasmWrapper(
+ Isolate* isolate, const FunctionSig* sig, const WasmModule* module) {
+ // Run the compilation unit synchronously.
+ const bool is_import = false;
+ WasmFeatures enabled_features = WasmFeatures::FromIsolate(isolate);
+ JSToWasmWrapperCompilationUnit unit(isolate, isolate->wasm_engine(), sig,
+ module, is_import, enabled_features,
+ kDontAllowGeneric);
unit.Execute();
return unit.Finalize(isolate);
}
diff --git a/deps/v8/src/wasm/function-compiler.h b/deps/v8/src/wasm/function-compiler.h
index 3d232773e3..4894076303 100644
--- a/deps/v8/src/wasm/function-compiler.h
+++ b/deps/v8/src/wasm/function-compiler.h
@@ -32,6 +32,8 @@ struct WasmFunction;
class WasmInstructionBuffer final {
public:
WasmInstructionBuffer() = delete;
+ WasmInstructionBuffer(const WasmInstructionBuffer&) = delete;
+ WasmInstructionBuffer& operator=(const WasmInstructionBuffer&) = delete;
~WasmInstructionBuffer();
std::unique_ptr<AssemblerBuffer> CreateView();
std::unique_ptr<uint8_t[]> ReleaseBuffer();
@@ -43,9 +45,6 @@ class WasmInstructionBuffer final {
// Override {operator delete} to avoid implicit instantiation of {operator
// delete} with {size_t} argument. The {size_t} argument would be incorrect.
void operator delete(void* ptr) { ::operator delete(ptr); }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(WasmInstructionBuffer);
};
struct WasmCompilationResult {
@@ -113,10 +112,15 @@ STATIC_ASSERT(sizeof(WasmCompilationUnit) <= 2 * kSystemPointerSize);
class V8_EXPORT_PRIVATE JSToWasmWrapperCompilationUnit final {
public:
+ // A flag to mark whether the compilation unit can skip the compilation
+ // and return the builtin (generic) wrapper, when available.
+ enum AllowGeneric : bool { kAllowGeneric = true, kDontAllowGeneric = false };
+
JSToWasmWrapperCompilationUnit(Isolate* isolate, WasmEngine* wasm_engine,
const FunctionSig* sig,
const wasm::WasmModule* module, bool is_import,
- const WasmFeatures& enabled_features);
+ const WasmFeatures& enabled_features,
+ AllowGeneric allow_generic);
~JSToWasmWrapperCompilationUnit();
void Execute();
@@ -131,6 +135,12 @@ class V8_EXPORT_PRIVATE JSToWasmWrapperCompilationUnit final {
const WasmModule* module,
bool is_import);
+ // Run a compilation unit synchronously, but ask for the specific
+ // wrapper.
+ static Handle<Code> CompileSpecificJSToWasmWrapper(Isolate* isolate,
+ const FunctionSig* sig,
+ const WasmModule* module);
+
private:
bool is_import_;
const FunctionSig* sig_;
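The new AllowGeneric flag above lets a caller state whether the wrapper compilation unit may hand back the builtin generic JS-to-Wasm wrapper; CompileSpecificJSToWasmWrapper passes kDontAllowGeneric to force a specialized wrapper. The constructor combines this with the signature check as use_generic_wrapper_ = allow_generic && UseGenericWrapper(sig) && !is_import. A simplified sketch of that selection logic, using stand-in parameters (num_returns, only_i32_params) instead of the real FunctionSig inspection:

#include <cstdio>

enum AllowGeneric : bool { kAllowGeneric = true, kDontAllowGeneric = false };

// Stand-in for the signature check in UseGenericWrapper(); the real check
// inspects the FunctionSig (at most one return, parameter kinds, etc.).
bool UseGenericWrapper(int num_returns, bool only_i32_params) {
  return num_returns <= 1 && only_i32_params;
}

// Mirrors the constructor logic: the generic wrapper is used only when the
// caller allows it, the signature qualifies, and the function is not imported.
bool UsesGenericWrapper(AllowGeneric allow_generic, bool is_import,
                        int num_returns, bool only_i32_params) {
  return allow_generic && UseGenericWrapper(num_returns, only_i32_params) &&
         !is_import;
}

int main() {
  std::printf("%d\n", UsesGenericWrapper(kAllowGeneric, false, 1, true));      // 1
  std::printf("%d\n", UsesGenericWrapper(kDontAllowGeneric, false, 1, true));  // 0
  std::printf("%d\n", UsesGenericWrapper(kAllowGeneric, true, 1, true));       // 0
  return 0;
}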
diff --git a/deps/v8/src/wasm/graph-builder-interface.cc b/deps/v8/src/wasm/graph-builder-interface.cc
index 3fc6b066bb..ea071df575 100644
--- a/deps/v8/src/wasm/graph-builder-interface.cc
+++ b/deps/v8/src/wasm/graph-builder-interface.cc
@@ -74,11 +74,11 @@ constexpr uint32_t kNullCatch = static_cast<uint32_t>(-1);
class WasmGraphBuildingInterface {
public:
- static constexpr Decoder::ValidateFlag validate = Decoder::kValidate;
+ static constexpr Decoder::ValidateFlag validate = Decoder::kFullValidation;
using FullDecoder = WasmFullDecoder<validate, WasmGraphBuildingInterface>;
using CheckForNull = compiler::WasmGraphBuilder::CheckForNull;
- struct Value : public ValueBase {
+ struct Value : public ValueBase<validate> {
TFNode* node = nullptr;
template <typename... Args>
@@ -97,7 +97,7 @@ class WasmGraphBuildingInterface {
explicit TryInfo(SsaEnv* c) : catch_env(c) {}
};
- struct Control : public ControlBase<Value> {
+ struct Control : public ControlBase<Value, validate> {
SsaEnv* end_env = nullptr; // end environment for the construct.
SsaEnv* false_env = nullptr; // false environment (only for if).
TryInfo* try_info = nullptr; // information about try statements.
@@ -436,6 +436,13 @@ class WasmGraphBuildingInterface {
index.node, imm.offset, imm.alignment, decoder->position());
}
+ void LoadLane(FullDecoder* decoder, LoadType type, const Value& value,
+ const Value& index, const MemoryAccessImmediate<validate>& imm,
+ const uint8_t laneidx, Value* result) {
+ result->node = BUILD(LoadLane, type.mem_type(), value.node, index.node,
+ imm.offset, laneidx, decoder->position());
+ }
+
void StoreMem(FullDecoder* decoder, StoreType type,
const MemoryAccessImmediate<validate>& imm, const Value& index,
const Value& value) {
@@ -443,6 +450,13 @@ class WasmGraphBuildingInterface {
value.node, decoder->position(), type.value_type());
}
+ void StoreLane(FullDecoder* decoder, StoreType type,
+ const MemoryAccessImmediate<validate>& imm, const Value& index,
+ const Value& value, const uint8_t laneidx) {
+ BUILD(StoreLane, type.mem_rep(), index.node, imm.offset, imm.alignment,
+ value.node, laneidx, decoder->position(), type.value_type());
+ }
+
void CurrentMemoryPages(FullDecoder* decoder, Value* result) {
result->node = BUILD(CurrentMemoryPages);
}
@@ -1071,33 +1085,20 @@ class WasmGraphBuildingInterface {
BitVector* assigned = WasmDecoder<validate>::AnalyzeLoopAssignment(
decoder, decoder->pc(), decoder->num_locals() + 1, decoder->zone());
if (decoder->failed()) return;
- if (assigned != nullptr) {
- // Only introduce phis for variables assigned in this loop.
- int instance_cache_index = decoder->num_locals();
- for (int i = decoder->num_locals() - 1; i >= 0; i--) {
- if (!assigned->Contains(i)) continue;
- TFNode* inputs[] = {ssa_env_->locals[i], control()};
- ssa_env_->locals[i] = builder_->Phi(decoder->local_type(i), 1, inputs);
- }
- // Introduce phis for instance cache pointers if necessary.
- if (assigned->Contains(instance_cache_index)) {
- builder_->PrepareInstanceCacheForLoop(&ssa_env_->instance_cache,
- control());
- }
+ DCHECK_NOT_NULL(assigned);
- SetEnv(Split(decoder->zone(), ssa_env_));
- builder_->StackCheck(decoder->position());
- return;
- }
-
- // Conservatively introduce phis for all local variables.
+ // Only introduce phis for variables assigned in this loop.
+ int instance_cache_index = decoder->num_locals();
for (int i = decoder->num_locals() - 1; i >= 0; i--) {
+ if (!assigned->Contains(i)) continue;
TFNode* inputs[] = {ssa_env_->locals[i], control()};
ssa_env_->locals[i] = builder_->Phi(decoder->local_type(i), 1, inputs);
}
-
- // Conservatively introduce phis for instance cache.
- builder_->PrepareInstanceCacheForLoop(&ssa_env_->instance_cache, control());
+ // Introduce phis for instance cache pointers if necessary.
+ if (assigned->Contains(instance_cache_index)) {
+ builder_->PrepareInstanceCacheForLoop(&ssa_env_->instance_cache,
+ control());
+ }
SetEnv(Split(decoder->zone(), ssa_env_));
builder_->StackCheck(decoder->position());
@@ -1200,7 +1201,7 @@ DecodeResult BuildTFGraph(AccountingAllocator* allocator,
WasmFeatures* detected, const FunctionBody& body,
compiler::NodeOriginTable* node_origins) {
Zone zone(allocator, ZONE_NAME);
- WasmFullDecoder<Decoder::kValidate, WasmGraphBuildingInterface> decoder(
+ WasmFullDecoder<Decoder::kFullValidation, WasmGraphBuildingInterface> decoder(
&zone, module, enabled, detected, body, builder);
if (node_origins) {
builder->AddBytecodePositionDecorator(node_origins, &decoder);
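In the Loop handler above, the conservative fallback is gone: AnalyzeLoopAssignment is now expected to always deliver an assignment bit vector, and phis are introduced only for locals actually assigned in the loop (plus, if flagged, the instance cache tracked at index num_locals). A stand-alone sketch of that selective phi introduction, using plain integers in place of TFNode SSA values:

#include <cstddef>
#include <vector>

// Stand-in SSA state: one value per local, plus a flag recording whether a
// phi was created for that local at the loop header.
struct LoopHeaderEnv {
  std::vector<int> locals;
  std::vector<bool> has_phi;
};

// Only locals assigned somewhere in the loop body need a phi at the header;
// unassigned locals keep the single definition flowing in from before the
// loop. {assigned} plays the role of the BitVector from AnalyzeLoopAssignment.
LoopHeaderEnv IntroduceLoopPhis(const std::vector<int>& locals_before,
                                const std::vector<bool>& assigned) {
  LoopHeaderEnv env{locals_before, std::vector<bool>(locals_before.size())};
  for (std::size_t i = 0; i < locals_before.size(); ++i) {
    if (!assigned[i]) continue;  // loop-invariant local, no phi needed
    env.has_phi[i] = true;       // real code: builder_->Phi(type, 1, inputs)
  }
  return env;
}

int main() {
  LoopHeaderEnv env = IntroduceLoopPhis({10, 20, 30}, {false, true, false});
  return (env.has_phi[1] && !env.has_phi[0] && !env.has_phi[2]) ? 0 : 1;
}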
diff --git a/deps/v8/src/wasm/memory-tracing.cc b/deps/v8/src/wasm/memory-tracing.cc
index 075a6e2f25..0d88c4b461 100644
--- a/deps/v8/src/wasm/memory-tracing.cc
+++ b/deps/v8/src/wasm/memory-tracing.cc
@@ -19,14 +19,13 @@ void TraceMemoryOperation(base::Optional<ExecutionTier> tier,
int position, uint8_t* mem_start) {
EmbeddedVector<char, 91> value;
auto mem_rep = static_cast<MachineRepresentation>(info->mem_rep);
+ Address address = reinterpret_cast<Address>(mem_start) + info->offset;
switch (mem_rep) {
-#define TRACE_TYPE(rep, str, format, ctype1, ctype2) \
- case MachineRepresentation::rep: \
- SNPrintF(value, str ":" format, \
- base::ReadLittleEndianValue<ctype1>( \
- reinterpret_cast<Address>(mem_start) + info->address), \
- base::ReadLittleEndianValue<ctype2>( \
- reinterpret_cast<Address>(mem_start) + info->address)); \
+#define TRACE_TYPE(rep, str, format, ctype1, ctype2) \
+ case MachineRepresentation::rep: \
+ SNPrintF(value, str ":" format, \
+ base::ReadLittleEndianValue<ctype1>(address), \
+ base::ReadLittleEndianValue<ctype2>(address)); \
break;
TRACE_TYPE(kWord8, " i8", "%d / %02x", uint8_t, uint8_t)
TRACE_TYPE(kWord16, "i16", "%d / %04x", uint16_t, uint16_t)
@@ -37,30 +36,22 @@ void TraceMemoryOperation(base::Optional<ExecutionTier> tier,
#undef TRACE_TYPE
case MachineRepresentation::kSimd128:
SNPrintF(value, "s128:%d %d %d %d / %08x %08x %08x %08x",
- base::ReadLittleEndianValue<uint32_t>(
- reinterpret_cast<Address>(mem_start) + info->address),
- base::ReadLittleEndianValue<uint32_t>(
- reinterpret_cast<Address>(mem_start) + info->address + 4),
- base::ReadLittleEndianValue<uint32_t>(
- reinterpret_cast<Address>(mem_start) + info->address + 8),
- base::ReadLittleEndianValue<uint32_t>(
- reinterpret_cast<Address>(mem_start) + info->address + 12),
- base::ReadLittleEndianValue<uint32_t>(
- reinterpret_cast<Address>(mem_start) + info->address),
- base::ReadLittleEndianValue<uint32_t>(
- reinterpret_cast<Address>(mem_start) + info->address + 4),
- base::ReadLittleEndianValue<uint32_t>(
- reinterpret_cast<Address>(mem_start) + info->address + 8),
- base::ReadLittleEndianValue<uint32_t>(
- reinterpret_cast<Address>(mem_start) + info->address + 12));
+ base::ReadLittleEndianValue<uint32_t>(address),
+ base::ReadLittleEndianValue<uint32_t>(address + 4),
+ base::ReadLittleEndianValue<uint32_t>(address + 8),
+ base::ReadLittleEndianValue<uint32_t>(address + 12),
+ base::ReadLittleEndianValue<uint32_t>(address),
+ base::ReadLittleEndianValue<uint32_t>(address + 4),
+ base::ReadLittleEndianValue<uint32_t>(address + 8),
+ base::ReadLittleEndianValue<uint32_t>(address + 12));
break;
default:
SNPrintF(value, "???");
}
const char* eng =
tier.has_value() ? ExecutionTierToString(tier.value()) : "?";
- printf("%-11s func:%6d+0x%-6x%s %08x val: %s\n", eng, func_index, position,
- info->is_store ? " store to" : "load from", info->address,
+ printf("%-11s func:%6d+0x%-6x%s %016" PRIuPTR " val: %s\n", eng, func_index,
+ position, info->is_store ? " store to" : "load from", info->offset,
value.begin());
}
diff --git a/deps/v8/src/wasm/memory-tracing.h b/deps/v8/src/wasm/memory-tracing.h
index ca1b2f38c4..f025f07ded 100644
--- a/deps/v8/src/wasm/memory-tracing.h
+++ b/deps/v8/src/wasm/memory-tracing.h
@@ -17,7 +17,7 @@ namespace wasm {
// This struct is created in generated code, hence use low-level types.
struct MemoryTracingInfo {
- uint32_t address;
+ uintptr_t offset;
uint8_t is_store; // 0 or 1
uint8_t mem_rep;
static_assert(
@@ -25,8 +25,10 @@ struct MemoryTracingInfo {
std::underlying_type<MachineRepresentation>::type>::value,
"MachineRepresentation uses uint8_t");
- MemoryTracingInfo(uint32_t addr, bool is_store, MachineRepresentation rep)
- : address(addr), is_store(is_store), mem_rep(static_cast<uint8_t>(rep)) {}
+ MemoryTracingInfo(uintptr_t offset, bool is_store, MachineRepresentation rep)
+ : offset(offset),
+ is_store(is_store),
+ mem_rep(static_cast<uint8_t>(rep)) {}
};
// Callback for tracing a memory operation for debugging.
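MemoryTracingInfo now carries a uintptr_t offset instead of a 32-bit address, and TraceMemoryOperation (previous file) computes the effective address once as mem_start + offset before reading the traced value. A minimal sketch of that read, where memcpy stands in for base::ReadLittleEndianValue and a little-endian host is assumed:

#include <cstdint>
#include <cstdio>
#include <cstring>

struct TraceInfo {
  uintptr_t offset;  // offset into wasm memory (widened from uint32_t)
  bool is_store;
};

// Read a little-endian value of type T at {mem_start + offset}. memcpy stands
// in for base::ReadLittleEndianValue and assumes a little-endian host.
template <typename T>
T ReadTraced(const uint8_t* mem_start, const TraceInfo& info) {
  T value;
  std::memcpy(&value, mem_start + info.offset, sizeof(T));
  return value;
}

int main() {
  uint8_t memory[16] = {0};
  memory[8] = 0x2a;  // the value 42, stored at offset 8
  TraceInfo info{8, false};
  std::printf("%s offset=%u val=%u\n", info.is_store ? "store" : "load",
              static_cast<unsigned>(info.offset),
              ReadTraced<uint32_t>(memory, info));
  return 0;
}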
diff --git a/deps/v8/src/wasm/module-compiler.cc b/deps/v8/src/wasm/module-compiler.cc
index 967e092b5b..82f86786a7 100644
--- a/deps/v8/src/wasm/module-compiler.cc
+++ b/deps/v8/src/wasm/module-compiler.cc
@@ -79,105 +79,24 @@ enum class CompileStrategy : uint8_t {
kDefault = kEager,
};
-// Background compile jobs hold a shared pointer to this token. The token is
-// used to notify them that they should stop. As soon as they see this (after
-// finishing their current compilation unit), they will stop.
-// This allows to already remove the NativeModule without having to synchronize
-// on background compile jobs.
-class BackgroundCompileToken {
- public:
- explicit BackgroundCompileToken(
- const std::shared_ptr<NativeModule>& native_module)
- : native_module_(native_module) {}
-
- void Cancel() {
- base::SharedMutexGuard<base::kExclusive> mutex_guard(
- &compilation_scope_mutex_);
- native_module_.reset();
- }
-
- private:
- friend class BackgroundCompileScope;
-
- std::shared_ptr<NativeModule> StartScope() {
- compilation_scope_mutex_.LockShared();
- return native_module_.lock();
- }
-
- // This private method can only be called via {BackgroundCompileScope}.
- void SchedulePublishCode(NativeModule* native_module,
- std::vector<std::unique_ptr<WasmCode>> codes) {
- {
- base::MutexGuard guard(&publish_mutex_);
- if (publisher_running_) {
- // Add new code to the queue and return.
- publish_queue_.reserve(publish_queue_.size() + codes.size());
- for (auto& c : codes) publish_queue_.emplace_back(std::move(c));
- return;
- }
- publisher_running_ = true;
- }
- while (true) {
- PublishCode(native_module, VectorOf(codes));
- codes.clear();
-
- // Keep publishing new code that came in.
- base::MutexGuard guard(&publish_mutex_);
- DCHECK(publisher_running_);
- if (publish_queue_.empty()) {
- publisher_running_ = false;
- return;
- }
- codes.swap(publish_queue_);
- }
- }
-
- void PublishCode(NativeModule*, Vector<std::unique_ptr<WasmCode>>);
-
- void ExitScope() { compilation_scope_mutex_.UnlockShared(); }
-
- // {compilation_scope_mutex_} protects {native_module_}.
- base::SharedMutex compilation_scope_mutex_;
- std::weak_ptr<NativeModule> native_module_;
-
- // {publish_mutex_} protects {publish_queue_} and {publisher_running_}.
- base::Mutex publish_mutex_;
- std::vector<std::unique_ptr<WasmCode>> publish_queue_;
- bool publisher_running_ = false;
-};
-
class CompilationStateImpl;
-// Keep these scopes short, as they hold the mutex of the token, which
-// sequentializes all these scopes. The mutex is also acquired from foreground
-// tasks, which should not be blocked for a long time.
class BackgroundCompileScope {
public:
- explicit BackgroundCompileScope(
- const std::shared_ptr<BackgroundCompileToken>& token)
- : token_(token.get()), native_module_(token->StartScope()) {}
-
- ~BackgroundCompileScope() { token_->ExitScope(); }
-
- bool cancelled() const { return native_module_ == nullptr; }
+ explicit BackgroundCompileScope(std::weak_ptr<NativeModule> native_module)
+ : native_module_(native_module.lock()) {}
- NativeModule* native_module() {
- DCHECK(!cancelled());
+ NativeModule* native_module() const {
+ DCHECK(native_module_);
return native_module_.get();
}
+ inline CompilationStateImpl* compilation_state() const;
- inline CompilationStateImpl* compilation_state();
-
- // Call {SchedulePublishCode} via the {BackgroundCompileScope} to guarantee
- // that the {NativeModule} stays alive.
- void SchedulePublishCode(std::vector<std::unique_ptr<WasmCode>> codes) {
- token_->SchedulePublishCode(native_module_.get(), std::move(codes));
- }
+ bool cancelled() const;
private:
- BackgroundCompileToken* const token_;
// Keep the native module alive while in this scope.
- std::shared_ptr<NativeModule> const native_module_;
+ std::shared_ptr<NativeModule> native_module_;
};
enum CompileBaselineOnly : bool {
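BackgroundCompileScope above no longer goes through a BackgroundCompileToken: it simply locks a weak_ptr to the NativeModule, and cancelled() reports whether the module is already gone so a background job can stop after its current unit. A minimal sketch of that shape, with FakeNativeModule as an obvious stand-in:

#include <cstdio>
#include <memory>

struct FakeNativeModule { int id = 7; };  // stand-in for NativeModule

class Scope {
 public:
  explicit Scope(std::weak_ptr<FakeNativeModule> weak)
      : module_(weak.lock()) {}  // keeps the module alive for the scope
  bool cancelled() const { return module_ == nullptr; }
  FakeNativeModule* module() const { return module_.get(); }

 private:
  std::shared_ptr<FakeNativeModule> module_;
};

int main() {
  auto strong = std::make_shared<FakeNativeModule>();
  std::weak_ptr<FakeNativeModule> weak = strong;

  {
    Scope live(weak);
    std::printf("cancelled=%d\n", live.cancelled());  // 0: module still alive
  }

  strong.reset();  // last strong reference gone, the module dies

  Scope dead(weak);
  std::printf("cancelled=%d\n", dead.cancelled());  // 1: job should stop
  return 0;
}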
@@ -190,33 +109,74 @@ enum CompileBaselineOnly : bool {
// runs empty.
class CompilationUnitQueues {
public:
- explicit CompilationUnitQueues(int max_tasks, int num_declared_functions)
- : queues_(max_tasks), top_tier_priority_units_queues_(max_tasks) {
- DCHECK_LT(0, max_tasks);
- for (int task_id = 0; task_id < max_tasks; ++task_id) {
- queues_[task_id].next_steal_task_id = next_task_id(task_id);
- }
+ // Public API for QueueImpl.
+ struct Queue {
+ bool ShouldPublish(int num_processed_units) const;
+ };
+
+ explicit CompilationUnitQueues(int num_declared_functions)
+ : num_declared_functions_(num_declared_functions) {
+ // Add one first queue, to add units to.
+ queues_.emplace_back(std::make_unique<QueueImpl>(0));
+
for (auto& atomic_counter : num_units_) {
std::atomic_init(&atomic_counter, size_t{0});
}
- treated_ = std::make_unique<std::atomic<bool>[]>(num_declared_functions);
+ top_tier_compiled_ =
+ std::make_unique<std::atomic<bool>[]>(num_declared_functions);
for (int i = 0; i < num_declared_functions; i++) {
- std::atomic_init(&treated_.get()[i], false);
+ std::atomic_init(&top_tier_compiled_.get()[i], false);
}
}
- base::Optional<WasmCompilationUnit> GetNextUnit(
- int task_id, CompileBaselineOnly baseline_only) {
- DCHECK_LE(0, task_id);
- DCHECK_GT(queues_.size(), task_id);
+ Queue* GetQueueForTask(int task_id) {
+ int required_queues = task_id + 1;
+ {
+ base::SharedMutexGuard<base::kShared> queues_guard(&queues_mutex_);
+ if (V8_LIKELY(static_cast<int>(queues_.size()) >= required_queues)) {
+ return queues_[task_id].get();
+ }
+ }
+
+ // Otherwise increase the number of queues.
+ base::SharedMutexGuard<base::kExclusive> queues_guard(&queues_mutex_);
+ int num_queues = static_cast<int>(queues_.size());
+ while (num_queues < required_queues) {
+ int steal_from = num_queues + 1;
+ queues_.emplace_back(std::make_unique<QueueImpl>(steal_from));
+ ++num_queues;
+ }
+
+ // Update the {publish_limit}s of all queues.
+
+ // We want background threads to publish regularly (to avoid contention when
+ // they are all publishing at the end). On the other side, each publishing
+ // has some overhead (part of it for synchronizing between threads), so it
+ // should not happen *too* often. Thus aim for 4-8 publishes per thread, but
+ // distribute it such that publishing is likely to happen at different
+ // times.
+ int units_per_thread = num_declared_functions_ / num_queues;
+ int min = std::max(10, units_per_thread / 8);
+ int queue_id = 0;
+ for (auto& queue : queues_) {
+ // Set a limit between {min} and {2*min}, but not smaller than {10}.
+ int limit = min + (min * queue_id / num_queues);
+ queue->publish_limit.store(limit, std::memory_order_relaxed);
+ ++queue_id;
+ }
+
+ return queues_[task_id].get();
+ }
+ base::Optional<WasmCompilationUnit> GetNextUnit(
+ Queue* queue, CompileBaselineOnly baseline_only) {
// As long as any lower-tier units are outstanding we need to steal them
// before executing own higher-tier units.
int max_tier = baseline_only ? kBaseline : kTopTier;
for (int tier = GetLowestTierWithUnits(); tier <= max_tier; ++tier) {
- if (auto unit = GetNextUnitOfTier(task_id, tier)) {
+ if (auto unit = GetNextUnitOfTier(queue, tier)) {
size_t old_units_count =
num_units_[tier].fetch_sub(1, std::memory_order_relaxed);
DCHECK_LE(1, old_units_count);
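To make the publish-limit arithmetic in GetQueueForTask above concrete (the numbers are purely illustrative): with 2000 declared functions and 4 queues, units_per_thread is 500 and min is max(10, 500 / 8) = 62, so the per-queue limits come out as 62, 77, 93 and 108 for queue ids 0 through 3. A thread therefore publishes roughly every 62-108 units, i.e. about 500/108 ≈ 4.6 to 500/62 ≈ 8 times per thread, which matches the stated goal of 4-8 publishes per thread at staggered intervals.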
@@ -233,13 +193,18 @@ class CompilationUnitQueues {
DCHECK_LT(0, baseline_units.size() + top_tier_units.size());
// Add to the individual queues in a round-robin fashion. No special care is
// taken to balance them; they will be balanced by work stealing.
- int queue_to_add = next_queue_to_add.load(std::memory_order_relaxed);
- while (!next_queue_to_add.compare_exchange_weak(
- queue_to_add, next_task_id(queue_to_add), std::memory_order_relaxed)) {
- // Retry with updated {queue_to_add}.
+ QueueImpl* queue;
+ {
+ int queue_to_add = next_queue_to_add.load(std::memory_order_relaxed);
+ base::SharedMutexGuard<base::kShared> queues_guard(&queues_mutex_);
+ while (!next_queue_to_add.compare_exchange_weak(
+ queue_to_add, next_task_id(queue_to_add, queues_.size()),
+ std::memory_order_relaxed)) {
+ // Retry with updated {queue_to_add}.
+ }
+ queue = queues_[queue_to_add].get();
}
- Queue* queue = &queues_[queue_to_add];
base::MutexGuard guard(&queue->mutex);
base::Optional<base::MutexGuard> big_units_guard;
for (auto pair : {std::make_pair(int{kBaseline}, baseline_units),
@@ -265,22 +230,24 @@ class CompilationUnitQueues {
}
void AddTopTierPriorityUnit(WasmCompilationUnit unit, size_t priority) {
+ base::SharedMutexGuard<base::kShared> queues_guard(&queues_mutex_);
// Add to the individual queues in a round-robin fashion. No special care is
// taken to balance them; they will be balanced by work stealing. We use
// the same counter for this reason.
int queue_to_add = next_queue_to_add.load(std::memory_order_relaxed);
while (!next_queue_to_add.compare_exchange_weak(
- queue_to_add, next_task_id(queue_to_add), std::memory_order_relaxed)) {
+ queue_to_add, next_task_id(queue_to_add, queues_.size()),
+ std::memory_order_relaxed)) {
// Retry with updated {queue_to_add}.
}
- TopTierPriorityUnitsQueue* queue =
- &top_tier_priority_units_queues_[queue_to_add];
- base::MutexGuard guard(&queue->mutex);
-
+ {
+ auto* queue = queues_[queue_to_add].get();
+ base::MutexGuard guard(&queue->mutex);
+ queue->top_tier_priority_units.emplace(priority, unit);
+ }
num_priority_units_.fetch_add(1, std::memory_order_relaxed);
num_units_[kTopTier].fetch_add(1, std::memory_order_relaxed);
- queue->units.emplace(priority, unit);
}
// Get the current total number of units in all queues. This is only a
@@ -304,15 +271,6 @@ class CompilationUnitQueues {
// order of their function body size.
static constexpr size_t kBigUnitsLimit = 4096;
- struct Queue {
- base::Mutex mutex;
-
- // Protected by {mutex}:
- std::vector<WasmCompilationUnit> units[kNumTiers];
- int next_steal_task_id;
- // End of fields protected by {mutex}.
- };
-
struct BigUnit {
BigUnit(size_t func_size, WasmCompilationUnit unit)
: func_size{func_size}, unit(unit) {}
@@ -351,28 +309,27 @@ class CompilationUnitQueues {
std::priority_queue<BigUnit> units[kNumTiers];
};
- struct TopTierPriorityUnitsQueue {
+ struct QueueImpl : public Queue {
+ explicit QueueImpl(int next_steal_task_id)
+ : next_steal_task_id(next_steal_task_id) {}
+
+ // Number of units after which the task processing this queue should publish
+ // compilation results. Updated (reduced, using relaxed ordering) when new
+ // queues are allocated. If there is only one thread running, we can delay
+ // publishing arbitrarily.
+ std::atomic<int> publish_limit{kMaxInt};
+
base::Mutex mutex;
- // Protected by {mutex}:
- std::priority_queue<TopTierPriorityUnit> units;
+ // All fields below are protected by {mutex}.
+ std::vector<WasmCompilationUnit> units[kNumTiers];
+ std::priority_queue<TopTierPriorityUnit> top_tier_priority_units;
int next_steal_task_id;
- // End of fields protected by {mutex}.
};
- std::vector<Queue> queues_;
- BigUnitsQueue big_units_queue_;
-
- std::vector<TopTierPriorityUnitsQueue> top_tier_priority_units_queues_;
-
- std::atomic<size_t> num_units_[kNumTiers];
- std::atomic<size_t> num_priority_units_{0};
- std::unique_ptr<std::atomic<bool>[]> treated_;
- std::atomic<int> next_queue_to_add{0};
-
- int next_task_id(int task_id) const {
+ int next_task_id(int task_id, size_t num_queues) const {
int next = task_id + 1;
- return next == static_cast<int>(queues_.size()) ? 0 : next;
+ return next == static_cast<int>(num_queues) ? 0 : next;
}
int GetLowestTierWithUnits() const {
@@ -382,13 +339,13 @@ class CompilationUnitQueues {
return kNumTiers;
}
- base::Optional<WasmCompilationUnit> GetNextUnitOfTier(int task_id, int tier) {
- Queue* queue = &queues_[task_id];
+ base::Optional<WasmCompilationUnit> GetNextUnitOfTier(Queue* public_queue,
+ int tier) {
+ QueueImpl* queue = static_cast<QueueImpl*>(public_queue);
- // First check whether there is a priority unit. Execute that
- // first.
+ // First check whether there is a priority unit. Execute that first.
if (tier == kTopTier) {
- if (auto unit = GetTopTierPriorityUnit(task_id)) {
+ if (auto unit = GetTopTierPriorityUnit(queue)) {
return unit;
}
}
@@ -411,12 +368,16 @@ class CompilationUnitQueues {
// Try to steal from all other queues. If this succeeds, return one of the
// stolen units.
- size_t steal_trials = queues_.size();
- for (; steal_trials > 0;
- --steal_trials, steal_task_id = next_task_id(steal_task_id)) {
- if (steal_task_id == task_id) continue;
- if (auto unit = StealUnitsAndGetFirst(task_id, steal_task_id, tier)) {
- return unit;
+ {
+ base::SharedMutexGuard<base::kShared> guard(&queues_mutex_);
+ for (size_t steal_trials = 0; steal_trials < queues_.size();
+ ++steal_trials, ++steal_task_id) {
+ if (steal_task_id >= static_cast<int>(queues_.size())) {
+ steal_task_id = 0;
+ }
+ if (auto unit = StealUnitsAndGetFirst(queue, steal_task_id, tier)) {
+ return unit;
+ }
}
}
@@ -425,7 +386,7 @@ class CompilationUnitQueues {
}
base::Optional<WasmCompilationUnit> GetBigUnitOfTier(int tier) {
- // Fast-path without locking.
+ // Fast path without locking.
if (!big_units_queue_.has_units[tier].load(std::memory_order_relaxed)) {
return {};
}
@@ -439,25 +400,22 @@ class CompilationUnitQueues {
return unit;
}
- base::Optional<WasmCompilationUnit> GetTopTierPriorityUnit(int task_id) {
- // Fast-path without locking.
+ base::Optional<WasmCompilationUnit> GetTopTierPriorityUnit(QueueImpl* queue) {
+ // Fast path without locking.
if (num_priority_units_.load(std::memory_order_relaxed) == 0) {
return {};
}
- TopTierPriorityUnitsQueue* queue =
- &top_tier_priority_units_queues_[task_id];
-
int steal_task_id;
{
base::MutexGuard mutex_guard(&queue->mutex);
- while (!queue->units.empty()) {
- auto unit = queue->units.top().unit;
- queue->units.pop();
+ while (!queue->top_tier_priority_units.empty()) {
+ auto unit = queue->top_tier_priority_units.top().unit;
+ queue->top_tier_priority_units.pop();
num_priority_units_.fetch_sub(1, std::memory_order_relaxed);
- if (!treated_[unit.func_index()].exchange(true,
- std::memory_order_relaxed)) {
+ if (!top_tier_compiled_[unit.func_index()].exchange(
+ true, std::memory_order_relaxed)) {
return unit;
}
num_units_[kTopTier].fetch_sub(1, std::memory_order_relaxed);
@@ -467,28 +425,34 @@ class CompilationUnitQueues {
// Try to steal from all other queues. If this succeeds, return one of the
// stolen units.
- size_t steal_trials = queues_.size();
- for (; steal_trials > 0;
- --steal_trials, steal_task_id = next_task_id(steal_task_id)) {
- if (steal_task_id == task_id) continue;
- if (auto unit = StealTopTierPriorityUnit(task_id, steal_task_id)) {
- return unit;
+ {
+ base::SharedMutexGuard<base::kShared> guard(&queues_mutex_);
+ for (size_t steal_trials = 0; steal_trials < queues_.size();
+ ++steal_trials, ++steal_task_id) {
+ if (steal_task_id >= static_cast<int>(queues_.size())) {
+ steal_task_id = 0;
+ }
+ if (auto unit = StealTopTierPriorityUnit(queue, steal_task_id)) {
+ return unit;
+ }
}
}
return {};
}
- // Steal units of {wanted_tier} from {steal_from_task_id} to {task_id}. Return
+ // Steal units of {wanted_tier} from {steal_from_task_id} to {queue}. Return
// first stolen unit (rest put in queue of {task_id}), or {nullopt} if
// {steal_from_task_id} had no units of {wanted_tier}.
+ // Hold a shared lock on {queues_mutex_} when calling this method.
base::Optional<WasmCompilationUnit> StealUnitsAndGetFirst(
- int task_id, int steal_from_task_id, int wanted_tier) {
- DCHECK_NE(task_id, steal_from_task_id);
+ QueueImpl* queue, int steal_from_task_id, int wanted_tier) {
+ auto* steal_queue = queues_[steal_from_task_id].get();
+ // Cannot steal from own queue.
+ if (steal_queue == queue) return {};
std::vector<WasmCompilationUnit> stolen;
base::Optional<WasmCompilationUnit> returned_unit;
{
- Queue* steal_queue = &queues_[steal_from_task_id];
base::MutexGuard guard(&steal_queue->mutex);
auto* steal_from_vector = &steal_queue->units[wanted_tier];
if (steal_from_vector->empty()) return {};
@@ -498,81 +462,65 @@ class CompilationUnitQueues {
stolen.assign(steal_begin + 1, steal_from_vector->end());
steal_from_vector->erase(steal_begin, steal_from_vector->end());
}
- Queue* queue = &queues_[task_id];
base::MutexGuard guard(&queue->mutex);
auto* target_queue = &queue->units[wanted_tier];
target_queue->insert(target_queue->end(), stolen.begin(), stolen.end());
- queue->next_steal_task_id = next_task_id(steal_from_task_id);
+ queue->next_steal_task_id = steal_from_task_id + 1;
return returned_unit;
}
// Steal one priority unit from {steal_from_task_id} to {task_id}. Return
// stolen unit, or {nullopt} if {steal_from_task_id} had no priority units.
+ // Hold a shared lock on {queues_mutex_} when calling this method.
base::Optional<WasmCompilationUnit> StealTopTierPriorityUnit(
- int task_id, int steal_from_task_id) {
- DCHECK_NE(task_id, steal_from_task_id);
-
+ QueueImpl* queue, int steal_from_task_id) {
+ auto* steal_queue = queues_[steal_from_task_id].get();
+ // Cannot steal from own queue.
+ if (steal_queue == queue) return {};
base::Optional<WasmCompilationUnit> returned_unit;
{
- TopTierPriorityUnitsQueue* steal_queue =
- &top_tier_priority_units_queues_[steal_from_task_id];
base::MutexGuard guard(&steal_queue->mutex);
while (true) {
- if (steal_queue->units.empty()) return {};
+ if (steal_queue->top_tier_priority_units.empty()) return {};
- auto unit = steal_queue->units.top().unit;
- steal_queue->units.pop();
+ auto unit = steal_queue->top_tier_priority_units.top().unit;
+ steal_queue->top_tier_priority_units.pop();
num_priority_units_.fetch_sub(1, std::memory_order_relaxed);
- if (!treated_[unit.func_index()].exchange(true,
- std::memory_order_relaxed)) {
+ if (!top_tier_compiled_[unit.func_index()].exchange(
+ true, std::memory_order_relaxed)) {
returned_unit = unit;
break;
}
num_units_[kTopTier].fetch_sub(1, std::memory_order_relaxed);
}
}
- TopTierPriorityUnitsQueue* queue =
- &top_tier_priority_units_queues_[task_id];
base::MutexGuard guard(&queue->mutex);
- queue->next_steal_task_id = next_task_id(steal_from_task_id);
+ queue->next_steal_task_id = steal_from_task_id + 1;
return returned_unit;
}
-};
-
-// {JobHandle} is not thread safe in general (at least both the
-// {DefaultJobHandle} and chromium's {base::JobHandle} are not). Hence, protect
-// concurrent accesses via a mutex.
-class ThreadSafeJobHandle {
- public:
- explicit ThreadSafeJobHandle(std::shared_ptr<JobHandle> job_handle)
- : job_handle_(std::move(job_handle)) {}
- void NotifyConcurrencyIncrease() {
- base::MutexGuard guard(&mutex_);
- job_handle_->NotifyConcurrencyIncrease();
- }
+ // {queues_mutex_} protects {queues_}.
+ base::SharedMutex queues_mutex_;
+ std::vector<std::unique_ptr<QueueImpl>> queues_;
- void Join() {
- base::MutexGuard guard(&mutex_);
- job_handle_->Join();
- }
+ const int num_declared_functions_;
- void Cancel() {
- base::MutexGuard guard(&mutex_);
- job_handle_->Cancel();
- }
-
- bool IsRunning() const {
- base::MutexGuard guard(&mutex_);
- return job_handle_->IsRunning();
- }
+ BigUnitsQueue big_units_queue_;
- private:
- mutable base::Mutex mutex_;
- std::shared_ptr<JobHandle> job_handle_;
+ std::atomic<size_t> num_units_[kNumTiers];
+ std::atomic<size_t> num_priority_units_{0};
+ std::unique_ptr<std::atomic<bool>[]> top_tier_compiled_;
+ std::atomic<int> next_queue_to_add{0};
};
+bool CompilationUnitQueues::Queue::ShouldPublish(
+ int num_processed_units) const {
+ auto* queue = static_cast<const QueueImpl*>(this);
+ return num_processed_units >=
+ queue->publish_limit.load(std::memory_order_relaxed);
+}
+
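
A minimal sketch (not part of this patch) of the per-task queue plus work-stealing scheme that GetNextUnitOfTier() and StealUnitsAndGetFirst() implement above. Types are illustrative stand-ins (an int instead of WasmCompilationUnit), the real code grows the queue list on demand and remembers where it last stole from; here the shared mutex only guards the vector of queues, while each queue carries its own mutex.

    #include <deque>
    #include <memory>
    #include <mutex>
    #include <optional>
    #include <shared_mutex>
    #include <vector>

    struct Queue {
      std::mutex mutex;
      std::deque<int> units;  // stand-in for WasmCompilationUnit
    };

    class StealingQueues {
     public:
      explicit StealingQueues(int num_queues) {
        for (int i = 0; i < num_queues; ++i) {
          queues_.push_back(std::make_unique<Queue>());
        }
      }

      Queue* GetQueueForTask(int task_id) {
        std::shared_lock<std::shared_mutex> guard(queues_mutex_);
        // Simplification: wrap around instead of growing the vector.
        return queues_[task_id % queues_.size()].get();
      }

      std::optional<int> GetNextUnit(Queue* own) {
        if (auto unit = Pop(own)) return unit;
        // Own queue is empty: walk the other queues and steal from the first
        // non-empty one. The shared lock only protects the vector itself.
        std::shared_lock<std::shared_mutex> guard(queues_mutex_);
        for (auto& other : queues_) {
          if (other.get() == own) continue;  // cannot steal from own queue
          if (auto unit = Pop(other.get())) return unit;
        }
        return std::nullopt;
      }

     private:
      static std::optional<int> Pop(Queue* queue) {
        std::lock_guard<std::mutex> guard(queue->mutex);
        if (queue->units.empty()) return std::nullopt;
        int unit = queue->units.front();
        queue->units.pop_front();
        return unit;
      }

      std::shared_mutex queues_mutex_;  // guards the vector, not its elements
      std::vector<std::unique_ptr<Queue>> queues_;
    };
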
// The {CompilationStateImpl} keeps track of the compilation state of the
// owning NativeModule, i.e. which functions are left to be compiled.
// It contains a task manager to allow parallel and asynchronous background
@@ -586,6 +534,7 @@ class CompilationStateImpl {
// Cancel all background compilation, without waiting for compile tasks to
// finish.
void CancelCompilation();
+ bool cancelled() const;
// Initialize compilation progress. Set compilation tiers to expect for
// baseline and top tier compilation. Must be set before {AddCompilationUnits}
@@ -618,8 +567,11 @@ class CompilationStateImpl {
js_to_wasm_wrapper_units);
void AddTopTierCompilationUnit(WasmCompilationUnit);
void AddTopTierPriorityCompilationUnit(WasmCompilationUnit, size_t);
+
+ CompilationUnitQueues::Queue* GetQueueForCompileTask(int task_id);
+
base::Optional<WasmCompilationUnit> GetNextCompilationUnit(
- int task_id, CompileBaselineOnly baseline_only);
+ CompilationUnitQueues::Queue*, CompileBaselineOnly);
std::shared_ptr<JSToWasmWrapperCompilationUnit>
GetNextJSToWasmWrapperCompilationUnit();
@@ -629,13 +581,13 @@ class CompilationStateImpl {
void OnFinishedUnits(Vector<WasmCode*>);
void OnFinishedJSToWasmWrapperUnits(int num);
- int GetFreeCompileTaskId();
- int GetUnpublishedUnitsLimits(int task_id);
- void OnCompilationStopped(int task_id, const WasmFeatures& detected);
+ void OnCompilationStopped(const WasmFeatures& detected);
void PublishDetectedFeatures(Isolate*);
+ void SchedulePublishCompilationResults(
+ std::vector<std::unique_ptr<WasmCode>> unpublished_code);
// Ensure that a compilation job is running, and increase its concurrency if
// needed.
- void ScheduleCompileJobForNewUnits(int new_units);
+ void ScheduleCompileJobForNewUnits();
size_t NumOutstandingCompilations() const;
@@ -687,8 +639,12 @@ class CompilationStateImpl {
// Hold the {callbacks_mutex_} when calling this method.
void TriggerCallbacks(base::EnumSet<CompilationEvent> additional_events = {});
+ void PublishCompilationResults(
+ std::vector<std::unique_ptr<WasmCode>> unpublished_code);
+ void PublishCode(Vector<std::unique_ptr<WasmCode>> codes);
+
NativeModule* const native_module_;
- const std::shared_ptr<BackgroundCompileToken> background_compile_token_;
+ std::weak_ptr<NativeModule> const native_module_weak_;
const CompileMode compile_mode_;
const std::shared_ptr<Counters> async_counters_;
@@ -696,20 +652,9 @@ class CompilationStateImpl {
// using relaxed semantics.
std::atomic<bool> compile_failed_{false};
- // The atomic counter is shared with the compilation job. It's increased if
- // more units are added, and decreased when the queue drops to zero. Hence
- // it's an approximation of the current number of available units in the
- // queue, but it's not updated after popping a single unit, because that
- // would create too much contention.
- // This counter is not used for synchronization, hence relaxed memory ordering
- // can be used. The thread that increases the counter is the same that calls
- // {NotifyConcurrencyIncrease} later. The only reduction of the counter is a
- // drop to zero after a worker does not find any unit in the queue, and after
- // that drop another check is executed to ensure that any left-over units are
- // still processed.
- std::shared_ptr<std::atomic<int>> scheduled_units_approximation_ =
- std::make_shared<std::atomic<int>>(0);
- const int max_compile_concurrency_ = 0;
+ // True if compilation was cancelled and worker threads should return. This
+ // flag can be updated and read using relaxed semantics.
+ std::atomic<bool> compile_cancelled_{false};
CompilationUnitQueues compilation_unit_queues_;
@@ -729,7 +674,7 @@ class CompilationStateImpl {
//////////////////////////////////////////////////////////////////////////////
// Protected by {mutex_}:
- std::shared_ptr<ThreadSafeJobHandle> current_compile_job_;
+ std::shared_ptr<JobHandle> current_compile_job_;
// Features detected to be used in this module. Features can be detected
// as a module is being compiled.
@@ -768,6 +713,11 @@ class CompilationStateImpl {
// End of fields protected by {callbacks_mutex_}.
//////////////////////////////////////////////////////////////////////////////
+ // {publish_mutex_} protects {publish_queue_} and {publisher_running_}.
+ base::Mutex publish_mutex_;
+ std::vector<std::unique_ptr<WasmCode>> publish_queue_;
+ bool publisher_running_ = false;
+
// Encoding of fields in the {compilation_progress_} vector.
using RequiredBaselineTierField = base::BitField8<ExecutionTier, 0, 2>;
using RequiredTopTierField = base::BitField8<ExecutionTier, 2, 2>;
@@ -782,21 +732,14 @@ const CompilationStateImpl* Impl(const CompilationState* compilation_state) {
return reinterpret_cast<const CompilationStateImpl*>(compilation_state);
}
-CompilationStateImpl* BackgroundCompileScope::compilation_state() {
- return Impl(native_module()->compilation_state());
+CompilationStateImpl* BackgroundCompileScope::compilation_state() const {
+ DCHECK(native_module_);
+ return Impl(native_module_->compilation_state());
}
-void BackgroundCompileToken::PublishCode(
- NativeModule* native_module, Vector<std::unique_ptr<WasmCode>> code) {
- WasmCodeRefScope code_ref_scope;
- std::vector<WasmCode*> published_code = native_module->PublishCode(code);
- // Defer logging code in case wire bytes were not fully received yet.
- if (native_module->HasWireBytes()) {
- native_module->engine()->LogCode(VectorOf(published_code));
- }
-
- Impl(native_module->compilation_state())
- ->OnFinishedUnits(VectorOf(published_code));
+bool BackgroundCompileScope::cancelled() const {
+ return native_module_ == nullptr ||
+ Impl(native_module_->compilation_state())->cancelled();
}
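
A minimal sketch (not part of this patch) of the lifetime handling that replaces BackgroundCompileToken: the background job holds only a std::weak_ptr to the module and re-locks it for every synchronized section; a failed lock means the module died and the job simply stops. Names and types below are illustrative stand-ins.

    #include <memory>

    struct Module {
      bool cancelled = false;
    };

    class Scope {
     public:
      explicit Scope(const std::weak_ptr<Module>& weak) : module_(weak.lock()) {}

      // True if the module died or compilation was cancelled explicitly.
      bool cancelled() const { return module_ == nullptr || module_->cancelled; }

      Module* module() const { return module_.get(); }

     private:
      // Keeps the module alive only for the duration of this scope.
      std::shared_ptr<Module> module_;
    };

    // Stand-in for fetching and compiling one unit; returns false when done.
    bool CompileOneUnit(Module* /*module*/) { return false; }

    void BackgroundWork(std::weak_ptr<Module> weak) {
      while (true) {
        Scope scope(weak);  // re-lock the weak_ptr for this synchronized phase
        if (scope.cancelled()) return;  // corresponds to kNoMoreUnits
        if (!CompileOneUnit(scope.module())) return;
      }
    }
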
void UpdateFeatureUseCounts(Isolate* isolate, const WasmFeatures& detected) {
@@ -877,8 +820,9 @@ bool CompilationState::recompilation_finished() const {
std::unique_ptr<CompilationState> CompilationState::New(
const std::shared_ptr<NativeModule>& native_module,
std::shared_ptr<Counters> async_counters) {
- return std::unique_ptr<CompilationState>(reinterpret_cast<CompilationState*>(
- new CompilationStateImpl(native_module, std::move(async_counters))));
+ return std::unique_ptr<CompilationState>(
+ reinterpret_cast<CompilationState*>(new CompilationStateImpl(
+ std::move(native_module), std::move(async_counters))));
}
// End of PIMPL implementation of {CompilationState}.
@@ -1215,31 +1159,31 @@ void TriggerTierUp(Isolate* isolate, NativeModule* native_module,
namespace {
void RecordStats(const Code code, Counters* counters) {
- counters->wasm_generated_code_size()->Increment(code.body_size());
+ counters->wasm_generated_code_size()->Increment(code.raw_body_size());
counters->wasm_reloc_size()->Increment(code.relocation_info().length());
}
enum CompilationExecutionResult : int8_t { kNoMoreUnits, kYield };
CompilationExecutionResult ExecuteJSToWasmWrapperCompilationUnits(
- const std::shared_ptr<BackgroundCompileToken>& token,
- JobDelegate* delegate) {
+ std::weak_ptr<NativeModule> native_module, JobDelegate* delegate) {
std::shared_ptr<JSToWasmWrapperCompilationUnit> wrapper_unit = nullptr;
int num_processed_wrappers = 0;
{
- BackgroundCompileScope compile_scope(token);
+ BackgroundCompileScope compile_scope(native_module);
if (compile_scope.cancelled()) return kNoMoreUnits;
wrapper_unit = compile_scope.compilation_state()
->GetNextJSToWasmWrapperCompilationUnit();
if (!wrapper_unit) return kNoMoreUnits;
}
+ TRACE_EVENT0("v8.wasm", "wasm.JSToWasmWrapperCompilation");
while (true) {
wrapper_unit->Execute();
++num_processed_wrappers;
bool yield = delegate && delegate->ShouldYield();
- BackgroundCompileScope compile_scope(token);
+ BackgroundCompileScope compile_scope(native_module);
if (compile_scope.cancelled()) return kNoMoreUnits;
if (yield ||
!(wrapper_unit = compile_scope.compilation_state()
@@ -1251,16 +1195,35 @@ CompilationExecutionResult ExecuteJSToWasmWrapperCompilationUnits(
}
}
+namespace {
+const char* GetCompilationEventName(const WasmCompilationUnit& unit,
+ const CompilationEnv& env) {
+ ExecutionTier tier = unit.tier();
+ if (tier == ExecutionTier::kLiftoff) {
+ return "wasm.BaselineCompilation";
+ }
+ if (tier == ExecutionTier::kTurbofan) {
+ return "wasm.TopTierCompilation";
+ }
+ if (unit.func_index() <
+ static_cast<int>(env.module->num_imported_functions)) {
+ return "wasm.WasmToJSWrapperCompilation";
+ }
+ return "wasm.OtherCompilation";
+}
+} // namespace
+
// Run by the {BackgroundCompileJob} (on any thread).
CompilationExecutionResult ExecuteCompilationUnits(
- const std::shared_ptr<BackgroundCompileToken>& token, Counters* counters,
+ std::weak_ptr<NativeModule> native_module, Counters* counters,
JobDelegate* delegate, CompileBaselineOnly baseline_only) {
TRACE_EVENT0("v8.wasm", "wasm.ExecuteCompilationUnits");
// Execute JS to Wasm wrapper units first, so that they are ready to be
// finalized by the main thread when the kFinishedBaselineCompilation event is
// triggered.
- if (ExecuteJSToWasmWrapperCompilationUnits(token, delegate) == kYield) {
+ if (ExecuteJSToWasmWrapperCompilationUnits(native_module, delegate) ==
+ kYield) {
return kYield;
}
@@ -1270,108 +1233,65 @@ CompilationExecutionResult ExecuteCompilationUnits(
std::shared_ptr<WireBytesStorage> wire_bytes;
std::shared_ptr<const WasmModule> module;
WasmEngine* wasm_engine;
- // The Jobs API guarantees that {GetTaskId} is less than the number of
- // workers, and that the number of workers is less than or equal to the max
- // compile concurrency, which makes the task_id safe to use as an index into
- // the worker queues.
- int task_id = delegate ? delegate->GetTaskId() : 0;
- int unpublished_units_limit;
+ // Task 0 is any main thread (there might be multiple from multiple isolates);
+ // worker threads start at 1 (thus the "+ 1").
+ int task_id = delegate ? (int{delegate->GetTaskId()} + 1) : 0;
+ DCHECK_LE(0, task_id);
+ CompilationUnitQueues::Queue* queue;
base::Optional<WasmCompilationUnit> unit;
WasmFeatures detected_features = WasmFeatures::None();
- auto stop = [&detected_features,
- task_id](BackgroundCompileScope& compile_scope) {
- compile_scope.compilation_state()->OnCompilationStopped(task_id,
- detected_features);
- };
-
// Preparation (synchronized): Initialize the fields above and get the first
// compilation unit.
{
- BackgroundCompileScope compile_scope(token);
+ BackgroundCompileScope compile_scope(native_module);
if (compile_scope.cancelled()) return kNoMoreUnits;
auto* compilation_state = compile_scope.compilation_state();
env.emplace(compile_scope.native_module()->CreateCompilationEnv());
wire_bytes = compilation_state->GetWireBytesStorage();
module = compile_scope.native_module()->shared_module();
wasm_engine = compile_scope.native_module()->engine();
- unpublished_units_limit =
- compilation_state->GetUnpublishedUnitsLimits(task_id);
- unit = compilation_state->GetNextCompilationUnit(task_id, baseline_only);
- if (!unit) {
- stop(compile_scope);
- return kNoMoreUnits;
- }
+ queue = compilation_state->GetQueueForCompileTask(task_id);
+ unit = compilation_state->GetNextCompilationUnit(queue, baseline_only);
+ if (!unit) return kNoMoreUnits;
}
TRACE_COMPILE("ExecuteCompilationUnits (task id %d)\n", task_id);
std::vector<WasmCompilationResult> results_to_publish;
-
- auto publish_results = [&results_to_publish](
- BackgroundCompileScope* compile_scope) {
- TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
- "wasm.PublishCompilationResults", "num_results",
- results_to_publish.size());
- if (results_to_publish.empty()) return;
- std::vector<std::unique_ptr<WasmCode>> unpublished_code =
- compile_scope->native_module()->AddCompiledCode(
- VectorOf(results_to_publish));
- results_to_publish.clear();
-
- // For import wrapper compilation units, add result to the cache.
- const NativeModule* native_module = compile_scope->native_module();
- int num_imported_functions = native_module->num_imported_functions();
- WasmImportWrapperCache* cache = native_module->import_wrapper_cache();
- for (const auto& code : unpublished_code) {
- int func_index = code->index();
- DCHECK_LE(0, func_index);
- DCHECK_LT(func_index, native_module->num_functions());
- if (func_index < num_imported_functions) {
- const FunctionSig* sig =
- native_module->module()->functions[func_index].sig;
- WasmImportWrapperCache::CacheKey key(
- compiler::kDefaultImportCallKind, sig,
- static_cast<int>(sig->parameter_count()));
- // If two imported functions have the same key, only one of them should
- // have been added as a compilation unit. So it is always the first time
- // we compile a wrapper for this key here.
- DCHECK_NULL((*cache)[key]);
- (*cache)[key] = code.get();
- code->IncRef();
- }
- }
-
- compile_scope->SchedulePublishCode(std::move(unpublished_code));
- };
-
- bool compilation_failed = false;
while (true) {
- // (asynchronous): Execute the compilation.
- WasmCompilationResult result = unit->ExecuteCompilation(
- wasm_engine, &env.value(), wire_bytes, counters, &detected_features);
- results_to_publish.emplace_back(std::move(result));
-
- bool yield = delegate && delegate->ShouldYield();
-
- // (synchronized): Publish the compilation result and get the next unit.
- {
- BackgroundCompileScope compile_scope(token);
+ ExecutionTier current_tier = unit->tier();
+ const char* event_name = GetCompilationEventName(unit.value(), env.value());
+ TRACE_EVENT0("v8.wasm", event_name);
+ while (unit->tier() == current_tier) {
+ // (asynchronous): Execute the compilation.
+ WasmCompilationResult result = unit->ExecuteCompilation(
+ wasm_engine, &env.value(), wire_bytes, counters, &detected_features);
+ results_to_publish.emplace_back(std::move(result));
+
+ bool yield = delegate && delegate->ShouldYield();
+
+ // (synchronized): Publish the compilation result and get the next unit.
+ BackgroundCompileScope compile_scope(native_module);
if (compile_scope.cancelled()) return kNoMoreUnits;
+
if (!results_to_publish.back().succeeded()) {
- // Compile error.
compile_scope.compilation_state()->SetError();
- stop(compile_scope);
- compilation_failed = true;
- break;
+ return kNoMoreUnits;
}
- // Get next unit.
+ // Yield or get next unit.
if (yield ||
!(unit = compile_scope.compilation_state()->GetNextCompilationUnit(
- task_id, baseline_only))) {
- publish_results(&compile_scope);
- stop(compile_scope);
+ queue, baseline_only))) {
+ std::vector<std::unique_ptr<WasmCode>> unpublished_code =
+ compile_scope.native_module()->AddCompiledCode(
+ VectorOf(std::move(results_to_publish)));
+ results_to_publish.clear();
+ compile_scope.compilation_state()->SchedulePublishCompilationResults(
+ std::move(unpublished_code));
+ compile_scope.compilation_state()->OnCompilationStopped(
+ detected_features);
return yield ? kYield : kNoMoreUnits;
}
@@ -1382,17 +1302,17 @@ CompilationExecutionResult ExecuteCompilationUnits(
// Also publish after finishing a certain amount of units, to avoid
// contention when all threads publish at the end.
if (unit->tier() == ExecutionTier::kTurbofan ||
- static_cast<int>(results_to_publish.size()) >=
- unpublished_units_limit) {
- publish_results(&compile_scope);
+ queue->ShouldPublish(static_cast<int>(results_to_publish.size()))) {
+ std::vector<std::unique_ptr<WasmCode>> unpublished_code =
+ compile_scope.native_module()->AddCompiledCode(
+ VectorOf(std::move(results_to_publish)));
+ results_to_publish.clear();
+ compile_scope.compilation_state()->SchedulePublishCompilationResults(
+ std::move(unpublished_code));
}
}
}
- // We only get here if compilation failed. Other exits return directly.
- DCHECK(compilation_failed);
- USE(compilation_failed);
- token->Cancel();
- return kNoMoreUnits;
+ UNREACHABLE();
}
using JSToWasmWrapperKey = std::pair<bool, FunctionSig>;
@@ -1410,7 +1330,8 @@ int AddExportWrapperUnits(Isolate* isolate, WasmEngine* wasm_engine,
if (keys.insert(key).second) {
auto unit = std::make_shared<JSToWasmWrapperCompilationUnit>(
isolate, wasm_engine, function.sig, native_module->module(),
- function.imported, enabled_features);
+ function.imported, enabled_features,
+ JSToWasmWrapperCompilationUnit::kAllowGeneric);
builder->AddJSToWasmWrapperUnit(std::move(unit));
}
}
@@ -1529,6 +1450,7 @@ class CompilationTimeCallback {
histogram->AddSample(static_cast<int>(duration.InMicroseconds()));
}
+ // TODO(sartang@microsoft.com): Remove wall_clock_time_in_us field
v8::metrics::WasmModuleCompiled event{
(compile_mode_ != kSynchronous), // async
(compile_mode_ == kStreaming), // streamed
@@ -1538,7 +1460,8 @@ class CompilationTimeCallback {
true, // success
native_module->liftoff_code_size(), // code_size_in_bytes
native_module->liftoff_bailout_count(), // liftoff_bailout_count
- duration.InMicroseconds() // wall_clock_time_in_us
+ duration.InMicroseconds(), // wall_clock_time_in_us
+ duration.InMicroseconds() // wall_clock_duration_in_us
};
metrics_recorder_->DelayMainThreadEvent(event, context_id_);
}
@@ -1549,7 +1472,8 @@ class CompilationTimeCallback {
v8::metrics::WasmModuleTieredUp event{
FLAG_wasm_lazy_compilation, // lazy
native_module->turbofan_code_size(), // code_size_in_bytes
- duration.InMicroseconds() // wall_clock_time_in_us
+ duration.InMicroseconds(), // wall_clock_time_in_us
+ duration.InMicroseconds() // wall_clock_duration_in_us
};
metrics_recorder_->DelayMainThreadEvent(event, context_id_);
}
@@ -1563,7 +1487,8 @@ class CompilationTimeCallback {
false, // success
native_module->liftoff_code_size(), // code_size_in_bytes
native_module->liftoff_bailout_count(), // liftoff_bailout_count
- duration.InMicroseconds() // wall_clock_time_in_us
+ duration.InMicroseconds(), // wall_clock_time_in_us
+ duration.InMicroseconds() // wall_clock_duration_in_us
};
metrics_recorder_->DelayMainThreadEvent(event, context_id_);
}
@@ -1646,55 +1571,33 @@ void CompileNativeModule(Isolate* isolate,
}
}
-// The runnable task that performs compilations in the background.
-class BackgroundCompileJob : public JobTask {
+class BackgroundCompileJob final : public JobTask {
public:
- explicit BackgroundCompileJob(
- std::shared_ptr<BackgroundCompileToken> token,
- std::shared_ptr<Counters> async_counters,
- std::shared_ptr<std::atomic<int>> scheduled_units_approximation,
- size_t max_concurrency)
- : token_(std::move(token)),
- async_counters_(std::move(async_counters)),
- scheduled_units_approximation_(
- std::move(scheduled_units_approximation)),
- max_concurrency_(max_concurrency) {}
+ explicit BackgroundCompileJob(std::weak_ptr<NativeModule> native_module,
+ std::shared_ptr<Counters> async_counters)
+ : native_module_(std::move(native_module)),
+ async_counters_(std::move(async_counters)) {}
void Run(JobDelegate* delegate) override {
- if (ExecuteCompilationUnits(token_, async_counters_.get(), delegate,
- kBaselineOrTopTier) == kYield) {
- return;
- }
- // Otherwise we didn't find any more units to execute. Reduce the atomic
- // counter of the approximated number of available units to zero, but then
- // check whether any more units were added in the meantime, and increase
- // back if necessary.
- scheduled_units_approximation_->store(0, std::memory_order_relaxed);
-
- BackgroundCompileScope scope(token_);
- if (scope.cancelled()) return;
- size_t outstanding_units =
- scope.compilation_state()->NumOutstandingCompilations();
- if (outstanding_units == 0) return;
- // On a race between this thread and the thread which scheduled the units,
- // this might increase concurrency more than needed, which is fine. It
- // will be reduced again when the first task finds no more work to do.
- scope.compilation_state()->ScheduleCompileJobForNewUnits(
- static_cast<int>(outstanding_units));
+ ExecuteCompilationUnits(native_module_, async_counters_.get(), delegate,
+ kBaselineOrTopTier);
}
size_t GetMaxConcurrency(size_t worker_count) const override {
- // {current_concurrency_} does not reflect the units that running workers
- // are processing, thus add the current worker count to that number.
- return std::min(max_concurrency_,
- worker_count + scheduled_units_approximation_->load());
+ BackgroundCompileScope scope(native_module_);
+ if (scope.cancelled()) return 0;
+ // NumOutstandingCompilations() does not reflect the units that running
+ // workers are processing; hence add the current worker count to that number.
+ size_t flag_limit =
+ static_cast<size_t>(std::max(1, FLAG_wasm_num_compilation_tasks));
+ return std::min(
+ flag_limit,
+ worker_count + scope.compilation_state()->NumOutstandingCompilations());
}
private:
- const std::shared_ptr<BackgroundCompileToken> token_;
+ const std::weak_ptr<NativeModule> native_module_;
const std::shared_ptr<Counters> async_counters_;
- const std::shared_ptr<std::atomic<int>> scheduled_units_approximation_;
- const size_t max_concurrency_;
};
} // namespace
@@ -1974,7 +1877,8 @@ void AsyncCompileJob::FinishCompile(bool is_after_cache_hit) {
!compilation_state->failed(), // success
native_module_->liftoff_code_size(), // code_size_in_bytes
native_module_->liftoff_bailout_count(), // liftoff_bailout_count
- duration.InMicroseconds() // wall_clock_time_in_us
+ duration.InMicroseconds(), // wall_clock_time_in_us
+ duration.InMicroseconds() // wall_clock_duration_in_us
};
isolate_->metrics_recorder()->DelayMainThreadEvent(event, context_id_);
}
@@ -2489,6 +2393,7 @@ void AsyncStreamingProcessor::FinishAsyncCompileJobWithError(
job_->metrics_event_.module_size_in_bytes = job_->wire_bytes_.length();
job_->metrics_event_.function_count = num_functions_;
job_->metrics_event_.wall_clock_time_in_us = duration.InMicroseconds();
+ job_->metrics_event_.wall_clock_duration_in_us = duration.InMicroseconds();
job_->isolate_->metrics_recorder()->DelayMainThreadEvent(job_->metrics_event_,
job_->context_id_);
@@ -2580,6 +2485,8 @@ bool AsyncStreamingProcessor::ProcessCodeSectionHeader(
return false;
}
+ decoder_.set_code_section(offset, static_cast<uint32_t>(code_section_length));
+
prefix_hash_ = base::hash_combine(prefix_hash_,
static_cast<uint32_t>(code_section_length));
if (!wasm_engine_->GetStreamingCompilationOwnership(prefix_hash_)) {
@@ -2601,7 +2508,6 @@ bool AsyncStreamingProcessor::ProcessCodeSectionHeader(
job_->DoImmediately<AsyncCompileJob::PrepareAndStartCompile>(
decoder_.shared_module(), false, code_size_estimate);
- decoder_.set_code_section(offset, static_cast<uint32_t>(code_section_length));
auto* compilation_state = Impl(job_->native_module_->compilation_state());
compilation_state->SetWireBytesStorage(std::move(wire_bytes_storage));
DCHECK_EQ(job_->native_module_->module()->origin, kWasmOrigin);
@@ -2710,6 +2616,7 @@ void AsyncStreamingProcessor::OnFinishedStream(OwnedVector<uint8_t> bytes) {
job_->metrics_event_.module_size_in_bytes = job_->wire_bytes_.length();
job_->metrics_event_.function_count = num_functions_;
job_->metrics_event_.wall_clock_time_in_us = duration.InMicroseconds();
+ job_->metrics_event_.wall_clock_duration_in_us = duration.InMicroseconds();
job_->isolate_->metrics_recorder()->DelayMainThreadEvent(job_->metrics_event_,
job_->context_id_);
@@ -2804,37 +2711,31 @@ bool AsyncStreamingProcessor::Deserialize(Vector<const uint8_t> module_bytes,
return true;
}
-// TODO(wasm): Try to avoid the {NumberOfWorkerThreads} calls, grow queues
-// dynamically instead.
-int GetMaxCompileConcurrency() {
- int num_worker_threads = V8::GetCurrentPlatform()->NumberOfWorkerThreads();
- return std::min(FLAG_wasm_num_compilation_tasks, num_worker_threads);
-}
-
CompilationStateImpl::CompilationStateImpl(
const std::shared_ptr<NativeModule>& native_module,
std::shared_ptr<Counters> async_counters)
: native_module_(native_module.get()),
- background_compile_token_(
- std::make_shared<BackgroundCompileToken>(native_module)),
+ native_module_weak_(std::move(native_module)),
compile_mode_(FLAG_wasm_tier_up &&
native_module->module()->origin == kWasmOrigin
? CompileMode::kTiering
: CompileMode::kRegular),
async_counters_(std::move(async_counters)),
- max_compile_concurrency_(std::max(GetMaxCompileConcurrency(), 1)),
- // Add one to the allowed number of parallel tasks, because the foreground
- // task sometimes also contributes.
- compilation_unit_queues_(max_compile_concurrency_ + 1,
- native_module->num_functions()) {}
+ compilation_unit_queues_(native_module->num_functions()) {}
void CompilationStateImpl::CancelCompilation() {
- background_compile_token_->Cancel();
// No more callbacks after abort.
base::MutexGuard callbacks_guard(&callbacks_mutex_);
+ // std::memory_order_relaxed is sufficient because no other state is
+ // synchronized with |compile_cancelled_|.
+ compile_cancelled_.store(true, std::memory_order_relaxed);
callbacks_.clear();
}
+bool CompilationStateImpl::cancelled() const {
+ return compile_cancelled_.load(std::memory_order_relaxed);
+}
+
void CompilationStateImpl::InitializeCompilationProgress(
bool lazy_module, int num_import_wrappers, int num_export_wrappers) {
DCHECK(!failed());
@@ -2909,6 +2810,9 @@ void CompilationStateImpl::InitializeCompilationProgressAfterDeserialization() {
RequiredBaselineTierField::encode(ExecutionTier::kTurbofan) |
RequiredTopTierField::encode(ExecutionTier::kTurbofan) |
ReachedTierField::encode(ExecutionTier::kTurbofan);
+ finished_events_.Add(CompilationEvent::kFinishedExportWrappers);
+ finished_events_.Add(CompilationEvent::kFinishedBaselineCompilation);
+ finished_events_.Add(CompilationEvent::kFinishedTopTierCompilation);
compilation_progress_.assign(module->num_declared_functions,
kProgressAfterDeserialization);
}
@@ -2956,7 +2860,9 @@ void CompilationStateImpl::InitializeRecompilation(
// start yet, and new code will be kept tiered-down from the start. For
// streaming compilation, there is a special path to tier down later, when
// the module is complete. In any case, we don't need to recompile here.
+ base::Optional<CompilationUnitBuilder> builder;
if (compilation_progress_.size() > 0) {
+ builder.emplace(native_module_);
const WasmModule* module = native_module_->module();
DCHECK_EQ(module->num_declared_functions, compilation_progress_.size());
DCHECK_GE(module->num_declared_functions,
@@ -2971,15 +2877,13 @@ void CompilationStateImpl::InitializeRecompilation(
: ExecutionTier::kTurbofan;
int imported = module->num_imported_functions;
// Generate necessary compilation units on the fly.
- CompilationUnitBuilder builder(native_module_);
for (int function_index : recompile_function_indexes) {
DCHECK_LE(imported, function_index);
int slot_index = function_index - imported;
auto& progress = compilation_progress_[slot_index];
progress = MissingRecompilationField::update(progress, true);
- builder.AddRecompilationUnit(function_index, new_tier);
+ builder->AddRecompilationUnit(function_index, new_tier);
}
- builder.Commit();
}
// Trigger callback if module needs no recompilation.
@@ -2987,6 +2891,12 @@ void CompilationStateImpl::InitializeRecompilation(
TriggerCallbacks(base::EnumSet<CompilationEvent>(
{CompilationEvent::kFinishedRecompilation}));
}
+
+ if (builder.has_value()) {
+ // Avoid holding lock while scheduling a compile job.
+ guard.reset();
+ builder->Commit();
+ }
}
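
A minimal sketch (not part of this patch) of the "drop the lock before Commit()" pattern added to InitializeRecompilation() above: collect the units while the state mutex is held, but only schedule the compile job after releasing it, since scheduling may take further locks itself. The class and member names are illustrative.

    #include <mutex>
    #include <optional>
    #include <vector>

    class Recompiler {
     public:
      void InitializeRecompilation(const std::vector<int>& function_indexes) {
        std::unique_lock<std::mutex> guard(state_mutex_);
        std::optional<std::vector<int>> units;
        if (!function_indexes.empty()) {
          units.emplace(function_indexes);  // collect work under the lock
          missing_recompilations_ += function_indexes.size();
        }
        guard.unlock();  // avoid holding the lock while scheduling
        if (units.has_value()) Commit(std::move(*units));
      }

     private:
      void Commit(std::vector<int> units) {
        // In the real code this adds compilation units and may post a job,
        // which acquires other locks; holding state_mutex_ here could
        // otherwise create lock-order problems.
      }

      std::mutex state_mutex_;
      size_t missing_recompilations_ = 0;
    };
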
void CompilationStateImpl::AddCallback(CompilationState::callback_t callback) {
@@ -3017,13 +2927,15 @@ void CompilationStateImpl::AddCompilationUnits(
compilation_unit_queues_.AddUnits(baseline_units, top_tier_units,
native_module_->module());
}
- js_to_wasm_wrapper_units_.insert(js_to_wasm_wrapper_units_.end(),
- js_to_wasm_wrapper_units.begin(),
- js_to_wasm_wrapper_units.end());
-
- size_t total_units = baseline_units.size() + top_tier_units.size() +
- js_to_wasm_wrapper_units.size();
- ScheduleCompileJobForNewUnits(static_cast<int>(total_units));
+ if (!js_to_wasm_wrapper_units.empty()) {
+ // |js_to_wasm_wrapper_units_| can only be modified before background
+ // compilation has started.
+ DCHECK(!current_compile_job_ || !current_compile_job_->IsRunning());
+ js_to_wasm_wrapper_units_.insert(js_to_wasm_wrapper_units_.end(),
+ js_to_wasm_wrapper_units.begin(),
+ js_to_wasm_wrapper_units.end());
+ }
+ ScheduleCompileJobForNewUnits();
}
void CompilationStateImpl::AddTopTierCompilationUnit(WasmCompilationUnit unit) {
@@ -3033,7 +2945,7 @@ void CompilationStateImpl::AddTopTierCompilationUnit(WasmCompilationUnit unit) {
void CompilationStateImpl::AddTopTierPriorityCompilationUnit(
WasmCompilationUnit unit, size_t priority) {
compilation_unit_queues_.AddTopTierPriorityUnit(unit, priority);
- ScheduleCompileJobForNewUnits(1);
+ ScheduleCompileJobForNewUnits();
}
std::shared_ptr<JSToWasmWrapperCompilationUnit>
@@ -3055,7 +2967,7 @@ void CompilationStateImpl::FinalizeJSToWasmWrappers(
// optimization we keep the code space unlocked to avoid repeated unlocking
// because many such wrapper are allocated in sequence below.
TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
- "wasm.FinalizeJSToWasmWrappers", "num_wrappers",
+ "wasm.FinalizeJSToWasmWrappers", "wrappers",
js_to_wasm_wrapper_units_.size());
CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
for (auto& unit : js_to_wasm_wrapper_units_) {
@@ -3067,15 +2979,20 @@ void CompilationStateImpl::FinalizeJSToWasmWrappers(
}
}
+CompilationUnitQueues::Queue* CompilationStateImpl::GetQueueForCompileTask(
+ int task_id) {
+ return compilation_unit_queues_.GetQueueForTask(task_id);
+}
+
base::Optional<WasmCompilationUnit>
CompilationStateImpl::GetNextCompilationUnit(
- int task_id, CompileBaselineOnly baseline_only) {
- return compilation_unit_queues_.GetNextUnit(task_id, baseline_only);
+ CompilationUnitQueues::Queue* queue, CompileBaselineOnly baseline_only) {
+ return compilation_unit_queues_.GetNextUnit(queue, baseline_only);
}
void CompilationStateImpl::OnFinishedUnits(Vector<WasmCode*> code_vector) {
TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
- "wasm.OnFinishedUnits", "num_units", code_vector.size());
+ "wasm.OnFinishedUnits", "units", code_vector.size());
base::MutexGuard guard(&callbacks_mutex_);
@@ -3230,24 +3147,7 @@ void CompilationStateImpl::TriggerCallbacks(
}
}
-int CompilationStateImpl::GetUnpublishedUnitsLimits(int task_id) {
- // We want background threads to publish regularly (to avoid contention when
- // they are all publishing at the end). On the other side, each publishing has
- // some overhead (part of it for synchronizing between threads), so it should
- // not happen *too* often.
- // Thus aim for 4-8 publishes per thread, but distribute it such that
- // publishing is likely to happen at different times.
- int units_per_thread =
- static_cast<int>(native_module_->module()->num_declared_functions /
- max_compile_concurrency_);
- int min = units_per_thread / 8;
- // Return something between {min} and {2*min}, but not smaller than {10}.
- return std::max(10, min + (min * task_id / max_compile_concurrency_));
-}
-
-void CompilationStateImpl::OnCompilationStopped(int task_id,
- const WasmFeatures& detected) {
- DCHECK_GE(max_compile_concurrency_, task_id);
+void CompilationStateImpl::OnCompilationStopped(const WasmFeatures& detected) {
base::MutexGuard guard(&mutex_);
detected_features_.Add(detected);
}
@@ -3260,40 +3160,104 @@ void CompilationStateImpl::PublishDetectedFeatures(Isolate* isolate) {
UpdateFeatureUseCounts(isolate, detected_features_);
}
-void CompilationStateImpl::ScheduleCompileJobForNewUnits(int new_units) {
- // Increase the {scheduled_units_approximation_} counter and remember the old
- // value to check whether it increased towards {max_compile_concurrency_}.
- // In that case, we need to notify the compile job about the increased
- // concurrency.
- DCHECK_LT(0, new_units);
- int old_units = scheduled_units_approximation_->fetch_add(
- new_units, std::memory_order_relaxed);
- bool concurrency_increased = old_units < max_compile_concurrency_;
+void CompilationStateImpl::PublishCompilationResults(
+ std::vector<std::unique_ptr<WasmCode>> unpublished_code) {
+ if (unpublished_code.empty()) return;
- base::MutexGuard guard(&mutex_);
- if (current_compile_job_ && current_compile_job_->IsRunning()) {
- if (concurrency_increased) {
- current_compile_job_->NotifyConcurrencyIncrease();
+ // For import wrapper compilation units, add result to the cache.
+ int num_imported_functions = native_module_->num_imported_functions();
+ WasmImportWrapperCache* cache = native_module_->import_wrapper_cache();
+ for (const auto& code : unpublished_code) {
+ int func_index = code->index();
+ DCHECK_LE(0, func_index);
+ DCHECK_LT(func_index, native_module_->num_functions());
+ if (func_index < num_imported_functions) {
+ const FunctionSig* sig =
+ native_module_->module()->functions[func_index].sig;
+ WasmImportWrapperCache::CacheKey key(
+ compiler::kDefaultImportCallKind, sig,
+ static_cast<int>(sig->parameter_count()));
+ // If two imported functions have the same key, only one of them should
+ // have been added as a compilation unit. So it is always the first time
+ // we compile a wrapper for this key here.
+ DCHECK_NULL((*cache)[key]);
+ (*cache)[key] = code.get();
+ code->IncRef();
}
- return;
}
+ PublishCode(VectorOf(unpublished_code));
+}
+
+void CompilationStateImpl::PublishCode(Vector<std::unique_ptr<WasmCode>> code) {
+ WasmCodeRefScope code_ref_scope;
+ std::vector<WasmCode*> published_code =
+ native_module_->PublishCode(std::move(code));
+ // Defer logging code in case wire bytes were not fully received yet.
+ if (native_module_->HasWireBytes()) {
+ native_module_->engine()->LogCode(VectorOf(published_code));
+ }
+
+ OnFinishedUnits(VectorOf(std::move(published_code)));
+}
+
+void CompilationStateImpl::SchedulePublishCompilationResults(
+ std::vector<std::unique_ptr<WasmCode>> unpublished_code) {
+ {
+ base::MutexGuard guard(&publish_mutex_);
+ if (publisher_running_) {
+ // Add new code to the queue and return.
+ publish_queue_.reserve(publish_queue_.size() + unpublished_code.size());
+ for (auto& c : unpublished_code) {
+ publish_queue_.emplace_back(std::move(c));
+ }
+ return;
+ }
+ publisher_running_ = true;
+ }
+ while (true) {
+ PublishCompilationResults(std::move(unpublished_code));
+ unpublished_code.clear();
+
+ // Keep publishing new code that came in.
+ base::MutexGuard guard(&publish_mutex_);
+ DCHECK(publisher_running_);
+ if (publish_queue_.empty()) {
+ publisher_running_ = false;
+ return;
+ }
+ unpublished_code.swap(publish_queue_);
+ }
+}
+
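
A minimal sketch (not part of this patch) of the single-publisher pattern behind SchedulePublishCompilationResults(): every thread hands its finished code over under a mutex, but only one thread at a time acts as publisher and keeps draining the shared queue until it is empty. An int stands in for std::unique_ptr<WasmCode>.

    #include <mutex>
    #include <vector>

    class SinglePublisher {
     public:
      void Schedule(std::vector<int> results) {
        {
          std::lock_guard<std::mutex> guard(mutex_);
          if (publisher_running_) {
            // Another thread is publishing; append our results and return.
            queue_.insert(queue_.end(), results.begin(), results.end());
            return;
          }
          publisher_running_ = true;
        }
        while (true) {
          Publish(results);  // done outside the mutex, like the real code
          results.clear();
          std::lock_guard<std::mutex> guard(mutex_);
          if (queue_.empty()) {
            publisher_running_ = false;
            return;
          }
          results.swap(queue_);  // keep publishing code that arrived meanwhile
        }
      }

     private:
      void Publish(const std::vector<int>& results) {
        // Stand-in for PublishCompilationResults() / OnFinishedUnits().
      }

      std::mutex mutex_;
      std::vector<int> queue_;
      bool publisher_running_ = false;
    };
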
+void CompilationStateImpl::ScheduleCompileJobForNewUnits() {
if (failed()) return;
- std::unique_ptr<JobTask> new_compile_job =
- std::make_unique<BackgroundCompileJob>(
- background_compile_token_, async_counters_,
- scheduled_units_approximation_, max_compile_concurrency_);
- // TODO(wasm): Lower priority for TurboFan-only jobs.
- std::shared_ptr<JobHandle> handle = V8::GetCurrentPlatform()->PostJob(
- has_priority_ ? TaskPriority::kUserBlocking : TaskPriority::kUserVisible,
- std::move(new_compile_job));
- native_module_->engine()->ShepherdCompileJobHandle(handle);
- current_compile_job_ =
- std::make_unique<ThreadSafeJobHandle>(std::move(handle));
+ std::shared_ptr<JobHandle> new_job_handle;
+ {
+ base::MutexGuard guard(&mutex_);
+ if (current_compile_job_ && current_compile_job_->IsValid()) {
+ current_compile_job_->NotifyConcurrencyIncrease();
+ return;
+ }
+
+ std::unique_ptr<JobTask> new_compile_job =
+ std::make_unique<BackgroundCompileJob>(native_module_weak_,
+ async_counters_);
+ // TODO(wasm): Lower priority for TurboFan-only jobs.
+ new_job_handle = V8::GetCurrentPlatform()->PostJob(
+ has_priority_ ? TaskPriority::kUserBlocking
+ : TaskPriority::kUserVisible,
+ std::move(new_compile_job));
+ current_compile_job_ = new_job_handle;
+ // Reset the priority. Later uses of the compilation state, e.g. for
+ // debugging, should compile with the default priority again.
+ has_priority_ = false;
+ }
- // Reset the priority. Later uses of the compilation state, e.g. for
- // debugging, should compile with the default priority again.
- has_priority_ = false;
+ if (new_job_handle) {
+ native_module_->engine()->ShepherdCompileJobHandle(
+ std::move(new_job_handle));
+ }
}
size_t CompilationStateImpl::NumOutstandingCompilations() const {
@@ -3307,12 +3271,14 @@ size_t CompilationStateImpl::NumOutstandingCompilations() const {
}
void CompilationStateImpl::SetError() {
+ compile_cancelled_.store(true, std::memory_order_relaxed);
if (compile_failed_.exchange(true, std::memory_order_relaxed)) {
return; // Already failed before.
}
base::MutexGuard callbacks_guard(&callbacks_mutex_);
TriggerCallbacks();
+ callbacks_.clear();
}
void CompilationStateImpl::WaitForCompilationEvent(
@@ -3330,7 +3296,7 @@ void CompilationStateImpl::WaitForCompilationEvent(
}
constexpr JobDelegate* kNoDelegate = nullptr;
- ExecuteCompilationUnits(background_compile_token_, async_counters_.get(),
+ ExecuteCompilationUnits(native_module_weak_, async_counters_.get(),
kNoDelegate, kBaselineOnly);
compilation_event_semaphore->Wait();
}
@@ -3350,7 +3316,6 @@ class CompileJSToWasmWrapperJob final : public JobTask {
size_t max_concurrency)
: queue_(queue),
compilation_units_(compilation_units),
- max_concurrency_(max_concurrency),
outstanding_units_(queue->size()) {}
void Run(JobDelegate* delegate) override {
@@ -3366,14 +3331,15 @@ class CompileJSToWasmWrapperJob final : public JobTask {
// {outstanding_units_} includes the units that other workers are currently
// working on, so we can safely ignore the {worker_count} and just return
// the current number of outstanding units.
- return std::min(max_concurrency_,
+ size_t flag_limit =
+ static_cast<size_t>(std::max(1, FLAG_wasm_num_compilation_tasks));
+ return std::min(flag_limit,
outstanding_units_.load(std::memory_order_relaxed));
}
private:
JSToWasmWrapperQueue* const queue_;
JSToWasmWrapperUnitMap* const compilation_units_;
- const size_t max_concurrency_;
std::atomic<size_t> outstanding_units_;
};
} // namespace
@@ -3395,7 +3361,8 @@ void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module,
if (queue.insert(key)) {
auto unit = std::make_unique<JSToWasmWrapperCompilationUnit>(
isolate, isolate->wasm_engine(), function.sig, module,
- function.imported, enabled_features);
+ function.imported, enabled_features,
+ JSToWasmWrapperCompilationUnit::kAllowGeneric);
compilation_units.emplace(key, std::move(unit));
}
}
diff --git a/deps/v8/src/wasm/module-compiler.h b/deps/v8/src/wasm/module-compiler.h
index 6206d11986..e688bb9479 100644
--- a/deps/v8/src/wasm/module-compiler.h
+++ b/deps/v8/src/wasm/module-compiler.h
@@ -68,9 +68,6 @@ bool CompileLazy(Isolate*, NativeModule*, int func_index);
void TriggerTierUp(Isolate*, NativeModule*, int func_index);
-// Get the maximum concurrency for parallel compilation.
-int GetMaxCompileConcurrency();
-
template <typename Key, typename Hash>
class WrapperQueue {
public:
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index dea4e1cb69..6d684d3534 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -635,7 +635,8 @@ class ModuleDecoderImpl : public Decoder {
case kExternalMemory: {
// ===== Imported memory =============================================
if (!AddMemory(module_.get())) break;
- uint8_t flags = validate_memory_flags(&module_->has_shared_memory);
+ uint8_t flags = validate_memory_flags(&module_->has_shared_memory,
+ &module_->is_memory64);
consume_resizable_limits("memory", "pages", max_mem_pages(),
&module_->initial_pages,
&module_->has_maximum_pages, max_mem_pages(),
@@ -735,7 +736,8 @@ class ModuleDecoderImpl : public Decoder {
for (uint32_t i = 0; ok() && i < memory_count; i++) {
if (!AddMemory(module_.get())) break;
- uint8_t flags = validate_memory_flags(&module_->has_shared_memory);
+ uint8_t flags = validate_memory_flags(&module_->has_shared_memory,
+ &module_->is_memory64);
consume_resizable_limits("memory", "pages", max_mem_pages(),
&module_->initial_pages,
&module_->has_maximum_pages, max_mem_pages(),
@@ -1531,7 +1533,7 @@ class ModuleDecoderImpl : public Decoder {
return flags;
}
- uint8_t validate_memory_flags(bool* has_shared_memory) {
+ uint8_t validate_memory_flags(bool* has_shared_memory, bool* is_memory64) {
uint8_t flags = consume_u8("memory limits flags");
*has_shared_memory = false;
switch (flags) {
@@ -1542,8 +1544,9 @@ class ModuleDecoderImpl : public Decoder {
case kSharedWithMaximum:
if (!enabled_features_.has_threads()) {
errorf(pc() - 1,
- "invalid memory limits flags (enable via "
- "--experimental-wasm-threads)");
+ "invalid memory limits flags 0x%x (enable via "
+ "--experimental-wasm-threads)",
+ flags);
}
*has_shared_memory = true;
// V8 does not support shared memory without a maximum.
@@ -1557,9 +1560,14 @@ class ModuleDecoderImpl : public Decoder {
case kMemory64WithMaximum:
if (!enabled_features_.has_memory64()) {
errorf(pc() - 1,
- "invalid memory limits flags (enable via "
- "--experimental-wasm-memory64)");
+ "invalid memory limits flags 0x%x (enable via "
+ "--experimental-wasm-memory64)",
+ flags);
}
+ *is_memory64 = true;
+ break;
+ default:
+ errorf(pc() - 1, "invalid memory limits flags 0x%x", flags);
break;
}
return flags;
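
A minimal sketch (not part of this patch) of the limits-flags byte the hunk above validates, assuming the bit layout used by the threads and memory64 proposals: bit 0 = has maximum, bit 1 = shared, bit 2 = 64-bit memory. The named cases in the switch correspond to combinations of these bits; feature gating and the "shared requires a maximum" rule are only hinted at here.

    #include <cstdint>
    #include <optional>

    struct MemoryLimitsFlags {
      bool has_maximum;
      bool is_shared;
      bool is_memory64;
    };

    // Decode the flags byte; unknown bits make the limits invalid.
    std::optional<MemoryLimitsFlags> DecodeMemoryFlags(uint8_t flags) {
      if (flags > 0x07) return std::nullopt;
      MemoryLimitsFlags decoded{(flags & 0x01) != 0, (flags & 0x02) != 0,
                                (flags & 0x04) != 0};
      // V8 additionally rejects shared memories without a maximum and gates
      // shared/memory64 behind feature flags; that is omitted in this sketch.
      if (decoded.is_shared && !decoded.has_maximum) return std::nullopt;
      return decoded;
    }
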
@@ -1618,7 +1626,8 @@ class ModuleDecoderImpl : public Decoder {
// TODO(manoskouk): This is copy-modified from function-body-decoder-impl.h.
// We should find a way to share this code.
- V8_INLINE bool Validate(const byte* pc, HeapTypeImmediate<kValidate>& imm) {
+ V8_INLINE bool Validate(const byte* pc,
+ HeapTypeImmediate<kFullValidation>& imm) {
if (V8_UNLIKELY(imm.type.is_bottom())) {
error(pc, "invalid heap type");
return false;
@@ -1633,7 +1642,7 @@ class ModuleDecoderImpl : public Decoder {
WasmInitExpr consume_init_expr(WasmModule* module, ValueType expected,
size_t current_global_index) {
- constexpr Decoder::ValidateFlag validate = Decoder::kValidate;
+ constexpr Decoder::ValidateFlag validate = Decoder::kFullValidation;
WasmOpcode opcode = kExprNop;
std::vector<WasmInitExpr> stack;
while (pc() < end() && opcode != kExprEnd) {
@@ -1670,25 +1679,25 @@ class ModuleDecoderImpl : public Decoder {
break;
}
case kExprI32Const: {
- ImmI32Immediate<Decoder::kValidate> imm(this, pc() + 1);
+ ImmI32Immediate<Decoder::kFullValidation> imm(this, pc() + 1);
stack.emplace_back(imm.value);
len = 1 + imm.length;
break;
}
case kExprF32Const: {
- ImmF32Immediate<Decoder::kValidate> imm(this, pc() + 1);
+ ImmF32Immediate<Decoder::kFullValidation> imm(this, pc() + 1);
stack.emplace_back(imm.value);
len = 1 + imm.length;
break;
}
case kExprI64Const: {
- ImmI64Immediate<Decoder::kValidate> imm(this, pc() + 1);
+ ImmI64Immediate<Decoder::kFullValidation> imm(this, pc() + 1);
stack.emplace_back(imm.value);
len = 1 + imm.length;
break;
}
case kExprF64Const: {
- ImmF64Immediate<Decoder::kValidate> imm(this, pc() + 1);
+ ImmF64Immediate<Decoder::kFullValidation> imm(this, pc() + 1);
stack.emplace_back(imm.value);
len = 1 + imm.length;
break;
@@ -1702,8 +1711,8 @@ class ModuleDecoderImpl : public Decoder {
kExprRefNull);
return {};
}
- HeapTypeImmediate<Decoder::kValidate> imm(enabled_features_, this,
- pc() + 1);
+ HeapTypeImmediate<Decoder::kFullValidation> imm(enabled_features_,
+ this, pc() + 1);
len = 1 + imm.length;
if (!Validate(pc() + 1, imm)) return {};
stack.push_back(
@@ -1719,7 +1728,7 @@ class ModuleDecoderImpl : public Decoder {
return {};
}
- FunctionIndexImmediate<Decoder::kValidate> imm(this, pc() + 1);
+ FunctionIndexImmediate<Decoder::kFullValidation> imm(this, pc() + 1);
len = 1 + imm.length;
if (V8_UNLIKELY(module->functions.size() <= imm.index)) {
errorf(pc(), "invalid function index: %u", imm.index);
@@ -1741,8 +1750,8 @@ class ModuleDecoderImpl : public Decoder {
return {};
}
- Simd128Immediate<validate> imm(this, pc() + len + 1);
- len += 1 + kSimd128Size;
+ Simd128Immediate<validate> imm(this, pc() + len);
+ len += kSimd128Size;
stack.emplace_back(imm.value);
break;
}
@@ -1755,8 +1764,8 @@ class ModuleDecoderImpl : public Decoder {
case kExprRttCanon: {
HeapTypeImmediate<validate> imm(enabled_features_, this,
pc() + 2);
- len += 1 + imm.length;
- if (!Validate(pc() + 2, imm)) return {};
+ len += imm.length;
+ if (!Validate(pc() + len, imm)) return {};
stack.push_back(
WasmInitExpr::RttCanon(imm.type.representation()));
break;
@@ -1764,8 +1773,8 @@ class ModuleDecoderImpl : public Decoder {
case kExprRttSub: {
HeapTypeImmediate<validate> imm(enabled_features_, this,
pc() + 2);
- len += 1 + imm.length;
- if (!Validate(pc() + 2, imm)) return {};
+ len += imm.length;
+ if (!Validate(pc() + len, imm)) return {};
if (stack.empty()) {
error(pc(), "calling rtt.sub without arguments");
return {};
@@ -1836,7 +1845,7 @@ class ModuleDecoderImpl : public Decoder {
ValueType consume_value_type() {
uint32_t type_length;
- ValueType result = value_type_reader::read_value_type<kValidate>(
+ ValueType result = value_type_reader::read_value_type<kFullValidation>(
this, this->pc(), &type_length,
origin_ == kWasmOrigin ? enabled_features_ : WasmFeatures::None());
if (result == kWasmBottom) error(pc_, "invalid value type");
@@ -1850,7 +1859,7 @@ class ModuleDecoderImpl : public Decoder {
}
ValueType consume_storage_type() {
- uint8_t opcode = read_u8<kValidate>(this->pc());
+ uint8_t opcode = read_u8<kFullValidation>(this->pc());
switch (opcode) {
case kI8Code:
consume_bytes(1, "i8");
@@ -1961,10 +1970,10 @@ class ModuleDecoderImpl : public Decoder {
ValueType* type, uint32_t* table_index,
WasmInitExpr* offset) {
const byte* pos = pc();
- uint8_t flag;
+ uint32_t flag;
if (enabled_features_.has_bulk_memory() ||
enabled_features_.has_reftypes()) {
- flag = consume_u8("flag");
+ flag = consume_u32v("flag");
} else {
uint32_t table_index = consume_u32v("table index");
// The only valid flag value without bulk_memory or externref is '0'.
@@ -2133,7 +2142,8 @@ class ModuleDecoderImpl : public Decoder {
if (failed()) return index;
switch (opcode) {
case kExprRefNull: {
- HeapTypeImmediate<kValidate> imm(WasmFeatures::All(), this, this->pc());
+ HeapTypeImmediate<kFullValidation> imm(WasmFeatures::All(), this,
+ this->pc());
consume_bytes(imm.length, "ref.null immediate");
index = WasmElemSegment::kNullIndex;
break;
@@ -2172,13 +2182,14 @@ ModuleResult DecodeWasmModule(
// as the {module}.
ModuleDecoderImpl decoder(enabled, module_start, module_end, origin);
v8::metrics::WasmModuleDecoded metrics_event;
- metrics::TimedScope<v8::metrics::WasmModuleDecoded> metrics_event_scope(
- &metrics_event, &v8::metrics::WasmModuleDecoded::wall_clock_time_in_us);
+ base::ElapsedTimer timer;
+ timer.Start();
ModuleResult result =
decoder.DecodeModule(counters, allocator, verify_functions);
// Record event metrics.
- metrics_event_scope.Stop();
+ metrics_event.wall_clock_duration_in_us = timer.Elapsed().InMicroseconds();
+ timer.Stop();
metrics_event.success = decoder.ok() && result.ok();
metrics_event.async = decoding_method == DecodingMethod::kAsync ||
decoding_method == DecodingMethod::kAsyncStream;
@@ -2438,14 +2449,8 @@ void DecodeFunctionNames(const byte* module_start, const byte* module_end,
// Extract from export table.
for (const WasmExport& exp : export_table) {
- switch (exp.kind) {
- case kExternalFunction:
- if (names->count(exp.index) == 0) {
- names->insert(std::make_pair(exp.index, exp.name));
- }
- break;
- default:
- break;
+ if (exp.kind == kExternalFunction && names->count(exp.index) == 0) {
+ names->insert(std::make_pair(exp.index, exp.name));
}
}
}
diff --git a/deps/v8/src/wasm/module-instantiate.cc b/deps/v8/src/wasm/module-instantiate.cc
index d31bafb294..e8b0a4f8e6 100644
--- a/deps/v8/src/wasm/module-instantiate.cc
+++ b/deps/v8/src/wasm/module-instantiate.cc
@@ -58,25 +58,32 @@ uint32_t EvalUint32InitExpr(Handle<WasmInstanceObject> instance,
using ImportWrapperQueue = WrapperQueue<WasmImportWrapperCache::CacheKey,
WasmImportWrapperCache::CacheKeyHash>;
-class CompileImportWrapperTask final : public CancelableTask {
+class CompileImportWrapperJob final : public JobTask {
public:
- CompileImportWrapperTask(
- CancelableTaskManager* task_manager, WasmEngine* engine,
- Counters* counters, NativeModule* native_module,
+ CompileImportWrapperJob(
+ WasmEngine* engine, Counters* counters, NativeModule* native_module,
ImportWrapperQueue* queue,
WasmImportWrapperCache::ModificationScope* cache_scope)
- : CancelableTask(task_manager),
- engine_(engine),
+ : engine_(engine),
counters_(counters),
native_module_(native_module),
queue_(queue),
cache_scope_(cache_scope) {}
- void RunInternal() override {
+ size_t GetMaxConcurrency(size_t worker_count) const override {
+ size_t flag_limit =
+ static_cast<size_t>(std::max(1, FLAG_wasm_num_compilation_tasks));
+ // Add {worker_count} to the queue size because workers might still be
+ // processing units that have already been popped from the queue.
+ return std::min(flag_limit, worker_count + queue_->size());
+ }
+
+ void Run(JobDelegate* delegate) override {
while (base::Optional<WasmImportWrapperCache::CacheKey> key =
queue_->pop()) {
CompileImportWrapper(engine_, native_module_, counters_, key->kind,
key->signature, key->expected_arity, cache_scope_);
+ if (delegate->ShouldYield()) return;
}
}
@@ -410,10 +417,8 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
TimedHistogramScope wasm_instantiate_module_time_scope(SELECT_WASM_COUNTER(
isolate_->counters(), module_->origin, wasm_instantiate, module_time));
v8::metrics::WasmModuleInstantiated wasm_module_instantiated;
- metrics::TimedScope<v8::metrics::WasmModuleInstantiated>
- wasm_module_instantiated_timed_scope(
- &wasm_module_instantiated,
- &v8::metrics::WasmModuleInstantiated::wall_clock_time_in_us);
+ base::ElapsedTimer timer;
+ timer.Start();
NativeModule* native_module = module_object_->native_module();
//--------------------------------------------------------------------------
@@ -745,7 +750,9 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
TRACE("Successfully built instance for module %p\n",
module_object_->native_module());
wasm_module_instantiated.success = true;
- wasm_module_instantiated_timed_scope.Stop();
+ wasm_module_instantiated.wall_clock_duration_in_us =
+ timer.Elapsed().InMicroseconds();
+ timer.Stop();
isolate_->metrics_recorder()->DelayMainThreadEvent(wasm_module_instantiated,
context_id_);
return instance;
@@ -1074,8 +1081,7 @@ bool InstanceBuilder::ProcessImportedFunction(
// The imported function is a callable.
int expected_arity = static_cast<int>(expected_sig->parameter_count());
- if (kind ==
- compiler::WasmImportCallKind::kJSFunctionArityMismatchSkipAdaptor) {
+ if (kind == compiler::WasmImportCallKind::kJSFunctionArityMismatch) {
Handle<JSFunction> function = Handle<JSFunction>::cast(js_receiver);
SharedFunctionInfo shared = function->shared();
expected_arity = shared.internal_formal_parameter_count();
@@ -1450,7 +1456,7 @@ void InstanceBuilder::CompileImportWrappers(
int expected_arity = static_cast<int>(sig->parameter_count());
if (resolved.first ==
- compiler::WasmImportCallKind::kJSFunctionArityMismatchSkipAdaptor) {
+ compiler::WasmImportCallKind::kJSFunctionArityMismatch) {
Handle<JSFunction> function = Handle<JSFunction>::cast(resolved.second);
SharedFunctionInfo shared = function->shared();
expected_arity = shared.internal_formal_parameter_count();
@@ -1464,24 +1470,14 @@ void InstanceBuilder::CompileImportWrappers(
import_wrapper_queue.insert(key);
}
- CancelableTaskManager task_manager;
- // TODO(wasm): Switch this to the Jobs API.
- const int max_background_tasks = GetMaxCompileConcurrency();
- for (int i = 0; i < max_background_tasks; ++i) {
- auto task = std::make_unique<CompileImportWrapperTask>(
- &task_manager, isolate_->wasm_engine(), isolate_->counters(),
- native_module, &import_wrapper_queue, &cache_scope);
- V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
- }
+ auto compile_job_task = std::make_unique<CompileImportWrapperJob>(
+ isolate_->wasm_engine(), isolate_->counters(), native_module,
+ &import_wrapper_queue, &cache_scope);
+ auto compile_job = V8::GetCurrentPlatform()->PostJob(
+ TaskPriority::kUserVisible, std::move(compile_job_task));
- // Also compile in the current thread, in case there are no worker threads.
- while (base::Optional<WasmImportWrapperCache::CacheKey> key =
- import_wrapper_queue.pop()) {
- CompileImportWrapper(isolate_->wasm_engine(), native_module,
- isolate_->counters(), key->kind, key->signature,
- key->expected_arity, &cache_scope);
- }
- task_manager.CancelAndWait();
+ // Wait for the job to finish, while contributing in this thread.
+ compile_job->Join();
}
// Process the imports, including functions, tables, globals, and memory, in
@@ -1947,7 +1943,7 @@ bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
// Update the local dispatch table first if necessary.
if (IsSubtypeOf(table_object->type(), kWasmFuncRef, module)) {
- uint32_t sig_id = module->signature_ids[function->sig_index];
+ uint32_t sig_id = module->canonicalized_type_ids[function->sig_index];
IndirectFunctionTableEntry(instance, table_index, entry_index)
.Set(sig_id, instance, func_index);
}
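The CompileImportWrapperJob and CompileImportWrappers hunks above move import-wrapper compilation from manually posted CancelableTaskManager tasks to a single job: workers drain a shared queue, and the posting thread contributes via Join(). A minimal standalone sketch of that drain-the-queue pattern, using std::thread instead of the V8 Jobs API (all names here are illustrative, not V8 code):

    #include <cstdio>
    #include <mutex>
    #include <queue>
    #include <thread>
    #include <vector>

    struct WorkQueue {
      std::mutex mu;
      std::queue<int> items;
      bool Pop(int* out) {
        std::lock_guard<std::mutex> lock(mu);
        if (items.empty()) return false;
        *out = items.front();
        items.pop();
        return true;
      }
    };

    // Stand-in for CompileImportWrapperJob::Run: pop until the queue is empty.
    void DrainQueue(WorkQueue* queue) {
      int key;
      while (queue->Pop(&key)) {
        std::printf("compiled wrapper %d\n", key);  // CompileImportWrapper(...)
      }
    }

    int main() {
      WorkQueue queue;
      for (int i = 0; i < 16; ++i) queue.items.push(i);
      std::vector<std::thread> workers;
      for (int i = 0; i < 3; ++i) workers.emplace_back(DrainQueue, &queue);
      DrainQueue(&queue);  // The posting thread contributes, like Join().
      for (auto& t : workers) t.join();
    }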
diff --git a/deps/v8/src/wasm/streaming-decoder.cc b/deps/v8/src/wasm/streaming-decoder.cc
index c9f984aaee..d1312edd33 100644
--- a/deps/v8/src/wasm/streaming-decoder.cc
+++ b/deps/v8/src/wasm/streaming-decoder.cc
@@ -28,6 +28,8 @@ namespace wasm {
class V8_EXPORT_PRIVATE AsyncStreamingDecoder : public StreamingDecoder {
public:
explicit AsyncStreamingDecoder(std::unique_ptr<StreamingProcessor> processor);
+ AsyncStreamingDecoder(const AsyncStreamingDecoder&) = delete;
+ AsyncStreamingDecoder& operator=(const AsyncStreamingDecoder&) = delete;
// The buffer passed into OnBytesReceived is owned by the caller.
void OnBytesReceived(Vector<const uint8_t> bytes) override;
@@ -218,8 +220,6 @@ class V8_EXPORT_PRIVATE AsyncStreamingDecoder : public StreamingDecoder {
// We need wire bytes in an array for deserializing cached modules.
std::vector<uint8_t> wire_bytes_for_deserializing_;
-
- DISALLOW_COPY_AND_ASSIGN(AsyncStreamingDecoder);
};
void AsyncStreamingDecoder::OnBytesReceived(Vector<const uint8_t> bytes) {
@@ -517,10 +517,6 @@ size_t AsyncStreamingDecoder::DecodeVarInt32::ReadBytes(
Decoder decoder(buf,
streaming->module_offset() - static_cast<uint32_t>(offset()));
value_ = decoder.consume_u32v(field_name_);
- // The number of bytes we actually needed to read.
- DCHECK_GT(decoder.pc(), buffer().begin());
- bytes_consumed_ = static_cast<size_t>(decoder.pc() - buf.begin());
- TRACE_STREAMING(" ==> %zu bytes consumed\n", bytes_consumed_);
if (decoder.failed()) {
if (new_bytes == remaining_buf.size()) {
@@ -531,6 +527,11 @@ size_t AsyncStreamingDecoder::DecodeVarInt32::ReadBytes(
return new_bytes;
}
+ // The number of bytes we actually needed to read.
+ DCHECK_GT(decoder.pc(), buffer().begin());
+ bytes_consumed_ = static_cast<size_t>(decoder.pc() - buf.begin());
+ TRACE_STREAMING(" ==> %zu bytes consumed\n", bytes_consumed_);
+
// We read all the bytes we needed.
DCHECK_GT(bytes_consumed_, offset());
new_bytes = bytes_consumed_ - offset();
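The DecodeVarInt32 hunk above defers the bytes_consumed_ bookkeeping until after the decoder.failed() check, since the consumed-byte count is only meaningful on success. For reference, a minimal standalone LEB128 decoder in the spirit of consume_u32v (simplified sketch, not V8 code):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Returns true on success; *value and *length are only valid then.
    bool DecodeU32LEB128(const uint8_t* data, size_t size, uint32_t* value,
                         size_t* length) {
      uint32_t result = 0;
      int shift = 0;
      for (size_t i = 0; i < size && i < 5; ++i) {
        result |= static_cast<uint32_t>(data[i] & 0x7f) << shift;
        if ((data[i] & 0x80) == 0) {  // high bit clear: last byte of the varint
          *value = result;
          *length = i + 1;
          return true;
        }
        shift += 7;
      }
      return false;  // ran out of bytes (or varint longer than 5 bytes)
    }

    int main() {
      const uint8_t bytes[] = {0xE5, 0x8E, 0x26};  // encodes 624485
      uint32_t value;
      size_t length;
      if (DecodeU32LEB128(bytes, sizeof(bytes), &value, &length)) {
        std::printf("value=%u, consumed %zu bytes\n", value, length);
      }
    }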
diff --git a/deps/v8/src/wasm/value-type.h b/deps/v8/src/wasm/value-type.h
index 2e9a2a8d06..3731511c24 100644
--- a/deps/v8/src/wasm/value-type.h
+++ b/deps/v8/src/wasm/value-type.h
@@ -178,38 +178,7 @@ class ValueType {
#undef DEF_ENUM
};
- constexpr bool is_reference_type() const {
- return kind() == kRef || kind() == kOptRef || kind() == kRtt;
- }
-
- constexpr bool is_object_reference_type() const {
- return kind() == kRef || kind() == kOptRef;
- }
-
- constexpr bool is_packed() const { return kind() == kI8 || kind() == kI16; }
-
- constexpr bool is_nullable() const { return kind() == kOptRef; }
-
- constexpr bool is_reference_to(uint32_t htype) const {
- return (kind() == kRef || kind() == kOptRef) &&
- heap_representation() == htype;
- }
-
- constexpr bool is_defaultable() const {
- CONSTEXPR_DCHECK(kind() != kBottom && kind() != kStmt);
- return kind() != kRef && kind() != kRtt;
- }
-
- constexpr ValueType Unpacked() const {
- return is_packed() ? Primitive(kI32) : *this;
- }
-
- constexpr bool has_index() const {
- return is_reference_type() && heap_type().is_index();
- }
- constexpr bool is_rtt() const { return kind() == kRtt; }
- constexpr bool has_depth() const { return is_rtt(); }
-
+ /******************************* Constructors *******************************/
constexpr ValueType() : bit_field_(KindField::encode(kStmt)) {}
static constexpr ValueType Primitive(Kind kind) {
CONSTEXPR_DCHECK(kind == kBottom || kind <= kI16);
@@ -242,6 +211,43 @@ class ValueType {
return ValueType(bit_field);
}
+ /******************************** Type checks *******************************/
+ constexpr bool is_reference_type() const {
+ return kind() == kRef || kind() == kOptRef || kind() == kRtt;
+ }
+
+ constexpr bool is_object_reference_type() const {
+ return kind() == kRef || kind() == kOptRef;
+ }
+
+ constexpr bool is_nullable() const { return kind() == kOptRef; }
+
+ constexpr bool is_reference_to(uint32_t htype) const {
+ return (kind() == kRef || kind() == kOptRef) &&
+ heap_representation() == htype;
+ }
+
+ constexpr bool is_rtt() const { return kind() == kRtt; }
+ constexpr bool has_depth() const { return is_rtt(); }
+
+ constexpr bool has_index() const {
+ return is_reference_type() && heap_type().is_index();
+ }
+
+ constexpr bool is_defaultable() const {
+ CONSTEXPR_DCHECK(kind() != kBottom && kind() != kStmt);
+ return kind() != kRef && kind() != kRtt;
+ }
+
+ constexpr bool is_bottom() const { return kind() == kBottom; }
+
+ constexpr bool is_packed() const { return kind() == kI8 || kind() == kI16; }
+
+ constexpr ValueType Unpacked() const {
+ return is_packed() ? Primitive(kI32) : *this;
+ }
+
+ /***************************** Field Accessors ******************************/
constexpr Kind kind() const { return KindField::decode(bit_field_); }
constexpr HeapType::Representation heap_representation() const {
CONSTEXPR_DCHECK(is_reference_type());
@@ -263,6 +269,14 @@ class ValueType {
// Useful when serializing this type to store it into a runtime object.
constexpr uint32_t raw_bit_field() const { return bit_field_; }
+ /*************************** Other utility methods **************************/
+ constexpr bool operator==(ValueType other) const {
+ return bit_field_ == other.bit_field_;
+ }
+ constexpr bool operator!=(ValueType other) const {
+ return bit_field_ != other.bit_field_;
+ }
+
static constexpr size_t bit_field_offset() {
return offsetof(ValueType, bit_field_);
}
@@ -292,13 +306,7 @@ class ValueType {
return size;
}
- constexpr bool operator==(ValueType other) const {
- return bit_field_ == other.bit_field_;
- }
- constexpr bool operator!=(ValueType other) const {
- return bit_field_ != other.bit_field_;
- }
-
+ /*************************** Machine-type related ***************************/
constexpr MachineType machine_type() const {
CONSTEXPR_DCHECK(kBottom != kind());
@@ -316,6 +324,29 @@ class ValueType {
return machine_type().representation();
}
+ static ValueType For(MachineType type) {
+ switch (type.representation()) {
+ case MachineRepresentation::kWord8:
+ case MachineRepresentation::kWord16:
+ case MachineRepresentation::kWord32:
+ return Primitive(kI32);
+ case MachineRepresentation::kWord64:
+ return Primitive(kI64);
+ case MachineRepresentation::kFloat32:
+ return Primitive(kF32);
+ case MachineRepresentation::kFloat64:
+ return Primitive(kF64);
+ case MachineRepresentation::kTaggedPointer:
+ return Ref(HeapType::kExtern, kNullable);
+ case MachineRepresentation::kSimd128:
+ return Primitive(kS128);
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ /********************************* Encoding *********************************/
+
// Returns the first byte of this type's representation in the wasm binary
// format.
// For compatibility with the reftypes and exception-handling proposals, this
@@ -365,27 +396,9 @@ class ValueType {
heap_representation() == HeapType::kI31));
}
- static ValueType For(MachineType type) {
- switch (type.representation()) {
- case MachineRepresentation::kWord8:
- case MachineRepresentation::kWord16:
- case MachineRepresentation::kWord32:
- return Primitive(kI32);
- case MachineRepresentation::kWord64:
- return Primitive(kI64);
- case MachineRepresentation::kFloat32:
- return Primitive(kF32);
- case MachineRepresentation::kFloat64:
- return Primitive(kF64);
- case MachineRepresentation::kTaggedPointer:
- return Ref(HeapType::kExtern, kNullable);
- case MachineRepresentation::kSimd128:
- return Primitive(kS128);
- default:
- UNREACHABLE();
- }
- }
+ static constexpr int kLastUsedBit = 30;
+ /****************************** Pretty-printing *****************************/
constexpr char short_name() const {
constexpr char kShortName[] = {
#define SHORT_NAME(kind, log2Size, code, machineType, shortName, ...) shortName,
@@ -425,8 +438,6 @@ class ValueType {
return buf.str();
}
- static constexpr int kLastUsedBit = 30;
-
private:
// We only use 31 bits so ValueType fits in a Smi. This can be changed if
// needed.
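The value-type.h changes above only regroup members under banner comments; the representation stays a single bit field that must stay within 31 bits (kLastUsedBit = 30) so a ValueType fits in a Smi. A standalone sketch of that packing idea, with made-up field widths (the real class uses V8's BitField templates):

    #include <cassert>
    #include <cstdint>

    // Assumed widths, for illustration only; the real layout differs.
    constexpr uint32_t kKindBits = 5;
    constexpr uint32_t kHeapTypeBits = 20;
    static_assert(kKindBits + kHeapTypeBits <= 30, "must fit below bit 31 (Smi)");

    constexpr uint32_t Encode(uint32_t kind, uint32_t heap_type) {
      return kind | (heap_type << kKindBits);
    }
    constexpr uint32_t DecodeKind(uint32_t bits) {
      return bits & ((1u << kKindBits) - 1);
    }
    constexpr uint32_t DecodeHeapType(uint32_t bits) { return bits >> kKindBits; }

    int main() {
      constexpr uint32_t bits = Encode(/*kind=*/3, /*heap_type=*/42);
      static_assert(DecodeKind(bits) == 3, "");
      static_assert(DecodeHeapType(bits) == 42, "");
      assert(DecodeKind(bits) == 3);
    }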
diff --git a/deps/v8/src/wasm/wasm-code-manager.cc b/deps/v8/src/wasm/wasm-code-manager.cc
index ac68dc970c..cd90524599 100644
--- a/deps/v8/src/wasm/wasm-code-manager.cc
+++ b/deps/v8/src/wasm/wasm-code-manager.cc
@@ -323,8 +323,12 @@ void WasmCode::Validate() const {
void WasmCode::MaybePrint(const char* name) const {
// Determines whether flags want this code to be printed.
- if ((FLAG_print_wasm_code && kind() == kFunction) ||
- (FLAG_print_wasm_stub_code && kind() != kFunction) || FLAG_print_code) {
+ bool function_index_matches =
+ (!IsAnonymous() &&
+ FLAG_print_wasm_code_function_index == static_cast<int>(index()));
+ if (FLAG_print_code ||
+ (kind() == kFunction ? (FLAG_print_wasm_code || function_index_matches)
+ : FLAG_print_wasm_stub_code)) {
Print(name);
}
}
@@ -854,7 +858,7 @@ void NativeModule::ReserveCodeTableForTesting(uint32_t max_functions) {
void NativeModule::LogWasmCodes(Isolate* isolate) {
if (!WasmCode::ShouldBeLogged(isolate)) return;
- TRACE_EVENT1("v8.wasm", "wasm.LogWasmCodes", "num_functions",
+ TRACE_EVENT1("v8.wasm", "wasm.LogWasmCodes", "functions",
module_->num_declared_functions);
// TODO(titzer): we skip the logging of the import wrappers
@@ -874,11 +878,7 @@ CompilationEnv NativeModule::CreateCompilationEnv() const {
WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
CODE_SPACE_WRITE_SCOPE
- // For off-heap builtins, we create a copy of the off-heap instruction stream
- // instead of the on-heap code object containing the trampoline. Ensure that
- // we do not apply the on-heap reloc info to the off-heap instructions.
- const size_t relocation_size =
- code->is_off_heap_trampoline() ? 0 : code->relocation_size();
+ const size_t relocation_size = code->relocation_size();
OwnedVector<byte> reloc_info;
if (relocation_size > 0) {
reloc_info = OwnedVector<byte>::Of(
@@ -892,19 +892,25 @@ WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
source_pos_table->copy_out(0, source_pos.start(),
source_pos_table->length());
}
+ CHECK(!code->is_off_heap_trampoline());
+ STATIC_ASSERT(Code::kOnHeapBodyIsContiguous);
Vector<const byte> instructions(
- reinterpret_cast<byte*>(code->InstructionStart()),
- static_cast<size_t>(code->InstructionSize()));
+ reinterpret_cast<byte*>(code->raw_body_start()),
+ static_cast<size_t>(code->raw_body_size()));
const int stack_slots = code->has_safepoint_info() ? code->stack_slots() : 0;
+ // Metadata offsets in Code objects are relative to the start of the metadata
+ // section, whereas WasmCode expects offsets relative to InstructionStart.
+ const int base_offset = code->raw_instruction_size();
// TODO(jgruber,v8:8758): Remove this translation. It exists only because
// Code objects contains real offsets but WasmCode expects an offset of 0 to
// mean 'empty'.
const int safepoint_table_offset =
- code->has_safepoint_table() ? code->safepoint_table_offset() : 0;
- const int handler_table_offset = code->handler_table_offset();
- const int constant_pool_offset = code->constant_pool_offset();
- const int code_comments_offset = code->code_comments_offset();
+ code->has_safepoint_table() ? base_offset + code->safepoint_table_offset()
+ : 0;
+ const int handler_table_offset = base_offset + code->handler_table_offset();
+ const int constant_pool_offset = base_offset + code->constant_pool_offset();
+ const int code_comments_offset = base_offset + code->code_comments_offset();
Vector<uint8_t> dst_code_bytes =
code_allocator_.AllocateForCode(this, instructions.size());
@@ -912,7 +918,7 @@ WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
// Apply the relocation delta by iterating over the RelocInfo.
intptr_t delta = reinterpret_cast<Address>(dst_code_bytes.begin()) -
- code->InstructionStart();
+ code->raw_instruction_start();
int mode_mask =
RelocInfo::kApplyMask | RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL);
auto jump_tables_ref =
@@ -1081,12 +1087,16 @@ std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
}
WasmCode* NativeModule::PublishCode(std::unique_ptr<WasmCode> code) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.PublishCode");
base::MutexGuard lock(&allocation_mutex_);
return PublishCodeLocked(std::move(code));
}
std::vector<WasmCode*> NativeModule::PublishCode(
Vector<std::unique_ptr<WasmCode>> codes) {
+ TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.PublishCode", "number", codes.size());
std::vector<WasmCode*> published_code;
published_code.reserve(codes.size());
base::MutexGuard lock(&allocation_mutex_);
@@ -1362,10 +1372,10 @@ void NativeModule::AddCodeSpace(
WASM_RUNTIME_STUB_LIST(RUNTIME_STUB, RUNTIME_STUB_TRAP)};
#undef RUNTIME_STUB
#undef RUNTIME_STUB_TRAP
+ STATIC_ASSERT(Builtins::kAllBuiltinsAreIsolateIndependent);
Address builtin_addresses[WasmCode::kRuntimeStubCount];
for (int i = 0; i < WasmCode::kRuntimeStubCount; ++i) {
Builtins::Name builtin = stub_names[i];
- CHECK(embedded_data.ContainsBuiltin(builtin));
builtin_addresses[i] = embedded_data.InstructionStartOfBuiltin(builtin);
}
JumpTableAssembler::GenerateFarJumpTable(
@@ -1468,7 +1478,11 @@ NativeModule::JumpTablesRef NativeModule::FindJumpTablesForRegion(
size_t max_distance = std::max(
code_region.end() > table_start ? code_region.end() - table_start : 0,
table_end > code_region.begin() ? table_end - code_region.begin() : 0);
- return max_distance < WasmCodeAllocator::kMaxCodeSpaceSize;
+ // We can allow a max_distance that is equal to kMaxCodeSpaceSize, because
+ // every call or jump will target an address *within* the region, but never
+  // exactly the end of the region. So all occurring offsets are actually
+ // smaller than max_distance.
+ return max_distance <= WasmCodeAllocator::kMaxCodeSpaceSize;
};
// Fast path: Try to use {main_jump_table_} and {main_far_jump_table_}.
@@ -1881,6 +1895,8 @@ std::unique_ptr<WasmCode> NativeModule::AddCompiledCode(
std::vector<std::unique_ptr<WasmCode>> NativeModule::AddCompiledCode(
Vector<WasmCompilationResult> results) {
+ TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.AddCompiledCode", "num", results.size());
DCHECK(!results.empty());
// First, allocate code space for all the results.
size_t total_code_space = 0;
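The FindJumpTablesForRegion hunk above relaxes the distance check from < to <=: every call or jump targets an address strictly inside the region, so an offset equal to kMaxCodeSpaceSize can never actually occur. A standalone sketch of the same max-distance computation with hypothetical addresses (the constant's value is assumed here, not taken from V8):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t kMaxCodeSpaceSize = uint64_t{1} << 30;  // assumed, 1 GiB
      // Hypothetical code region and jump-table region.
      uint64_t code_begin = 0x10000000, code_end = 0x18000000;
      uint64_t table_start = 0x20000000, table_end = 0x20001000;
      uint64_t max_distance =
          std::max(code_end > table_start ? code_end - table_start : uint64_t{0},
                   table_end > code_begin ? table_end - code_begin : uint64_t{0});
      // Every real call offset is strictly smaller than max_distance, so
      // equality with the maximum code-space size is still usable.
      bool usable = max_distance <= kMaxCodeSpaceSize;
      std::printf("max_distance=%llu usable=%d\n",
                  static_cast<unsigned long long>(max_distance), usable);
    }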
diff --git a/deps/v8/src/wasm/wasm-code-manager.h b/deps/v8/src/wasm/wasm-code-manager.h
index 5e8ed5475b..f017b977b5 100644
--- a/deps/v8/src/wasm/wasm-code-manager.h
+++ b/deps/v8/src/wasm/wasm-code-manager.h
@@ -57,6 +57,7 @@ struct WasmModule;
V(WasmFloat64ToNumber) \
V(WasmTaggedToFloat64) \
V(WasmAllocateJSArray) \
+ V(WasmAllocatePair) \
V(WasmAtomicNotify) \
V(WasmI32AtomicWait32) \
V(WasmI32AtomicWait64) \
@@ -200,6 +201,8 @@ class V8_EXPORT_PRIVATE WasmCode final {
static bool ShouldBeLogged(Isolate* isolate);
void LogCode(Isolate* isolate) const;
+ WasmCode(const WasmCode&) = delete;
+ WasmCode& operator=(const WasmCode&) = delete;
~WasmCode();
void IncRef() {
@@ -348,8 +351,6 @@ class V8_EXPORT_PRIVATE WasmCode final {
// from (3) and all (2)), the code object is deleted and the memory for the
// machine code is freed.
std::atomic<int> ref_count_{1};
-
- DISALLOW_COPY_AND_ASSIGN(WasmCode);
};
// Check that {WasmCode} objects are sufficiently small. We create many of them,
@@ -476,6 +477,10 @@ class V8_EXPORT_PRIVATE NativeModule final {
static constexpr bool kNeedsFarJumpsBetweenCodeSpaces = false;
#endif
+ NativeModule(const NativeModule&) = delete;
+ NativeModule& operator=(const NativeModule&) = delete;
+ ~NativeModule();
+
// {AddCode} is thread safe w.r.t. other calls to {AddCode} or methods adding
// code below, i.e. it can be called concurrently from background threads.
// The returned code still needs to be published via {PublishCode}.
@@ -612,8 +617,6 @@ class V8_EXPORT_PRIVATE NativeModule final {
return import_wrapper_cache_.get();
}
- ~NativeModule();
-
const WasmFeatures& enabled_features() const { return enabled_features_; }
// Returns the runtime stub id that corresponds to the given address (which
@@ -794,13 +797,13 @@ class V8_EXPORT_PRIVATE NativeModule final {
std::atomic<size_t> liftoff_bailout_count_{0};
std::atomic<size_t> liftoff_code_size_{0};
std::atomic<size_t> turbofan_code_size_{0};
-
- DISALLOW_COPY_AND_ASSIGN(NativeModule);
};
class V8_EXPORT_PRIVATE WasmCodeManager final {
public:
explicit WasmCodeManager(size_t max_committed);
+ WasmCodeManager(const WasmCodeManager&) = delete;
+ WasmCodeManager& operator=(const WasmCodeManager&) = delete;
#ifdef DEBUG
~WasmCodeManager() {
@@ -872,8 +875,6 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
// End of fields protected by {native_modules_mutex_}.
//////////////////////////////////////////////////////////////////////////////
-
- DISALLOW_COPY_AND_ASSIGN(WasmCodeManager);
};
// Within the scope, the native_module is writable and not executable.
@@ -901,6 +902,8 @@ class NativeModuleModificationScope final {
class V8_EXPORT_PRIVATE WasmCodeRefScope {
public:
WasmCodeRefScope();
+ WasmCodeRefScope(const WasmCodeRefScope&) = delete;
+ WasmCodeRefScope& operator=(const WasmCodeRefScope&) = delete;
~WasmCodeRefScope();
// Register a {WasmCode} reference in the current {WasmCodeRefScope}. Fails if
@@ -910,8 +913,6 @@ class V8_EXPORT_PRIVATE WasmCodeRefScope {
private:
WasmCodeRefScope* const previous_scope_;
std::unordered_set<WasmCode*> code_ptrs_;
-
- DISALLOW_COPY_AND_ASSIGN(WasmCodeRefScope);
};
// Similarly to a global handle, a {GlobalWasmCodeRef} stores a single
@@ -924,6 +925,9 @@ class GlobalWasmCodeRef {
code_->IncRef();
}
+ GlobalWasmCodeRef(const GlobalWasmCodeRef&) = delete;
+ GlobalWasmCodeRef& operator=(const GlobalWasmCodeRef&) = delete;
+
~GlobalWasmCodeRef() { WasmCode::DecrementRefCount({&code_, 1}); }
// Get a pointer to the contained {WasmCode} object. This is only guaranteed
@@ -934,7 +938,6 @@ class GlobalWasmCodeRef {
WasmCode* const code_;
// Also keep the {NativeModule} alive.
const std::shared_ptr<NativeModule> native_module_;
- DISALLOW_COPY_AND_ASSIGN(GlobalWasmCodeRef);
};
const char* GetRuntimeStubName(WasmCode::RuntimeStubId);
diff --git a/deps/v8/src/wasm/wasm-constants.h b/deps/v8/src/wasm/wasm-constants.h
index 4e701599fc..31a519ee2e 100644
--- a/deps/v8/src/wasm/wasm-constants.h
+++ b/deps/v8/src/wasm/wasm-constants.h
@@ -123,6 +123,11 @@ constexpr uint32_t kExceptionAttribute = 0;
constexpr int kAnonymousFuncIndex = -1;
+// The number of calls to an exported wasm function that will be handled
+// by the generic wrapper. Once this threshold is reached, a specific wrapper
+// is to be compiled for the function's signature.
+constexpr uint32_t kGenericWrapperThreshold = 6;
+
} // namespace wasm
} // namespace internal
} // namespace v8
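kGenericWrapperThreshold above bounds how many calls to an exported function go through the generic JS-to-wasm wrapper before a signature-specific wrapper gets compiled. One possible shape of such a call-count budget, as a standalone sketch (not the actual wrapper machinery):

    #include <cstdint>
    #include <cstdio>

    constexpr uint32_t kGenericWrapperThreshold = 6;

    struct ExportedFunction {
      uint32_t remaining_generic_calls = kGenericWrapperThreshold;
      bool has_specific_wrapper = false;
    };

    void CallThroughWrapper(ExportedFunction* fn) {
      if (fn->has_specific_wrapper) {
        std::printf("fast path: specific wrapper\n");
        return;
      }
      std::printf("slow path: generic wrapper\n");
      if (--fn->remaining_generic_calls == 0) {
        fn->has_specific_wrapper = true;  // stands in for compiling the wrapper
      }
    }

    int main() {
      ExportedFunction fn;
      for (int i = 0; i < 8; ++i) CallThroughWrapper(&fn);
    }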
diff --git a/deps/v8/src/wasm/wasm-debug-evaluate.cc b/deps/v8/src/wasm/wasm-debug-evaluate.cc
index d8abe49679..bbd75f6b18 100644
--- a/deps/v8/src/wasm/wasm-debug-evaluate.cc
+++ b/deps/v8/src/wasm/wasm-debug-evaluate.cc
@@ -81,7 +81,7 @@ static bool CheckRangeOutOfBounds(uint32_t offset, uint32_t size,
class DebugEvaluatorProxy {
public:
- explicit DebugEvaluatorProxy(Isolate* isolate, StandardFrame* frame)
+ explicit DebugEvaluatorProxy(Isolate* isolate, CommonFrame* frame)
: isolate_(isolate), frame_(frame) {}
static void GetMemoryTrampoline(
@@ -283,7 +283,7 @@ class DebugEvaluatorProxy {
}
Isolate* isolate_;
- StandardFrame* frame_;
+ CommonFrame* frame_;
Handle<WasmInstanceObject> evaluator_;
Handle<WasmInstanceObject> debuggee_;
};
@@ -356,7 +356,7 @@ static bool VerifyEvaluatorInterface(const WasmModule* raw_module,
Maybe<std::string> DebugEvaluateImpl(
Vector<const byte> snippet, Handle<WasmInstanceObject> debuggee_instance,
- StandardFrame* frame) {
+ CommonFrame* frame) {
Isolate* isolate = debuggee_instance->GetIsolate();
HandleScope handle_scope(isolate);
WasmEngine* engine = isolate->wasm_engine();
@@ -433,7 +433,7 @@ Maybe<std::string> DebugEvaluateImpl(
MaybeHandle<String> DebugEvaluate(Vector<const byte> snippet,
Handle<WasmInstanceObject> debuggee_instance,
- StandardFrame* frame) {
+ CommonFrame* frame) {
Maybe<std::string> result =
DebugEvaluateImpl(snippet, debuggee_instance, frame);
if (result.IsNothing()) return {};
diff --git a/deps/v8/src/wasm/wasm-debug-evaluate.h b/deps/v8/src/wasm/wasm-debug-evaluate.h
index f4e3aef175..ab84a736a8 100644
--- a/deps/v8/src/wasm/wasm-debug-evaluate.h
+++ b/deps/v8/src/wasm/wasm-debug-evaluate.h
@@ -13,9 +13,9 @@ namespace v8 {
namespace internal {
namespace wasm {
-MaybeHandle<String> V8_EXPORT_PRIVATE DebugEvaluate(
- Vector<const byte> snippet, Handle<WasmInstanceObject> debuggee_instance,
- StandardFrame* frame);
+MaybeHandle<String> V8_EXPORT_PRIVATE
+DebugEvaluate(Vector<const byte> snippet,
+ Handle<WasmInstanceObject> debuggee_instance, CommonFrame* frame);
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/wasm-debug.cc b/deps/v8/src/wasm/wasm-debug.cc
index d05caa4144..5da5525045 100644
--- a/deps/v8/src/wasm/wasm-debug.cc
+++ b/deps/v8/src/wasm/wasm-debug.cc
@@ -51,60 +51,6 @@ Handle<String> PrintFToOneByteString(Isolate* isolate, const char* format,
: isolate->factory()->NewStringFromOneByte(name).ToHandleChecked();
}
-MaybeHandle<JSObject> CreateFunctionTablesObject(
- Handle<WasmInstanceObject> instance) {
- Isolate* isolate = instance->GetIsolate();
- auto tables = handle(instance->tables(), isolate);
- if (tables->length() == 0) return MaybeHandle<JSObject>();
-
- const char* table_label = "table%d";
- Handle<JSObject> tables_obj = isolate->factory()->NewJSObjectWithNullProto();
- for (int table_index = 0; table_index < tables->length(); ++table_index) {
- auto func_table =
- handle(WasmTableObject::cast(tables->get(table_index)), isolate);
- if (!IsSubtypeOf(func_table->type(), kWasmFuncRef, instance->module()))
- continue;
-
- Handle<String> table_name;
- if (!WasmInstanceObject::GetTableNameOrNull(isolate, instance, table_index)
- .ToHandle(&table_name)) {
- table_name =
- PrintFToOneByteString<true>(isolate, table_label, table_index);
- }
-
- Handle<JSObject> func_table_obj =
- isolate->factory()->NewJSObjectWithNullProto();
- JSObject::AddProperty(isolate, tables_obj, table_name, func_table_obj,
- NONE);
- for (int i = 0; i < func_table->current_length(); ++i) {
- Handle<Object> func = WasmTableObject::Get(isolate, func_table, i);
- DCHECK(!WasmCapiFunction::IsWasmCapiFunction(*func));
- if (func->IsNull(isolate)) continue;
-
- Handle<String> func_name;
- Handle<JSObject> func_obj =
- isolate->factory()->NewJSObjectWithNullProto();
-
- if (WasmExportedFunction::IsWasmExportedFunction(*func)) {
- auto target_func = Handle<WasmExportedFunction>::cast(func);
- auto target_instance = handle(target_func->instance(), isolate);
- auto module = handle(target_instance->module_object(), isolate);
- func_name = WasmModuleObject::GetFunctionName(
- isolate, module, target_func->function_index());
- } else if (WasmJSFunction::IsWasmJSFunction(*func)) {
- auto target_func = Handle<JSFunction>::cast(func);
- func_name = JSFunction::GetName(target_func);
- if (func_name->length() == 0) {
- func_name = isolate->factory()->InternalizeUtf8String("anonymous");
- }
- }
- JSObject::AddProperty(isolate, func_obj, func_name, func, NONE);
- JSObject::AddDataElement(func_table_obj, i, func_obj, NONE);
- }
- }
- return tables_obj;
-}
-
Handle<Object> WasmValueToValueObject(Isolate* isolate, WasmValue value) {
Handle<ByteArray> bytes;
switch (value.type().kind()) {
@@ -164,8 +110,8 @@ MaybeHandle<String> GetLocalNameString(Isolate* isolate,
ModuleWireBytes wire_bytes{native_module->wire_bytes()};
// Bounds were checked during decoding.
DCHECK(wire_bytes.BoundsCheck(name_ref));
- Vector<const char> name = wire_bytes.GetNameOrNull(name_ref);
- if (name.begin() == nullptr) return {};
+ WasmName name = wire_bytes.GetNameOrNull(name_ref);
+ if (name.size() == 0) return {};
return isolate->factory()->NewStringFromUtf8(name);
}
@@ -272,14 +218,6 @@ Handle<JSObject> GetModuleScopeObject(Handle<WasmInstanceObject> instance) {
NONE);
}
- Handle<JSObject> function_tables_obj;
- if (CreateFunctionTablesObject(instance).ToHandle(&function_tables_obj)) {
- Handle<String> tables_name = isolate->factory()->InternalizeString(
- StaticCharVector("function tables"));
- JSObject::AddProperty(isolate, module_scope_object, tables_name,
- function_tables_obj, NONE);
- }
-
auto& globals = instance->module()->globals;
if (globals.size() > 0) {
Handle<JSObject> globals_obj =
@@ -310,6 +248,9 @@ class DebugInfoImpl {
explicit DebugInfoImpl(NativeModule* native_module)
: native_module_(native_module) {}
+ DebugInfoImpl(const DebugInfoImpl&) = delete;
+ DebugInfoImpl& operator=(const DebugInfoImpl&) = delete;
+
int GetNumLocals(Address pc) {
FrameInspectionScope scope(this, pc);
if (!scope.is_inspectable()) return 0;
@@ -340,6 +281,12 @@ class DebugInfoImpl {
debug_break_fp);
}
+ const WasmFunction& GetFunctionAtAddress(Address pc) {
+ FrameInspectionScope scope(this, pc);
+ auto* module = native_module_->module();
+ return module->functions[scope.code->index()];
+ }
+
Handle<JSObject> GetLocalScopeObject(Isolate* isolate, Address pc, Address fp,
Address debug_break_fp) {
FrameInspectionScope scope(this, pc);
@@ -886,8 +833,6 @@ class DebugInfoImpl {
// Isolate-specific data.
std::unordered_map<Isolate*, PerIsolateDebugData> per_isolate_data_;
-
- DISALLOW_COPY_AND_ASSIGN(DebugInfoImpl);
};
DebugInfo::DebugInfo(NativeModule* native_module)
@@ -909,6 +854,10 @@ WasmValue DebugInfo::GetStackValue(int index, Address pc, Address fp,
return impl_->GetStackValue(index, pc, fp, debug_break_fp);
}
+const wasm::WasmFunction& DebugInfo::GetFunctionAtAddress(Address pc) {
+ return impl_->GetFunctionAtAddress(pc);
+}
+
Handle<JSObject> DebugInfo::GetLocalScopeObject(Isolate* isolate, Address pc,
Address fp,
Address debug_break_fp) {
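The wasm-debug.cc changes above add DebugInfo::GetFunctionAtAddress, mapping a program counter back to the WasmFunction whose code contains it. A standalone sketch of that kind of lookup over sorted, non-overlapping code ranges (illustrative types, not V8 code):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct FunctionRange { uint64_t start, end; int func_index; };

    // Assumes ranges are sorted by start and non-overlapping.
    int FunctionIndexForPc(const std::vector<FunctionRange>& ranges, uint64_t pc) {
      auto it = std::upper_bound(
          ranges.begin(), ranges.end(), pc,
          [](uint64_t value, const FunctionRange& r) { return value < r.start; });
      if (it == ranges.begin()) return -1;
      --it;
      return pc < it->end ? it->func_index : -1;
    }

    int main() {
      std::vector<FunctionRange> ranges = {{0x100, 0x180, 0}, {0x180, 0x220, 1}};
      std::printf("%d\n", FunctionIndexForPc(ranges, 0x1a0));  // prints 1
    }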
diff --git a/deps/v8/src/wasm/wasm-debug.h b/deps/v8/src/wasm/wasm-debug.h
index 6050cb3a58..82fe974952 100644
--- a/deps/v8/src/wasm/wasm-debug.h
+++ b/deps/v8/src/wasm/wasm-debug.h
@@ -34,6 +34,7 @@ class NativeModule;
class WasmCode;
class WireBytesRef;
class WasmValue;
+struct WasmFunction;
// Side table storing information used to inspect Liftoff frames at runtime.
// This table is only created on demand for debugging, so it is not optimized
@@ -153,6 +154,9 @@ class V8_EXPORT_PRIVATE DebugInfo {
WasmValue GetLocalValue(int local, Address pc, Address fp,
Address debug_break_fp);
int GetStackDepth(Address pc);
+
+ const wasm::WasmFunction& GetFunctionAtAddress(Address pc);
+
WasmValue GetStackValue(int index, Address pc, Address fp,
Address debug_break_fp);
diff --git a/deps/v8/src/wasm/wasm-engine.cc b/deps/v8/src/wasm/wasm-engine.cc
index 9699516c27..9f962f76bd 100644
--- a/deps/v8/src/wasm/wasm-engine.cc
+++ b/deps/v8/src/wasm/wasm-engine.cc
@@ -419,7 +419,7 @@ WasmEngine::~WasmEngine() {
compile_job_handles = compile_job_handles_;
}
for (auto& job_handle : compile_job_handles) {
- if (job_handle->IsRunning()) job_handle->Cancel();
+ if (job_handle->IsValid()) job_handle->Cancel();
}
// All AsyncCompileJobs have been canceled.
@@ -1036,8 +1036,7 @@ void WasmEngine::LogOutstandingCodesForIsolate(Isolate* isolate) {
DCHECK_EQ(1, isolates_.count(isolate));
code_to_log.swap(isolates_[isolate]->code_to_log);
}
- TRACE_EVENT1("v8.wasm", "wasm.LogCode", "num_code_objects",
- code_to_log.size());
+ TRACE_EVENT1("v8.wasm", "wasm.LogCode", "codeObjects", code_to_log.size());
if (code_to_log.empty()) return;
for (WasmCode* code : code_to_log) {
code->LogCode(isolate);
diff --git a/deps/v8/src/wasm/wasm-engine.h b/deps/v8/src/wasm/wasm-engine.h
index 2d96111462..a38308110b 100644
--- a/deps/v8/src/wasm/wasm-engine.h
+++ b/deps/v8/src/wasm/wasm-engine.h
@@ -137,6 +137,8 @@ class NativeModuleCache {
class V8_EXPORT_PRIVATE WasmEngine {
public:
WasmEngine();
+ WasmEngine(const WasmEngine&) = delete;
+ WasmEngine& operator=(const WasmEngine&) = delete;
~WasmEngine();
// Synchronously validates the given bytes that represent an encoded Wasm
@@ -413,8 +415,6 @@ class V8_EXPORT_PRIVATE WasmEngine {
// End of fields protected by {mutex_}.
//////////////////////////////////////////////////////////////////////////////
-
- DISALLOW_COPY_AND_ASSIGN(WasmEngine);
};
} // namespace wasm
diff --git a/deps/v8/src/wasm/wasm-external-refs.cc b/deps/v8/src/wasm/wasm-external-refs.cc
index de1dd5e9df..e8e8cf8d50 100644
--- a/deps/v8/src/wasm/wasm-external-refs.cc
+++ b/deps/v8/src/wasm/wasm-external-refs.cc
@@ -5,12 +5,13 @@
#include <math.h>
#include <stdint.h>
#include <stdlib.h>
+
#include <limits>
#include "include/v8config.h"
-
#include "src/base/bits.h"
#include "src/base/ieee754.h"
+#include "src/base/safe_conversions.h"
#include "src/common/assert-scope.h"
#include "src/utils/memcopy.h"
#include "src/wasm/wasm-objects-inl.h"
@@ -179,12 +180,8 @@ void uint64_to_float64_wrapper(Address data) {
}
int32_t float32_to_int64_wrapper(Address data) {
- // We use "<" here to check the upper bound because of rounding problems: With
- // "<=" some inputs would be considered within int64 range which are actually
- // not within int64 range.
float input = ReadUnalignedValue<float>(data);
- if (input >= static_cast<float>(std::numeric_limits<int64_t>::min()) &&
- input < static_cast<float>(std::numeric_limits<int64_t>::max())) {
+ if (base::IsValueInRangeForNumericType<int64_t>(input)) {
WriteUnalignedValue<int64_t>(data, static_cast<int64_t>(input));
return 1;
}
@@ -193,11 +190,7 @@ int32_t float32_to_int64_wrapper(Address data) {
int32_t float32_to_uint64_wrapper(Address data) {
float input = ReadUnalignedValue<float>(data);
- // We use "<" here to check the upper bound because of rounding problems: With
- // "<=" some inputs would be considered within uint64 range which are actually
- // not within uint64 range.
- if (input > -1.0 &&
- input < static_cast<float>(std::numeric_limits<uint64_t>::max())) {
+ if (base::IsValueInRangeForNumericType<uint64_t>(input)) {
WriteUnalignedValue<uint64_t>(data, static_cast<uint64_t>(input));
return 1;
}
@@ -205,12 +198,8 @@ int32_t float32_to_uint64_wrapper(Address data) {
}
int32_t float64_to_int64_wrapper(Address data) {
- // We use "<" here to check the upper bound because of rounding problems: With
- // "<=" some inputs would be considered within int64 range which are actually
- // not within int64 range.
double input = ReadUnalignedValue<double>(data);
- if (input >= static_cast<double>(std::numeric_limits<int64_t>::min()) &&
- input < static_cast<double>(std::numeric_limits<int64_t>::max())) {
+ if (base::IsValueInRangeForNumericType<int64_t>(input)) {
WriteUnalignedValue<int64_t>(data, static_cast<int64_t>(input));
return 1;
}
@@ -218,12 +207,8 @@ int32_t float64_to_int64_wrapper(Address data) {
}
int32_t float64_to_uint64_wrapper(Address data) {
- // We use "<" here to check the upper bound because of rounding problems: With
- // "<=" some inputs would be considered within uint64 range which are actually
- // not within uint64 range.
double input = ReadUnalignedValue<double>(data);
- if (input > -1.0 &&
- input < static_cast<double>(std::numeric_limits<uint64_t>::max())) {
+ if (base::IsValueInRangeForNumericType<uint64_t>(input)) {
WriteUnalignedValue<uint64_t>(data, static_cast<uint64_t>(input));
return 1;
}
@@ -232,11 +217,7 @@ int32_t float64_to_uint64_wrapper(Address data) {
void float32_to_int64_sat_wrapper(Address data) {
float input = ReadUnalignedValue<float>(data);
- // We use "<" here to check the upper bound because of rounding problems: With
- // "<=" some inputs would be considered within int64 range which are actually
- // not within int64 range.
- if (input < static_cast<float>(std::numeric_limits<int64_t>::max()) &&
- input >= static_cast<float>(std::numeric_limits<int64_t>::min())) {
+ if (base::IsValueInRangeForNumericType<int64_t>(input)) {
WriteUnalignedValue<int64_t>(data, static_cast<int64_t>(input));
return;
}
@@ -253,11 +234,7 @@ void float32_to_int64_sat_wrapper(Address data) {
void float32_to_uint64_sat_wrapper(Address data) {
float input = ReadUnalignedValue<float>(data);
- // We use "<" here to check the upper bound because of rounding problems: With
- // "<=" some inputs would be considered within uint64 range which are actually
- // not within uint64 range.
- if (input < static_cast<float>(std::numeric_limits<uint64_t>::max()) &&
- input >= 0.0) {
+ if (base::IsValueInRangeForNumericType<uint64_t>(input)) {
WriteUnalignedValue<uint64_t>(data, static_cast<uint64_t>(input));
return;
}
@@ -270,11 +247,7 @@ void float32_to_uint64_sat_wrapper(Address data) {
void float64_to_int64_sat_wrapper(Address data) {
double input = ReadUnalignedValue<double>(data);
- // We use "<" here to check the upper bound because of rounding problems: With
- // "<=" some inputs would be considered within int64 range which are actually
- // not within int64 range.
- if (input < static_cast<double>(std::numeric_limits<int64_t>::max()) &&
- input >= static_cast<double>(std::numeric_limits<int64_t>::min())) {
+ if (base::IsValueInRangeForNumericType<int64_t>(input)) {
WriteUnalignedValue<int64_t>(data, static_cast<int64_t>(input));
return;
}
@@ -291,11 +264,7 @@ void float64_to_int64_sat_wrapper(Address data) {
void float64_to_uint64_sat_wrapper(Address data) {
double input = ReadUnalignedValue<double>(data);
- // We use "<" here to check the upper bound because of rounding problems: With
- // "<=" some inputs would be considered within int64 range which are actually
- // not within int64 range.
- if (input < static_cast<double>(std::numeric_limits<uint64_t>::max()) &&
- input >= 0.0) {
+ if (base::IsValueInRangeForNumericType<uint64_t>(input)) {
WriteUnalignedValue<uint64_t>(data, static_cast<uint64_t>(input));
return;
}
@@ -405,9 +374,12 @@ template <typename T, T (*float_round_op)(T)>
void simd_float_round_wrapper(Address data) {
constexpr int n = kSimd128Size / sizeof(T);
for (int i = 0; i < n; i++) {
- WriteUnalignedValue<T>(
- data + (i * sizeof(T)),
- float_round_op(ReadUnalignedValue<T>(data + (i * sizeof(T)))));
+ T input = ReadUnalignedValue<T>(data + (i * sizeof(T)));
+ T value = float_round_op(input);
+#if V8_OS_AIX
+ value = FpOpWorkaround<T>(input, value);
+#endif
+ WriteUnalignedValue<T>(data + (i * sizeof(T)), value);
}
}
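The float-to-int64 hunks above swap the hand-written bound checks for base::IsValueInRangeForNumericType. The subtlety the removed comments described still matters: static_cast<float>(INT64_MAX) rounds up to 2^63, which is not representable in int64_t, so the upper bound has to be a strict comparison. A standalone sketch of an equivalent range check (not V8 code):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>
    #include <limits>

    bool FloatFitsInt64(float input) {
      // 2^63 itself is exactly representable as a float but not as an int64_t,
      // so the upper bound must be strict; NaN fails both comparisons.
      return input >= static_cast<float>(std::numeric_limits<int64_t>::min()) &&
             input < 9223372036854775808.0f;  // 2^63
    }

    int main() {
      std::printf("%d\n", FloatFitsInt64(1.0f));                    // 1
      std::printf("%d\n", FloatFitsInt64(9223372036854775808.0f));  // 0
      std::printf("%d\n", FloatFitsInt64(std::nanf("")));           // 0
    }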
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index 16f4c5d3f9..4edd23eecf 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -10,25 +10,31 @@
#include "src/api/api-inl.h"
#include "src/api/api-natives.h"
#include "src/ast/ast.h"
+#include "src/base/logging.h"
#include "src/base/overflowing-math.h"
#include "src/common/assert-scope.h"
#include "src/execution/execution.h"
+#include "src/execution/frames-inl.h"
#include "src/execution/isolate.h"
#include "src/handles/handles.h"
#include "src/heap/factory.h"
#include "src/init/v8.h"
+#include "src/objects/js-collection-inl.h"
#include "src/objects/js-promise-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/templates.h"
#include "src/parsing/parse-info.h"
#include "src/tasks/task-utils.h"
#include "src/trap-handler/trap-handler.h"
+#include "src/wasm/function-compiler.h"
#include "src/wasm/streaming-decoder.h"
#include "src/wasm/value-type.h"
+#include "src/wasm/wasm-debug.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-serialization.h"
+#include "src/wasm/wasm-value.h"
using v8::internal::wasm::ErrorThrower;
using v8::internal::wasm::ScheduledErrorThrower;
@@ -102,7 +108,7 @@ WasmStreaming::WasmStreaming(std::unique_ptr<WasmStreamingImpl> impl)
WasmStreaming::~WasmStreaming() = default;
void WasmStreaming::OnBytesReceived(const uint8_t* bytes, size_t size) {
- TRACE_EVENT1("v8.wasm", "wasm.OnBytesReceived", "num_bytes", size);
+ TRACE_EVENT1("v8.wasm", "wasm.OnBytesReceived", "bytes", size);
impl_->OnBytesReceived(bytes, size);
}
@@ -1581,7 +1587,7 @@ constexpr const char* kName_WasmTableObject = "WebAssembly.Table";
}
void WebAssemblyInstanceGetExports(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
@@ -2020,9 +2026,9 @@ Handle<JSFunction> InstallFunc(Isolate* isolate, Handle<JSObject> object,
}
Handle<JSFunction> InstallConstructorFunc(Isolate* isolate,
- Handle<JSObject> object,
- const char* str,
- FunctionCallback func) {
+ Handle<JSObject> object,
+ const char* str,
+ FunctionCallback func) {
return InstallFunc(isolate, object, str, func, 1, true, DONT_ENUM);
}
@@ -2281,6 +2287,775 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
runtime_error, DONT_ENUM);
}
+namespace {
+void SetMapValue(Isolate* isolate, Handle<JSMap> map, Handle<Object> key,
+ Handle<Object> value) {
+ DCHECK(!map.is_null() && !key.is_null() && !value.is_null());
+ Handle<Object> argv[] = {key, value};
+ Execution::CallBuiltin(isolate, isolate->map_set(), map, arraysize(argv),
+ argv)
+ .Check();
+}
+
+Handle<Object> GetMapValue(Isolate* isolate, Handle<JSMap> map,
+ Handle<Object> key) {
+ DCHECK(!map.is_null() && !key.is_null());
+ Handle<Object> argv[] = {key};
+ return Execution::CallBuiltin(isolate, isolate->map_get(), map,
+ arraysize(argv), argv)
+ .ToHandleChecked();
+}
+
+// Look up a name in a name table. Name tables are stored under the "names"
+// property of the handler and map names to indices.
+base::Optional<int> ResolveValueSelector(Isolate* isolate,
+ Handle<Name> property,
+ Handle<JSObject> handler,
+ bool enable_index_lookup) {
+ size_t index = 0;
+ if (enable_index_lookup && property->AsIntegerIndex(&index) &&
+ index < kMaxInt) {
+ return static_cast<int>(index);
+ }
+
+ Handle<Object> name_table =
+ JSObject::GetProperty(isolate, handler, "names").ToHandleChecked();
+ DCHECK(name_table->IsJSMap());
+
+ Handle<Object> object =
+ GetMapValue(isolate, Handle<JSMap>::cast(name_table), property);
+ if (object->IsUndefined()) return {};
+ DCHECK(object->IsNumeric());
+ return NumberToInt32(*object);
+}
+
+// Helper for unpacking a maybe name; generates a default name with an index
+// if the name is empty. Non-empty names are prefixed with a $.
+Handle<String> GetNameOrDefault(Isolate* isolate,
+ MaybeHandle<String> maybe_name,
+ const char* default_name_prefix, int index) {
+ Handle<String> name;
+ if (maybe_name.ToHandle(&name)) {
+ return isolate->factory()
+ ->NewConsString(isolate->factory()->NewStringFromAsciiChecked("$"),
+ name)
+ .ToHandleChecked();
+ }
+
+ // Maximum length of the default names: $memory-2147483648\0
+ static constexpr int kMaxStrLen = 19;
+ EmbeddedVector<char, kMaxStrLen> value;
+ DCHECK_LT(strlen(default_name_prefix) + /*strlen(kMinInt)*/ 11, kMaxStrLen);
+ int len = SNPrintF(value, "%s%d", default_name_prefix, index);
+ return isolate->factory()->InternalizeString(value.SubVector(0, len));
+}
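// Illustration (not part of the patch): with GetNameOrDefault, a local that
// carries the name "count" in the wasm name section surfaces as "$count",
// while unnamed entities fall back to the prefixed index, e.g. "$var3",
// "$global0", "$memory0", "$table1" or "$func7".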
+
+// Generate names for the locals. Names come from the name table if present;
+// otherwise the default $varX is used.
+std::vector<Handle<String>> GetLocalNames(Handle<WasmInstanceObject> instance,
+ Address pc) {
+ wasm::NativeModule* native_module = instance->module_object().native_module();
+ wasm::DebugInfo* debug_info = native_module->GetDebugInfo();
+ int num_locals = debug_info->GetNumLocals(pc);
+ auto* isolate = instance->GetIsolate();
+
+ wasm::ModuleWireBytes module_wire_bytes(
+ instance->module_object().native_module()->wire_bytes());
+ const wasm::WasmFunction& function = debug_info->GetFunctionAtAddress(pc);
+
+ std::vector<Handle<String>> names;
+ for (int i = 0; i < num_locals; ++i) {
+ wasm::WireBytesRef local_name_ref =
+ debug_info->GetLocalName(function.func_index, i);
+ DCHECK(module_wire_bytes.BoundsCheck(local_name_ref));
+ Vector<const char> name_vec =
+ module_wire_bytes.GetNameOrNull(local_name_ref);
+ names.emplace_back(GetNameOrDefault(
+ isolate,
+ name_vec.empty() ? MaybeHandle<String>()
+ : isolate->factory()->NewStringFromUtf8(name_vec),
+ "$var", i));
+ }
+
+ return names;
+}
+
+// Generate names for the globals. Names come from the name table if present;
+// otherwise the default $globalX is used.
+std::vector<Handle<String>> GetGlobalNames(
+ Handle<WasmInstanceObject> instance) {
+ Isolate* isolate = instance->GetIsolate();
+ auto& globals = instance->module()->globals;
+ std::vector<Handle<String>> names;
+ for (uint32_t i = 0; i < globals.size(); ++i) {
+ names.emplace_back(GetNameOrDefault(
+ isolate, WasmInstanceObject::GetGlobalNameOrNull(isolate, instance, i),
+ "$global", i));
+ }
+ return names;
+}
+
+// Generate names for the functions.
+std::vector<Handle<String>> GetFunctionNames(
+ Handle<WasmInstanceObject> instance) {
+ Isolate* isolate = instance->GetIsolate();
+ auto* module = instance->module();
+
+ wasm::ModuleWireBytes wire_bytes(
+ instance->module_object().native_module()->wire_bytes());
+
+ std::vector<Handle<String>> names;
+ for (auto& function : module->functions) {
+ DCHECK_EQ(function.func_index, names.size());
+ wasm::WireBytesRef name_ref =
+ module->lazily_generated_names.LookupFunctionName(
+ wire_bytes, function.func_index, VectorOf(module->export_table));
+ DCHECK(wire_bytes.BoundsCheck(name_ref));
+ Vector<const char> name_vec = wire_bytes.GetNameOrNull(name_ref);
+ names.emplace_back(GetNameOrDefault(
+ isolate,
+ name_vec.empty() ? MaybeHandle<String>()
+ : isolate->factory()->NewStringFromUtf8(name_vec),
+ "$func", function.func_index));
+ }
+
+ return names;
+}
+
+// Generate names for the imports.
+std::vector<Handle<String>> GetImportNames(
+ Handle<WasmInstanceObject> instance) {
+ Isolate* isolate = instance->GetIsolate();
+ const wasm::WasmModule* module = instance->module();
+ Handle<WasmModuleObject> module_object(instance->module_object(), isolate);
+ int num_imports = static_cast<int>(module->import_table.size());
+
+ std::vector<Handle<String>> names;
+ for (int index = 0; index < num_imports; ++index) {
+ const wasm::WasmImport& import = module->import_table[index];
+
+ names.emplace_back(WasmModuleObject::ExtractUtf8StringFromModuleBytes(
+ isolate, module_object, import.field_name, kInternalize));
+ }
+
+ return names;
+}
+
+// Generate names for the memories.
+std::vector<Handle<String>> GetMemoryNames(
+ Handle<WasmInstanceObject> instance) {
+ Isolate* isolate = instance->GetIsolate();
+
+ std::vector<Handle<String>> names;
+ uint32_t memory_count = instance->has_memory_object() ? 1 : 0;
+ for (uint32_t memory_index = 0; memory_index < memory_count; ++memory_index) {
+ names.emplace_back(GetNameOrDefault(isolate,
+ WasmInstanceObject::GetMemoryNameOrNull(
+ isolate, instance, memory_index),
+ "$memory", memory_index));
+ }
+
+ return names;
+}
+
+// Generate names for the tables.
+std::vector<Handle<String>> GetTableNames(Handle<WasmInstanceObject> instance) {
+ Isolate* isolate = instance->GetIsolate();
+ auto tables = handle(instance->tables(), isolate);
+
+ std::vector<Handle<String>> names;
+ for (int table_index = 0; table_index < tables->length(); ++table_index) {
+ auto func_table =
+ handle(WasmTableObject::cast(tables->get(table_index)), isolate);
+ if (!func_table->type().is_reference_to(wasm::HeapType::kFunc)) continue;
+
+ names.emplace_back(GetNameOrDefault(
+ isolate,
+ WasmInstanceObject::GetTableNameOrNull(isolate, instance, table_index),
+ "$table", table_index));
+ }
+ return names;
+}
+
+// Generate names for the exports.
+std::vector<Handle<String>> GetExportNames(
+ Handle<WasmInstanceObject> instance) {
+ Isolate* isolate = instance->GetIsolate();
+ const wasm::WasmModule* module = instance->module();
+ Handle<WasmModuleObject> module_object(instance->module_object(), isolate);
+ int num_exports = static_cast<int>(module->export_table.size());
+
+ std::vector<Handle<String>> names;
+
+ for (int index = 0; index < num_exports; ++index) {
+ const wasm::WasmExport& exp = module->export_table[index];
+
+ names.emplace_back(WasmModuleObject::ExtractUtf8StringFromModuleBytes(
+ isolate, module_object, exp.name, kInternalize));
+ }
+ return names;
+}
+
+Handle<WasmInstanceObject> GetInstance(Isolate* isolate,
+ Handle<JSObject> handler) {
+ Handle<Object> instance =
+ JSObject::GetProperty(isolate, handler, "instance").ToHandleChecked();
+ DCHECK(instance->IsWasmInstanceObject());
+ return Handle<WasmInstanceObject>::cast(instance);
+}
+
+Address GetPC(Isolate* isolate, Handle<JSObject> handler) {
+ Handle<Object> pc =
+ JSObject::GetProperty(isolate, handler, "pc").ToHandleChecked();
+ DCHECK(pc->IsBigInt());
+ return Handle<BigInt>::cast(pc)->AsUint64();
+}
+
+Address GetFP(Isolate* isolate, Handle<JSObject> handler) {
+ Handle<Object> fp =
+ JSObject::GetProperty(isolate, handler, "fp").ToHandleChecked();
+ DCHECK(fp->IsBigInt());
+ return Handle<BigInt>::cast(fp)->AsUint64();
+}
+
+Address GetCalleeFP(Isolate* isolate, Handle<JSObject> handler) {
+ Handle<Object> callee_fp =
+ JSObject::GetProperty(isolate, handler, "callee_fp").ToHandleChecked();
+ DCHECK(callee_fp->IsBigInt());
+ return Handle<BigInt>::cast(callee_fp)->AsUint64();
+}
+
+// Convert a WasmValue to an appropriate JS representation.
+static Handle<Object> WasmValueToObject(Isolate* isolate,
+ wasm::WasmValue value) {
+ auto* factory = isolate->factory();
+ switch (value.type().kind()) {
+ case wasm::ValueType::kI32:
+ return factory->NewNumberFromInt(value.to_i32());
+ case wasm::ValueType::kI64:
+ return BigInt::FromInt64(isolate, value.to_i64());
+ case wasm::ValueType::kF32:
+ return factory->NewNumber(value.to_f32());
+ case wasm::ValueType::kF64:
+ return factory->NewNumber(value.to_f64());
+ case wasm::ValueType::kS128: {
+ wasm::Simd128 s128 = value.to_s128();
+ Handle<JSArrayBuffer> buffer;
+ if (!isolate->factory()
+ ->NewJSArrayBufferAndBackingStore(
+ kSimd128Size, InitializedFlag::kUninitialized)
+ .ToHandle(&buffer)) {
+ isolate->FatalProcessOutOfHeapMemory(
+ "failed to allocate backing store");
+ }
+
+ memcpy(buffer->allocation_base(), s128.bytes(), buffer->byte_length());
+ return isolate->factory()->NewJSTypedArray(kExternalUint8Array, buffer, 0,
+ buffer->byte_length());
+ }
+ case wasm::ValueType::kRef:
+ return value.to_externref();
+ default:
+ break;
+ }
+ return factory->undefined_value();
+}
+
+base::Optional<int> HasLocalImpl(Isolate* isolate, Handle<Name> property,
+ Handle<JSObject> handler,
+ bool enable_index_lookup) {
+ Handle<WasmInstanceObject> instance = GetInstance(isolate, handler);
+
+ base::Optional<int> index =
+ ResolveValueSelector(isolate, property, handler, enable_index_lookup);
+ if (!index) return index;
+ Address pc = GetPC(isolate, handler);
+
+ wasm::DebugInfo* debug_info =
+ instance->module_object().native_module()->GetDebugInfo();
+ int num_locals = debug_info->GetNumLocals(pc);
+ if (0 <= index && index < num_locals) return index;
+ return {};
+}
+
+Handle<Object> GetLocalImpl(Isolate* isolate, Handle<Name> property,
+ Handle<JSObject> handler,
+ bool enable_index_lookup) {
+ Factory* factory = isolate->factory();
+ Handle<WasmInstanceObject> instance = GetInstance(isolate, handler);
+
+ base::Optional<int> index =
+ HasLocalImpl(isolate, property, handler, enable_index_lookup);
+ if (!index) return factory->undefined_value();
+ Address pc = GetPC(isolate, handler);
+ Address fp = GetFP(isolate, handler);
+ Address callee_fp = GetCalleeFP(isolate, handler);
+
+ wasm::DebugInfo* debug_info =
+ instance->module_object().native_module()->GetDebugInfo();
+ wasm::WasmValue value = debug_info->GetLocalValue(*index, pc, fp, callee_fp);
+ return WasmValueToObject(isolate, value);
+}
+
+base::Optional<int> HasGlobalImpl(Isolate* isolate, Handle<Name> property,
+ Handle<JSObject> handler,
+ bool enable_index_lookup) {
+ Handle<WasmInstanceObject> instance = GetInstance(isolate, handler);
+ base::Optional<int> index =
+ ResolveValueSelector(isolate, property, handler, enable_index_lookup);
+ if (!index) return index;
+
+ const std::vector<wasm::WasmGlobal>& globals = instance->module()->globals;
+ if (globals.size() <= kMaxInt && 0 <= *index &&
+ *index < static_cast<int>(globals.size())) {
+ return index;
+ }
+ return {};
+}
+
+Handle<Object> GetGlobalImpl(Isolate* isolate, Handle<Name> property,
+ Handle<JSObject> handler,
+ bool enable_index_lookup) {
+ Handle<WasmInstanceObject> instance = GetInstance(isolate, handler);
+ base::Optional<int> index =
+ HasGlobalImpl(isolate, property, handler, enable_index_lookup);
+ if (!index) return isolate->factory()->undefined_value();
+
+ const std::vector<wasm::WasmGlobal>& globals = instance->module()->globals;
+ return WasmValueToObject(
+ isolate, WasmInstanceObject::GetGlobalValue(instance, globals[*index]));
+}
+
+base::Optional<int> HasMemoryImpl(Isolate* isolate, Handle<Name> property,
+ Handle<JSObject> handler,
+ bool enable_index_lookup) {
+ Handle<WasmInstanceObject> instance = GetInstance(isolate, handler);
+ base::Optional<int> index =
+ ResolveValueSelector(isolate, property, handler, enable_index_lookup);
+ if (index && *index == 0 && instance->has_memory_object()) return index;
+ return {};
+}
+
+Handle<Object> GetMemoryImpl(Isolate* isolate, Handle<Name> property,
+ Handle<JSObject> handler,
+ bool enable_index_lookup) {
+ Handle<WasmInstanceObject> instance = GetInstance(isolate, handler);
+ base::Optional<int> index =
+ HasMemoryImpl(isolate, property, handler, enable_index_lookup);
+ if (index) return handle(instance->memory_object(), isolate);
+ return isolate->factory()->undefined_value();
+}
+
+base::Optional<int> HasFunctionImpl(Isolate* isolate, Handle<Name> property,
+ Handle<JSObject> handler,
+ bool enable_index_lookup) {
+ Handle<WasmInstanceObject> instance = GetInstance(isolate, handler);
+ base::Optional<int> index =
+ ResolveValueSelector(isolate, property, handler, enable_index_lookup);
+ if (!index) return index;
+ const std::vector<wasm::WasmFunction>& functions =
+ instance->module()->functions;
+ if (functions.size() <= kMaxInt && 0 <= *index &&
+ *index < static_cast<int>(functions.size())) {
+ return index;
+ }
+ return {};
+}
+
+Handle<Object> GetFunctionImpl(Isolate* isolate, Handle<Name> property,
+ Handle<JSObject> handler,
+ bool enable_index_lookup) {
+ Handle<WasmInstanceObject> instance = GetInstance(isolate, handler);
+ base::Optional<int> index =
+ HasFunctionImpl(isolate, property, handler, enable_index_lookup);
+ if (!index) return isolate->factory()->undefined_value();
+
+ return WasmInstanceObject::GetOrCreateWasmExternalFunction(isolate, instance,
+ *index);
+}
+
+base::Optional<int> HasTableImpl(Isolate* isolate, Handle<Name> property,
+ Handle<JSObject> handler,
+ bool enable_index_lookup) {
+ Handle<WasmInstanceObject> instance = GetInstance(isolate, handler);
+ base::Optional<int> index =
+ ResolveValueSelector(isolate, property, handler, enable_index_lookup);
+ if (!index) return index;
+ Handle<FixedArray> tables(instance->tables(), isolate);
+ int num_tables = tables->length();
+ if (*index < 0 || *index >= num_tables) return {};
+
+ Handle<WasmTableObject> func_table(WasmTableObject::cast(tables->get(*index)),
+ isolate);
+ if (func_table->type().is_reference_to(wasm::HeapType::kFunc)) return index;
+ return {};
+}
+
+Handle<Object> GetTableImpl(Isolate* isolate, Handle<Name> property,
+ Handle<JSObject> handler,
+ bool enable_index_lookup) {
+ Handle<WasmInstanceObject> instance = GetInstance(isolate, handler);
+ base::Optional<int> index =
+ HasTableImpl(isolate, property, handler, enable_index_lookup);
+ if (!index) return isolate->factory()->undefined_value();
+
+ Handle<WasmTableObject> func_table(
+ WasmTableObject::cast(instance->tables().get(*index)), isolate);
+ return func_table;
+}
+
+base::Optional<int> HasImportImpl(Isolate* isolate, Handle<Name> property,
+ Handle<JSObject> handler,
+ bool enable_index_lookup) {
+ Handle<WasmInstanceObject> instance = GetInstance(isolate, handler);
+ base::Optional<int> index =
+ ResolveValueSelector(isolate, property, handler, enable_index_lookup);
+ if (!index) return index;
+ const wasm::WasmModule* module = instance->module();
+ Handle<WasmModuleObject> module_object(instance->module_object(), isolate);
+ int num_imports = static_cast<int>(module->import_table.size());
+ if (0 <= *index && *index < num_imports) return index;
+ return {};
+}
+
+Handle<JSObject> GetExternalObject(Isolate* isolate,
+ wasm::ImportExportKindCode kind,
+ uint32_t index) {
+ Handle<JSObject> result = isolate->factory()->NewJSObjectWithNullProto();
+ Handle<Object> value = isolate->factory()->NewNumberFromUint(index);
+ switch (kind) {
+ case wasm::kExternalFunction:
+ JSObject::AddProperty(isolate, result, "func", value, NONE);
+ break;
+ case wasm::kExternalGlobal:
+ JSObject::AddProperty(isolate, result, "global", value, NONE);
+ break;
+ case wasm::kExternalTable:
+ JSObject::AddProperty(isolate, result, "table", value, NONE);
+ break;
+ case wasm::kExternalMemory:
+ JSObject::AddProperty(isolate, result, "mem", value, NONE);
+ break;
+ case wasm::kExternalException:
+ JSObject::AddProperty(isolate, result, "exn", value, NONE);
+ break;
+ }
+ return result;
+}
+
+Handle<Object> GetImportImpl(Isolate* isolate, Handle<Name> property,
+ Handle<JSObject> handler,
+ bool enable_index_lookup) {
+ Handle<WasmInstanceObject> instance = GetInstance(isolate, handler);
+ base::Optional<int> index =
+ HasImportImpl(isolate, property, handler, enable_index_lookup);
+ if (!index) return isolate->factory()->undefined_value();
+
+ const wasm::WasmImport& imp = instance->module()->import_table[*index];
+ return GetExternalObject(isolate, imp.kind, imp.index);
+}
+
+base::Optional<int> HasExportImpl(Isolate* isolate, Handle<Name> property,
+ Handle<JSObject> handler,
+ bool enable_index_lookup) {
+ Handle<WasmInstanceObject> instance = GetInstance(isolate, handler);
+ base::Optional<int> index =
+ ResolveValueSelector(isolate, property, handler, enable_index_lookup);
+ if (!index) return index;
+
+ const wasm::WasmModule* module = instance->module();
+ Handle<WasmModuleObject> module_object(instance->module_object(), isolate);
+ int num_exports = static_cast<int>(module->export_table.size());
+ if (0 <= *index && *index < num_exports) return index;
+ return {};
+}
+
+Handle<Object> GetExportImpl(Isolate* isolate, Handle<Name> property,
+ Handle<JSObject> handler,
+ bool enable_index_lookup) {
+ Handle<WasmInstanceObject> instance = GetInstance(isolate, handler);
+ base::Optional<int> index =
+ HasExportImpl(isolate, property, handler, enable_index_lookup);
+ if (!index) return isolate->factory()->undefined_value();
+
+ const wasm::WasmExport& exp = instance->module()->export_table[*index];
+ return GetExternalObject(isolate, exp.kind, exp.index);
+}
+
+// Generic has trap callback for the index space proxies.
+template <base::Optional<int> Impl(Isolate*, Handle<Name>, Handle<JSObject>,
+ bool)>
+void HasTrapCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ DCHECK_GE(args.Length(), 2);
+ Isolate* isolate = reinterpret_cast<Isolate*>(args.GetIsolate());
+ DCHECK(args.This()->IsObject());
+ Handle<JSObject> handler =
+ Handle<JSObject>::cast(Utils::OpenHandle(*args.This()));
+
+ DCHECK(args[1]->IsName());
+ Handle<Name> property = Handle<Name>::cast(Utils::OpenHandle(*args[1]));
+ args.GetReturnValue().Set(Impl(isolate, property, handler, true).has_value());
+}
+
+// Generic get trap callback for the index space proxies.
+template <Handle<Object> Impl(Isolate*, Handle<Name>, Handle<JSObject>, bool)>
+void GetTrapCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ DCHECK_GE(args.Length(), 2);
+ Isolate* isolate = reinterpret_cast<Isolate*>(args.GetIsolate());
+ DCHECK(args.This()->IsObject());
+ Handle<JSObject> handler =
+ Handle<JSObject>::cast(Utils::OpenHandle(*args.This()));
+
+ DCHECK(args[1]->IsName());
+ Handle<Name> property = Handle<Name>::cast(Utils::OpenHandle(*args[1]));
+ args.GetReturnValue().Set(
+ Utils::ToLocal(Impl(isolate, property, handler, true)));
+}
+
+template <typename ReturnT>
+ReturnT DelegateToplevelCall(Isolate* isolate, Handle<JSObject> target,
+ Handle<Name> property, const char* index_space,
+ ReturnT (*impl)(Isolate*, Handle<Name>,
+ Handle<JSObject>, bool)) {
+ Handle<Object> namespace_proxy =
+ JSObject::GetProperty(isolate, target, index_space).ToHandleChecked();
+ DCHECK(namespace_proxy->IsJSProxy());
+ Handle<JSObject> namespace_handler(
+ JSObject::cast(Handle<JSProxy>::cast(namespace_proxy)->handler()),
+ isolate);
+ return impl(isolate, property, namespace_handler, false);
+}
+
+template <typename ReturnT>
+using DelegateCallback = ReturnT (*)(Isolate*, Handle<Name>, Handle<JSObject>,
+ bool);
+
+// Has trap callback for the top-level proxy.
+void ToplevelHasTrapCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ DCHECK_GE(args.Length(), 2);
+ Isolate* isolate = reinterpret_cast<Isolate*>(args.GetIsolate());
+ DCHECK(args[0]->IsObject());
+ Handle<JSObject> target = Handle<JSObject>::cast(Utils::OpenHandle(*args[0]));
+
+ DCHECK(args[1]->IsName());
+ Handle<Name> property = Handle<Name>::cast(Utils::OpenHandle(*args[1]));
+
+ // First check if the property exists on the target.
+ if (JSObject::HasProperty(target, property).FromMaybe(false)) {
+ args.GetReturnValue().Set(true);
+ return;
+ }
+
+ // Now check the index space proxies in order to see if they know the property.
+ constexpr std::pair<const char*, DelegateCallback<base::Optional<int>>>
+ kDelegates[] = {{"memories", HasMemoryImpl},
+ {"locals", HasLocalImpl},
+ {"tables", HasTableImpl},
+ {"functions", HasFunctionImpl},
+ {"globals", HasGlobalImpl}};
+ for (auto& delegate : kDelegates) {
+ if (DelegateToplevelCall(isolate, target, property, delegate.first,
+ delegate.second)) {
+ args.GetReturnValue().Set(true);
+ return;
+ }
+ args.GetReturnValue().Set(false);
+ }
+}
+
+// Get trap callback for the top-level proxy.
+void ToplevelGetTrapCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ DCHECK_GE(args.Length(), 2);
+ Isolate* isolate = reinterpret_cast<Isolate*>(args.GetIsolate());
+ DCHECK(args[0]->IsObject());
+ Handle<JSObject> target = Handle<JSObject>::cast(Utils::OpenHandle(*args[0]));
+
+ DCHECK(args[1]->IsName());
+ Handle<Name> property = Handle<Name>::cast(Utils::OpenHandle(*args[1]));
+
+ // First, check if the property is a proper property on the target. If so,
+ // return its value.
+ Handle<Object> value =
+ JSObject::GetProperty(isolate, target, property).ToHandleChecked();
+ if (!value->IsUndefined()) {
+ args.GetReturnValue().Set(Utils::ToLocal(value));
+ return;
+ }
+
+ // Try the index space proxies in the correct disambiguation order.
+ constexpr std::pair<const char*, DelegateCallback<Handle<Object>>>
+ kDelegates[] = {{"memories", GetMemoryImpl},
+ {"locals", GetLocalImpl},
+ {"tables", GetTableImpl},
+ {"functions", GetFunctionImpl},
+ {"globals", GetGlobalImpl}};
+ for (auto& delegate : kDelegates) {
+ value = DelegateToplevelCall(isolate, target, property, delegate.first,
+ delegate.second);
+ if (!value->IsUndefined()) {
+ args.GetReturnValue().Set(Utils::ToLocal(value));
+ return;
+ }
+ }
+}
+
+// Populate a JSMap with name->index mappings from an ordered list of names.
+Handle<JSMap> GetNameTable(Isolate* isolate,
+ const std::vector<Handle<String>>& names) {
+ Factory* factory = isolate->factory();
+ Handle<JSMap> name_table = factory->NewJSMap();
+
+ for (size_t i = 0; i < names.size(); ++i) {
+ SetMapValue(isolate, name_table, names[i], factory->NewNumberFromInt64(i));
+ }
+ return name_table;
+}
+
+// Produce a JSProxy with a given name table and get and has trap handlers.
+Handle<JSProxy> GetJSProxy(
+ WasmFrame* frame, Handle<JSMap> name_table,
+ void (*get_callback)(const v8::FunctionCallbackInfo<v8::Value>&),
+ void (*has_callback)(const v8::FunctionCallbackInfo<v8::Value>&)) {
+ Isolate* isolate = frame->isolate();
+ Factory* factory = isolate->factory();
+ Handle<JSObject> target = factory->NewJSObjectWithNullProto();
+ Handle<JSObject> handler = factory->NewJSObjectWithNullProto();
+
+ // Besides the name table, the get and has traps need access to the instance
+ // and frame information.
+ JSObject::AddProperty(isolate, handler, "names", name_table, DONT_ENUM);
+ Handle<WasmInstanceObject> instance(frame->wasm_instance(), isolate);
+ JSObject::AddProperty(isolate, handler, "instance", instance, DONT_ENUM);
+ Handle<BigInt> pc = BigInt::FromInt64(isolate, frame->pc());
+ JSObject::AddProperty(isolate, handler, "pc", pc, DONT_ENUM);
+ Handle<BigInt> fp = BigInt::FromInt64(isolate, frame->fp());
+ JSObject::AddProperty(isolate, handler, "fp", fp, DONT_ENUM);
+ Handle<BigInt> callee_fp = BigInt::FromInt64(isolate, frame->callee_fp());
+ JSObject::AddProperty(isolate, handler, "callee_fp", callee_fp, DONT_ENUM);
+
+ InstallFunc(isolate, handler, "get", get_callback, 3, false, READ_ONLY);
+ InstallFunc(isolate, handler, "has", has_callback, 2, false, READ_ONLY);
+
+ return factory->NewJSProxy(target, handler);
+}
+
+Handle<JSObject> GetStackObject(WasmFrame* frame) {
+ Isolate* isolate = frame->isolate();
+ Handle<JSObject> object = isolate->factory()->NewJSObjectWithNullProto();
+ wasm::DebugInfo* debug_info =
+ frame->wasm_instance().module_object().native_module()->GetDebugInfo();
+ int num_values = debug_info->GetStackDepth(frame->pc());
+ for (int i = 0; i < num_values; ++i) {
+ wasm::WasmValue value = debug_info->GetStackValue(
+ i, frame->pc(), frame->fp(), frame->callee_fp());
+ JSObject::AddDataElement(object, i, WasmValueToObject(isolate, value),
+ NONE);
+ }
+ return object;
+}
+} // namespace
+
+// This function generates the JS debug proxy for a given Wasm frame. The debug
+// proxy is used when evaluating debug JS expressions on a wasm frame and lets
+// the developer inspect the engine state from JS. The proxy provides the
+// following interface:
+//
+// type WasmSimdValue = Uint8Array;
+// type WasmValue = number | bigint | object | WasmSimdValue;
+// type WasmFunction = (...args : WasmValue[]) => WasmValue;
+// type WasmExport = {name : string} & ({func : number} | {table : number} |
+// {mem : number} | {global : number});
+// type WasmImport = {name : string, module : string} &
+// ({func : number} | {table : number} | {mem : number} |
+// {global : number});
+// interface WasmInterface {
+// $globalX: WasmValue;
+// $varX: WasmValue;
+// $funcX(a : WasmValue /*, ...*/) : WasmValue;
+// readonly $memoryX : WebAssembly.Memory;
+// readonly $tableX : WebAssembly.Table;
+// readonly memories : {[nameOrIndex:string | number] : WebAssembly.Memory};
+// readonly tables : {[nameOrIndex:string | number] : WebAssembly.Table};
+// readonly stack : WasmValue[];
+// readonly imports : {[nameOrIndex:string | number] : WasmImport};
+// readonly exports : {[nameOrIndex:string | number] : WasmExport};
+// readonly globals : {[nameOrIndex:string | number] : WasmValue};
+// readonly locals : {[nameOrIndex:string | number] : WasmValue};
+// readonly functions : {[nameOrIndex:string | number] : WasmFunction};
+// }
+//
+// The wasm index spaces memories, tables, imports, exports, globals, locals,
+// and functions are JSProxies that lazily produce values either by index or
+// by name. A top-level JSProxy is wrapped around those for top-level lookup
+// of names in the disambiguation order memory, local, table, function,
+// global. Import and export names are not globally resolved.
+
+Handle<JSProxy> WasmJs::GetJSDebugProxy(WasmFrame* frame) {
+ Isolate* isolate = frame->isolate();
+ Factory* factory = isolate->factory();
+ Handle<WasmInstanceObject> instance(frame->wasm_instance(), isolate);
+
+ // The top level proxy delegates lookups to the index space proxies.
+ Handle<JSObject> handler = factory->NewJSObjectWithNullProto();
+ InstallFunc(isolate, handler, "get", ToplevelGetTrapCallback, 3, false,
+ READ_ONLY);
+ InstallFunc(isolate, handler, "has", ToplevelHasTrapCallback, 2, false,
+ READ_ONLY);
+
+ Handle<JSObject> target = factory->NewJSObjectWithNullProto();
+
+ // Generate JSMaps per index space for name->index lookup. Every index space
+ // proxy is associated with its table for local name lookup.
+
+ auto local_name_table =
+ GetNameTable(isolate, GetLocalNames(instance, frame->pc()));
+ auto locals =
+ GetJSProxy(frame, local_name_table, GetTrapCallback<GetLocalImpl>,
+ HasTrapCallback<HasLocalImpl>);
+ JSObject::AddProperty(isolate, target, "locals", locals, READ_ONLY);
+
+ auto global_name_table = GetNameTable(isolate, GetGlobalNames(instance));
+ auto globals =
+ GetJSProxy(frame, global_name_table, GetTrapCallback<GetGlobalImpl>,
+ HasTrapCallback<HasGlobalImpl>);
+ JSObject::AddProperty(isolate, target, "globals", globals, READ_ONLY);
+
+ auto function_name_table = GetNameTable(isolate, GetFunctionNames(instance));
+ auto functions =
+ GetJSProxy(frame, function_name_table, GetTrapCallback<GetFunctionImpl>,
+ HasTrapCallback<HasFunctionImpl>);
+ JSObject::AddProperty(isolate, target, "functions", functions, READ_ONLY);
+
+ auto memory_name_table = GetNameTable(isolate, GetMemoryNames(instance));
+ auto memories =
+ GetJSProxy(frame, memory_name_table, GetTrapCallback<GetMemoryImpl>,
+ HasTrapCallback<HasMemoryImpl>);
+ JSObject::AddProperty(isolate, target, "memories", memories, READ_ONLY);
+
+ auto table_name_table = GetNameTable(isolate, GetTableNames(instance));
+ auto tables =
+ GetJSProxy(frame, table_name_table, GetTrapCallback<GetTableImpl>,
+ HasTrapCallback<HasTableImpl>);
+ JSObject::AddProperty(isolate, target, "tables", tables, READ_ONLY);
+
+ auto import_name_table = GetNameTable(isolate, GetImportNames(instance));
+ auto imports =
+ GetJSProxy(frame, import_name_table, GetTrapCallback<GetImportImpl>,
+ HasTrapCallback<HasImportImpl>);
+ JSObject::AddProperty(isolate, target, "imports", imports, READ_ONLY);
+
+ auto export_name_table = GetNameTable(isolate, GetExportNames(instance));
+ auto exports =
+ GetJSProxy(frame, export_name_table, GetTrapCallback<GetExportImpl>,
+ HasTrapCallback<HasExportImpl>);
+ JSObject::AddProperty(isolate, target, "exports", exports, READ_ONLY);
+
+ auto stack = GetStackObject(frame);
+ JSObject::AddProperty(isolate, target, "stack", stack, READ_ONLY);
+
+ return factory->NewJSProxy(target, handler);
+}
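An editorial usage sketch (not part of the patch): how a debug-evaluate implementation might obtain this proxy for the top-most wasm frame and hand it to the expression evaluator. The frame iterator and scope plumbing are assumed and simplified here.

  // Sketch only; {it} stands for a stack iterator positioned on a wasm frame.
  WasmFrame* frame = WasmFrame::cast(it.frame());
  Handle<JSProxy> proxy = WasmJs::GetJSDebugProxy(frame);
  // The evaluator can install {proxy} as the extension object of the evaluated
  // expression's scope, so that names like "$func0", "memories[0]" or
  // "stack[1]" resolve through the top-level get/has traps defined above.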
+
#undef ASSIGN
#undef EXTRACT_THIS
diff --git a/deps/v8/src/wasm/wasm-js.h b/deps/v8/src/wasm/wasm-js.h
index 6f83ad6326..4c9ae9645b 100644
--- a/deps/v8/src/wasm/wasm-js.h
+++ b/deps/v8/src/wasm/wasm-js.h
@@ -9,6 +9,8 @@
namespace v8 {
namespace internal {
+class JSProxy;
+class WasmFrame;
namespace wasm {
class StreamingDecoder;
@@ -19,6 +21,8 @@ class WasmJs {
public:
V8_EXPORT_PRIVATE static void Install(Isolate* isolate,
bool exposed_on_global_object);
+
+ V8_EXPORT_PRIVATE static Handle<JSProxy> GetJSDebugProxy(WasmFrame* frame);
};
} // namespace internal
diff --git a/deps/v8/src/wasm/wasm-module-builder.cc b/deps/v8/src/wasm/wasm-module-builder.cc
index de895e6429..f2e7d63f52 100644
--- a/deps/v8/src/wasm/wasm-module-builder.cc
+++ b/deps/v8/src/wasm/wasm-module-builder.cc
@@ -597,7 +597,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
size_t start = EmitSection(kTableSectionCode, buffer);
buffer->write_size(tables_.size());
for (const WasmTable& table : tables_) {
- buffer->write_u8(table.type.value_type_code());
+ WriteValueType(buffer, table.type);
buffer->write_u8(table.has_maximum ? kWithMaximum : kNoMaximum);
buffer->write_size(table.min_size);
if (table.has_maximum) buffer->write_size(table.max_size);
diff --git a/deps/v8/src/wasm/wasm-module-builder.h b/deps/v8/src/wasm/wasm-module-builder.h
index fdd64950df..7d6df375aa 100644
--- a/deps/v8/src/wasm/wasm-module-builder.h
+++ b/deps/v8/src/wasm/wasm-module-builder.h
@@ -236,6 +236,8 @@ class V8_EXPORT_PRIVATE WasmFunctionBuilder : public ZoneObject {
class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
public:
explicit WasmModuleBuilder(Zone* zone);
+ WasmModuleBuilder(const WasmModuleBuilder&) = delete;
+ WasmModuleBuilder& operator=(const WasmModuleBuilder&) = delete;
// Building methods.
uint32_t AddImport(Vector<const char> name, FunctionSig* sig,
@@ -361,8 +363,6 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
// Indirect functions must be allocated before adding extra tables.
bool allocating_indirect_functions_allowed_ = true;
#endif
-
- DISALLOW_COPY_AND_ASSIGN(WasmModuleBuilder);
};
inline FunctionSig* WasmFunctionBuilder::signature() {
diff --git a/deps/v8/src/wasm/wasm-module.cc b/deps/v8/src/wasm/wasm-module.cc
index 3d935f27be..afe192a3d3 100644
--- a/deps/v8/src/wasm/wasm-module.cc
+++ b/deps/v8/src/wasm/wasm-module.cc
@@ -137,7 +137,7 @@ void LazilyGeneratedNames::AddForTesting(int function_index,
AsmJsOffsetInformation::AsmJsOffsetInformation(
Vector<const byte> encoded_offsets)
- : encoded_offsets_(OwnedVector<uint8_t>::Of(encoded_offsets)) {}
+ : encoded_offsets_(OwnedVector<const uint8_t>::Of(encoded_offsets)) {}
AsmJsOffsetInformation::~AsmJsOffsetInformation() = default;
@@ -618,10 +618,11 @@ size_t EstimateStoredSize(const WasmModule* module) {
(module->signature_zone ? module->signature_zone->allocation_size()
: 0) +
VectorSize(module->types) + VectorSize(module->type_kinds) +
- VectorSize(module->signature_ids) + VectorSize(module->functions) +
- VectorSize(module->data_segments) + VectorSize(module->tables) +
- VectorSize(module->import_table) + VectorSize(module->export_table) +
- VectorSize(module->exceptions) + VectorSize(module->elem_segments);
+ VectorSize(module->canonicalized_type_ids) +
+ VectorSize(module->functions) + VectorSize(module->data_segments) +
+ VectorSize(module->tables) + VectorSize(module->import_table) +
+ VectorSize(module->export_table) + VectorSize(module->exceptions) +
+ VectorSize(module->elem_segments);
}
size_t PrintSignature(Vector<char> buffer, const wasm::FunctionSig* sig,
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index 2ffc92e390..9c54f17b9c 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -266,6 +266,7 @@ struct V8_EXPORT_PRIVATE WasmModule {
uint32_t maximum_pages = 0; // maximum size of the memory in 64k pages
bool has_shared_memory = false; // true if memory is a SharedArrayBuffer
bool has_maximum_pages = false; // true if there is a maximum memory size
+ bool is_memory64 = false; // true if the memory is 64 bit
bool has_memory = false; // true if the memory was defined or imported
bool mem_export = false; // true if the memory is exported
int start_function_index = -1; // start function, >= 0 if any
@@ -283,9 +284,12 @@ struct V8_EXPORT_PRIVATE WasmModule {
uint32_t num_declared_data_segments = 0; // From the DataCount section.
WireBytesRef code = {0, 0};
WireBytesRef name = {0, 0};
- std::vector<TypeDefinition> types; // by type index
- std::vector<uint8_t> type_kinds; // by type index
- std::vector<uint32_t> signature_ids; // by signature index
+ std::vector<TypeDefinition> types; // by type index
+ std::vector<uint8_t> type_kinds; // by type index
+ // Map from each type index to the index of its corresponding canonical type.
+ // Note: right now, only functions are canonicalized; structs and arrays are
+ // not canonicalized and simply get the placeholder id 0.
+ std::vector<uint32_t> canonicalized_type_ids;
bool has_type(uint32_t index) const { return index < types.size(); }
@@ -293,37 +297,43 @@ struct V8_EXPORT_PRIVATE WasmModule {
types.push_back(TypeDefinition(sig));
type_kinds.push_back(kWasmFunctionTypeCode);
uint32_t canonical_id = sig ? signature_map.FindOrInsert(*sig) : 0;
- signature_ids.push_back(canonical_id);
- }
- const FunctionSig* signature(uint32_t index) const {
- DCHECK(type_kinds[index] == kWasmFunctionTypeCode);
- return types[index].function_sig;
+ canonicalized_type_ids.push_back(canonical_id);
}
bool has_signature(uint32_t index) const {
return index < types.size() && type_kinds[index] == kWasmFunctionTypeCode;
}
+ const FunctionSig* signature(uint32_t index) const {
+ DCHECK(has_signature(index));
+ return types[index].function_sig;
+ }
+
void add_struct_type(const StructType* type) {
types.push_back(TypeDefinition(type));
type_kinds.push_back(kWasmStructTypeCode);
- }
- const StructType* struct_type(uint32_t index) const {
- DCHECK(type_kinds[index] == kWasmStructTypeCode);
- return types[index].struct_type;
+ // No canonicalization for structs.
+ canonicalized_type_ids.push_back(0);
}
bool has_struct(uint32_t index) const {
return index < types.size() && type_kinds[index] == kWasmStructTypeCode;
}
+ const StructType* struct_type(uint32_t index) const {
+ DCHECK(has_struct(index));
+ return types[index].struct_type;
+ }
+
void add_array_type(const ArrayType* type) {
types.push_back(TypeDefinition(type));
type_kinds.push_back(kWasmArrayTypeCode);
- }
- const ArrayType* array_type(uint32_t index) const {
- DCHECK(type_kinds[index] == kWasmArrayTypeCode);
- return types[index].array_type;
+ // No canonicalization for arrays.
+ canonicalized_type_ids.push_back(0);
}
bool has_array(uint32_t index) const {
return index < types.size() && type_kinds[index] == kWasmArrayTypeCode;
}
+ const ArrayType* array_type(uint32_t index) const {
+ DCHECK(has_array(index));
+ return types[index].array_type;
+ }
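An editorial example of the {canonicalized_type_ids} bookkeeping above ({sig_i_i} and {struct_t} are placeholder pointers; the concrete canonical id N depends on the state of {signature_map}):

  WasmModule module;
  module.add_signature(sig_i_i);     // canonicalized_type_ids: {N}
  module.add_signature(sig_i_i);     // canonicalized_type_ids: {N, N} - same id
                                     // for the structurally identical signature
  module.add_struct_type(struct_t);  // canonicalized_type_ids: {N, N, 0} -
                                     // structs and arrays are not canonicalized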
std::vector<WasmFunction> functions;
std::vector<WasmDataSegment> data_segments;
@@ -344,9 +354,8 @@ struct V8_EXPORT_PRIVATE WasmModule {
std::unique_ptr<AsmJsOffsetInformation> asm_js_offset_information;
explicit WasmModule(std::unique_ptr<Zone> signature_zone = nullptr);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(WasmModule);
+ WasmModule(const WasmModule&) = delete;
+ WasmModule& operator=(const WasmModule&) = delete;
};
// Static representation of a wasm indirect call table.
diff --git a/deps/v8/src/wasm/wasm-objects-inl.h b/deps/v8/src/wasm/wasm-objects-inl.h
index 984c6d0f5b..744a16c855 100644
--- a/deps/v8/src/wasm/wasm-objects-inl.h
+++ b/deps/v8/src/wasm/wasm-objects-inl.h
@@ -28,6 +28,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/wasm/wasm-objects-tq-inl.inc"
+
OBJECT_CONSTRUCTORS_IMPL(WasmExceptionObject, JSObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(WasmExceptionTag)
OBJECT_CONSTRUCTORS_IMPL(WasmExportedFunctionData, Struct)
@@ -58,7 +60,8 @@ CAST_ACCESSOR(WasmArray)
Object value = TaggedField<Object, offset>::load(isolate, *this); \
return !value.IsUndefined(GetReadOnlyRoots(isolate)); \
} \
- ACCESSORS(holder, name, type, offset)
+ ACCESSORS_CHECKED2(holder, name, type, offset, \
+ !value.IsUndefined(GetReadOnlyRoots(isolate)), true)
#define PRIMITIVE_ACCESSORS(holder, name, type, offset) \
type holder::name() const { \
@@ -329,11 +332,12 @@ ACCESSORS(WasmExportedFunctionData, instance, WasmInstanceObject,
SMI_ACCESSORS(WasmExportedFunctionData, jump_table_offset,
kJumpTableOffsetOffset)
SMI_ACCESSORS(WasmExportedFunctionData, function_index, kFunctionIndexOffset)
+ACCESSORS(WasmExportedFunctionData, signature, Foreign, kSignatureOffset)
+SMI_ACCESSORS(WasmExportedFunctionData, call_count, kCallCountOffset)
ACCESSORS(WasmExportedFunctionData, c_wrapper_code, Object, kCWrapperCodeOffset)
ACCESSORS(WasmExportedFunctionData, wasm_call_target, Object,
kWasmCallTargetOffset)
SMI_ACCESSORS(WasmExportedFunctionData, packed_args_size, kPackedArgsSizeOffset)
-ACCESSORS(WasmExportedFunctionData, signature, Foreign, kSignatureOffset)
// WasmJSFunction
WasmJSFunction::WasmJSFunction(Address ptr) : JSFunction(ptr) {
@@ -352,6 +356,8 @@ ACCESSORS(WasmJSFunctionData, serialized_signature, PodArray<wasm::ValueType>,
kSerializedSignatureOffset)
ACCESSORS(WasmJSFunctionData, callable, JSReceiver, kCallableOffset)
ACCESSORS(WasmJSFunctionData, wrapper_code, Code, kWrapperCodeOffset)
+ACCESSORS(WasmJSFunctionData, wasm_to_js_wrapper_code, Code,
+ kWasmToJsWrapperCodeOffset)
// WasmCapiFunction
WasmCapiFunction::WasmCapiFunction(Address ptr) : JSFunction(ptr) {
@@ -359,16 +365,6 @@ WasmCapiFunction::WasmCapiFunction(Address ptr) : JSFunction(ptr) {
}
CAST_ACCESSOR(WasmCapiFunction)
-// WasmCapiFunctionData
-OBJECT_CONSTRUCTORS_IMPL(WasmCapiFunctionData, Struct)
-CAST_ACCESSOR(WasmCapiFunctionData)
-PRIMITIVE_ACCESSORS(WasmCapiFunctionData, call_target, Address,
- kCallTargetOffset)
-ACCESSORS(WasmCapiFunctionData, embedder_data, Foreign, kEmbedderDataOffset)
-ACCESSORS(WasmCapiFunctionData, wrapper_code, Code, kWrapperCodeOffset)
-ACCESSORS(WasmCapiFunctionData, serialized_signature, PodArray<wasm::ValueType>,
- kSerializedSignatureOffset)
-
// WasmExternalFunction
WasmExternalFunction::WasmExternalFunction(Address ptr) : JSFunction(ptr) {
SLOW_DCHECK(IsWasmExternalFunction(*this));
@@ -451,6 +447,11 @@ int WasmArray::SizeFor(Map map, int length) {
}
void WasmTypeInfo::clear_foreign_address(Isolate* isolate) {
+#ifdef V8_HEAP_SANDBOX
+ // Due to the type-specific pointer tags for external pointers, we need to
+ // allocate an entry in the table here even though it will just store nullptr.
+ AllocateExternalPointerEntries(isolate);
+#endif
set_foreign_address(isolate, 0);
}
diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc
index cf78ab5ff3..d06caef486 100644
--- a/deps/v8/src/wasm/wasm-objects.cc
+++ b/deps/v8/src/wasm/wasm-objects.cc
@@ -1508,10 +1508,15 @@ void WasmInstanceObject::ImportWasmJSFunctionIntoTable(
callable = resolved.second; // Update to ultimate target.
DCHECK_NE(compiler::WasmImportCallKind::kLinkError, kind);
wasm::CompilationEnv env = native_module->CreateCompilationEnv();
- SharedFunctionInfo shared = js_function->shared();
+ // {expected_arity} is only used if kind == kJSFunctionArityMismatch.
+ int expected_arity = -1;
+ if (kind == compiler::WasmImportCallKind::kJSFunctionArityMismatch) {
+ expected_arity = Handle<JSFunction>::cast(callable)
+ ->shared()
+ .internal_formal_parameter_count();
+ }
wasm::WasmCompilationResult result = compiler::CompileWasmImportCallWrapper(
- isolate->wasm_engine(), &env, kind, sig, false,
- shared.internal_formal_parameter_count());
+ isolate->wasm_engine(), &env, kind, sig, false, expected_arity);
std::unique_ptr<wasm::WasmCode> wasm_code = native_module->AddCode(
result.func_index, result.code_desc, result.frame_slot_count,
result.tagged_parameter_slots,
@@ -1674,6 +1679,7 @@ Handle<WasmExceptionObject> WasmExceptionObject::New(
return exception;
}
+// TODO(9495): Update this if function type variance is introduced.
bool WasmExceptionObject::MatchesSignature(const wasm::FunctionSig* sig) {
DCHECK_EQ(0, sig->return_count());
DCHECK_LE(sig->parameter_count(), std::numeric_limits<int>::max());
@@ -1687,6 +1693,7 @@ bool WasmExceptionObject::MatchesSignature(const wasm::FunctionSig* sig) {
return true;
}
+// TODO(9495): Update this if function type variance is introduced.
bool WasmCapiFunction::MatchesSignature(const wasm::FunctionSig* sig) const {
// TODO(jkummerow): Unify with "SignatureHelper" in c-api.cc.
int param_count = static_cast<int>(sig->parameter_count());
@@ -1833,16 +1840,14 @@ bool WasmCapiFunction::IsWasmCapiFunction(Object object) {
Handle<WasmCapiFunction> WasmCapiFunction::New(
Isolate* isolate, Address call_target, Handle<Foreign> embedder_data,
Handle<PodArray<wasm::ValueType>> serialized_signature) {
- Handle<WasmCapiFunctionData> fun_data =
- Handle<WasmCapiFunctionData>::cast(isolate->factory()->NewStruct(
- WASM_CAPI_FUNCTION_DATA_TYPE, AllocationType::kOld));
- fun_data->set_call_target(call_target);
- fun_data->set_embedder_data(*embedder_data);
- fun_data->set_serialized_signature(*serialized_signature);
// TODO(jkummerow): Install a JavaScript wrapper. For now, calling
// these functions directly is unsupported; they can only be called
// from Wasm code.
- fun_data->set_wrapper_code(isolate->builtins()->builtin(Builtins::kIllegal));
+ Handle<WasmCapiFunctionData> fun_data =
+ isolate->factory()->NewWasmCapiFunctionData(
+ call_target, embedder_data,
+ isolate->builtins()->builtin_handle(Builtins::kIllegal),
+ serialized_signature, AllocationType::kOld);
Handle<SharedFunctionInfo> shared =
isolate->factory()->NewSharedFunctionInfoForWasmCapiFunction(fun_data);
return Handle<WasmCapiFunction>::cast(
@@ -1884,10 +1889,11 @@ Handle<WasmExportedFunction> WasmExportedFunction::New(
function_data->set_instance(*instance);
function_data->set_jump_table_offset(jump_table_offset);
function_data->set_function_index(func_index);
+ function_data->set_signature(*sig_foreign);
+ function_data->set_call_count(0);
function_data->set_c_wrapper_code(Smi::zero(), SKIP_WRITE_BARRIER);
function_data->set_wasm_call_target(Smi::zero(), SKIP_WRITE_BARRIER);
function_data->set_packed_args_size(0);
- function_data->set_signature(*sig_foreign);
MaybeHandle<String> maybe_name;
bool is_asm_js_module = instance->module_object().is_asm_js();
@@ -1948,6 +1954,23 @@ const wasm::FunctionSig* WasmExportedFunction::sig() {
return instance().module()->functions[function_index()].sig;
}
+bool WasmExportedFunction::MatchesSignature(
+ const WasmModule* other_module, const wasm::FunctionSig* other_sig) {
+ const wasm::FunctionSig* sig = this->sig();
+ if (sig->parameter_count() != other_sig->parameter_count() ||
+ sig->return_count() != other_sig->return_count()) {
+ return false;
+ }
+
+ for (size_t i = 0; i < sig->all().size(); i++) {
+ if (!wasm::EquivalentTypes(sig->all()[i], other_sig->all()[i],
+ this->instance().module(), other_module)) {
+ return false;
+ }
+ }
+ return true;
+}
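A hedged sketch of how the new cross-module check is typically consumed during import resolution (editorial; {exported_function}, {importing_module}, {expected_sig} and {thrower} are placeholders for state available at that point):

  // {exported_function} is the WasmExportedFunction received as an import.
  if (!exported_function->MatchesSignature(importing_module, expected_sig)) {
    thrower->LinkError("imported function does not match the expected type");
    return false;
  }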
+
// static
bool WasmJSFunction::IsWasmJSFunction(Object object) {
if (!object.IsJSFunction()) return false;
@@ -1955,8 +1978,6 @@ bool WasmJSFunction::IsWasmJSFunction(Object object) {
return js_function.shared().HasWasmJSFunctionData();
}
-// TODO(7748): WasmJSFunctions should compile/find and store an import wrapper
-// in case they are called from within wasm.
Handle<WasmJSFunction> WasmJSFunction::New(Isolate* isolate,
const wasm::FunctionSig* sig,
Handle<JSReceiver> callable) {
@@ -1973,6 +1994,7 @@ Handle<WasmJSFunction> WasmJSFunction::New(Isolate* isolate,
// signature instead of compiling a new one for every instantiation.
Handle<Code> wrapper_code =
compiler::CompileJSToJSWrapper(isolate, sig, nullptr).ToHandleChecked();
+
Handle<WasmJSFunctionData> function_data =
Handle<WasmJSFunctionData>::cast(isolate->factory()->NewStruct(
WASM_JS_FUNCTION_DATA_TYPE, AllocationType::kOld));
@@ -1981,6 +2003,30 @@ Handle<WasmJSFunction> WasmJSFunction::New(Isolate* isolate,
function_data->set_serialized_signature(*serialized_sig);
function_data->set_callable(*callable);
function_data->set_wrapper_code(*wrapper_code);
+ // Use Abort() as a default value (it will never be called if not overwritten
+ // below).
+ function_data->set_wasm_to_js_wrapper_code(
+ isolate->heap()->builtin(Builtins::kAbort));
+
+ if (wasm::WasmFeatures::FromIsolate(isolate).has_typed_funcref()) {
+ using CK = compiler::WasmImportCallKind;
+ int expected_arity = parameter_count;
+ CK kind = compiler::kDefaultImportCallKind;
+ if (callable->IsJSFunction()) {
+ SharedFunctionInfo shared = Handle<JSFunction>::cast(callable)->shared();
+ expected_arity = shared.internal_formal_parameter_count();
+ if (expected_arity != parameter_count) {
+ kind = CK::kJSFunctionArityMismatch;
+ }
+ }
+ // TODO(wasm): Think about caching and sharing the wasm-to-JS wrappers per
+ // signature instead of compiling a new one for every instantiation.
+ Handle<Code> wasm_to_js_wrapper_code =
+ compiler::CompileWasmToJSWrapper(isolate, sig, kind, expected_arity)
+ .ToHandleChecked();
+ function_data->set_wasm_to_js_wrapper_code(*wasm_to_js_wrapper_code);
+ }
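For illustration, the arity check above in isolation (editorial sketch; all names are local to this example and not part of the patch):

  enum class WrapperKind { kDefault, kArityMismatch };
  WrapperKind SelectWrapperKind(int js_declared_arity, int wasm_param_count) {
    return js_declared_arity == wasm_param_count ? WrapperKind::kDefault
                                                 : WrapperKind::kArityMismatch;
  }
  // e.g. a JS function add(a, b) imported for the wasm signature
  // (i32, i32, i32) -> i32 yields SelectWrapperKind(2, 3) == kArityMismatch,
  // which is when the arity-adapting wasm-to-JS wrapper above is compiled.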
+
Handle<String> name = isolate->factory()->Function_string();
if (callable->IsJSFunction()) {
name = JSFunction::GetName(Handle<JSFunction>::cast(callable));
@@ -2012,6 +2058,7 @@ const wasm::FunctionSig* WasmJSFunction::GetSignature(Zone* zone) {
return zone->New<wasm::FunctionSig>(return_count, parameter_count, types);
}
+// TODO(9495): Update this if function type variance is introduced.
bool WasmJSFunction::MatchesSignature(const wasm::FunctionSig* sig) {
DCHECK_LE(sig->all().size(), kMaxInt);
int sig_size = static_cast<int>(sig->all().size());
diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h
index d269c8df4f..dcef1aec8b 100644
--- a/deps/v8/src/wasm/wasm-objects.h
+++ b/deps/v8/src/wasm/wasm-objects.h
@@ -16,7 +16,6 @@
#include "src/objects/objects.h"
#include "src/wasm/struct-types.h"
#include "src/wasm/value-type.h"
-#include "torque-generated/class-definitions.h"
// Has to be the last include (doesn't have include guards)
#include "src/objects/object-macros.h"
@@ -44,13 +43,14 @@ class WasmExternalFunction;
class WasmInstanceObject;
class WasmJSFunction;
class WasmModuleObject;
-class WasmIndirectFunctionTable;
enum class SharedFlag : uint8_t;
template <class CppType>
class Managed;
+#include "torque-generated/src/wasm/wasm-objects-tq.inc"
+
#define DECL_OPTIONAL_ACCESSORS(name, type) \
DECL_GETTER(has_##name, bool) \
DECL_ACCESSORS(name, type)
@@ -666,6 +666,9 @@ class WasmExportedFunction : public JSFunction {
V8_EXPORT_PRIVATE const wasm::FunctionSig* sig();
+ bool MatchesSignature(const wasm::WasmModule* other_module,
+ const wasm::FunctionSig* other_sig);
+
DECL_CAST(WasmExportedFunction)
OBJECT_CONSTRUCTORS(WasmExportedFunction, JSFunction);
};
@@ -750,27 +753,6 @@ class WasmIndirectFunctionTable : public Struct {
OBJECT_CONSTRUCTORS(WasmIndirectFunctionTable, Struct);
};
-class WasmCapiFunctionData : public Struct {
- public:
- DECL_PRIMITIVE_ACCESSORS(call_target, Address)
- DECL_ACCESSORS(embedder_data, Foreign)
- DECL_ACCESSORS(wrapper_code, Code)
- DECL_ACCESSORS(serialized_signature, PodArray<wasm::ValueType>)
-
- DECL_CAST(WasmCapiFunctionData)
-
- DECL_PRINTER(WasmCapiFunctionData)
- DECL_VERIFIER(WasmCapiFunctionData)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- TORQUE_GENERATED_WASM_CAPI_FUNCTION_DATA_FIELDS)
-
- STATIC_ASSERT(kStartOfStrongFieldsOffset == kEmbedderDataOffset);
- using BodyDescriptor = FlexibleBodyDescriptor<kStartOfStrongFieldsOffset>;
-
- OBJECT_CONSTRUCTORS(WasmCapiFunctionData, Struct);
-};
-
// Information for a WasmExportedFunction which is referenced as the function
// data of the SharedFunctionInfo underlying the function. For details please
// see the {SharedFunctionInfo::HasWasmExportedFunctionData} predicate.
@@ -780,10 +762,11 @@ class WasmExportedFunctionData : public Struct {
DECL_ACCESSORS(instance, WasmInstanceObject)
DECL_INT_ACCESSORS(jump_table_offset)
DECL_INT_ACCESSORS(function_index)
+ DECL_ACCESSORS(signature, Foreign)
+ DECL_INT_ACCESSORS(call_count)
DECL_ACCESSORS(c_wrapper_code, Object)
DECL_ACCESSORS(wasm_call_target, Object)
DECL_INT_ACCESSORS(packed_args_size)
- DECL_ACCESSORS(signature, Foreign)
DECL_CAST(WasmExportedFunctionData)
@@ -809,6 +792,7 @@ class WasmJSFunctionData : public Struct {
DECL_ACCESSORS(serialized_signature, PodArray<wasm::ValueType>)
DECL_ACCESSORS(callable, JSReceiver)
DECL_ACCESSORS(wrapper_code, Code)
+ DECL_ACCESSORS(wasm_to_js_wrapper_code, Code)
DECL_CAST(WasmJSFunctionData)
diff --git a/deps/v8/src/wasm/wasm-objects.tq b/deps/v8/src/wasm/wasm-objects.tq
index bd1fdfd783..fc5cfd6985 100644
--- a/deps/v8/src/wasm/wasm-objects.tq
+++ b/deps/v8/src/wasm/wasm-objects.tq
@@ -2,10 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+@useParentTypeChecker
type PodArrayOfWasmValueType extends ByteArray
-constexpr 'PodArray<wasm::ValueType>';
+ constexpr 'PodArray<wasm::ValueType>';
+@useParentTypeChecker
type ManagedWasmNativeModule extends Foreign
-constexpr 'Managed<wasm::NativeModule>';
+ constexpr 'Managed<wasm::NativeModule>';
type WasmValueType extends uint8 constexpr 'wasm::ValueType::Kind';
extern class WasmInstanceObject extends JSObject;
@@ -15,23 +17,26 @@ extern class WasmExportedFunctionData extends Struct {
instance: WasmInstanceObject;
jump_table_offset: Smi;
function_index: Smi;
+ signature: Foreign;
+ call_count: Smi;
// The remaining fields are for fast calling from C++. The contract is
// that they are lazily populated, and either all will be present or none.
c_wrapper_code: Object;
wasm_call_target: Smi|Foreign;
packed_args_size: Smi;
- signature: Foreign;
}
extern class WasmJSFunctionData extends Struct {
callable: JSReceiver;
wrapper_code: Code;
+ wasm_to_js_wrapper_code: Code;
serialized_return_count: Smi;
serialized_parameter_count: Smi;
serialized_signature: PodArrayOfWasmValueType;
}
-extern class WasmCapiFunctionData extends Struct {
+@export
+class WasmCapiFunctionData extends HeapObject {
call_target: RawPtr;
embedder_data: Foreign; // Managed<wasm::FuncData>
wrapper_code: Code;
diff --git a/deps/v8/src/wasm/wasm-opcodes-inl.h b/deps/v8/src/wasm/wasm-opcodes-inl.h
index e050d12947..5e0f172bd5 100644
--- a/deps/v8/src/wasm/wasm-opcodes-inl.h
+++ b/deps/v8/src/wasm/wasm-opcodes-inl.h
@@ -35,7 +35,6 @@ namespace wasm {
#define CASE_S64x2_OP(name, str) CASE_OP(S64x2##name, "s64x2." str)
#define CASE_S32x4_OP(name, str) CASE_OP(S32x4##name, "s32x4." str)
#define CASE_S16x8_OP(name, str) CASE_OP(S16x8##name, "s16x8." str)
-#define CASE_V64x2_OP(name, str) CASE_OP(V64x2##name, "v64x2." str)
#define CASE_V32x4_OP(name, str) CASE_OP(V32x4##name, "v32x4." str)
#define CASE_V16x8_OP(name, str) CASE_OP(V16x8##name, "v16x8." str)
#define CASE_V8x16_OP(name, str) CASE_OP(V8x16##name, "v8x16." str)
@@ -235,7 +234,8 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_SIMD_OP(Neg, "neg")
CASE_SIMDF_OP(Sqrt, "sqrt")
CASE_SIMD_OP(Eq, "eq")
- CASE_SIMD_OP(Ne, "ne")
+ CASE_SIMDF_OP(Ne, "ne")
+ CASE_SIMDI_OP(Ne, "ne")
CASE_SIMD_OP(Add, "add")
CASE_SIMD_OP(Sub, "sub")
CASE_SIMD_OP(Mul, "mul")
@@ -267,27 +267,23 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_SIGN_OP(I8x16, ExtractLane, "extract_lane")
CASE_SIMDI_OP(ReplaceLane, "replace_lane")
CASE_SIGN_OP(SIMDI, Min, "min")
- CASE_SIGN_OP(I64x2, Min, "min")
CASE_SIGN_OP(SIMDI, Max, "max")
- CASE_SIGN_OP(I64x2, Max, "max")
CASE_SIGN_OP(SIMDI, Lt, "lt")
- CASE_SIGN_OP(I64x2, Lt, "lt")
CASE_SIGN_OP(SIMDI, Le, "le")
- CASE_SIGN_OP(I64x2, Le, "le")
CASE_SIGN_OP(SIMDI, Gt, "gt")
- CASE_SIGN_OP(I64x2, Gt, "gt")
CASE_SIGN_OP(SIMDI, Ge, "ge")
- CASE_SIGN_OP(I64x2, Ge, "ge")
+ CASE_CONVERT_OP(Convert, I64x2, I32x4Low, "i32", "convert")
+ CASE_CONVERT_OP(Convert, I64x2, I32x4High, "i32", "convert")
CASE_SIGN_OP(SIMDI, Shr, "shr")
CASE_SIGN_OP(I64x2, Shr, "shr")
CASE_SIMDI_OP(Shl, "shl")
CASE_I64x2_OP(Shl, "shl")
CASE_I32x4_OP(AddHoriz, "add_horizontal")
CASE_I16x8_OP(AddHoriz, "add_horizontal")
- CASE_SIGN_OP(I16x8, AddSaturate, "add_saturate")
- CASE_SIGN_OP(I8x16, AddSaturate, "add_saturate")
- CASE_SIGN_OP(I16x8, SubSaturate, "sub_saturate")
- CASE_SIGN_OP(I8x16, SubSaturate, "sub_saturate")
+ CASE_SIGN_OP(I16x8, AddSat, "add_sat")
+ CASE_SIGN_OP(I8x16, AddSat, "add_sat")
+ CASE_SIGN_OP(I16x8, SubSat, "sub_sat")
+ CASE_SIGN_OP(I8x16, SubSat, "sub_sat")
CASE_S128_OP(And, "and")
CASE_S128_OP(Or, "or")
CASE_S128_OP(Xor, "xor")
@@ -298,13 +294,11 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_I8x16_OP(Shuffle, "shuffle")
CASE_SIMDV_OP(AnyTrue, "any_true")
CASE_SIMDV_OP(AllTrue, "all_true")
- CASE_V64x2_OP(AnyTrue, "any_true")
- CASE_V64x2_OP(AllTrue, "all_true")
CASE_SIMDF_OP(Qfma, "qfma")
CASE_SIMDF_OP(Qfms, "qfms")
- CASE_S128_OP(LoadMem32Zero, "load32_zero")
- CASE_S128_OP(LoadMem64Zero, "load64_zero")
+ CASE_S128_OP(Load32Zero, "load32_zero")
+ CASE_S128_OP(Load64Zero, "load64_zero")
CASE_S128_OP(Load8Splat, "load8_splat")
CASE_S128_OP(Load16Splat, "load16_splat")
CASE_S128_OP(Load32Splat, "load32_splat")
@@ -315,17 +309,28 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_S128_OP(Load16x4U, "load16x4_u")
CASE_S128_OP(Load32x2S, "load32x2_s")
CASE_S128_OP(Load32x2U, "load32x2_u")
+ CASE_S128_OP(Load8Lane, "load8_lane")
+ CASE_S128_OP(Load16Lane, "load16_lane")
+ CASE_S128_OP(Load32Lane, "load32_lane")
+ CASE_S128_OP(Load64Lane, "load64_lane")
+ CASE_S128_OP(Store8Lane, "store8_lane")
+ CASE_S128_OP(Store16Lane, "store16_lane")
+ CASE_S128_OP(Store32Lane, "store32_lane")
+ CASE_S128_OP(Store64Lane, "store64_lane")
CASE_I8x16_OP(RoundingAverageU, "avgr_u")
CASE_I16x8_OP(RoundingAverageU, "avgr_u")
+ CASE_I16x8_OP(Q15MulRSatS, "q15mulr_sat_s")
CASE_I8x16_OP(Abs, "abs")
+ CASE_I8x16_OP(Popcnt, "popcnt")
CASE_I16x8_OP(Abs, "abs")
CASE_I32x4_OP(Abs, "abs")
CASE_I8x16_OP(BitMask, "bitmask")
CASE_I16x8_OP(BitMask, "bitmask")
CASE_I32x4_OP(BitMask, "bitmask")
+ CASE_I64x2_OP(BitMask, "bitmask")
CASE_F32x4_OP(Pmin, "pmin")
CASE_F32x4_OP(Pmax, "pmax")
@@ -343,6 +348,18 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_I32x4_OP(DotI16x8S, "dot_i16x8_s")
+ CASE_SIGN_OP(I16x8, ExtMulLowI8x16, "extmul_low_i8x16")
+ CASE_SIGN_OP(I16x8, ExtMulHighI8x16, "extmul_high_i8x16")
+ CASE_SIGN_OP(I32x4, ExtMulLowI16x8, "extmul_low_i16x8")
+ CASE_SIGN_OP(I32x4, ExtMulHighI16x8, "extmul_high_i16x8")
+ CASE_SIGN_OP(I64x2, ExtMulLowI32x4, "extmul_low_i32x4")
+ CASE_SIGN_OP(I64x2, ExtMulHighI32x4, "extmul_high_i32x4")
+ CASE_SIMDI_OP(SignSelect, "signselect")
+ CASE_I64x2_OP(SignSelect, "signselect")
+
+ CASE_SIGN_OP(I32x4, ExtAddPairwiseI16x8, "extadd_pairwise_i16x8")
+ CASE_SIGN_OP(I16x8, ExtAddPairwiseI8x16, "extadd_pairwise_i8x16")
+
// Atomic operations.
CASE_OP(AtomicNotify, "atomic.notify")
CASE_INT_OP(AtomicWait, "atomic.wait")
diff --git a/deps/v8/src/wasm/wasm-opcodes.h b/deps/v8/src/wasm/wasm-opcodes.h
index 04767f53a2..76812446a9 100644
--- a/deps/v8/src/wasm/wasm-opcodes.h
+++ b/deps/v8/src/wasm/wasm-opcodes.h
@@ -21,8 +21,9 @@ class WasmFeatures;
struct WasmModule;
std::ostream& operator<<(std::ostream& os, const FunctionSig& function);
-bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmModule* module,
- const WasmFeatures&);
+bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
+ const WasmModule* module,
+ const WasmFeatures&);
// Control expressions and blocks.
#define FOREACH_CONTROL_OPCODE(V) \
@@ -287,7 +288,9 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmModule* module,
V(S128Load16Splat, 0xfd08, s_i) \
V(S128Load32Splat, 0xfd09, s_i) \
V(S128Load64Splat, 0xfd0a, s_i) \
- V(S128StoreMem, 0xfd0b, v_is)
+ V(S128StoreMem, 0xfd0b, v_is) \
+ V(S128Load32Zero, 0xfdfc, s_i) \
+ V(S128Load64Zero, 0xfdfd, s_i)
#define FOREACH_SIMD_CONST_OPCODE(V) V(S128Const, 0xfd0c, _)
@@ -360,11 +363,11 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmModule* module,
V(I8x16ShrS, 0xfd6c, s_si) \
V(I8x16ShrU, 0xfd6d, s_si) \
V(I8x16Add, 0xfd6e, s_ss) \
- V(I8x16AddSaturateS, 0xfd6f, s_ss) \
- V(I8x16AddSaturateU, 0xfd70, s_ss) \
+ V(I8x16AddSatS, 0xfd6f, s_ss) \
+ V(I8x16AddSatU, 0xfd70, s_ss) \
V(I8x16Sub, 0xfd71, s_ss) \
- V(I8x16SubSaturateS, 0xfd72, s_ss) \
- V(I8x16SubSaturateU, 0xfd73, s_ss) \
+ V(I8x16SubSatS, 0xfd72, s_ss) \
+ V(I8x16SubSatU, 0xfd73, s_ss) \
V(I8x16MinS, 0xfd76, s_ss) \
V(I8x16MinU, 0xfd77, s_ss) \
V(I8x16MaxS, 0xfd78, s_ss) \
@@ -385,11 +388,11 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmModule* module,
V(I16x8ShrS, 0xfd8c, s_si) \
V(I16x8ShrU, 0xfd8d, s_si) \
V(I16x8Add, 0xfd8e, s_ss) \
- V(I16x8AddSaturateS, 0xfd8f, s_ss) \
- V(I16x8AddSaturateU, 0xfd90, s_ss) \
+ V(I16x8AddSatS, 0xfd8f, s_ss) \
+ V(I16x8AddSatU, 0xfd90, s_ss) \
V(I16x8Sub, 0xfd91, s_ss) \
- V(I16x8SubSaturateS, 0xfd92, s_ss) \
- V(I16x8SubSaturateU, 0xfd93, s_ss) \
+ V(I16x8SubSatS, 0xfd92, s_ss) \
+ V(I16x8SubSatU, 0xfd93, s_ss) \
V(I16x8Mul, 0xfd95, s_ss) \
V(I16x8MinS, 0xfd96, s_ss) \
V(I16x8MinU, 0xfd97, s_ss) \
@@ -415,6 +418,7 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmModule* module,
V(I32x4MinU, 0xfdb7, s_ss) \
V(I32x4MaxS, 0xfdb8, s_ss) \
V(I32x4MaxU, 0xfdb9, s_ss) \
+ V(I32x4DotI16x8S, 0xfdba, s_ss) \
V(I64x2Neg, 0xfdc1, s_s) \
V(I64x2Shl, 0xfdcb, s_si) \
V(I64x2ShrS, 0xfdcc, s_si) \
@@ -458,36 +462,53 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmModule* module,
V(F64x2NearestInt, 0xfddf, s_s)
#define FOREACH_SIMD_POST_MVP_MEM_OPCODE(V) \
- V(S128LoadMem32Zero, 0xfdfc, s_i) \
- V(S128LoadMem64Zero, 0xfdfd, s_i)
-
-#define FOREACH_SIMD_POST_MVP_OPCODE(V) \
- V(I8x16Mul, 0xfd75, s_ss) \
- V(V64x2AnyTrue, 0xfdc2, i_s) \
- V(V64x2AllTrue, 0xfdc3, i_s) \
- V(I64x2Eq, 0xfdc0, s_ss) \
- V(I64x2Ne, 0xfdc4, s_ss) \
- V(I64x2LtS, 0xfdc5, s_ss) \
- V(I64x2LtU, 0xfdc6, s_ss) \
- V(I64x2GtS, 0xfdc7, s_ss) \
- V(I64x2GtU, 0xfdc8, s_ss) \
- V(I64x2LeS, 0xfdc9, s_ss) \
- V(I64x2LeU, 0xfdca, s_ss) \
- V(I64x2GeS, 0xfdcf, s_ss) \
- V(I64x2GeU, 0xfdd0, s_ss) \
- V(I64x2MinS, 0xfdd6, s_ss) \
- V(I64x2MinU, 0xfdd7, s_ss) \
- V(I64x2MaxS, 0xfde2, s_ss) \
- V(I64x2MaxU, 0xfdee, s_ss) \
- V(F32x4Qfma, 0xfdb4, s_sss) \
- V(F32x4Qfms, 0xfdd4, s_sss) \
- V(F64x2Qfma, 0xfdfe, s_sss) \
- V(F64x2Qfms, 0xfdff, s_sss) \
- V(I16x8AddHoriz, 0xfdaf, s_ss) \
- V(I32x4AddHoriz, 0xfdb0, s_ss) \
- V(I32x4DotI16x8S, 0xfdba, s_ss) \
- V(F32x4AddHoriz, 0xfdb2, s_ss) \
- V(F32x4RecipApprox, 0xfdb3, s_s) \
+ V(S128Load8Lane, 0xfd58, s_is) \
+ V(S128Load16Lane, 0xfd59, s_is) \
+ V(S128Load32Lane, 0xfd5a, s_is) \
+ V(S128Load64Lane, 0xfd5b, s_is) \
+ V(S128Store8Lane, 0xfd5c, v_is) \
+ V(S128Store16Lane, 0xfd5d, v_is) \
+ V(S128Store32Lane, 0xfd5e, v_is) \
+ V(S128Store64Lane, 0xfd5f, v_is)
+
+#define FOREACH_SIMD_POST_MVP_OPCODE(V) \
+ V(I8x16Mul, 0xfd75, s_ss) \
+ V(I8x16Popcnt, 0xfd7c, s_s) \
+ V(I8x16SignSelect, 0xfd7d, s_sss) \
+ V(I16x8SignSelect, 0xfd7e, s_sss) \
+ V(I32x4SignSelect, 0xfd7f, s_sss) \
+ V(I64x2SignSelect, 0xfd94, s_sss) \
+ V(I16x8Q15MulRSatS, 0xfd9c, s_ss) \
+ V(I16x8ExtMulLowI8x16S, 0xfd9a, s_ss) \
+ V(I16x8ExtMulHighI8x16S, 0xfd9d, s_ss) \
+ V(I16x8ExtMulLowI8x16U, 0xfd9e, s_ss) \
+ V(I16x8ExtMulHighI8x16U, 0xfd9f, s_ss) \
+ V(I32x4ExtMulLowI16x8S, 0xfdbb, s_ss) \
+ V(I32x4ExtMulHighI16x8S, 0xfdbd, s_ss) \
+ V(I32x4ExtMulLowI16x8U, 0xfdbe, s_ss) \
+ V(I32x4ExtMulHighI16x8U, 0xfdbf, s_ss) \
+ V(I64x2ExtMulLowI32x4S, 0xfdd2, s_ss) \
+ V(I64x2ExtMulHighI32x4S, 0xfdd3, s_ss) \
+ V(I64x2ExtMulLowI32x4U, 0xfdd6, s_ss) \
+ V(I64x2ExtMulHighI32x4U, 0xfdd7, s_ss) \
+ V(I32x4ExtAddPairwiseI16x8S, 0xfda5, s_s) \
+ V(I32x4ExtAddPairwiseI16x8U, 0xfda6, s_s) \
+ V(I16x8ExtAddPairwiseI8x16S, 0xfdc2, s_s) \
+ V(I16x8ExtAddPairwiseI8x16U, 0xfdc3, s_s) \
+ V(I64x2Eq, 0xfdc0, s_ss) \
+ V(F32x4Qfma, 0xfdb4, s_sss) \
+ V(I64x2BitMask, 0xfdc4, i_s) \
+ V(I64x2SConvertI32x4Low, 0xfdc7, s_s) \
+ V(I64x2SConvertI32x4High, 0xfdc8, s_s) \
+ V(I64x2UConvertI32x4Low, 0xfdc9, s_s) \
+ V(I64x2UConvertI32x4High, 0xfdca, s_s) \
+ V(F32x4Qfms, 0xfdd4, s_sss) \
+ V(F64x2Qfma, 0xfdfe, s_sss) \
+ V(F64x2Qfms, 0xfdff, s_sss) \
+ V(I16x8AddHoriz, 0xfdaf, s_ss) \
+ V(I32x4AddHoriz, 0xfdb0, s_ss) \
+ V(F32x4AddHoriz, 0xfdb2, s_ss) \
+ V(F32x4RecipApprox, 0xfdb3, s_s) \
V(F32x4RecipSqrtApprox, 0xfdbc, s_s)
#define FOREACH_SIMD_1_OPERAND_1_PARAM_OPCODE(V) \
@@ -700,17 +721,18 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmModule* module,
V(i_ci, kWasmI32, kWasmFuncRef, kWasmI32) \
V(i_qq, kWasmI32, kWasmEqRef, kWasmEqRef)
-#define FOREACH_SIMD_SIGNATURE(V) \
- V(s_s, kWasmS128, kWasmS128) \
- V(s_f, kWasmS128, kWasmF32) \
- V(s_d, kWasmS128, kWasmF64) \
- V(s_ss, kWasmS128, kWasmS128, kWasmS128) \
- V(s_i, kWasmS128, kWasmI32) \
- V(s_l, kWasmS128, kWasmI64) \
- V(s_si, kWasmS128, kWasmS128, kWasmI32) \
- V(i_s, kWasmI32, kWasmS128) \
- V(v_is, kWasmStmt, kWasmI32, kWasmS128) \
- V(s_sss, kWasmS128, kWasmS128, kWasmS128, kWasmS128)
+#define FOREACH_SIMD_SIGNATURE(V) \
+ V(s_s, kWasmS128, kWasmS128) \
+ V(s_f, kWasmS128, kWasmF32) \
+ V(s_d, kWasmS128, kWasmF64) \
+ V(s_ss, kWasmS128, kWasmS128, kWasmS128) \
+ V(s_i, kWasmS128, kWasmI32) \
+ V(s_l, kWasmS128, kWasmI64) \
+ V(s_si, kWasmS128, kWasmS128, kWasmI32) \
+ V(i_s, kWasmI32, kWasmS128) \
+ V(v_is, kWasmStmt, kWasmI32, kWasmS128) \
+ V(s_sss, kWasmS128, kWasmS128, kWasmS128, kWasmS128) \
+ V(s_is, kWasmS128, kWasmI32, kWasmS128)
#define FOREACH_PREFIX(V) \
V(Numeric, 0xfc) \
diff --git a/deps/v8/src/wasm/wasm-result.h b/deps/v8/src/wasm/wasm-result.h
index 784dd0f615..8f0d5427aa 100644
--- a/deps/v8/src/wasm/wasm-result.h
+++ b/deps/v8/src/wasm/wasm-result.h
@@ -63,6 +63,8 @@ template <typename T>
class Result {
public:
Result() = default;
+ Result(const Result&) = delete;
+ Result& operator=(const Result&) = delete;
template <typename S>
explicit Result(S&& value) : value_(std::forward<S>(value)) {}
@@ -104,8 +106,6 @@ class Result {
T value_ = T{};
WasmError error_;
-
- DISALLOW_COPY_AND_ASSIGN(Result);
};
// A helper for generating error messages that bubble up to JS exceptions.
@@ -113,8 +113,10 @@ class V8_EXPORT_PRIVATE ErrorThrower {
public:
ErrorThrower(Isolate* isolate, const char* context)
: isolate_(isolate), context_(context) {}
- // Explicitly allow move-construction. Disallow copy (below).
+ // Explicitly allow move-construction. Disallow copy.
ErrorThrower(ErrorThrower&& other) V8_NOEXCEPT;
+ ErrorThrower(const ErrorThrower&) = delete;
+ ErrorThrower& operator=(const ErrorThrower&) = delete;
~ErrorThrower();
PRINTF_FORMAT(2, 3) void TypeError(const char* fmt, ...);
@@ -165,7 +167,6 @@ class V8_EXPORT_PRIVATE ErrorThrower {
// ErrorThrower should always be stack-allocated, since it constitutes a scope
// (things happen in the destructor).
DISALLOW_NEW_AND_DELETE()
- DISALLOW_COPY_AND_ASSIGN(ErrorThrower);
};
// Like an ErrorThrower, but turns all pending exceptions into scheduled
diff --git a/deps/v8/src/wasm/wasm-serialization.cc b/deps/v8/src/wasm/wasm-serialization.cc
index f4f5f99268..1c73fc5c41 100644
--- a/deps/v8/src/wasm/wasm-serialization.cc
+++ b/deps/v8/src/wasm/wasm-serialization.cc
@@ -208,6 +208,9 @@ constexpr size_t kCodeHeaderSize = sizeof(bool) + // whether code is present
// a tag from the Address of an external reference and vice versa.
class ExternalReferenceList {
public:
+ ExternalReferenceList(const ExternalReferenceList&) = delete;
+ ExternalReferenceList& operator=(const ExternalReferenceList&) = delete;
+
uint32_t tag_from_address(Address ext_ref_address) const {
auto tag_addr_less_than = [this](uint32_t tag, Address searched_addr) {
return external_reference_by_tag_[tag] < searched_addr;
@@ -263,7 +266,6 @@ class ExternalReferenceList {
#undef RUNTIME_ADDR
};
uint32_t tags_ordered_by_address_[kNumExternalReferences];
- DISALLOW_COPY_AND_ASSIGN(ExternalReferenceList);
};
static_assert(std::is_trivially_destructible<ExternalReferenceList>::value,
@@ -273,8 +275,9 @@ static_assert(std::is_trivially_destructible<ExternalReferenceList>::value,
class V8_EXPORT_PRIVATE NativeModuleSerializer {
public:
- NativeModuleSerializer() = delete;
NativeModuleSerializer(const NativeModule*, Vector<WasmCode* const>);
+ NativeModuleSerializer(const NativeModuleSerializer&) = delete;
+ NativeModuleSerializer& operator=(const NativeModuleSerializer&) = delete;
size_t Measure() const;
bool Write(Writer* writer);
@@ -287,8 +290,6 @@ class V8_EXPORT_PRIVATE NativeModuleSerializer {
const NativeModule* const native_module_;
Vector<WasmCode* const> code_table_;
bool write_called_;
-
- DISALLOW_COPY_AND_ASSIGN(NativeModuleSerializer);
};
NativeModuleSerializer::NativeModuleSerializer(
@@ -468,8 +469,9 @@ bool WasmSerializer::SerializeNativeModule(Vector<byte> buffer) const {
class V8_EXPORT_PRIVATE NativeModuleDeserializer {
public:
- NativeModuleDeserializer() = delete;
explicit NativeModuleDeserializer(NativeModule*);
+ NativeModuleDeserializer(const NativeModuleDeserializer&) = delete;
+ NativeModuleDeserializer& operator=(const NativeModuleDeserializer&) = delete;
bool Read(Reader* reader);
@@ -479,8 +481,6 @@ class V8_EXPORT_PRIVATE NativeModuleDeserializer {
NativeModule* const native_module_;
bool read_called_;
-
- DISALLOW_COPY_AND_ASSIGN(NativeModuleDeserializer);
};
NativeModuleDeserializer::NativeModuleDeserializer(NativeModule* native_module)
diff --git a/deps/v8/src/wasm/wasm-value.h b/deps/v8/src/wasm/wasm-value.h
index 3926a4c7e9..81dbd3e9cb 100644
--- a/deps/v8/src/wasm/wasm-value.h
+++ b/deps/v8/src/wasm/wasm-value.h
@@ -46,10 +46,21 @@ class Simd128 {
const uint8_t* bytes() { return val_; }
+ template <typename T>
+ inline T to();
+
private:
uint8_t val_[16] = {0};
};
+#define DECLARE_CAST(cType, sType, name, size) \
+ template <> \
+ inline sType Simd128::to() { \
+ return to_##name(); \
+ }
+FOREACH_SIMD_TYPE(DECLARE_CAST)
+#undef DECLARE_CAST
+
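A brief usage sketch of the new templated accessor (editorial; assumes FOREACH_SIMD_TYPE contains an entry such as (int32_t, int32x4, i32x4, 4), with {s} a placeholder Simd128 value, e.g. read from a WasmValue):

  int32x4 lanes = s.to<int32x4>();  // dispatches to the existing s.to_i32x4()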
// Macro for defining WasmValue methods for different types.
// Elements:
// - name (for to_<name>() method)