summaryrefslogtreecommitdiff
path: root/deps/v8/src/wasm
diff options
context:
space:
mode:
authorYagiz Nizipli <yagiz@nizipli.com>2022-11-22 09:57:37 -0500
committerNode.js GitHub Bot <github-bot@iojs.org>2022-11-27 17:27:13 +0000
commit16e03e79688d51f95dfdbc90f5b8f56e3e995c6c (patch)
treeff2da950ca09063638de42c3ba0fd05cef610698 /deps/v8/src/wasm
parentfac00cd432dc28264859a7b304d9a59affbb6144 (diff)
downloadnode-new-16e03e79688d51f95dfdbc90f5b8f56e3e995c6c.tar.gz
deps: update V8 to 10.9.194.4
PR-URL: https://github.com/nodejs/node/pull/45579 Reviewed-By: Michaƫl Zasso <targos@protonmail.com> Reviewed-By: James M Snell <jasnell@gmail.com>
Diffstat (limited to 'deps/v8/src/wasm')
-rw-r--r--deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h21
-rw-r--r--deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h66
-rw-r--r--deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h16
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler-defs.h38
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler.h1
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-compiler.cc562
-rw-r--r--deps/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h25
-rw-r--r--deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h25
-rw-r--r--deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h520
-rw-r--r--deps/v8/src/wasm/baseline/riscv/liftoff-assembler-riscv.h20
-rw-r--r--deps/v8/src/wasm/baseline/riscv/liftoff-assembler-riscv32.h5
-rw-r--r--deps/v8/src/wasm/baseline/riscv/liftoff-assembler-riscv64.h5
-rw-r--r--deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h35
-rw-r--r--deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h26
-rw-r--r--deps/v8/src/wasm/constant-expression-interface.cc43
-rw-r--r--deps/v8/src/wasm/constant-expression-interface.h9
-rw-r--r--deps/v8/src/wasm/constant-expression.cc4
-rw-r--r--deps/v8/src/wasm/decoder.h196
-rw-r--r--deps/v8/src/wasm/function-body-decoder-impl.h1932
-rw-r--r--deps/v8/src/wasm/function-body-decoder.cc68
-rw-r--r--deps/v8/src/wasm/function-body-decoder.h13
-rw-r--r--deps/v8/src/wasm/function-compiler.cc11
-rw-r--r--deps/v8/src/wasm/graph-builder-interface.cc326
-rw-r--r--deps/v8/src/wasm/module-compiler.cc255
-rw-r--r--deps/v8/src/wasm/module-compiler.h7
-rw-r--r--deps/v8/src/wasm/module-decoder-impl.h384
-rw-r--r--deps/v8/src/wasm/module-decoder.cc5
-rw-r--r--deps/v8/src/wasm/module-decoder.h3
-rw-r--r--deps/v8/src/wasm/module-instantiate.cc29
-rw-r--r--deps/v8/src/wasm/names-provider.cc8
-rw-r--r--deps/v8/src/wasm/names-provider.h8
-rw-r--r--deps/v8/src/wasm/pgo.cc82
-rw-r--r--deps/v8/src/wasm/pgo.h32
-rw-r--r--deps/v8/src/wasm/string-builder-multiline.h4
-rw-r--r--deps/v8/src/wasm/string-builder.h5
-rw-r--r--deps/v8/src/wasm/value-type.h35
-rw-r--r--deps/v8/src/wasm/wasm-arguments.h4
-rw-r--r--deps/v8/src/wasm/wasm-code-manager.cc5
-rw-r--r--deps/v8/src/wasm/wasm-code-manager.h9
-rw-r--r--deps/v8/src/wasm/wasm-constants.h16
-rw-r--r--deps/v8/src/wasm/wasm-disassembler-impl.h14
-rw-r--r--deps/v8/src/wasm/wasm-disassembler.cc140
-rw-r--r--deps/v8/src/wasm/wasm-engine.cc94
-rw-r--r--deps/v8/src/wasm/wasm-engine.h9
-rw-r--r--deps/v8/src/wasm/wasm-external-refs.cc77
-rw-r--r--deps/v8/src/wasm/wasm-external-refs.h6
-rw-r--r--deps/v8/src/wasm/wasm-feature-flags.h28
-rw-r--r--deps/v8/src/wasm/wasm-features.cc7
-rw-r--r--deps/v8/src/wasm/wasm-features.h7
-rw-r--r--deps/v8/src/wasm/wasm-import-wrapper-cache.h4
-rw-r--r--deps/v8/src/wasm/wasm-js.cc165
-rw-r--r--deps/v8/src/wasm/wasm-module.h54
-rw-r--r--deps/v8/src/wasm/wasm-objects-inl.h4
-rw-r--r--deps/v8/src/wasm/wasm-objects.cc99
-rw-r--r--deps/v8/src/wasm/wasm-objects.h5
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.h16
-rw-r--r--deps/v8/src/wasm/wasm-subtyping.cc74
57 files changed, 3147 insertions, 2514 deletions
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index 324b4860c8..3f0ae37a40 100644
--- a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -464,6 +464,23 @@ int LiftoffAssembler::PrepareStackFrame() {
return offset;
}
+void LiftoffAssembler::CallFrameSetupStub(int declared_function_index) {
+ // TODO(jkummerow): Enable this check when we have C++20.
+ // static_assert(std::find(std::begin(wasm::kGpParamRegisters),
+ // std::end(wasm::kGpParamRegisters),
+ // kLiftoffFrameSetupFunctionReg) ==
+ // std::end(wasm::kGpParamRegisters));
+
+ // On ARM, we must push at least {lr} before calling the stub, otherwise
+ // it would get clobbered with no possibility to recover it.
+ Register scratch = r7;
+ mov(scratch, Operand(StackFrame::TypeToMarker(StackFrame::WASM)));
+ PushCommonFrame(scratch);
+ LoadConstant(LiftoffRegister(kLiftoffFrameSetupFunctionReg),
+ WasmValue(declared_function_index));
+ CallRuntimeStub(WasmCode::kWasmLiftoffFrameSetup);
+}
+
void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
int stack_param_delta) {
UseScratchRegisterScope temps(this);
@@ -496,6 +513,10 @@ void LiftoffAssembler::PatchPrepareStackFrame(
// pushed as part of frame construction, so we don't need to allocate memory
// for them anymore.
int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
+ // The frame setup builtin also pushes the feedback vector.
+ if (v8_flags.wasm_speculative_inlining) {
+ frame_size -= kSystemPointerSize;
+ }
PatchingAssembler patching_assembler(AssemblerOptions{},
buffer_start_ + offset,
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index b2b3c3ff00..b6b06b2b9e 100644
--- a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -58,10 +58,9 @@ inline constexpr Condition ToCondition(LiftoffCondition liftoff_cond) {
// -1 | StackFrame::WASM |
// -2 | instance |
// -3 | feedback vector|
-// -4 | tiering budget |
// -----+--------------------+---------------------------
-// -5 | slot 0 | ^
-// -6 | slot 1 | |
+// -4 | slot 0 | ^
+// -5 | slot 1 | |
// | | Frame slots
// | | |
// | | v
@@ -214,10 +213,10 @@ inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst,
LiftoffRegister src) {
// AnyTrue does not depend on the number of lanes, so we can use V4S for all.
UseScratchRegisterScope scope(assm);
- VRegister temp = scope.AcquireV(kFormatS);
- assm->Umaxv(temp, src.fp().V4S());
- assm->Umov(dst.gp().W(), temp, 0);
- assm->Cmp(dst.gp().W(), 0);
+ VRegister temp = scope.AcquireV(kFormat4S);
+ assm->Umaxp(temp, src.fp().V4S(), src.fp().V4S());
+ assm->Fmov(dst.gp().X(), temp.D());
+ assm->Cmp(dst.gp().X(), 0);
assm->Cset(dst.gp().W(), ne);
}
@@ -244,6 +243,22 @@ int LiftoffAssembler::PrepareStackFrame() {
return offset;
}
+void LiftoffAssembler::CallFrameSetupStub(int declared_function_index) {
+ // TODO(jkummerow): Enable this check when we have C++20.
+ // static_assert(std::find(std::begin(wasm::kGpParamRegisters),
+ // std::end(wasm::kGpParamRegisters),
+ // kLiftoffFrameSetupFunctionReg) ==
+ // std::end(wasm::kGpParamRegisters));
+
+ // On ARM64, we must push at least {lr} before calling the stub, otherwise
+ // it would get clobbered with no possibility to recover it. So just set
+ // up the frame here.
+ EnterFrame(StackFrame::WASM);
+ LoadConstant(LiftoffRegister(kLiftoffFrameSetupFunctionReg),
+ WasmValue(declared_function_index));
+ CallRuntimeStub(WasmCode::kWasmLiftoffFrameSetup);
+}
+
void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
int stack_param_delta) {
UseScratchRegisterScope temps(this);
@@ -283,27 +298,18 @@ void LiftoffAssembler::AlignFrameSize() {
// The frame_size includes the frame marker. The frame marker has already been
// pushed on the stack though, so we don't need to allocate memory for it
// anymore.
- int initial_frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
- int frame_size = initial_frame_size;
+ int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
static_assert(kStackSlotSize == kXRegSize,
"kStackSlotSize must equal kXRegSize");
+
// The stack pointer is required to be quadword aligned.
// Misalignment will cause a stack alignment fault.
- frame_size = RoundUp(frame_size, kQuadWordSizeInBytes);
- if (!IsImmAddSub(frame_size)) {
- // Round the stack to a page to try to fit a add/sub immediate.
- frame_size = RoundUp(frame_size, 0x1000);
- if (!IsImmAddSub(frame_size)) {
- // Stack greater than 4M! Because this is a quite improbable case, we
- // just fallback to TurboFan.
- bailout(kOtherReason, "Stack too big");
- return;
- }
- }
- if (frame_size > initial_frame_size) {
- // Record the padding, as it is needed for GC offsets later.
- max_used_spill_offset_ += (frame_size - initial_frame_size);
+ int misalignment = frame_size % kQuadWordSizeInBytes;
+ if (misalignment) {
+ int padding = kQuadWordSizeInBytes - misalignment;
+ frame_size += padding;
+ max_used_spill_offset_ += padding;
}
}
@@ -313,11 +319,15 @@ void LiftoffAssembler::PatchPrepareStackFrame(
// pushed as part of frame construction, so we don't need to allocate memory
// for them anymore.
int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
+ // The frame setup builtin also pushes the feedback vector, and an unused
+ // slot for alignment.
+ if (v8_flags.wasm_speculative_inlining) {
+ frame_size = std::max(frame_size - 2 * kSystemPointerSize, 0);
+ }
// The stack pointer is required to be quadword aligned.
// Misalignment will cause a stack alignment fault.
DCHECK_EQ(frame_size, RoundUp(frame_size, kQuadWordSizeInBytes));
- DCHECK(IsImmAddSub(frame_size));
PatchingAssembler patching_assembler(AssemblerOptions{},
buffer_start_ + offset, 1);
@@ -325,6 +335,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(
if (V8_LIKELY(frame_size < 4 * KB)) {
// This is the standard case for small frames: just subtract from SP and be
// done with it.
+ DCHECK(IsImmAddSub(frame_size));
patching_assembler.PatchSubSp(frame_size);
return;
}
@@ -1580,7 +1591,12 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
case kRtt:
DCHECK(rhs.is_valid());
DCHECK(liftoff_cond == kEqual || liftoff_cond == kUnequal);
- V8_FALLTHROUGH;
+#if defined(V8_COMPRESS_POINTERS)
+ Cmp(lhs.W(), rhs.W());
+#else
+ Cmp(lhs.X(), rhs.X());
+#endif
+ break;
case kI64:
if (rhs.is_valid()) {
Cmp(lhs.X(), rhs.X());
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index fad96ab52e..3fcbf73976 100644
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -205,6 +205,18 @@ int LiftoffAssembler::PrepareStackFrame() {
return offset;
}
+void LiftoffAssembler::CallFrameSetupStub(int declared_function_index) {
+ // TODO(jkummerow): Enable this check when we have C++20.
+ // static_assert(std::find(std::begin(wasm::kGpParamRegisters),
+ // std::end(wasm::kGpParamRegisters),
+ // kLiftoffFrameSetupFunctionReg) ==
+ // std::end(wasm::kGpParamRegisters));
+
+ LoadConstant(LiftoffRegister(kLiftoffFrameSetupFunctionReg),
+ WasmValue(declared_function_index));
+ CallRuntimeStub(WasmCode::kWasmLiftoffFrameSetup);
+}
+
void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
int stack_param_delta) {
// Push the return address and frame pointer to complete the stack frame.
@@ -234,6 +246,10 @@ void LiftoffAssembler::PatchPrepareStackFrame(
// pushed as part of frame construction, so we don't need to allocate memory
// for them anymore.
int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
+ // The frame setup builtin also pushes the feedback vector.
+ if (v8_flags.wasm_speculative_inlining) {
+ frame_size -= kSystemPointerSize;
+ }
DCHECK_EQ(0, frame_size % kSystemPointerSize);
// We can't run out of space when patching, just pass anything big enough to
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h b/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
index ae074df6f9..8a2881441d 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
@@ -21,6 +21,9 @@ constexpr RegList kLiftoffAssemblerGpCacheRegs = {eax, ecx, edx, esi, edi};
constexpr DoubleRegList kLiftoffAssemblerFpCacheRegs = {xmm0, xmm1, xmm2, xmm3,
xmm4, xmm5, xmm6};
+// For the "WasmLiftoffFrameSetup" builtin.
+constexpr Register kLiftoffFrameSetupFunctionReg = edi;
+
#elif V8_TARGET_ARCH_X64
// r10: kScratchRegister (MacroAssembler)
@@ -33,6 +36,9 @@ constexpr RegList kLiftoffAssemblerGpCacheRegs = {rax, rcx, rdx, rbx, rsi,
constexpr DoubleRegList kLiftoffAssemblerFpCacheRegs = {xmm0, xmm1, xmm2, xmm3,
xmm4, xmm5, xmm6, xmm7};
+// For the "WasmLiftoffFrameSetup" builtin.
+constexpr Register kLiftoffFrameSetupFunctionReg = r12;
+
#elif V8_TARGET_ARCH_MIPS
constexpr RegList kLiftoffAssemblerGpCacheRegs = {a0, a1, a2, a3, t0, t1, t2,
@@ -49,6 +55,9 @@ constexpr RegList kLiftoffAssemblerGpCacheRegs = {a0, a1, a2, a3, a4, a5, a6,
constexpr DoubleRegList kLiftoffAssemblerFpCacheRegs = {
f0, f2, f4, f6, f8, f10, f12, f14, f16, f18, f20, f22, f24, f26};
+// For the "WasmLiftoffFrameSetup" builtin.
+constexpr Register kLiftoffFrameSetupFunctionReg = t0;
+
#elif V8_TARGET_ARCH_LOONG64
// t6-t8 and s3-s4: scratch registers, s6: root
@@ -61,6 +70,9 @@ constexpr DoubleRegList kLiftoffAssemblerFpCacheRegs = {
f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14,
f15, f16, f17, f18, f19, f20, f21, f22, f23, f24, f25, f26, f27, f28};
+// For the "WasmLiftoffFrameSetup" builtin.
+constexpr Register kLiftoffFrameSetupFunctionReg = t0;
+
#elif V8_TARGET_ARCH_ARM
// r10: root, r11: fp, r12: ip, r13: sp, r14: lr, r15: pc.
@@ -71,6 +83,9 @@ constexpr RegList kLiftoffAssemblerGpCacheRegs = {r0, r1, r2, r3, r4,
constexpr DoubleRegList kLiftoffAssemblerFpCacheRegs = {
d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12};
+// For the "WasmLiftoffFrameSetup" builtin.
+constexpr Register kLiftoffFrameSetupFunctionReg = r4;
+
#elif V8_TARGET_ARCH_ARM64
// x16: ip0, x17: ip1, x18: platform register, x26: root, x28: base, x29: fp,
@@ -84,6 +99,9 @@ constexpr DoubleRegList kLiftoffAssemblerFpCacheRegs = {
d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12, d13, d14,
d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29};
+// For the "WasmLiftoffFrameSetup" builtin.
+constexpr Register kLiftoffFrameSetupFunctionReg = x8;
+
#elif V8_TARGET_ARCH_S390X
constexpr RegList kLiftoffAssemblerGpCacheRegs = {r2, r3, r4, r5,
@@ -92,14 +110,20 @@ constexpr RegList kLiftoffAssemblerGpCacheRegs = {r2, r3, r4, r5,
constexpr DoubleRegList kLiftoffAssemblerFpCacheRegs = {
d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12};
+// For the "WasmLiftoffFrameSetup" builtin.
+constexpr Register kLiftoffFrameSetupFunctionReg = r7;
+
#elif V8_TARGET_ARCH_PPC64
-constexpr RegList kLiftoffAssemblerGpCacheRegs = {r3, r4, r5, r6, r7,
- r8, r9, r10, r11, cp};
+constexpr RegList kLiftoffAssemblerGpCacheRegs = {r3, r4, r5, r6, r7, r8,
+ r9, r10, r11, r15, cp};
constexpr DoubleRegList kLiftoffAssemblerFpCacheRegs = {
d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12};
+// For the "WasmLiftoffFrameSetup" builtin.
+constexpr Register kLiftoffFrameSetupFunctionReg = r15;
+
#elif V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64
// Any change of kLiftoffAssemblerGpCacheRegs also need to update
// kPushedGpRegs in frame-constants-riscv.h
@@ -113,6 +137,9 @@ constexpr RegList kLiftoffAssemblerGpCacheRegs = {a0, a1, a2, a3, a4, a5,
constexpr DoubleRegList kLiftoffAssemblerFpCacheRegs = {
ft1, ft2, ft3, ft4, ft5, ft6, ft7, fa0, fa1, fa2,
fa3, fa4, fa5, fa6, fa7, ft8, ft9, ft10, ft11};
+
+// For the "WasmLiftoffFrameSetup" builtin.
+constexpr Register kLiftoffFrameSetupFunctionReg = t0;
#else
constexpr RegList kLiftoffAssemblerGpCacheRegs = RegList::FromBits(0xff);
@@ -121,6 +148,13 @@ constexpr DoubleRegList kLiftoffAssemblerFpCacheRegs =
DoubleRegList::FromBits(0xff);
#endif
+
+static_assert(kLiftoffFrameSetupFunctionReg != kWasmInstanceRegister);
+static_assert(kLiftoffFrameSetupFunctionReg != kRootRegister);
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+static_assert(kLiftoffFrameSetupFunctionReg != kPtrComprCageBaseRegister);
+#endif
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h
index 189509e724..4218a232d5 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h
@@ -752,6 +752,7 @@ class LiftoffAssembler : public TurboAssembler {
// which can later be patched (via {PatchPrepareStackFrame)} when the size of
// the frame is known.
inline int PrepareStackFrame();
+ inline void CallFrameSetupStub(int declared_function_index);
inline void PrepareTailCall(int num_callee_stack_params,
int stack_param_delta);
inline void AlignFrameSize();
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index 88e2da86cd..9443ea7db1 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -138,12 +138,6 @@ compiler::CallDescriptor* GetLoweredCallDescriptor(
: call_desc;
}
-constexpr LiftoffRegList GetGpParamRegisters() {
- LiftoffRegList registers;
- for (auto reg : kGpParamRegisters) registers.set(reg);
- return registers;
-}
-
constexpr LiftoffCondition GetCompareCondition(WasmOpcode opcode) {
switch (opcode) {
case kExprI32Eq:
@@ -346,10 +340,9 @@ void CheckBailoutAllowed(LiftoffBailoutReason reason, const char* detail,
class LiftoffCompiler {
public:
- // TODO(clemensb): Make this a template parameter.
- static constexpr Decoder::ValidateFlag validate = Decoder::kBooleanValidation;
+ using ValidationTag = Decoder::BooleanValidationTag;
- using Value = ValueBase<validate>;
+ using Value = ValueBase<ValidationTag>;
struct ElseState {
MovableLabel label;
@@ -364,7 +357,7 @@ class LiftoffCompiler {
bool in_handler = false;
};
- struct Control : public ControlBase<Value, validate> {
+ struct Control : public ControlBase<Value, ValidationTag> {
std::unique_ptr<ElseState> else_state;
LiftoffAssembler::CacheState label_state;
MovableLabel label;
@@ -379,7 +372,7 @@ class LiftoffCompiler {
: ControlBase(std::forward<Args>(args)...) {}
};
- using FullDecoder = WasmFullDecoder<validate, LiftoffCompiler>;
+ using FullDecoder = WasmFullDecoder<ValidationTag, LiftoffCompiler>;
using ValueKindSig = LiftoffAssembler::ValueKindSig;
class MostlySmallValueKindSig : public Signature<ValueKind> {
@@ -881,7 +874,15 @@ class LiftoffCompiler {
__ CodeEntry();
- __ EnterFrame(StackFrame::WASM);
+ if (v8_flags.wasm_speculative_inlining) {
+ CODE_COMMENT("frame setup");
+ int declared_func_index =
+ func_index_ - env_->module->num_imported_functions;
+ DCHECK_GE(declared_func_index, 0);
+ __ CallFrameSetupStub(declared_func_index);
+ } else {
+ __ EnterFrame(StackFrame::WASM);
+ }
__ set_has_frame(true);
pc_offset_stack_frame_construction_ = __ PrepareStackFrame();
// {PrepareStackFrame} is the first platform-specific assembler method.
@@ -901,23 +902,7 @@ class LiftoffCompiler {
.AsRegister()));
USE(kInstanceParameterIndex);
__ cache_state()->SetInstanceCacheRegister(kWasmInstanceRegister);
- // Load the feedback vector and cache it in a stack slot.
- constexpr LiftoffRegList kGpParamRegisters = GetGpParamRegisters();
- if (v8_flags.wasm_speculative_inlining) {
- CODE_COMMENT("load feedback vector");
- int declared_func_index =
- func_index_ - env_->module->num_imported_functions;
- DCHECK_GE(declared_func_index, 0);
- LiftoffRegList pinned = kGpParamRegisters;
- LiftoffRegister tmp = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- __ LoadTaggedPointerFromInstance(
- tmp.gp(), kWasmInstanceRegister,
- WASM_INSTANCE_OBJECT_FIELD_OFFSET(FeedbackVectors));
- __ LoadTaggedPointer(tmp.gp(), tmp.gp(), no_reg,
- wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(
- declared_func_index));
- __ Spill(liftoff::kFeedbackVectorOffset, tmp, kPointerKind);
- }
+
if (for_debugging_) __ ResetOSRTarget();
if (num_params) {
@@ -1213,7 +1198,7 @@ class LiftoffCompiler {
SLOW_DCHECK(__ ValidateCacheState());
CODE_COMMENT(WasmOpcodes::OpcodeName(
WasmOpcodes::IsPrefixOpcode(opcode)
- ? decoder->read_prefixed_opcode<Decoder::kFullValidation>(
+ ? decoder->read_prefixed_opcode<Decoder::FullValidationTag>(
decoder->pc())
: opcode));
}
@@ -1293,9 +1278,8 @@ class LiftoffCompiler {
return LiftoffRegister(kReturnRegister0);
}
- void CatchException(FullDecoder* decoder,
- const TagIndexImmediate<validate>& imm, Control* block,
- base::Vector<Value> values) {
+ void CatchException(FullDecoder* decoder, const TagIndexImmediate& imm,
+ Control* block, base::Vector<Value> values) {
DCHECK(block->is_try_catch());
__ emit_jump(block->label.get());
@@ -1842,54 +1826,27 @@ class LiftoffCompiler {
LiftoffRegList pinned;
LiftoffRegister ref = pinned.set(__ PopToRegister());
LiftoffRegister null = __ GetUnusedRegister(kGpReg, pinned);
- LoadNullValue(null.gp(), pinned);
+ LoadNullValueForCompare(null.gp(), pinned);
// Prefer to overwrite one of the input registers with the result
// of the comparison.
LiftoffRegister dst = __ GetUnusedRegister(kGpReg, {ref, null}, {});
+#if defined(V8_COMPRESS_POINTERS)
+ // As the value in the {null} register is only the tagged pointer part,
+ // we may only compare 32 bits, not the full pointer size.
+ __ emit_i32_set_cond(opcode == kExprRefIsNull ? kEqual : kUnequal,
+ dst.gp(), ref.gp(), null.gp());
+#else
__ emit_ptrsize_set_cond(opcode == kExprRefIsNull ? kEqual : kUnequal,
dst.gp(), ref, null);
+#endif
__ PushRegister(kI32, dst);
return;
}
case kExprExternInternalize:
- if (!v8_flags.wasm_gc_js_interop) {
- LiftoffRegList pinned;
- LiftoffRegister context_reg =
- pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- LOAD_TAGGED_PTR_INSTANCE_FIELD(context_reg.gp(), NativeContext,
- pinned);
- LiftoffAssembler::VarState& extern_value =
- __ cache_state()->stack_state.back();
-
- LiftoffAssembler::VarState context(kPointerKind, context_reg, 0);
-
- CallRuntimeStub(
- WasmCode::kWasmExternInternalize,
- MakeSig::Returns(kPointerKind).Params(kPointerKind, kPointerKind),
- {extern_value, context}, decoder->position());
- __ DropValues(1);
- __ PushRegister(kRefNull, LiftoffRegister(kReturnRegister0));
- }
+ // TODO(7748): Canonicalize heap numbers.
return;
case kExprExternExternalize:
- if (!v8_flags.wasm_gc_js_interop) {
- LiftoffRegList pinned;
- LiftoffRegister context_reg =
- pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- LOAD_TAGGED_PTR_INSTANCE_FIELD(context_reg.gp(), NativeContext,
- pinned);
- LiftoffAssembler::VarState& value =
- __ cache_state()->stack_state.back();
-
- LiftoffAssembler::VarState context(kPointerKind, context_reg, 0);
-
- CallRuntimeStub(
- WasmCode::kWasmExternExternalize,
- MakeSig::Returns(kPointerKind).Params(kPointerKind, kPointerKind),
- {value, context}, decoder->position());
- __ DropValues(1);
- __ PushRegister(kRefNull, LiftoffRegister(kReturnRegister0));
- }
+ // This is a no-op.
return;
default:
UNREACHABLE();
@@ -2399,7 +2356,7 @@ class LiftoffCompiler {
}
void LocalGet(FullDecoder* decoder, Value* result,
- const IndexImmediate<validate>& imm) {
+ const IndexImmediate& imm) {
auto local_slot = __ cache_state()->stack_state[imm.index];
__ cache_state()->stack_state.emplace_back(
local_slot.kind(), __ NextSpillOffset(local_slot.kind()));
@@ -2463,12 +2420,12 @@ class LiftoffCompiler {
}
void LocalSet(FullDecoder* decoder, const Value& value,
- const IndexImmediate<validate>& imm) {
+ const IndexImmediate& imm) {
LocalSet(imm.index, false);
}
void LocalTee(FullDecoder* decoder, const Value& value, Value* result,
- const IndexImmediate<validate>& imm) {
+ const IndexImmediate& imm) {
LocalSet(imm.index, true);
}
@@ -2525,7 +2482,7 @@ class LiftoffCompiler {
}
void GlobalGet(FullDecoder* decoder, Value* result,
- const GlobalIndexImmediate<validate>& imm) {
+ const GlobalIndexImmediate& imm) {
const auto* global = &env_->module->globals[imm.index];
ValueKind kind = global->type.kind();
if (!CheckSupportedType(decoder, kind, "global")) {
@@ -2567,7 +2524,7 @@ class LiftoffCompiler {
}
void GlobalSet(FullDecoder* decoder, const Value&,
- const GlobalIndexImmediate<validate>& imm) {
+ const GlobalIndexImmediate& imm) {
auto* global = &env_->module->globals[imm.index];
ValueKind kind = global->type.kind();
if (!CheckSupportedType(decoder, kind, "global")) {
@@ -2607,7 +2564,7 @@ class LiftoffCompiler {
}
void TableGet(FullDecoder* decoder, const Value&, Value*,
- const IndexImmediate<validate>& imm) {
+ const IndexImmediate& imm) {
LiftoffRegList pinned;
LiftoffRegister table_index_reg =
@@ -2634,7 +2591,7 @@ class LiftoffCompiler {
}
void TableSet(FullDecoder* decoder, const Value&, const Value&,
- const IndexImmediate<validate>& imm) {
+ const IndexImmediate& imm) {
LiftoffRegList pinned;
LiftoffRegister table_index_reg =
@@ -2679,21 +2636,30 @@ class LiftoffCompiler {
__ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
}
- void AssertNull(FullDecoder* decoder, const Value& arg, Value* result) {
+ void AssertNullImpl(FullDecoder* decoder, const Value& arg, Value* result,
+ LiftoffCondition cond) {
LiftoffRegList pinned;
LiftoffRegister obj = pinned.set(__ PopToRegister(pinned));
Label* trap_label =
AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapIllegalCast);
LiftoffRegister null = __ GetUnusedRegister(kGpReg, pinned);
- LoadNullValue(null.gp(), pinned);
+ LoadNullValueForCompare(null.gp(), pinned);
{
FREEZE_STATE(trapping);
- __ emit_cond_jump(kUnequal, trap_label, kRefNull, obj.gp(), null.gp(),
+ __ emit_cond_jump(cond, trap_label, kRefNull, obj.gp(), null.gp(),
trapping);
}
__ PushRegister(kRefNull, obj);
}
+ void AssertNull(FullDecoder* decoder, const Value& arg, Value* result) {
+ AssertNullImpl(decoder, arg, result, kUnequal);
+ }
+
+ void AssertNotNull(FullDecoder* decoder, const Value& arg, Value* result) {
+ AssertNullImpl(decoder, arg, result, kEqual);
+ }
+
void NopForTestingUnsupportedInLiftoff(FullDecoder* decoder) {
unsupported(decoder, kOtherReason, "testing opcode");
}
@@ -2827,7 +2793,7 @@ class LiftoffCompiler {
// TODO(wasm): Generate a real branch table (like TF TableSwitch).
void GenerateBrTable(FullDecoder* decoder, LiftoffRegister tmp,
LiftoffRegister value, uint32_t min, uint32_t max,
- BranchTableIterator<validate>* table_iterator,
+ BranchTableIterator<ValidationTag>* table_iterator,
std::map<uint32_t, MovableLabel>* br_targets,
Register tmp1, Register tmp2,
const FreezeCacheState& frozen) {
@@ -2855,7 +2821,7 @@ class LiftoffCompiler {
tmp1, tmp2, frozen);
}
- void BrTable(FullDecoder* decoder, const BranchTableImmediate<validate>& imm,
+ void BrTable(FullDecoder* decoder, const BranchTableImmediate& imm,
const Value& key) {
LiftoffRegList pinned;
LiftoffRegister value = pinned.set(__ PopToRegister());
@@ -2866,7 +2832,7 @@ class LiftoffCompiler {
Register tmp2 = no_reg;
if (dynamic_tiering()) {
bool need_temps = false;
- BranchTableIterator<validate> table_iterator(decoder, imm);
+ BranchTableIterator<ValidationTag> table_iterator(decoder, imm);
while (table_iterator.has_next()) {
uint32_t depth = table_iterator.next();
if (depth == decoder->control_depth() - 1 ||
@@ -2885,13 +2851,13 @@ class LiftoffCompiler {
// All targets must have the same arity (checked by validation), so
// we can just sample any of them to find that arity.
uint32_t ignored_length;
- uint32_t sample_depth = decoder->read_u32v<Decoder::kNoValidation>(
+ uint32_t sample_depth = decoder->read_u32v<Decoder::NoValidationTag>(
imm.table, &ignored_length, "first depth");
__ PrepareForBranch(decoder->control_at(sample_depth)->br_merge()->arity,
pinned);
}
- BranchTableIterator<validate> table_iterator(decoder, imm);
+ BranchTableIterator<ValidationTag> table_iterator(decoder, imm);
std::map<uint32_t, MovableLabel> br_targets;
if (imm.table_count > 0) {
@@ -3178,8 +3144,8 @@ class LiftoffCompiler {
}
void LoadMem(FullDecoder* decoder, LoadType type,
- const MemoryAccessImmediate<validate>& imm,
- const Value& index_val, Value* result) {
+ const MemoryAccessImmediate& imm, const Value& index_val,
+ Value* result) {
ValueKind kind = type.value_type().kind();
DCHECK_EQ(kind, result->type.kind());
if (!CheckSupportedType(decoder, kind, "load")) return;
@@ -3234,8 +3200,8 @@ class LiftoffCompiler {
void LoadTransform(FullDecoder* decoder, LoadType type,
LoadTransformationKind transform,
- const MemoryAccessImmediate<validate>& imm,
- const Value& index_val, Value* result) {
+ const MemoryAccessImmediate& imm, const Value& index_val,
+ Value* result) {
// LoadTransform requires SIMD support, so check for it here. If
// unsupported, bailout and let TurboFan lower the code.
if (!CheckSupportedType(decoder, kS128, "LoadTransform")) {
@@ -3278,7 +3244,7 @@ class LiftoffCompiler {
}
void LoadLane(FullDecoder* decoder, LoadType type, const Value& _value,
- const Value& _index, const MemoryAccessImmediate<validate>& imm,
+ const Value& _index, const MemoryAccessImmediate& imm,
const uint8_t laneidx, Value* _result) {
if (!CheckSupportedType(decoder, kS128, "LoadLane")) {
return;
@@ -3314,8 +3280,8 @@ class LiftoffCompiler {
}
void StoreMem(FullDecoder* decoder, StoreType type,
- const MemoryAccessImmediate<validate>& imm,
- const Value& index_val, const Value& value_val) {
+ const MemoryAccessImmediate& imm, const Value& index_val,
+ const Value& value_val) {
ValueKind kind = type.value_type().kind();
DCHECK_EQ(kind, value_val.type.kind());
if (!CheckSupportedType(decoder, kind, "store")) return;
@@ -3365,8 +3331,8 @@ class LiftoffCompiler {
}
void StoreLane(FullDecoder* decoder, StoreType type,
- const MemoryAccessImmediate<validate>& imm,
- const Value& _index, const Value& _value, const uint8_t lane) {
+ const MemoryAccessImmediate& imm, const Value& _index,
+ const Value& _value, const uint8_t lane) {
if (!CheckSupportedType(decoder, kS128, "StoreLane")) return;
LiftoffRegList pinned;
LiftoffRegister value = pinned.set(__ PopToRegister());
@@ -3547,15 +3513,14 @@ class LiftoffCompiler {
enum TailCall : bool { kTailCall = true, kNoTailCall = false };
- void CallDirect(FullDecoder* decoder,
- const CallFunctionImmediate<validate>& imm,
+ void CallDirect(FullDecoder* decoder, const CallFunctionImmediate& imm,
const Value args[], Value[]) {
CallDirect(decoder, imm, args, nullptr, kNoTailCall);
}
void CallIndirect(FullDecoder* decoder, const Value& index_val,
- const CallIndirectImmediate<validate>& imm,
- const Value args[], Value returns[]) {
+ const CallIndirectImmediate& imm, const Value args[],
+ Value returns[]) {
CallIndirect(decoder, index_val, imm, kNoTailCall);
}
@@ -3565,15 +3530,14 @@ class LiftoffCompiler {
CallRef(decoder, func_ref.type, sig, kNoTailCall);
}
- void ReturnCall(FullDecoder* decoder,
- const CallFunctionImmediate<validate>& imm,
+ void ReturnCall(FullDecoder* decoder, const CallFunctionImmediate& imm,
const Value args[]) {
TierupCheckOnTailCall(decoder);
CallDirect(decoder, imm, args, nullptr, kTailCall);
}
void ReturnCallIndirect(FullDecoder* decoder, const Value& index_val,
- const CallIndirectImmediate<validate>& imm,
+ const CallIndirectImmediate& imm,
const Value args[]) {
TierupCheckOnTailCall(decoder);
CallIndirect(decoder, index_val, imm, kTailCall);
@@ -3603,7 +3567,7 @@ class LiftoffCompiler {
Register tmp = NeedsTierupCheck(decoder, depth)
? pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp()
: no_reg;
- LoadNullValue(null, pinned);
+ LoadNullValueForCompare(null, pinned);
{
FREEZE_STATE(frozen);
__ emit_cond_jump(kUnequal, &cont_false, ref_object.type.kind(), ref.gp(),
@@ -3633,7 +3597,7 @@ class LiftoffCompiler {
Register tmp = NeedsTierupCheck(decoder, depth)
? pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp()
: no_reg;
- LoadNullValue(null, pinned);
+ LoadNullValueForCompare(null, pinned);
{
FREEZE_STATE(frozen);
__ emit_cond_jump(kEqual, &cont_false, ref_object.type.kind(), ref.gp(),
@@ -3757,7 +3721,7 @@ class LiftoffCompiler {
RegClass dst_rc = reg_class_for(kS128);
LiftoffRegister dst = __ GetUnusedRegister(dst_rc, {});
(asm_.*emit_fn)(dst, src1, src2, src3);
- __ PushRegister(kS128, src1);
+ __ PushRegister(kS128, dst);
return;
}
@@ -4333,8 +4297,7 @@ class LiftoffCompiler {
}
template <ValueKind src_kind, ValueKind result_kind, typename EmitFn>
- void EmitSimdExtractLaneOp(EmitFn fn,
- const SimdLaneImmediate<validate>& imm) {
+ void EmitSimdExtractLaneOp(EmitFn fn, const SimdLaneImmediate& imm) {
static constexpr RegClass src_rc = reg_class_for(src_kind);
static constexpr RegClass result_rc = reg_class_for(result_kind);
LiftoffRegister lhs = __ PopToRegister();
@@ -4346,8 +4309,7 @@ class LiftoffCompiler {
}
template <ValueKind src2_kind, typename EmitFn>
- void EmitSimdReplaceLaneOp(EmitFn fn,
- const SimdLaneImmediate<validate>& imm) {
+ void EmitSimdReplaceLaneOp(EmitFn fn, const SimdLaneImmediate& imm) {
static constexpr RegClass src1_rc = reg_class_for(kS128);
static constexpr RegClass src2_rc = reg_class_for(src2_kind);
static constexpr RegClass result_rc = reg_class_for(kS128);
@@ -4371,7 +4333,7 @@ class LiftoffCompiler {
}
void SimdLaneOp(FullDecoder* decoder, WasmOpcode opcode,
- const SimdLaneImmediate<validate>& imm,
+ const SimdLaneImmediate& imm,
const base::Vector<Value> inputs, Value* result) {
if (!CpuFeatures::SupportsWasmSimd128()) {
return unsupported(decoder, kSimd, "simd");
@@ -4415,7 +4377,7 @@ class LiftoffCompiler {
}
}
- void S128Const(FullDecoder* decoder, const Simd128Immediate<validate>& imm,
+ void S128Const(FullDecoder* decoder, const Simd128Immediate& imm,
Value* result) {
if (!CpuFeatures::SupportsWasmSimd128()) {
return unsupported(decoder, kSimd, "simd");
@@ -4437,8 +4399,7 @@ class LiftoffCompiler {
__ PushRegister(kS128, dst);
}
- void Simd8x16ShuffleOp(FullDecoder* decoder,
- const Simd128Immediate<validate>& imm,
+ void Simd8x16ShuffleOp(FullDecoder* decoder, const Simd128Immediate& imm,
const Value& input0, const Value& input1,
Value* result) {
if (!CpuFeatures::SupportsWasmSimd128()) {
@@ -4711,7 +4672,7 @@ class LiftoffCompiler {
__ DropValues(1);
}
- void Throw(FullDecoder* decoder, const TagIndexImmediate<validate>& imm,
+ void Throw(FullDecoder* decoder, const TagIndexImmediate& imm,
const base::Vector<Value>& /* args */) {
LiftoffRegList pinned;
@@ -4762,13 +4723,15 @@ class LiftoffCompiler {
LiftoffAssembler::VarState{kPointerKind, values_array, 0}},
decoder->position());
+ RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
+
int pc_offset = __ pc_offset();
MaybeOSR();
EmitLandingPad(decoder, pc_offset);
}
void AtomicStoreMem(FullDecoder* decoder, StoreType type,
- const MemoryAccessImmediate<validate>& imm) {
+ const MemoryAccessImmediate& imm) {
LiftoffRegList pinned;
LiftoffRegister value = pinned.set(__ PopToRegister());
LiftoffRegister full_index = __ PopToRegister(pinned);
@@ -4791,7 +4754,7 @@ class LiftoffCompiler {
}
void AtomicLoadMem(FullDecoder* decoder, LoadType type,
- const MemoryAccessImmediate<validate>& imm) {
+ const MemoryAccessImmediate& imm) {
ValueKind kind = type.value_type().kind();
LiftoffRegister full_index = __ PopToRegister();
Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
@@ -4815,7 +4778,7 @@ class LiftoffCompiler {
}
void AtomicBinop(FullDecoder* decoder, StoreType type,
- const MemoryAccessImmediate<validate>& imm,
+ const MemoryAccessImmediate& imm,
void (LiftoffAssembler::*emit_fn)(Register, Register,
uintptr_t, LiftoffRegister,
LiftoffRegister,
@@ -4856,7 +4819,7 @@ class LiftoffCompiler {
}
void AtomicCompareExchange(FullDecoder* decoder, StoreType type,
- const MemoryAccessImmediate<validate>& imm) {
+ const MemoryAccessImmediate& imm) {
#ifdef V8_TARGET_ARCH_IA32
// On ia32 we don't have enough registers to first pop all the values off
// the stack and then start with the code generation. Instead we do the
@@ -4941,45 +4904,81 @@ class LiftoffCompiler {
}
void AtomicWait(FullDecoder* decoder, ValueKind kind,
- const MemoryAccessImmediate<validate>& imm) {
- LiftoffRegister full_index = __ PeekToRegister(2, {});
- Register index_reg =
- BoundsCheckMem(decoder, value_kind_size(kind), imm.offset, full_index,
- {}, kDoForceCheck);
- if (index_reg == no_reg) return;
- LiftoffRegList pinned{index_reg};
- AlignmentCheckMem(decoder, value_kind_size(kind), imm.offset, index_reg,
- pinned);
+ const MemoryAccessImmediate& imm) {
+ {
+ LiftoffRegList pinned;
+ LiftoffRegister full_index = __ PeekToRegister(2, pinned);
+ Register index_reg =
+ BoundsCheckMem(decoder, value_kind_size(kind), imm.offset, full_index,
+ pinned, kDoForceCheck);
+ if (index_reg == no_reg) return;
+ pinned.set(index_reg);
+ AlignmentCheckMem(decoder, value_kind_size(kind), imm.offset, index_reg,
+ pinned);
+
+ uintptr_t offset = imm.offset;
+ Register index_plus_offset =
+ __ cache_state()->is_used(LiftoffRegister(index_reg))
+ ? pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp()
+ : index_reg;
+ // TODO(clemensb): Skip this if memory is 64 bit.
+ __ emit_ptrsize_zeroextend_i32(index_plus_offset, index_reg);
+ if (offset) {
+ __ emit_ptrsize_addi(index_plus_offset, index_plus_offset, offset);
+ }
- uintptr_t offset = imm.offset;
- Register index_plus_offset =
- __ cache_state()->is_used(LiftoffRegister(index_reg))
- ? pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp()
- : index_reg;
- // TODO(clemensb): Skip this if memory is 64 bit.
- __ emit_ptrsize_zeroextend_i32(index_plus_offset, index_reg);
- if (offset) {
- __ emit_ptrsize_addi(index_plus_offset, index_plus_offset, offset);
+ LiftoffAssembler::VarState index =
+ __ cache_state()->stack_state.end()[-3];
+
+ // We replace the index on the value stack with the `index_plus_offset`
+ // calculated above. Thereby the BigInt allocation below does not
+ // overwrite the calculated value by accident.
+ if (full_index != LiftoffRegister(index_plus_offset)) {
+ __ cache_state()->dec_used(full_index);
+ __ cache_state()->inc_used(LiftoffRegister(index_plus_offset));
+ }
+ index.MakeRegister(LiftoffRegister(index_plus_offset));
+ }
+ {
+ // Convert the top value of the stack (the timeout) from I64 to a BigInt,
+ // which we can then pass to the atomic.wait builtin.
+ LiftoffAssembler::VarState i64_timeout =
+ __ cache_state()->stack_state.back();
+ CallRuntimeStub(
+ kNeedI64RegPair ? WasmCode::kI32PairToBigInt : WasmCode::kI64ToBigInt,
+ MakeSig::Returns(kRef).Params(kI64), {i64_timeout},
+ decoder->position());
+ __ DropValues(1);
+ // We put the result on the value stack so that it gets preserved across
+ // a potential GC that may get triggered by the BigInt allocation below.
+ __ PushRegister(kRef, LiftoffRegister(kReturnRegister0));
}
+ Register expected_reg = no_reg;
+ if (kind == kI32) {
+ expected_reg = __ PeekToRegister(1, {}).gp();
+ } else {
+ LiftoffAssembler::VarState i64_expected =
+ __ cache_state()->stack_state.end()[-2];
+ CallRuntimeStub(
+ kNeedI64RegPair ? WasmCode::kI32PairToBigInt : WasmCode::kI64ToBigInt,
+ MakeSig::Returns(kRef).Params(kI64), {i64_expected},
+ decoder->position());
+ expected_reg = kReturnRegister0;
+ }
+ LiftoffRegister expected(expected_reg);
+
LiftoffAssembler::VarState timeout =
__ cache_state()->stack_state.end()[-1];
- LiftoffAssembler::VarState expected_value =
- __ cache_state()->stack_state.end()[-2];
+ LiftoffAssembler::VarState expected_value(kRef, expected, 0);
LiftoffAssembler::VarState index = __ cache_state()->stack_state.end()[-3];
- // We have to set the correct register for the index.
- index.MakeRegister(LiftoffRegister(index_plus_offset));
-
- static constexpr WasmCode::RuntimeStubId kTargets[2][2]{
- // 64 bit systems (kNeedI64RegPair == false):
- {WasmCode::kWasmI64AtomicWait64, WasmCode::kWasmI32AtomicWait64},
- // 32 bit systems (kNeedI64RegPair == true):
- {WasmCode::kWasmI64AtomicWait32, WasmCode::kWasmI32AtomicWait32}};
- auto target = kTargets[kNeedI64RegPair][kind == kI32];
+ auto target = kind == kI32 ? WasmCode::kWasmI32AtomicWait
+ : WasmCode::kWasmI64AtomicWait;
- CallRuntimeStub(target, MakeSig::Params(kPointerKind, kind, kI64),
- {index, expected_value, timeout}, decoder->position());
+ CallRuntimeStub(
+ target, MakeSig::Params(kPointerKind, kind == kI32 ? kI32 : kRef, kRef),
+ {index, expected_value, timeout}, decoder->position());
// Pop parameters from the value stack.
__ DropValues(3);
@@ -4988,8 +4987,7 @@ class LiftoffCompiler {
__ PushRegister(kI32, LiftoffRegister(kReturnRegister0));
}
- void AtomicNotify(FullDecoder* decoder,
- const MemoryAccessImmediate<validate>& imm) {
+ void AtomicNotify(FullDecoder* decoder, const MemoryAccessImmediate& imm) {
LiftoffRegister full_index = __ PeekToRegister(1, {});
Register index_reg = BoundsCheckMem(decoder, kInt32Size, imm.offset,
full_index, {}, kDoForceCheck);
@@ -5095,8 +5093,8 @@ class LiftoffCompiler {
V(I64AtomicCompareExchange32U, kI64Store32)
void AtomicOp(FullDecoder* decoder, WasmOpcode opcode,
- base::Vector<Value> args,
- const MemoryAccessImmediate<validate>& imm, Value* result) {
+ base::Vector<Value> args, const MemoryAccessImmediate& imm,
+ Value* result) {
switch (opcode) {
#define ATOMIC_STORE_OP(name, type) \
case wasm::kExpr##name: \
@@ -5197,9 +5195,8 @@ class LiftoffCompiler {
return reg.low();
}
- void MemoryInit(FullDecoder* decoder,
- const MemoryInitImmediate<validate>& imm, const Value&,
- const Value&, const Value&) {
+ void MemoryInit(FullDecoder* decoder, const MemoryInitImmediate& imm,
+ const Value&, const Value&, const Value&) {
Register mem_offsets_high_word = no_reg;
LiftoffRegList pinned;
LiftoffRegister size = pinned.set(__ PopToRegister(pinned));
@@ -5243,7 +5240,7 @@ class LiftoffCompiler {
__ emit_cond_jump(kEqual, trap_label, kI32, result.gp(), no_reg, trapping);
}
- void DataDrop(FullDecoder* decoder, const IndexImmediate<validate>& imm) {
+ void DataDrop(FullDecoder* decoder, const IndexImmediate& imm) {
LiftoffRegList pinned;
Register seg_size_array =
@@ -5265,9 +5262,8 @@ class LiftoffCompiler {
pinned);
}
- void MemoryCopy(FullDecoder* decoder,
- const MemoryCopyImmediate<validate>& imm, const Value&,
- const Value&, const Value&) {
+ void MemoryCopy(FullDecoder* decoder, const MemoryCopyImmediate& imm,
+ const Value&, const Value&, const Value&) {
Register mem_offsets_high_word = no_reg;
LiftoffRegList pinned;
LiftoffRegister size = pinned.set(
@@ -5306,9 +5302,8 @@ class LiftoffCompiler {
__ emit_cond_jump(kEqual, trap_label, kI32, result.gp(), no_reg, trapping);
}
- void MemoryFill(FullDecoder* decoder,
- const MemoryIndexImmediate<validate>& imm, const Value&,
- const Value&, const Value&) {
+ void MemoryFill(FullDecoder* decoder, const MemoryIndexImmediate& imm,
+ const Value&, const Value&, const Value&) {
Register mem_offsets_high_word = no_reg;
LiftoffRegList pinned;
LiftoffRegister size = pinned.set(
@@ -5352,7 +5347,7 @@ class LiftoffCompiler {
__ LoadConstant(reg, WasmValue{static_cast<smi_type>(smi_value)});
}
- void TableInit(FullDecoder* decoder, const TableInitImmediate<validate>& imm,
+ void TableInit(FullDecoder* decoder, const TableInitImmediate& imm,
base::Vector<Value> args) {
LiftoffRegList pinned;
LiftoffRegister table_index_reg =
@@ -5382,7 +5377,7 @@ class LiftoffCompiler {
RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
}
- void ElemDrop(FullDecoder* decoder, const IndexImmediate<validate>& imm) {
+ void ElemDrop(FullDecoder* decoder, const IndexImmediate& imm) {
LiftoffRegList pinned;
Register dropped_elem_segments =
pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
@@ -5404,7 +5399,7 @@ class LiftoffCompiler {
StoreType::kI32Store8, pinned);
}
- void TableCopy(FullDecoder* decoder, const TableCopyImmediate<validate>& imm,
+ void TableCopy(FullDecoder* decoder, const TableCopyImmediate& imm,
base::Vector<Value> args) {
LiftoffRegList pinned;
@@ -5435,8 +5430,8 @@ class LiftoffCompiler {
RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
}
- void TableGrow(FullDecoder* decoder, const IndexImmediate<validate>& imm,
- const Value&, const Value&, Value* result) {
+ void TableGrow(FullDecoder* decoder, const IndexImmediate& imm, const Value&,
+ const Value&, Value* result) {
LiftoffRegList pinned;
LiftoffRegister table_index_reg =
@@ -5460,8 +5455,7 @@ class LiftoffCompiler {
__ PushRegister(kI32, LiftoffRegister(kReturnRegister0));
}
- void TableSize(FullDecoder* decoder, const IndexImmediate<validate>& imm,
- Value*) {
+ void TableSize(FullDecoder* decoder, const IndexImmediate& imm, Value*) {
// We have to look up instance->tables[table_index].length.
LiftoffRegList pinned;
@@ -5486,8 +5480,8 @@ class LiftoffCompiler {
__ PushRegister(kI32, LiftoffRegister(result));
}
- void TableFill(FullDecoder* decoder, const IndexImmediate<validate>& imm,
- const Value&, const Value&, const Value&) {
+ void TableFill(FullDecoder* decoder, const IndexImmediate& imm, const Value&,
+ const Value&, const Value&) {
LiftoffRegList pinned;
LiftoffRegister table_index_reg =
@@ -5509,9 +5503,8 @@ class LiftoffCompiler {
RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
}
- void StructNew(FullDecoder* decoder,
- const StructIndexImmediate<validate>& imm, const Value& rtt,
- bool initial_values_on_stack) {
+ void StructNew(FullDecoder* decoder, const StructIndexImmediate& imm,
+ const Value& rtt, bool initial_values_on_stack) {
LiftoffRegList pinned;
LiftoffRegister instance_size =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
@@ -5554,21 +5547,18 @@ class LiftoffCompiler {
__ PushRegister(kRef, obj);
}
- void StructNew(FullDecoder* decoder,
- const StructIndexImmediate<validate>& imm, const Value& rtt,
- const Value args[], Value* result) {
+ void StructNew(FullDecoder* decoder, const StructIndexImmediate& imm,
+ const Value& rtt, const Value args[], Value* result) {
StructNew(decoder, imm, rtt, true);
}
- void StructNewDefault(FullDecoder* decoder,
- const StructIndexImmediate<validate>& imm,
+ void StructNewDefault(FullDecoder* decoder, const StructIndexImmediate& imm,
const Value& rtt, Value* result) {
StructNew(decoder, imm, rtt, false);
}
void StructGet(FullDecoder* decoder, const Value& struct_obj,
- const FieldImmediate<validate>& field, bool is_signed,
- Value* result) {
+ const FieldImmediate& field, bool is_signed, Value* result) {
const StructType* struct_type = field.struct_imm.struct_type;
ValueKind field_kind = struct_type->field(field.field_imm.index).kind();
if (!CheckSupportedType(decoder, field_kind, "field load")) return;
@@ -5584,8 +5574,7 @@ class LiftoffCompiler {
}
void StructSet(FullDecoder* decoder, const Value& struct_obj,
- const FieldImmediate<validate>& field,
- const Value& field_value) {
+ const FieldImmediate& field, const Value& field_value) {
const StructType* struct_type = field.struct_imm.struct_type;
ValueKind field_kind = struct_type->field(field.field_imm.index).kind();
int offset = StructFieldOffset(struct_type, field.field_imm.index);
@@ -5596,7 +5585,7 @@ class LiftoffCompiler {
StoreObjectField(obj.gp(), no_reg, offset, value, pinned, field_kind);
}
- void ArrayNew(FullDecoder* decoder, const ArrayIndexImmediate<validate>& imm,
+ void ArrayNew(FullDecoder* decoder, const ArrayIndexImmediate& imm,
ValueKind rtt_kind, bool initial_value_on_stack) {
// Max length check.
{
@@ -5673,21 +5662,20 @@ class LiftoffCompiler {
__ PushRegister(kRef, obj);
}
- void ArrayNew(FullDecoder* decoder, const ArrayIndexImmediate<validate>& imm,
+ void ArrayNew(FullDecoder* decoder, const ArrayIndexImmediate& imm,
const Value& length_value, const Value& initial_value,
const Value& rtt, Value* result) {
ArrayNew(decoder, imm, rtt.type.kind(), true);
}
- void ArrayNewDefault(FullDecoder* decoder,
- const ArrayIndexImmediate<validate>& imm,
+ void ArrayNewDefault(FullDecoder* decoder, const ArrayIndexImmediate& imm,
const Value& length, const Value& rtt, Value* result) {
ArrayNew(decoder, imm, rtt.type.kind(), false);
}
void ArrayGet(FullDecoder* decoder, const Value& array_obj,
- const ArrayIndexImmediate<validate>& imm,
- const Value& index_val, bool is_signed, Value* result) {
+ const ArrayIndexImmediate& imm, const Value& index_val,
+ bool is_signed, Value* result) {
LiftoffRegList pinned;
LiftoffRegister index = pinned.set(__ PopToModifiableRegister(pinned));
LiftoffRegister array = pinned.set(__ PopToRegister(pinned));
@@ -5708,8 +5696,8 @@ class LiftoffCompiler {
}
void ArraySet(FullDecoder* decoder, const Value& array_obj,
- const ArrayIndexImmediate<validate>& imm,
- const Value& index_val, const Value& value_val) {
+ const ArrayIndexImmediate& imm, const Value& index_val,
+ const Value& value_val) {
LiftoffRegList pinned;
LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
DCHECK_EQ(reg_class_for(imm.array_type->element_type().kind()),
@@ -5758,8 +5746,7 @@ class LiftoffCompiler {
__ cache_state()->stack_state.pop_back(5);
}
- void ArrayNewFixed(FullDecoder* decoder,
- const ArrayIndexImmediate<validate>& imm,
+ void ArrayNewFixed(FullDecoder* decoder, const ArrayIndexImmediate& imm,
const base::Vector<Value>& elements, const Value& rtt,
Value* result) {
ValueKind rtt_kind = rtt.type.kind();
@@ -5810,8 +5797,8 @@ class LiftoffCompiler {
}
void ArrayNewSegment(FullDecoder* decoder,
- const ArrayIndexImmediate<validate>& array_imm,
- const IndexImmediate<validate>& data_segment,
+ const ArrayIndexImmediate& array_imm,
+ const IndexImmediate& data_segment,
const Value& /* offset */, const Value& /* length */,
const Value& /* rtt */, Value* /* result */) {
LiftoffRegList pinned;
@@ -5906,8 +5893,12 @@ class LiftoffCompiler {
NullSucceeds null_succeeds,
const FreezeCacheState& frozen) {
Label match;
+ bool is_cast_from_any = obj_type.is_reference_to(HeapType::kAny);
- if (obj_type.is_nullable()) {
+ // Skip the null check if casting from any and not {null_succeeds}.
+ // In that case the instance type check will identify null as not being a
+ // wasm object and fail.
+ if (obj_type.is_nullable() && (!is_cast_from_any || null_succeeds)) {
__ emit_cond_jump(kEqual, null_succeeds ? &match : no_match,
obj_type.kind(), obj_reg, scratch_null, frozen);
}
@@ -5934,6 +5925,17 @@ class LiftoffCompiler {
// rtt.
__ emit_cond_jump(kEqual, &match, rtt_type.kind(), tmp1, rtt_reg, frozen);
+ if (is_cast_from_any) {
+ // Check for map being a map for a wasm object (struct, array, func).
+ __ Load(LiftoffRegister(scratch2), tmp1, no_reg,
+ wasm::ObjectAccess::ToTagged(Map::kInstanceTypeOffset),
+ LoadType::kI32Load16U);
+ __ emit_i32_subi(scratch2, scratch2, FIRST_WASM_OBJECT_TYPE);
+ __ emit_i32_cond_jumpi(kUnsignedGreaterThan, no_match, scratch2,
+ LAST_WASM_OBJECT_TYPE - FIRST_WASM_OBJECT_TYPE,
+ frozen);
+ }
+
// Constant-time subtyping check: load exactly one candidate RTT from the
// supertypes list.
// Step 1: load the WasmTypeInfo into {tmp1}.
@@ -5971,7 +5973,7 @@ class LiftoffCompiler {
Register scratch_null =
pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
LiftoffRegister result = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- if (obj.type.is_nullable()) LoadNullValue(scratch_null, pinned);
+ if (obj.type.is_nullable()) LoadNullValueForCompare(scratch_null, pinned);
{
FREEZE_STATE(frozen);
@@ -5997,8 +5999,8 @@ class LiftoffCompiler {
return RefIsEq(decoder, obj, result_val, null_succeeds);
case HeapType::kI31:
return RefIsI31(decoder, obj, result_val, null_succeeds);
- case HeapType::kData:
- return RefIsData(decoder, obj, result_val, null_succeeds);
+ case HeapType::kStruct:
+ return RefIsStruct(decoder, obj, result_val, null_succeeds);
case HeapType::kArray:
return RefIsArray(decoder, obj, result_val, null_succeeds);
case HeapType::kAny:
@@ -6010,7 +6012,7 @@ class LiftoffCompiler {
}
void RefCast(FullDecoder* decoder, const Value& obj, const Value& rtt,
- Value* result) {
+ Value* result, bool null_succeeds) {
if (v8_flags.experimental_wasm_assume_ref_cast_succeeds) {
// Just drop the rtt.
__ DropValues(1);
@@ -6024,17 +6026,37 @@ class LiftoffCompiler {
Register scratch_null =
pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
Register scratch2 = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- if (obj.type.is_nullable()) LoadNullValue(scratch_null, pinned);
+ if (obj.type.is_nullable()) LoadNullValueForCompare(scratch_null, pinned);
{
FREEZE_STATE(frozen);
+ NullSucceeds on_null = null_succeeds ? kNullSucceeds : kNullFails;
SubtypeCheck(decoder->module_, obj_reg.gp(), obj.type, rtt_reg.gp(),
- rtt.type, scratch_null, scratch2, trap_label, kNullSucceeds,
+ rtt.type, scratch_null, scratch2, trap_label, on_null,
frozen);
}
__ PushRegister(obj.type.kind(), obj_reg);
}
+ void RefCastAbstract(FullDecoder* decoder, const Value& obj, HeapType type,
+ Value* result_val, bool null_succeeds) {
+ switch (type.representation()) {
+ case HeapType::kEq:
+ return RefAsEq(decoder, obj, result_val, null_succeeds);
+ case HeapType::kI31:
+ return RefAsI31(decoder, obj, result_val, null_succeeds);
+ case HeapType::kStruct:
+ return RefAsStruct(decoder, obj, result_val, null_succeeds);
+ case HeapType::kArray:
+ return RefAsArray(decoder, obj, result_val, null_succeeds);
+ case HeapType::kAny:
+ // Any may never need a cast as it is either implicitly convertible or
+ // never convertible for any given type.
+ default:
+ UNREACHABLE();
+ }
+ }
+
void BrOnCast(FullDecoder* decoder, const Value& obj, const Value& rtt,
Value* /* result_on_branch */, uint32_t depth) {
// Avoid having sequences of branches do duplicate work.
@@ -6139,15 +6161,19 @@ class LiftoffCompiler {
}
// Abstract type checkers. They all fall through on match.
- void DataCheck(TypeCheck& check, const FreezeCacheState& frozen) {
+ void StructCheck(TypeCheck& check, const FreezeCacheState& frozen) {
LoadInstanceType(check, frozen, check.no_match);
- // We're going to test a range of WasmObject instance types with a single
- // unsigned comparison.
- Register tmp = check.instance_type();
- __ emit_i32_subi(tmp, tmp, FIRST_WASM_OBJECT_TYPE);
- __ emit_i32_cond_jumpi(kUnsignedGreaterThan, check.no_match, tmp,
- LAST_WASM_OBJECT_TYPE - FIRST_WASM_OBJECT_TYPE,
- frozen);
+ LiftoffRegister instance_type(check.instance_type());
+ if (!v8_flags.wasm_gc_structref_as_dataref) {
+ __ emit_i32_cond_jumpi(kUnequal, check.no_match, check.instance_type(),
+ WASM_STRUCT_TYPE, frozen);
+ } else {
+ Register tmp = check.instance_type();
+ __ emit_i32_subi(tmp, tmp, FIRST_WASM_OBJECT_TYPE);
+ __ emit_i32_cond_jumpi(kUnsignedGreaterThan, check.no_match, tmp,
+ LAST_WASM_OBJECT_TYPE - FIRST_WASM_OBJECT_TYPE,
+ frozen);
+ }
}
void ArrayCheck(TypeCheck& check, const FreezeCacheState& frozen) {
@@ -6206,9 +6232,9 @@ class LiftoffCompiler {
__ PushRegister(kI32, result);
}
- void RefIsData(FullDecoder* /* decoder */, const Value& object,
- Value* /* result_val */, bool null_succeeds = false) {
- AbstractTypeCheck<&LiftoffCompiler::DataCheck>(object, null_succeeds);
+ void RefIsStruct(FullDecoder* /* decoder */, const Value& object,
+ Value* /* result_val */, bool null_succeeds = false) {
+ AbstractTypeCheck<&LiftoffCompiler::StructCheck>(object, null_succeeds);
}
void RefIsEq(FullDecoder* /* decoder */, const Value& object,
@@ -6228,27 +6254,44 @@ class LiftoffCompiler {
template <TypeChecker type_checker>
void AbstractTypeCast(const Value& object, FullDecoder* decoder,
- ValueKind result_kind) {
- bool null_succeeds = false; // TODO(mliedtke): Use parameter.
+ ValueKind result_kind, bool null_succeeds = false) {
+ Label match;
Label* trap_label =
AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapIllegalCast);
TypeCheck check(object.type, trap_label, null_succeeds);
Initialize(check, kPeek);
FREEZE_STATE(frozen);
+
+ if (null_succeeds && check.obj_type.is_nullable()) {
+ __ emit_cond_jump(kEqual, &match, kRefNull, check.obj_reg,
+ check.null_reg(), frozen);
+ }
(this->*type_checker)(check, frozen);
+ __ bind(&match);
}
- void RefAsData(FullDecoder* decoder, const Value& object,
- Value* /* result */) {
- AbstractTypeCast<&LiftoffCompiler::DataCheck>(object, decoder, kRef);
+ void RefAsEq(FullDecoder* decoder, const Value& object, Value* result,
+ bool null_succeeds = false) {
+ AbstractTypeCast<&LiftoffCompiler::EqCheck>(object, decoder, kRef,
+ null_succeeds);
}
- void RefAsI31(FullDecoder* decoder, const Value& object, Value* result) {
- AbstractTypeCast<&LiftoffCompiler::I31Check>(object, decoder, kRef);
+ void RefAsStruct(FullDecoder* decoder, const Value& object,
+ Value* /* result */, bool null_succeeds = false) {
+ AbstractTypeCast<&LiftoffCompiler::StructCheck>(object, decoder, kRef,
+ null_succeeds);
}
- void RefAsArray(FullDecoder* decoder, const Value& object, Value* result) {
- AbstractTypeCast<&LiftoffCompiler::ArrayCheck>(object, decoder, kRef);
+ void RefAsI31(FullDecoder* decoder, const Value& object, Value* result,
+ bool null_succeeds = false) {
+ AbstractTypeCast<&LiftoffCompiler::I31Check>(object, decoder, kRef,
+ null_succeeds);
+ }
+
+ void RefAsArray(FullDecoder* decoder, const Value& object, Value* result,
+ bool null_succeeds = false) {
+ AbstractTypeCast<&LiftoffCompiler::ArrayCheck>(object, decoder, kRef,
+ null_succeeds);
}
template <TypeChecker type_checker>
@@ -6294,9 +6337,9 @@ class LiftoffCompiler {
__ bind(&end);
}
- void BrOnData(FullDecoder* decoder, const Value& object,
- Value* /* value_on_branch */, uint32_t br_depth) {
- BrOnAbstractType<&LiftoffCompiler::DataCheck>(object, decoder, br_depth);
+ void BrOnStruct(FullDecoder* decoder, const Value& object,
+ Value* /* value_on_branch */, uint32_t br_depth) {
+ BrOnAbstractType<&LiftoffCompiler::StructCheck>(object, decoder, br_depth);
}
void BrOnI31(FullDecoder* decoder, const Value& object,
@@ -6309,9 +6352,10 @@ class LiftoffCompiler {
BrOnAbstractType<&LiftoffCompiler::ArrayCheck>(object, decoder, br_depth);
}
- void BrOnNonData(FullDecoder* decoder, const Value& object,
- Value* /* value_on_branch */, uint32_t br_depth) {
- BrOnNonAbstractType<&LiftoffCompiler::DataCheck>(object, decoder, br_depth);
+ void BrOnNonStruct(FullDecoder* decoder, const Value& object,
+ Value* /* value_on_branch */, uint32_t br_depth) {
+ BrOnNonAbstractType<&LiftoffCompiler::StructCheck>(object, decoder,
+ br_depth);
}
void BrOnNonI31(FullDecoder* decoder, const Value& object,
@@ -6325,8 +6369,7 @@ class LiftoffCompiler {
br_depth);
}
- void StringNewWtf8(FullDecoder* decoder,
- const MemoryIndexImmediate<validate>& imm,
+ void StringNewWtf8(FullDecoder* decoder, const MemoryIndexImmediate& imm,
const unibrow::Utf8Variant variant, const Value& offset,
const Value& size, Value* result) {
LiftoffRegList pinned;
@@ -6390,8 +6433,7 @@ class LiftoffCompiler {
__ PushRegister(kRef, result_reg);
}
- void StringNewWtf16(FullDecoder* decoder,
- const MemoryIndexImmediate<validate>& imm,
+ void StringNewWtf16(FullDecoder* decoder, const MemoryIndexImmediate& imm,
const Value& offset, const Value& size, Value* result) {
LiftoffRegList pinned;
LiftoffRegister memory_reg =
@@ -6439,8 +6481,8 @@ class LiftoffCompiler {
__ PushRegister(kRef, result_reg);
}
- void StringConst(FullDecoder* decoder,
- const StringConstImmediate<validate>& imm, Value* result) {
+ void StringConst(FullDecoder* decoder, const StringConstImmediate& imm,
+ Value* result) {
LiftoffRegList pinned;
LiftoffRegister index_reg =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
@@ -6500,8 +6542,7 @@ class LiftoffCompiler {
__ PushRegister(kI32, value);
}
- void StringEncodeWtf8(FullDecoder* decoder,
- const MemoryIndexImmediate<validate>& imm,
+ void StringEncodeWtf8(FullDecoder* decoder, const MemoryIndexImmediate& imm,
const unibrow::Utf8Variant variant, const Value& str,
const Value& offset, Value* result) {
LiftoffRegList pinned;
@@ -6581,8 +6622,7 @@ class LiftoffCompiler {
__ PushRegister(kI32, result_reg);
}
- void StringEncodeWtf16(FullDecoder* decoder,
- const MemoryIndexImmediate<validate>& imm,
+ void StringEncodeWtf16(FullDecoder* decoder, const MemoryIndexImmediate& imm,
const Value& str, const Value& offset, Value* result) {
LiftoffRegList pinned;
@@ -6687,7 +6727,7 @@ class LiftoffCompiler {
LiftoffRegister null = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
bool check_for_null = a.type.is_nullable() || b.type.is_nullable();
if (check_for_null) {
- LoadNullValue(null.gp(), pinned);
+ LoadNullValueForCompare(null.gp(), pinned);
}
FREEZE_STATE(frozen);
@@ -6803,7 +6843,7 @@ class LiftoffCompiler {
}
void StringViewWtf8Encode(FullDecoder* decoder,
- const MemoryIndexImmediate<validate>& imm,
+ const MemoryIndexImmediate& imm,
const unibrow::Utf8Variant variant,
const Value& view, const Value& addr,
const Value& pos, const Value& bytes,
@@ -6910,10 +6950,9 @@ class LiftoffCompiler {
}
void StringViewWtf16Encode(FullDecoder* decoder,
- const MemoryIndexImmediate<validate>& imm,
- const Value& view, const Value& offset,
- const Value& pos, const Value& codeunits,
- Value* result) {
+ const MemoryIndexImmediate& imm, const Value& view,
+ const Value& offset, const Value& pos,
+ const Value& codeunits, Value* result) {
LiftoffRegList pinned;
LiftoffAssembler::VarState& codeunits_var =
@@ -7096,8 +7135,7 @@ class LiftoffCompiler {
}
private:
- void CallDirect(FullDecoder* decoder,
- const CallFunctionImmediate<validate>& imm,
+ void CallDirect(FullDecoder* decoder, const CallFunctionImmediate& imm,
const Value args[], Value returns[], TailCall tail_call) {
MostlySmallValueKindSig sig(compilation_zone_, imm.sig);
for (ValueKind ret : sig.returns()) {
@@ -7184,8 +7222,7 @@ class LiftoffCompiler {
}
void CallIndirect(FullDecoder* decoder, const Value& index_val,
- const CallIndirectImmediate<validate>& imm,
- TailCall tail_call) {
+ const CallIndirectImmediate& imm, TailCall tail_call) {
MostlySmallValueKindSig sig(compilation_zone_, imm.sig);
for (ValueKind ret : sig.returns()) {
if (!CheckSupportedType(decoder, ret, "return")) return;
@@ -7436,11 +7473,29 @@ class LiftoffCompiler {
}
void LoadNullValue(Register null, LiftoffRegList pinned) {
+ // TODO(13449): Use root register instead of isolate to retrieve null.
LOAD_INSTANCE_FIELD(null, IsolateRoot, kSystemPointerSize, pinned);
__ LoadFullPointer(null, null,
IsolateData::root_slot_offset(RootIndex::kNullValue));
}
+ // Stores the null value representation in the passed register.
+ // If pointer compression is active, only the compressed tagged pointer
+ // will be stored. Any operations with this register therefore must
+ // not compare this against 64 bits using quadword instructions.
+ void LoadNullValueForCompare(Register null, LiftoffRegList pinned) {
+ Tagged_t static_null =
+ wasm::GetWasmEngine()->compressed_null_value_or_zero();
+ if (static_null != 0) {
+ // static_null is only set for builds with pointer compression.
+ DCHECK_LE(static_null, std::numeric_limits<uint32_t>::max());
+ __ LoadConstant(LiftoffRegister(null),
+ WasmValue(static_cast<uint32_t>(static_null)));
+ } else {
+ LoadNullValue(null, pinned);
+ }
+ }
+
void LoadExceptionSymbol(Register dst, LiftoffRegList pinned,
RootIndex root_index) {
LOAD_INSTANCE_FIELD(dst, IsolateRoot, kSystemPointerSize, pinned);
@@ -7450,12 +7505,13 @@ class LiftoffCompiler {
void MaybeEmitNullCheck(FullDecoder* decoder, Register object,
LiftoffRegList pinned, ValueType type) {
- if (v8_flags.experimental_wasm_skip_null_checks || !type.is_nullable())
+ if (v8_flags.experimental_wasm_skip_null_checks || !type.is_nullable()) {
return;
+ }
Label* trap_label =
AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapNullDereference);
LiftoffRegister null = __ GetUnusedRegister(kGpReg, pinned);
- LoadNullValue(null.gp(), pinned);
+ LoadNullValueForCompare(null.gp(), pinned);
FREEZE_STATE(trapping);
__ emit_cond_jump(LiftoffCondition::kEqual, trap_label, kRefNull, object,
null.gp(), trapping);
@@ -7734,7 +7790,7 @@ WasmCompilationResult ExecuteLiftoffCompilation(
compiler_options.for_debugging == kForDebugging);
WasmFeatures unused_detected_features;
- WasmFullDecoder<Decoder::kBooleanValidation, LiftoffCompiler> decoder(
+ WasmFullDecoder<Decoder::BooleanValidationTag, LiftoffCompiler> decoder(
&zone, env->module, env->enabled_features,
compiler_options.detected_features ? compiler_options.detected_features
: &unused_detected_features,
@@ -7787,6 +7843,8 @@ WasmCompilationResult ExecuteLiftoffCompilation(
}
DCHECK(result.succeeded());
+ env->module->set_function_validated(compiler_options.func_index);
+
return result;
}
@@ -7812,7 +7870,7 @@ std::unique_ptr<DebugSideTable> GenerateLiftoffDebugSideTable(
code->for_debugging() == kForStepping
? base::ArrayVector(kSteppingBreakpoints)
: base::Vector<const int>{};
- WasmFullDecoder<Decoder::kBooleanValidation, LiftoffCompiler> decoder(
+ WasmFullDecoder<Decoder::BooleanValidationTag, LiftoffCompiler> decoder(
&zone, native_module->module(), env.enabled_features, &detected,
func_body, call_descriptor, &env, &zone,
NewAssemblerBuffer(AssemblerBase::kDefaultBufferSize),
diff --git a/deps/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h b/deps/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h
index 5c2d1b1e55..4dc1086fca 100644
--- a/deps/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h
+++ b/deps/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h
@@ -58,10 +58,9 @@ inline constexpr Condition ToCondition(LiftoffCondition liftoff_cond) {
// -1 | StackFrame::WASM |
// -2 | instance |
// -3 | feedback vector|
-// -4 | tiering budget |
// -----+--------------------+---------------------------
-// -5 | slot 0 | ^
-// -6 | slot 1 | |
+// -4 | slot 0 | ^
+// -5 | slot 1 | |
// | | Frame slots
// | | |
// | | v
@@ -193,6 +192,22 @@ int LiftoffAssembler::PrepareStackFrame() {
return offset;
}
+void LiftoffAssembler::CallFrameSetupStub(int declared_function_index) {
+ // TODO(jkummerow): Enable this check when we have C++20.
+ // static_assert(std::find(std::begin(wasm::kGpParamRegisters),
+ // std::end(wasm::kGpParamRegisters),
+ // kLiftoffFrameSetupFunctionReg) ==
+ // std::end(wasm::kGpParamRegisters));
+
+ // On LOONG64, we must push at least {ra} before calling the stub, otherwise
+ // it would get clobbered with no possibility to recover it. So just set
+ // up the frame here.
+ EnterFrame(StackFrame::WASM);
+ LoadConstant(LiftoffRegister(kLiftoffFrameSetupFunctionReg),
+ WasmValue(declared_function_index));
+ CallRuntimeStub(WasmCode::kWasmLiftoffFrameSetup);
+}
+
void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
int stack_param_delta) {
UseScratchRegisterScope temps(this);
@@ -224,6 +239,10 @@ void LiftoffAssembler::PatchPrepareStackFrame(
// pushed as part of frame construction, so we don't need to allocate memory
// for them anymore.
int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
+ // The frame setup builtin also pushes the feedback vector.
+ if (v8_flags.wasm_speculative_inlining) {
+ frame_size -= kSystemPointerSize;
+ }
// We can't run out of space, just pass anything big enough to not cause the
// assembler to try to grow the buffer.
diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index a145c54da9..f3b78299c8 100644
--- a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -58,10 +58,9 @@ inline constexpr Condition ToCondition(LiftoffCondition liftoff_cond) {
// -1 | StackFrame::WASM |
// -2 | instance |
// -3 | feedback vector|
-// -4 | tiering budget |
// -----+--------------------+---------------------------
-// -5 | slot 0 | ^
-// -6 | slot 1 | |
+// -4 | slot 0 | ^
+// -5 | slot 1 | |
// | | Frame slots
// | | |
// | | v
@@ -311,6 +310,22 @@ int LiftoffAssembler::PrepareStackFrame() {
return offset;
}
+void LiftoffAssembler::CallFrameSetupStub(int declared_function_index) {
+ // TODO(jkummerow): Enable this check when we have C++20.
+ // static_assert(std::find(std::begin(wasm::kGpParamRegisters),
+ // std::end(wasm::kGpParamRegisters),
+ // kLiftoffFrameSetupFunctionReg) ==
+ // std::end(wasm::kGpParamRegisters));
+
+ // On MIPS64, we must push at least {ra} before calling the stub, otherwise
+ // it would get clobbered with no possibility to recover it. So just set
+ // up the frame here.
+ EnterFrame(StackFrame::WASM);
+ LoadConstant(LiftoffRegister(kLiftoffFrameSetupFunctionReg),
+ WasmValue(declared_function_index));
+ CallRuntimeStub(WasmCode::kWasmLiftoffFrameSetup);
+}
+
void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
int stack_param_delta) {
UseScratchRegisterScope temps(this);
@@ -342,6 +357,10 @@ void LiftoffAssembler::PatchPrepareStackFrame(
// pushed as part of frame construction, so we don't need to allocate memory
// for them anymore.
int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
+ // The frame setup builtin also pushes the feedback vector.
+ if (v8_flags.wasm_speculative_inlining) {
+ frame_size -= kSystemPointerSize;
+ }
// We can't run out of space, just pass anything big enough to not cause the
// assembler to try to grow the buffer.
diff --git a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index a24575ee3d..348db4d1af 100644
--- a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -111,6 +111,20 @@ int LiftoffAssembler::PrepareStackFrame() {
return offset;
}
+void LiftoffAssembler::CallFrameSetupStub(int declared_function_index) {
+ // TODO(jkummerow): Enable this check when we have C++20.
+ // static_assert(std::find(std::begin(wasm::kGpParamRegisters),
+ // std::end(wasm::kGpParamRegisters),
+ // kLiftoffFrameSetupFunctionReg) ==
+ // std::end(wasm::kGpParamRegisters));
+ Register scratch = ip;
+ mov(scratch, Operand(StackFrame::TypeToMarker(StackFrame::WASM)));
+ PushCommonFrame(scratch);
+ LoadConstant(LiftoffRegister(kLiftoffFrameSetupFunctionReg),
+ WasmValue(declared_function_index));
+ CallRuntimeStub(WasmCode::kWasmLiftoffFrameSetup);
+}
+
void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
int stack_param_delta) {
Register scratch = ip;
@@ -142,6 +156,10 @@ void LiftoffAssembler::PatchPrepareStackFrame(
int frame_size =
GetTotalFrameSize() -
(V8_EMBEDDED_CONSTANT_POOL_BOOL ? 3 : 2) * kSystemPointerSize;
+ // The frame setup builtin also pushes the feedback vector.
+ if (v8_flags.wasm_speculative_inlining) {
+ frame_size -= kSystemPointerSize;
+ }
Assembler patching_assembler(
AssemblerOptions{},
@@ -1644,7 +1662,20 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
case kRefNull:
case kRtt:
DCHECK(liftoff_cond == kEqual || liftoff_cond == kUnequal);
- V8_FALLTHROUGH;
+#if defined(V8_COMPRESS_POINTERS)
+ if (use_signed) {
+ CmpS32(lhs, rhs);
+ } else {
+ CmpU32(lhs, rhs);
+ }
+#else
+ if (use_signed) {
+ CmpS64(lhs, rhs);
+ } else {
+ CmpU64(lhs, rhs);
+ }
+#endif
+ break;
case kI64:
if (use_signed) {
CmpS64(lhs, rhs);
@@ -1767,64 +1798,72 @@ bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
return false;
}
-#define SIMD_BINOP_LIST(V) \
- V(f64x2_add, F64x2Add) \
- V(f64x2_sub, F64x2Sub) \
- V(f64x2_mul, F64x2Mul) \
- V(f64x2_div, F64x2Div) \
- V(f64x2_eq, F64x2Eq) \
- V(f64x2_lt, F64x2Lt) \
- V(f64x2_le, F64x2Le) \
- V(f32x4_add, F32x4Add) \
- V(f32x4_sub, F32x4Sub) \
- V(f32x4_mul, F32x4Mul) \
- V(f32x4_div, F32x4Div) \
- V(f32x4_min, F32x4Min) \
- V(f32x4_max, F32x4Max) \
- V(f32x4_eq, F32x4Eq) \
- V(f32x4_lt, F32x4Lt) \
- V(f32x4_le, F32x4Le) \
- V(i64x2_add, I64x2Add) \
- V(i64x2_sub, I64x2Sub) \
- V(i64x2_eq, I64x2Eq) \
- V(i64x2_gt_s, I64x2GtS) \
- V(i32x4_add, I32x4Add) \
- V(i32x4_sub, I32x4Sub) \
- V(i32x4_mul, I32x4Mul) \
- V(i32x4_min_s, I32x4MinS) \
- V(i32x4_min_u, I32x4MinU) \
- V(i32x4_max_s, I32x4MaxS) \
- V(i32x4_max_u, I32x4MaxU) \
- V(i32x4_eq, I32x4Eq) \
- V(i32x4_gt_s, I32x4GtS) \
- V(i32x4_gt_u, I32x4GtU) \
- V(i16x8_add, I16x8Add) \
- V(i16x8_sub, I16x8Sub) \
- V(i16x8_mul, I16x8Mul) \
- V(i16x8_min_s, I16x8MinS) \
- V(i16x8_min_u, I16x8MinU) \
- V(i16x8_max_s, I16x8MaxS) \
- V(i16x8_max_u, I16x8MaxU) \
- V(i16x8_eq, I16x8Eq) \
- V(i16x8_gt_s, I16x8GtS) \
- V(i16x8_gt_u, I16x8GtU) \
- V(i16x8_add_sat_s, I16x8AddSatS) \
- V(i16x8_sub_sat_s, I16x8SubSatS) \
- V(i16x8_add_sat_u, I16x8AddSatU) \
- V(i16x8_sub_sat_u, I16x8SubSatU) \
- V(i8x16_add, I8x16Add) \
- V(i8x16_sub, I8x16Sub) \
- V(i8x16_min_s, I8x16MinS) \
- V(i8x16_min_u, I8x16MinU) \
- V(i8x16_max_s, I8x16MaxS) \
- V(i8x16_max_u, I8x16MaxU) \
- V(i8x16_eq, I8x16Eq) \
- V(i8x16_gt_s, I8x16GtS) \
- V(i8x16_gt_u, I8x16GtU) \
- V(i8x16_add_sat_s, I8x16AddSatS) \
- V(i8x16_sub_sat_s, I8x16SubSatS) \
- V(i8x16_add_sat_u, I8x16AddSatU) \
- V(i8x16_sub_sat_u, I8x16SubSatU)
+#define SIMD_BINOP_LIST(V) \
+ V(f64x2_add, F64x2Add) \
+ V(f64x2_sub, F64x2Sub) \
+ V(f64x2_mul, F64x2Mul) \
+ V(f64x2_div, F64x2Div) \
+ V(f64x2_eq, F64x2Eq) \
+ V(f64x2_lt, F64x2Lt) \
+ V(f64x2_le, F64x2Le) \
+ V(f32x4_add, F32x4Add) \
+ V(f32x4_sub, F32x4Sub) \
+ V(f32x4_mul, F32x4Mul) \
+ V(f32x4_div, F32x4Div) \
+ V(f32x4_min, F32x4Min) \
+ V(f32x4_max, F32x4Max) \
+ V(f32x4_eq, F32x4Eq) \
+ V(f32x4_lt, F32x4Lt) \
+ V(f32x4_le, F32x4Le) \
+ V(i64x2_add, I64x2Add) \
+ V(i64x2_sub, I64x2Sub) \
+ V(i64x2_eq, I64x2Eq) \
+ V(i64x2_gt_s, I64x2GtS) \
+ V(i32x4_add, I32x4Add) \
+ V(i32x4_sub, I32x4Sub) \
+ V(i32x4_mul, I32x4Mul) \
+ V(i32x4_min_s, I32x4MinS) \
+ V(i32x4_min_u, I32x4MinU) \
+ V(i32x4_max_s, I32x4MaxS) \
+ V(i32x4_max_u, I32x4MaxU) \
+ V(i32x4_eq, I32x4Eq) \
+ V(i32x4_gt_s, I32x4GtS) \
+ V(i32x4_gt_u, I32x4GtU) \
+ V(i16x8_add, I16x8Add) \
+ V(i16x8_sub, I16x8Sub) \
+ V(i16x8_mul, I16x8Mul) \
+ V(i16x8_min_s, I16x8MinS) \
+ V(i16x8_min_u, I16x8MinU) \
+ V(i16x8_max_s, I16x8MaxS) \
+ V(i16x8_max_u, I16x8MaxU) \
+ V(i16x8_eq, I16x8Eq) \
+ V(i16x8_gt_s, I16x8GtS) \
+ V(i16x8_gt_u, I16x8GtU) \
+ V(i16x8_add_sat_s, I16x8AddSatS) \
+ V(i16x8_sub_sat_s, I16x8SubSatS) \
+ V(i16x8_add_sat_u, I16x8AddSatU) \
+ V(i16x8_sub_sat_u, I16x8SubSatU) \
+ V(i16x8_sconvert_i32x4, I16x8SConvertI32x4) \
+ V(i16x8_uconvert_i32x4, I16x8UConvertI32x4) \
+ V(i8x16_add, I8x16Add) \
+ V(i8x16_sub, I8x16Sub) \
+ V(i8x16_min_s, I8x16MinS) \
+ V(i8x16_min_u, I8x16MinU) \
+ V(i8x16_max_s, I8x16MaxS) \
+ V(i8x16_max_u, I8x16MaxU) \
+ V(i8x16_eq, I8x16Eq) \
+ V(i8x16_gt_s, I8x16GtS) \
+ V(i8x16_gt_u, I8x16GtU) \
+ V(i8x16_add_sat_s, I8x16AddSatS) \
+ V(i8x16_sub_sat_s, I8x16SubSatS) \
+ V(i8x16_add_sat_u, I8x16AddSatU) \
+ V(i8x16_sub_sat_u, I8x16SubSatU) \
+ V(i8x16_sconvert_i16x8, I8x16SConvertI16x8) \
+ V(i8x16_uconvert_i16x8, I8x16UConvertI16x8) \
+ V(s128_and, S128And) \
+ V(s128_or, S128Or) \
+ V(s128_xor, S128Xor) \
+ V(s128_and_not, S128AndNot)
#define EMIT_SIMD_BINOP(name, op) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
@@ -1835,19 +1874,35 @@ SIMD_BINOP_LIST(EMIT_SIMD_BINOP)
#undef EMIT_SIMD_BINOP
#undef SIMD_BINOP_LIST
-#define SIMD_BINOP_WITH_SCRATCH_LIST(V) \
- V(f64x2_ne, F64x2Ne) \
- V(f32x4_ne, F32x4Ne) \
- V(i64x2_ne, I64x2Ne) \
- V(i64x2_ge_s, I64x2GeS) \
- V(i32x4_ne, I32x4Ne) \
- V(i32x4_ge_s, I32x4GeS) \
- V(i32x4_ge_u, I32x4GeU) \
- V(i16x8_ne, I16x8Ne) \
- V(i16x8_ge_s, I16x8GeS) \
- V(i16x8_ge_u, I16x8GeU) \
- V(i8x16_ne, I8x16Ne) \
- V(i8x16_ge_s, I8x16GeS) \
+#define SIMD_BINOP_WITH_SCRATCH_LIST(V) \
+ V(f64x2_ne, F64x2Ne) \
+ V(f64x2_pmin, F64x2Pmin) \
+ V(f64x2_pmax, F64x2Pmax) \
+ V(f32x4_ne, F32x4Ne) \
+ V(f32x4_pmin, F32x4Pmin) \
+ V(f32x4_pmax, F32x4Pmax) \
+ V(i64x2_ne, I64x2Ne) \
+ V(i64x2_ge_s, I64x2GeS) \
+ V(i64x2_extmul_low_i32x4_s, I64x2ExtMulLowI32x4S) \
+ V(i64x2_extmul_low_i32x4_u, I64x2ExtMulLowI32x4U) \
+ V(i64x2_extmul_high_i32x4_s, I64x2ExtMulHighI32x4S) \
+ V(i64x2_extmul_high_i32x4_u, I64x2ExtMulHighI32x4U) \
+ V(i32x4_ne, I32x4Ne) \
+ V(i32x4_ge_s, I32x4GeS) \
+ V(i32x4_ge_u, I32x4GeU) \
+ V(i32x4_extmul_low_i16x8_s, I32x4ExtMulLowI16x8S) \
+ V(i32x4_extmul_low_i16x8_u, I32x4ExtMulLowI16x8U) \
+ V(i32x4_extmul_high_i16x8_s, I32x4ExtMulHighI16x8S) \
+ V(i32x4_extmul_high_i16x8_u, I32x4ExtMulHighI16x8U) \
+ V(i16x8_ne, I16x8Ne) \
+ V(i16x8_ge_s, I16x8GeS) \
+ V(i16x8_ge_u, I16x8GeU) \
+ V(i16x8_extmul_low_i8x16_s, I16x8ExtMulLowI8x16S) \
+ V(i16x8_extmul_low_i8x16_u, I16x8ExtMulLowI8x16U) \
+ V(i16x8_extmul_high_i8x16_s, I16x8ExtMulHighI8x16S) \
+ V(i16x8_extmul_high_i8x16_u, I16x8ExtMulHighI8x16U) \
+ V(i8x16_ne, I8x16Ne) \
+ V(i8x16_ge_s, I8x16GeS) \
V(i8x16_ge_u, I8x16GeU)
#define EMIT_SIMD_BINOP_WITH_SCRATCH(name, op) \
@@ -1907,22 +1962,30 @@ SIMD_SHIFT_RI_LIST(EMIT_SIMD_SHIFT_RI)
#undef EMIT_SIMD_SHIFT_RI
#undef SIMD_SHIFT_RI_LIST
-#define SIMD_UNOP_LIST(V) \
- V(f64x2_abs, F64x2Abs, , void) \
- V(f64x2_neg, F64x2Neg, , void) \
- V(f64x2_sqrt, F64x2Sqrt, , void) \
- V(f64x2_ceil, F64x2Ceil, true, bool) \
- V(f64x2_floor, F64x2Floor, true, bool) \
- V(f64x2_trunc, F64x2Trunc, true, bool) \
- V(f32x4_abs, F32x4Abs, , void) \
- V(f32x4_neg, F32x4Neg, , void) \
- V(i64x2_neg, I64x2Neg, , void) \
- V(i32x4_neg, I32x4Neg, , void) \
- V(f32x4_sqrt, F32x4Sqrt, , void) \
- V(f32x4_ceil, F32x4Ceil, true, bool) \
- V(f32x4_floor, F32x4Floor, true, bool) \
- V(f32x4_trunc, F32x4Trunc, true, bool) \
- V(i8x16_popcnt, I8x16Popcnt, , void)
+#define SIMD_UNOP_LIST(V) \
+ V(f64x2_abs, F64x2Abs, , void) \
+ V(f64x2_neg, F64x2Neg, , void) \
+ V(f64x2_sqrt, F64x2Sqrt, , void) \
+ V(f64x2_ceil, F64x2Ceil, true, bool) \
+ V(f64x2_floor, F64x2Floor, true, bool) \
+ V(f64x2_trunc, F64x2Trunc, true, bool) \
+ V(f32x4_abs, F32x4Abs, , void) \
+ V(f32x4_neg, F32x4Neg, , void) \
+ V(f32x4_sqrt, F32x4Sqrt, , void) \
+ V(f32x4_ceil, F32x4Ceil, true, bool) \
+ V(f32x4_floor, F32x4Floor, true, bool) \
+ V(f32x4_trunc, F32x4Trunc, true, bool) \
+ V(i64x2_neg, I64x2Neg, , void) \
+ V(f64x2_convert_low_i32x4_s, F64x2ConvertLowI32x4S, , void) \
+ V(i64x2_sconvert_i32x4_low, I64x2SConvertI32x4Low, , void) \
+ V(i64x2_sconvert_i32x4_high, I64x2SConvertI32x4High, , void) \
+ V(i32x4_neg, I32x4Neg, , void) \
+ V(i32x4_sconvert_i16x8_low, I32x4SConvertI16x8Low, , void) \
+ V(i32x4_sconvert_i16x8_high, I32x4SConvertI16x8High, , void) \
+ V(i16x8_sconvert_i8x16_low, I16x8SConvertI8x16Low, , void) \
+ V(i16x8_sconvert_i8x16_high, I16x8SConvertI8x16High, , void) \
+ V(i8x16_popcnt, I8x16Popcnt, , void) \
+ V(s128_not, S128Not, , void)
#define EMIT_SIMD_UNOP(name, op, return_val, return_type) \
return_type LiftoffAssembler::emit_##name(LiftoffRegister dst, \
@@ -1934,6 +1997,38 @@ SIMD_UNOP_LIST(EMIT_SIMD_UNOP)
#undef EMIT_SIMD_UNOP
#undef SIMD_UNOP_LIST
+#define SIMD_UNOP_WITH_SCRATCH_LIST(V) \
+ V(i64x2_abs, I64x2Abs, , void) \
+ V(i32x4_abs, I32x4Abs, , void) \
+ V(i16x8_abs, I16x8Abs, , void) \
+ V(i16x8_neg, I16x8Neg, , void) \
+ V(i8x16_abs, I8x16Abs, , void) \
+ V(i8x16_neg, I8x16Neg, , void)
+
+#define EMIT_SIMD_UNOP_WITH_SCRATCH(name, op, return_val, return_type) \
+ return_type LiftoffAssembler::emit_##name(LiftoffRegister dst, \
+ LiftoffRegister src) { \
+ op(dst.fp().toSimd(), src.fp().toSimd(), kScratchSimd128Reg); \
+ return return_val; \
+ }
+SIMD_UNOP_WITH_SCRATCH_LIST(EMIT_SIMD_UNOP_WITH_SCRATCH)
+#undef EMIT_SIMD_UNOP_WITH_SCRATCH
+#undef SIMD_UNOP_WITH_SCRATCH_LIST
+
+#define SIMD_ALL_TRUE_LIST(V) \
+ V(i64x2_alltrue, I64x2AllTrue) \
+ V(i32x4_alltrue, I32x4AllTrue) \
+ V(i16x8_alltrue, I16x8AllTrue) \
+ V(i8x16_alltrue, I8x16AllTrue)
+#define EMIT_SIMD_ALL_TRUE(name, op) \
+ void LiftoffAssembler::emit_##name(LiftoffRegister dst, \
+ LiftoffRegister src) { \
+ op(dst.gp(), src.fp().toSimd(), r0, ip, kScratchSimd128Reg); \
+ }
+SIMD_ALL_TRUE_LIST(EMIT_SIMD_ALL_TRUE)
+#undef EMIT_SIMD_ALL_TRUE
+#undef SIMD_ALL_TRUE_LIST
+
void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
LiftoffRegister src) {
F64x2Splat(dst.fp().toSimd(), src.fp(), r0);
@@ -2068,36 +2163,6 @@ void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst,
imm_lane_idx, kScratchSimd128Reg);
}
-void LiftoffAssembler::emit_i64x2_abs(LiftoffRegister dst,
- LiftoffRegister src) {
- I64x2Abs(dst.fp().toSimd(), src.fp().toSimd(), kScratchSimd128Reg);
-}
-
-void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
- LiftoffRegister src) {
- I32x4Abs(dst.fp().toSimd(), src.fp().toSimd(), kScratchSimd128Reg);
-}
-
-void LiftoffAssembler::emit_i16x8_abs(LiftoffRegister dst,
- LiftoffRegister src) {
- I16x8Abs(dst.fp().toSimd(), src.fp().toSimd(), kScratchSimd128Reg);
-}
-
-void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
- LiftoffRegister src) {
- I16x8Neg(dst.fp().toSimd(), src.fp().toSimd(), kScratchSimd128Reg);
-}
-
-void LiftoffAssembler::emit_i8x16_abs(LiftoffRegister dst,
- LiftoffRegister src) {
- I8x16Abs(dst.fp().toSimd(), src.fp().toSimd(), kScratchSimd128Reg);
-}
-
-void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
- LiftoffRegister src) {
- I8x16Neg(dst.fp().toSimd(), src.fp().toSimd(), kScratchSimd128Reg);
-}
-
void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
// TODO(miladfarca): Make use of UseScratchRegisterScope.
@@ -2199,16 +2264,6 @@ void LiftoffAssembler::emit_s128_relaxed_laneselect(LiftoffRegister dst,
bailout(kRelaxedSimd, "emit_s128_relaxed_laneselect");
}
-void LiftoffAssembler::emit_f64x2_pmin(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "pmin unimplemented");
-}
-
-void LiftoffAssembler::emit_f64x2_pmax(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "pmax unimplemented");
-}
-
void LiftoffAssembler::emit_f64x2_relaxed_min(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -2221,14 +2276,10 @@ void LiftoffAssembler::emit_f64x2_relaxed_max(LiftoffRegister dst,
bailout(kRelaxedSimd, "emit_f64x2_relaxed_max");
}
-void LiftoffAssembler::emit_f64x2_convert_low_i32x4_s(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "f64x2.convert_low_i32x4_s");
-}
-
void LiftoffAssembler::emit_f64x2_convert_low_i32x4_u(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "f64x2.convert_low_i32x4_u");
+ F64x2ConvertLowI32x4U(dst.fp().toSimd(), src.fp().toSimd(), r0,
+ kScratchSimd128Reg);
}
void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
@@ -2248,73 +2299,21 @@ void LiftoffAssembler::emit_f32x4_relaxed_max(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_f32x4_relaxed_max");
}
-void LiftoffAssembler::emit_f32x4_pmin(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "pmin unimplemented");
-}
-
-void LiftoffAssembler::emit_f32x4_pmax(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "pmax unimplemented");
-}
-
-void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "i64x2_alltrue");
-}
-
-void LiftoffAssembler::emit_i64x2_extmul_low_i32x4_s(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2) {
- bailout(kSimd, "i64x2_extmul_low_i32x4_s unsupported");
-}
-
-void LiftoffAssembler::emit_i64x2_extmul_low_i32x4_u(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2) {
- bailout(kSimd, "i64x2_extmul_low_i32x4_u unsupported");
-}
-
-void LiftoffAssembler::emit_i64x2_extmul_high_i32x4_s(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2) {
- bailout(kSimd, "i64x2_extmul_high_i32x4_s unsupported");
-}
-
void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "i64x2_bitmask");
}
-void LiftoffAssembler::emit_i64x2_sconvert_i32x4_low(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "i64x2_sconvert_i32x4_low");
-}
-
-void LiftoffAssembler::emit_i64x2_sconvert_i32x4_high(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "i64x2_sconvert_i32x4_high");
-}
-
void LiftoffAssembler::emit_i64x2_uconvert_i32x4_low(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "i64x2_uconvert_i32x4_low");
+ I64x2UConvertI32x4Low(dst.fp().toSimd(), src.fp().toSimd(), r0,
+ kScratchSimd128Reg);
}
void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "i64x2_uconvert_i32x4_high");
-}
-
-void LiftoffAssembler::emit_i64x2_extmul_high_i32x4_u(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2) {
- bailout(kSimd, "i64x2_extmul_high_i32x4_u unsupported");
-}
-
-void LiftoffAssembler::emit_i32x4_alltrue(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "i32x4_alltrue");
+ I64x2UConvertI32x4High(dst.fp().toSimd(), src.fp().toSimd(), r0,
+ kScratchSimd128Reg);
}
void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
@@ -2338,35 +2337,6 @@ void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_u(LiftoffRegister dst,
bailout(kSimd, "i32x4.extadd_pairwise_i16x8_u");
}
-void LiftoffAssembler::emit_i32x4_extmul_low_i16x8_s(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2) {
- bailout(kSimd, "i32x4_extmul_low_i16x8_s unsupported");
-}
-
-void LiftoffAssembler::emit_i32x4_extmul_low_i16x8_u(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2) {
- bailout(kSimd, "i32x4_extmul_low_i16x8_u unsupported");
-}
-
-void LiftoffAssembler::emit_i32x4_extmul_high_i16x8_s(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2) {
- bailout(kSimd, "i32x4_extmul_high_i16x8_s unsupported");
-}
-
-void LiftoffAssembler::emit_i32x4_extmul_high_i16x8_u(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2) {
- bailout(kSimd, "i32x4_extmul_high_i16x8_u unsupported");
-}
-
-void LiftoffAssembler::emit_i16x8_alltrue(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "i16x8_alltrue");
-}
-
void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "i16x8_bitmask");
@@ -2382,36 +2352,12 @@ void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_u(LiftoffRegister dst,
bailout(kSimd, "i16x8.extadd_pairwise_i8x16_u");
}
-void LiftoffAssembler::emit_i16x8_extmul_low_i8x16_s(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2) {
- bailout(kSimd, "i16x8.extmul_low_i8x16_s unsupported");
-}
-
-void LiftoffAssembler::emit_i16x8_extmul_low_i8x16_u(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2) {
- bailout(kSimd, "i16x8.extmul_low_i8x16_u unsupported");
-}
-
-void LiftoffAssembler::emit_i16x8_extmul_high_i8x16_s(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2) {
- bailout(kSimd, "i16x8.extmul_high_i8x16_s unsupported");
-}
-
void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
bailout(kSimd, "i16x8_q15mulr_sat_s");
}
-void LiftoffAssembler::emit_i16x8_extmul_high_i8x16_u(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2) {
- bailout(kSimd, "i16x8_extmul_high_i8x16_u unsupported");
-}
-
void LiftoffAssembler::emit_i16x8_relaxed_q15mulr_s(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
@@ -2441,12 +2387,7 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "v8x16_anytrue");
-}
-
-void LiftoffAssembler::emit_i8x16_alltrue(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "i8x16_alltrue");
+ V128AnyTrue(dst.gp(), src.fp().toSimd(), r0, ip, kScratchSimd128Reg);
}
void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
@@ -2459,25 +2400,6 @@ void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_s128_const");
}
-void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_s128_not");
-}
-
-void LiftoffAssembler::emit_s128_and(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_s128_and");
-}
-
-void LiftoffAssembler::emit_s128_or(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_s128_or");
-}
-
-void LiftoffAssembler::emit_s128_xor(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_s128_xor");
-}
-
void LiftoffAssembler::emit_s128_select(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
@@ -2510,68 +2432,28 @@ void LiftoffAssembler::emit_f32x4_demote_f64x2_zero(LiftoffRegister dst,
bailout(kSimd, "f32x4.demote_f64x2_zero");
}
-void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i8x16_sconvert_i16x8");
-}
-
-void LiftoffAssembler::emit_i8x16_uconvert_i16x8(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i8x16_uconvert_i16x8");
-}
-
-void LiftoffAssembler::emit_i16x8_sconvert_i32x4(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i16x8_sconvert_i32x4");
-}
-
-void LiftoffAssembler::emit_i16x8_uconvert_i32x4(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i16x8_uconvert_i32x4");
-}
-
-void LiftoffAssembler::emit_i16x8_sconvert_i8x16_low(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_i16x8_sconvert_i8x16_low");
-}
-
-void LiftoffAssembler::emit_i16x8_sconvert_i8x16_high(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_i16x8_sconvert_i8x16_high");
-}
-
void LiftoffAssembler::emit_i16x8_uconvert_i8x16_low(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_i16x8_uconvert_i8x16_low");
+ I16x8UConvertI8x16Low(dst.fp().toSimd(), src.fp().toSimd(), r0,
+ kScratchSimd128Reg);
}
void LiftoffAssembler::emit_i16x8_uconvert_i8x16_high(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_i16x8_uconvert_i8x16_high");
-}
-
-void LiftoffAssembler::emit_i32x4_sconvert_i16x8_low(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_i32x4_sconvert_i16x8_low");
-}
-
-void LiftoffAssembler::emit_i32x4_sconvert_i16x8_high(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_i32x4_sconvert_i16x8_high");
+ I16x8UConvertI8x16High(dst.fp().toSimd(), src.fp().toSimd(), r0,
+ kScratchSimd128Reg);
}
void LiftoffAssembler::emit_i32x4_uconvert_i16x8_low(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_i32x4_uconvert_i16x8_low");
+ I32x4UConvertI16x8Low(dst.fp().toSimd(), src.fp().toSimd(), r0,
+ kScratchSimd128Reg);
}
void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_i32x4_uconvert_i16x8_high");
+ I32x4UConvertI16x8High(dst.fp().toSimd(), src.fp().toSimd(), r0,
+ kScratchSimd128Reg);
}
void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst,
@@ -2584,12 +2466,6 @@ void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst,
bailout(kSimd, "i32x4.trunc_sat_f64x2_u_zero");
}
-void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_s128_and_not");
-}
-
void LiftoffAssembler::emit_i8x16_rounding_average_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
diff --git a/deps/v8/src/wasm/baseline/riscv/liftoff-assembler-riscv.h b/deps/v8/src/wasm/baseline/riscv/liftoff-assembler-riscv.h
index e5838031ab..903cb07ef8 100644
--- a/deps/v8/src/wasm/baseline/riscv/liftoff-assembler-riscv.h
+++ b/deps/v8/src/wasm/baseline/riscv/liftoff-assembler-riscv.h
@@ -91,6 +91,10 @@ void LiftoffAssembler::PatchPrepareStackFrame(
// pushed as part of frame construction, so we don't need to allocate memory
// for them anymore.
int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
+ // The frame setup builtin also pushes the feedback vector.
+ if (v8_flags.wasm_speculative_inlining) {
+ frame_size -= kSystemPointerSize;
+ }
// We can't run out of space, just pass anything big enough to not cause the
// assembler to try to grow the buffer.
constexpr int kAvailableSpace = 256;
@@ -2227,6 +2231,22 @@ void LiftoffAssembler::emit_set_if_nan(Register dst, FPURegister src,
StoreWord(scratch, MemOperand(dst));
}
+void LiftoffAssembler::CallFrameSetupStub(int declared_function_index) {
+ // TODO(jkummerow): Enable this check when we have C++20.
+ // static_assert(std::find(std::begin(wasm::kGpParamRegisters),
+ // std::end(wasm::kGpParamRegisters),
+ // kLiftoffFrameSetupFunctionReg) ==
+ // std::end(wasm::kGpParamRegisters));
+
+ // On RISC-V, we must push at least {ra} before calling the stub, otherwise
+ // it would get clobbered with no possibility to recover it. So just set
+ // up the frame here.
+ EnterFrame(StackFrame::WASM);
+ LoadConstant(LiftoffRegister(kLiftoffFrameSetupFunctionReg),
+ WasmValue(declared_function_index));
+ CallRuntimeStub(WasmCode::kWasmLiftoffFrameSetup);
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/baseline/riscv/liftoff-assembler-riscv32.h b/deps/v8/src/wasm/baseline/riscv/liftoff-assembler-riscv32.h
index 28803b9f10..ed559941d4 100644
--- a/deps/v8/src/wasm/baseline/riscv/liftoff-assembler-riscv32.h
+++ b/deps/v8/src/wasm/baseline/riscv/liftoff-assembler-riscv32.h
@@ -33,10 +33,9 @@ namespace liftoff {
// -1 | StackFrame::WASM |
// -2 | instance |
// -3 | feedback vector|
-// -4 | tiering budget |
// -----+--------------------+---------------------------
-// -5 | slot 0 | ^
-// -6 | slot 1 | |
+// -4 | slot 0 | ^
+// -5 | slot 1 | |
// | | Frame slots
// | | |
// | | v
diff --git a/deps/v8/src/wasm/baseline/riscv/liftoff-assembler-riscv64.h b/deps/v8/src/wasm/baseline/riscv/liftoff-assembler-riscv64.h
index 738c3a8126..a04bd47790 100644
--- a/deps/v8/src/wasm/baseline/riscv/liftoff-assembler-riscv64.h
+++ b/deps/v8/src/wasm/baseline/riscv/liftoff-assembler-riscv64.h
@@ -33,10 +33,9 @@ namespace liftoff {
// -1 | StackFrame::WASM |
// -2 | instance |
// -3 | feedback vector|
-// -4 | tiering budget |
// -----+--------------------+---------------------------
-// -5 | slot 0 | ^
-// -6 | slot 1 | |
+// -4 | slot 0 | ^
+// -5 | slot 1 | |
// | | Frame slots
// | | |
// | | v
diff --git a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index f3e7f90b99..1aa8864f5d 100644
--- a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -100,6 +100,22 @@ int LiftoffAssembler::PrepareStackFrame() {
return offset;
}
+void LiftoffAssembler::CallFrameSetupStub(int declared_function_index) {
+ // TODO(jkummerow): Enable this check when we have C++20.
+ // static_assert(std::find(std::begin(wasm::kGpParamRegisters),
+ // std::end(wasm::kGpParamRegisters),
+ // kLiftoffFrameSetupFunctionReg) ==
+ // std::end(wasm::kGpParamRegisters));
+ // On s390, we must push at least {r14} before calling the stub, otherwise
+ // it would get clobbered with no possibility to recover it.
+ Register scratch = ip;
+ mov(scratch, Operand(StackFrame::TypeToMarker(StackFrame::WASM)));
+ PushCommonFrame(scratch);
+ LoadConstant(LiftoffRegister(kLiftoffFrameSetupFunctionReg),
+ WasmValue(declared_function_index));
+ CallRuntimeStub(WasmCode::kWasmLiftoffFrameSetup);
+}
+
void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
int stack_param_delta) {
Register scratch = r1;
@@ -128,6 +144,10 @@ void LiftoffAssembler::AlignFrameSize() {}
void LiftoffAssembler::PatchPrepareStackFrame(
int offset, SafepointTableBuilder* safepoint_table_builder) {
int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
+ // The frame setup builtin also pushes the feedback vector.
+ if (v8_flags.wasm_speculative_inlining) {
+ frame_size -= kSystemPointerSize;
+ }
constexpr int LayInstrSize = 6;
@@ -2169,7 +2189,20 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
case kRefNull:
case kRtt:
DCHECK(liftoff_cond == kEqual || liftoff_cond == kUnequal);
- V8_FALLTHROUGH;
+#if defined(V8_COMPRESS_POINTERS)
+ if (use_signed) {
+ CmpS32(lhs, rhs);
+ } else {
+ CmpU32(lhs, rhs);
+ }
+#else
+ if (use_signed) {
+ CmpS64(lhs, rhs);
+ } else {
+ CmpU64(lhs, rhs);
+ }
+#endif
+ break;
case kI64:
if (use_signed) {
CmpS64(lhs, rhs);
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index ace71919e5..526be9fc68 100644
--- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -191,6 +191,18 @@ int LiftoffAssembler::PrepareStackFrame() {
return offset;
}
+void LiftoffAssembler::CallFrameSetupStub(int declared_function_index) {
+ // TODO(jkummerow): Enable this check when we have C++20.
+ // static_assert(std::find(std::begin(wasm::kGpParamRegisters),
+ // std::end(wasm::kGpParamRegisters),
+ // kLiftoffFrameSetupFunctionReg) ==
+ // std::end(wasm::kGpParamRegisters));
+
+ LoadConstant(LiftoffRegister(kLiftoffFrameSetupFunctionReg),
+ WasmValue(declared_function_index));
+ CallRuntimeStub(WasmCode::kWasmLiftoffFrameSetup);
+}
+
void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
int stack_param_delta) {
// Push the return address and frame pointer to complete the stack frame.
@@ -219,6 +231,10 @@ void LiftoffAssembler::PatchPrepareStackFrame(
// pushed as part of frame construction, so we don't need to allocate memory
// for them anymore.
int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
+ // The frame setup builtin also pushes the feedback vector.
+ if (v8_flags.wasm_speculative_inlining) {
+ frame_size -= kSystemPointerSize;
+ }
DCHECK_EQ(0, frame_size % kSystemPointerSize);
// We can't run out of space when patching, just pass anything big enough to
@@ -2165,7 +2181,15 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
case kRefNull:
case kRtt:
DCHECK(liftoff_cond == kEqual || liftoff_cond == kUnequal);
- V8_FALLTHROUGH;
+#if defined(V8_COMPRESS_POINTERS)
+ // It's enough to do a 32-bit comparison. This is also necessary for
+ // null checks which only compare against a 32 bit value, not a full
+ // pointer.
+ cmpl(lhs, rhs);
+#else
+ cmpq(lhs, rhs);
+#endif
+ break;
case kI64:
cmpq(lhs, rhs);
break;
diff --git a/deps/v8/src/wasm/constant-expression-interface.cc b/deps/v8/src/wasm/constant-expression-interface.cc
index aa7a2809f3..00f78a0f4e 100644
--- a/deps/v8/src/wasm/constant-expression-interface.cc
+++ b/deps/v8/src/wasm/constant-expression-interface.cc
@@ -36,7 +36,7 @@ void ConstantExpressionInterface::F64Const(FullDecoder* decoder, Value* result,
}
void ConstantExpressionInterface::S128Const(FullDecoder* decoder,
- Simd128Immediate<validate>& imm,
+ Simd128Immediate& imm,
Value* result) {
if (!generate_value()) return;
result->runtime_value = WasmValue(imm.value, kWasmS128);
@@ -97,9 +97,8 @@ void ConstantExpressionInterface::RefFunc(FullDecoder* decoder,
result->runtime_value = WasmValue(internal, type);
}
-void ConstantExpressionInterface::GlobalGet(
- FullDecoder* decoder, Value* result,
- const GlobalIndexImmediate<validate>& imm) {
+void ConstantExpressionInterface::GlobalGet(FullDecoder* decoder, Value* result,
+ const GlobalIndexImmediate& imm) {
if (!generate_value()) return;
const WasmGlobal& global = module_->globals[imm.index];
DCHECK(!global.mutability);
@@ -116,9 +115,10 @@ void ConstantExpressionInterface::GlobalGet(
global.type);
}
-void ConstantExpressionInterface::StructNew(
- FullDecoder* decoder, const StructIndexImmediate<validate>& imm,
- const Value& rtt, const Value args[], Value* result) {
+void ConstantExpressionInterface::StructNew(FullDecoder* decoder,
+ const StructIndexImmediate& imm,
+ const Value& rtt,
+ const Value args[], Value* result) {
if (!generate_value()) return;
std::vector<WasmValue> field_values(imm.struct_type->field_count());
for (size_t i = 0; i < field_values.size(); i++) {
@@ -131,9 +131,9 @@ void ConstantExpressionInterface::StructNew(
ValueType::Ref(HeapType(imm.index)));
}
-void ConstantExpressionInterface::StringConst(
- FullDecoder* decoder, const StringConstImmediate<validate>& imm,
- Value* result) {
+void ConstantExpressionInterface::StringConst(FullDecoder* decoder,
+ const StringConstImmediate& imm,
+ Value* result) {
if (!generate_value()) return;
static_assert(base::IsInRange(kV8MaxWasmStringLiterals, 0, Smi::kMaxValue));
@@ -180,8 +180,8 @@ WasmValue DefaultValueForType(ValueType type, Isolate* isolate) {
} // namespace
void ConstantExpressionInterface::StructNewDefault(
- FullDecoder* decoder, const StructIndexImmediate<validate>& imm,
- const Value& rtt, Value* result) {
+ FullDecoder* decoder, const StructIndexImmediate& imm, const Value& rtt,
+ Value* result) {
if (!generate_value()) return;
std::vector<WasmValue> field_values(imm.struct_type->field_count());
for (uint32_t i = 0; i < field_values.size(); i++) {
@@ -194,10 +194,11 @@ void ConstantExpressionInterface::StructNewDefault(
ValueType::Ref(imm.index));
}
-void ConstantExpressionInterface::ArrayNew(
- FullDecoder* decoder, const ArrayIndexImmediate<validate>& imm,
- const Value& length, const Value& initial_value, const Value& rtt,
- Value* result) {
+void ConstantExpressionInterface::ArrayNew(FullDecoder* decoder,
+ const ArrayIndexImmediate& imm,
+ const Value& length,
+ const Value& initial_value,
+ const Value& rtt, Value* result) {
if (!generate_value()) return;
if (length.runtime_value.to_u32() >
static_cast<uint32_t>(WasmArray::MaxLength(imm.array_type))) {
@@ -213,8 +214,8 @@ void ConstantExpressionInterface::ArrayNew(
}
void ConstantExpressionInterface::ArrayNewDefault(
- FullDecoder* decoder, const ArrayIndexImmediate<validate>& imm,
- const Value& length, const Value& rtt, Value* result) {
+ FullDecoder* decoder, const ArrayIndexImmediate& imm, const Value& length,
+ const Value& rtt, Value* result) {
if (!generate_value()) return;
Value initial_value(decoder->pc(), imm.array_type->element_type());
initial_value.runtime_value =
@@ -223,7 +224,7 @@ void ConstantExpressionInterface::ArrayNewDefault(
}
void ConstantExpressionInterface::ArrayNewFixed(
- FullDecoder* decoder, const ArrayIndexImmediate<validate>& imm,
+ FullDecoder* decoder, const ArrayIndexImmediate& imm,
const base::Vector<Value>& elements, const Value& rtt, Value* result) {
if (!generate_value()) return;
std::vector<WasmValue> element_values;
@@ -236,8 +237,8 @@ void ConstantExpressionInterface::ArrayNewFixed(
}
void ConstantExpressionInterface::ArrayNewSegment(
- FullDecoder* decoder, const ArrayIndexImmediate<validate>& array_imm,
- const IndexImmediate<validate>& segment_imm, const Value& offset_value,
+ FullDecoder* decoder, const ArrayIndexImmediate& array_imm,
+ const IndexImmediate& segment_imm, const Value& offset_value,
const Value& length_value, const Value& rtt, Value* result) {
if (!generate_value()) return;
diff --git a/deps/v8/src/wasm/constant-expression-interface.h b/deps/v8/src/wasm/constant-expression-interface.h
index 6dc225e7b7..f109907ace 100644
--- a/deps/v8/src/wasm/constant-expression-interface.h
+++ b/deps/v8/src/wasm/constant-expression-interface.h
@@ -30,10 +30,10 @@ namespace wasm {
// if {!has_error()}, or with {error()} otherwise.
class V8_EXPORT_PRIVATE ConstantExpressionInterface {
public:
- static constexpr Decoder::ValidateFlag validate = Decoder::kFullValidation;
+ using ValidationTag = Decoder::FullValidationTag;
static constexpr DecodingMode decoding_mode = kConstantExpression;
- struct Value : public ValueBase<validate> {
+ struct Value : public ValueBase<ValidationTag> {
WasmValue runtime_value;
template <typename... Args>
@@ -41,9 +41,10 @@ class V8_EXPORT_PRIVATE ConstantExpressionInterface {
: ValueBase(std::forward<Args>(args)...) {}
};
- using Control = ControlBase<Value, validate>;
+ using Control = ControlBase<Value, ValidationTag>;
using FullDecoder =
- WasmFullDecoder<validate, ConstantExpressionInterface, decoding_mode>;
+ WasmFullDecoder<ValidationTag, ConstantExpressionInterface,
+ decoding_mode>;
ConstantExpressionInterface(const WasmModule* module, Isolate* isolate,
Handle<WasmInstanceObject> instance)
diff --git a/deps/v8/src/wasm/constant-expression.cc b/deps/v8/src/wasm/constant-expression.cc
index 0f7594fecb..6b6c69ad75 100644
--- a/deps/v8/src/wasm/constant-expression.cc
+++ b/deps/v8/src/wasm/constant-expression.cc
@@ -56,10 +56,10 @@ ValueOrError EvaluateConstantExpression(Zone* zone, ConstantExpression expr,
auto sig = FixedSizeSignature<ValueType>::Returns(expected);
FunctionBody body(&sig, ref.offset(), start, end);
WasmFeatures detected;
- // We use kFullValidation so we do not have to create another template
+ // We use FullValidationTag so we do not have to create another template
// instance of WasmFullDecoder, which would cost us >50Kb binary code
// size.
- WasmFullDecoder<Decoder::kFullValidation, ConstantExpressionInterface,
+ WasmFullDecoder<Decoder::FullValidationTag, ConstantExpressionInterface,
kConstantExpression>
decoder(zone, instance->module(), WasmFeatures::All(), &detected,
body, instance->module(), isolate, instance);
diff --git a/deps/v8/src/wasm/decoder.h b/deps/v8/src/wasm/decoder.h
index 2f6ffeb6af..8e9830b447 100644
--- a/deps/v8/src/wasm/decoder.h
+++ b/deps/v8/src/wasm/decoder.h
@@ -42,12 +42,36 @@ using DecodeResult = VoidResult;
// a buffer of bytes.
class Decoder {
public:
- // {ValidateFlag} can be used in a boolean manner ({if (!validate) ...}).
- enum ValidateFlag : int8_t {
- kNoValidation = 0, // Don't run validation, assume valid input.
- kBooleanValidation, // Run validation but only store a generic error.
- kFullValidation // Run full validation with error message and location.
+ // Don't run validation, assume valid input.
+ static constexpr struct NoValidationTag {
+ static constexpr bool validate = false;
+ static constexpr bool full_validation = false;
+ } kNoValidation = {};
+ // Run validation but only store a generic error.
+ static constexpr struct BooleanValidationTag {
+ static constexpr bool validate = true;
+ static constexpr bool full_validation = false;
+ } kBooleanValidation = {};
+ // Run full validation with error message and location.
+ static constexpr struct FullValidationTag {
+ static constexpr bool validate = true;
+ static constexpr bool full_validation = true;
+ } kFullValidation = {};
+
+ struct NoName {
+ constexpr NoName(const char*) {}
+ operator const char*() const { UNREACHABLE(); }
};
+ // Pass a {NoName} if we know statically that we do not use it anyway (we are
+ // not tracing (in release mode) and not running full validation).
+#ifdef DEBUG
+ template <typename ValidationTag>
+ using Name = const char*;
+#else
+ template <typename ValidationTag>
+ using Name =
+ std::conditional_t<ValidationTag::full_validation, const char*, NoName>;
+#endif
enum TraceFlag : bool { kTrace = true, kNoTrace = false };
@@ -66,96 +90,90 @@ class Decoder {
virtual ~Decoder() = default;
- // Ensures there are at least {length} bytes left to read, starting at {pc}.
- bool validate_size(const byte* pc, uint32_t length, const char* msg) {
- DCHECK_LE(start_, pc);
- if (V8_UNLIKELY(pc > end_ || length > static_cast<uint32_t>(end_ - pc))) {
- error(pc, msg);
- return false;
- }
- return true;
- }
-
// Reads an 8-bit unsigned integer.
- template <ValidateFlag validate>
- uint8_t read_u8(const byte* pc, const char* msg = "expected 1 byte") {
- return read_little_endian<uint8_t, validate>(pc, msg);
+ template <typename ValidationTag>
+ uint8_t read_u8(const byte* pc, Name<ValidationTag> msg = "expected 1 byte") {
+ return read_little_endian<uint8_t, ValidationTag>(pc, msg);
}
// Reads a 16-bit unsigned integer (little endian).
- template <ValidateFlag validate>
- uint16_t read_u16(const byte* pc, const char* msg = "expected 2 bytes") {
- return read_little_endian<uint16_t, validate>(pc, msg);
+ template <typename ValidationTag>
+ uint16_t read_u16(const byte* pc,
+ Name<ValidationTag> msg = "expected 2 bytes") {
+ return read_little_endian<uint16_t, ValidationTag>(pc, msg);
}
// Reads a 32-bit unsigned integer (little endian).
- template <ValidateFlag validate>
- uint32_t read_u32(const byte* pc, const char* msg = "expected 4 bytes") {
- return read_little_endian<uint32_t, validate>(pc, msg);
+ template <typename ValidationTag>
+ uint32_t read_u32(const byte* pc,
+ Name<ValidationTag> msg = "expected 4 bytes") {
+ return read_little_endian<uint32_t, ValidationTag>(pc, msg);
}
// Reads a 64-bit unsigned integer (little endian).
- template <ValidateFlag validate>
- uint64_t read_u64(const byte* pc, const char* msg = "expected 8 bytes") {
- return read_little_endian<uint64_t, validate>(pc, msg);
+ template <typename ValidationTag>
+ uint64_t read_u64(const byte* pc,
+ Name<ValidationTag> msg = "expected 8 bytes") {
+ return read_little_endian<uint64_t, ValidationTag>(pc, msg);
}
// Reads a variable-length unsigned integer (little endian).
- template <ValidateFlag validate>
+ template <typename ValidationTag>
uint32_t read_u32v(const byte* pc, uint32_t* length,
- const char* name = "LEB32") {
- return read_leb<uint32_t, validate, kNoTrace>(pc, length, name);
+ Name<ValidationTag> name = "LEB32") {
+ return read_leb<uint32_t, ValidationTag, kNoTrace>(pc, length, name);
}
// Reads a variable-length signed integer (little endian).
- template <ValidateFlag validate>
+ template <typename ValidationTag>
int32_t read_i32v(const byte* pc, uint32_t* length,
- const char* name = "signed LEB32") {
- return read_leb<int32_t, validate, kNoTrace>(pc, length, name);
+ Name<ValidationTag> name = "signed LEB32") {
+ return read_leb<int32_t, ValidationTag, kNoTrace>(pc, length, name);
}
// Reads a variable-length unsigned integer (little endian).
- template <ValidateFlag validate>
+ template <typename ValidationTag>
uint64_t read_u64v(const byte* pc, uint32_t* length,
- const char* name = "LEB64") {
- return read_leb<uint64_t, validate, kNoTrace>(pc, length, name);
+ Name<ValidationTag> name = "LEB64") {
+ return read_leb<uint64_t, ValidationTag, kNoTrace>(pc, length, name);
}
// Reads a variable-length signed integer (little endian).
- template <ValidateFlag validate>
+ template <typename ValidationTag>
int64_t read_i64v(const byte* pc, uint32_t* length,
- const char* name = "signed LEB64") {
- return read_leb<int64_t, validate, kNoTrace>(pc, length, name);
+ Name<ValidationTag> name = "signed LEB64") {
+ return read_leb<int64_t, ValidationTag, kNoTrace>(pc, length, name);
}
// Reads a variable-length 33-bit signed integer (little endian).
- template <ValidateFlag validate>
+ template <typename ValidationTag>
int64_t read_i33v(const byte* pc, uint32_t* length,
- const char* name = "signed LEB33") {
- return read_leb<int64_t, validate, kNoTrace, 33>(pc, length, name);
+ Name<ValidationTag> name = "signed LEB33") {
+ return read_leb<int64_t, ValidationTag, kNoTrace, 33>(pc, length, name);
}
// Convenient overload for callers who don't care about length.
- template <ValidateFlag validate>
+ template <typename ValidationTag>
WasmOpcode read_prefixed_opcode(const byte* pc) {
uint32_t len;
- return read_prefixed_opcode<validate>(pc, &len);
+ return read_prefixed_opcode<ValidationTag>(pc, &len);
}
// Reads a prefixed-opcode, possibly with variable-length index.
// `length` is set to the number of bytes that make up this opcode,
// *including* the prefix byte. For most opcodes, it will be 2.
- template <ValidateFlag validate>
- WasmOpcode read_prefixed_opcode(const byte* pc, uint32_t* length,
- const char* name = "prefixed opcode") {
+ template <typename ValidationTag>
+ WasmOpcode read_prefixed_opcode(
+ const byte* pc, uint32_t* length,
+ Name<ValidationTag> name = "prefixed opcode") {
uint32_t index;
// Prefixed opcodes all use LEB128 encoding.
- index = read_u32v<validate>(pc + 1, length, "prefixed opcode index");
+ index = read_u32v<ValidationTag>(pc + 1, length, "prefixed opcode index");
*length += 1; // Prefix byte.
// Only support opcodes that go up to 0xFFF (when decoded). Anything
// bigger will need more than 2 bytes, and the '<< 12' below will be wrong.
- if (validate && V8_UNLIKELY(index > 0xfff)) {
+ if (ValidationTag::validate && V8_UNLIKELY(index > 0xfff)) {
errorf(pc, "Invalid prefixed opcode %d", index);
// If size validation fails.
index = 0;
@@ -195,7 +213,7 @@ class Decoder {
uint32_t consume_u32v(const char* name = "var_uint32") {
uint32_t length = 0;
uint32_t result =
- read_leb<uint32_t, kFullValidation, kTrace>(pc_, &length, name);
+ read_leb<uint32_t, FullValidationTag, kTrace>(pc_, &length, name);
pc_ += length;
return result;
}
@@ -203,7 +221,7 @@ class Decoder {
uint32_t consume_u32v(const char* name, Tracer& tracer) {
uint32_t length = 0;
uint32_t result =
- read_leb<uint32_t, kFullValidation, kNoTrace>(pc_, &length, name);
+ read_leb<uint32_t, FullValidationTag, kNoTrace>(pc_, &length, name);
tracer.Bytes(pc_, length);
tracer.Description(name);
pc_ += length;
@@ -214,7 +232,7 @@ class Decoder {
int32_t consume_i32v(const char* name = "var_int32") {
uint32_t length = 0;
int32_t result =
- read_leb<int32_t, kFullValidation, kTrace>(pc_, &length, name);
+ read_leb<int32_t, FullValidationTag, kTrace>(pc_, &length, name);
pc_ += length;
return result;
}
@@ -224,7 +242,7 @@ class Decoder {
uint64_t consume_u64v(const char* name, Tracer& tracer) {
uint32_t length = 0;
uint64_t result =
- read_leb<uint64_t, kFullValidation, kNoTrace>(pc_, &length, name);
+ read_leb<uint64_t, FullValidationTag, kNoTrace>(pc_, &length, name);
tracer.Bytes(pc_, length);
tracer.Description(name);
pc_ += length;
@@ -235,7 +253,7 @@ class Decoder {
int64_t consume_i64v(const char* name = "var_int64") {
uint32_t length = 0;
int64_t result =
- read_leb<int64_t, kFullValidation, kTrace>(pc_, &length, name);
+ read_leb<int64_t, FullValidationTag, kTrace>(pc_, &length, name);
pc_ += length;
return result;
}
@@ -257,10 +275,15 @@ class Decoder {
consume_bytes(size, nullptr);
}
+ uint32_t available_bytes() const {
+ DCHECK_LE(pc_, end_);
+ DCHECK_GE(kMaxUInt32, end_ - pc_);
+ return static_cast<uint32_t>(end_ - pc_);
+ }
+
// Check that at least {size} bytes exist between {pc_} and {end_}.
bool checkAvailable(uint32_t size) {
- DCHECK_LE(pc_, end_);
- if (V8_UNLIKELY(size > static_cast<uint32_t>(end_ - pc_))) {
+ if (V8_UNLIKELY(size > available_bytes())) {
errorf(pc_, "expected %u bytes, fell off end", size);
return false;
}
@@ -401,12 +424,20 @@ class Decoder {
onFirstError();
}
- template <typename IntType, ValidateFlag validate>
- IntType read_little_endian(const byte* pc, const char* msg) {
- if (!validate) {
- DCHECK(validate_size(pc, sizeof(IntType), msg));
- } else if (!validate_size(pc, sizeof(IntType), msg)) {
- return IntType{0};
+ template <typename IntType, typename ValidationTag>
+ IntType read_little_endian(const byte* pc, Name<ValidationTag> msg) {
+ DCHECK_LE(start_, pc);
+
+ if (!ValidationTag::validate) {
+ DCHECK_LE(pc, end_);
+ DCHECK_LE(sizeof(IntType), end_ - pc);
+ } else if (V8_UNLIKELY(ptrdiff_t{sizeof(IntType)} > end_ - pc)) {
+ if (ValidationTag::full_validation) {
+ error(pc, msg);
+ } else {
+ MarkError();
+ }
+ return 0;
}
return base::ReadLittleEndianValue<IntType>(reinterpret_cast<Address>(pc));
}
@@ -419,22 +450,23 @@ class Decoder {
pc_ = end_;
return IntType{0};
}
- IntType val = read_little_endian<IntType, kNoValidation>(pc_, name);
+ IntType val = read_little_endian<IntType, NoValidationTag>(pc_, name);
traceByteRange(pc_, pc_ + sizeof(IntType));
TRACE_IF(trace, "= %d\n", val);
pc_ += sizeof(IntType);
return val;
}
- template <typename IntType, ValidateFlag validate, TraceFlag trace,
+ template <typename IntType, typename ValidationTag, TraceFlag trace,
size_t size_in_bits = 8 * sizeof(IntType)>
V8_INLINE IntType read_leb(const byte* pc, uint32_t* length,
- const char* name = "varint") {
+ Name<ValidationTag> name = "varint") {
static_assert(size_in_bits <= 8 * sizeof(IntType),
"leb does not fit in type");
- TRACE_IF(trace, " +%u %-20s: ", pc_offset(), name);
+ TRACE_IF(trace, " +%u %-20s: ", pc_offset(),
+ implicit_cast<const char*>(name));
// Fast path for single-byte integers.
- if ((!validate || V8_LIKELY(pc < end_)) && !(*pc & 0x80)) {
+ if ((!ValidationTag::validate || V8_LIKELY(pc < end_)) && !(*pc & 0x80)) {
TRACE_IF(trace, "%02x ", *pc);
*length = 1;
IntType result = *pc;
@@ -448,29 +480,29 @@ class Decoder {
}
return result;
}
- return read_leb_slowpath<IntType, validate, trace, size_in_bits>(pc, length,
- name);
+ return read_leb_slowpath<IntType, ValidationTag, trace, size_in_bits>(
+ pc, length, name);
}
- template <typename IntType, ValidateFlag validate, TraceFlag trace,
+ template <typename IntType, typename ValidationTag, TraceFlag trace,
size_t size_in_bits = 8 * sizeof(IntType)>
V8_NOINLINE IntType read_leb_slowpath(const byte* pc, uint32_t* length,
- const char* name) {
+ Name<ValidationTag> name) {
// Create an unrolled LEB decoding function per integer type.
- return read_leb_tail<IntType, validate, trace, size_in_bits, 0>(pc, length,
- name, 0);
+ return read_leb_tail<IntType, ValidationTag, trace, size_in_bits, 0>(
+ pc, length, name, 0);
}
- template <typename IntType, ValidateFlag validate, TraceFlag trace,
+ template <typename IntType, typename ValidationTag, TraceFlag trace,
size_t size_in_bits, int byte_index>
V8_INLINE IntType read_leb_tail(const byte* pc, uint32_t* length,
- const char* name, IntType result) {
+ Name<ValidationTag> name, IntType result) {
constexpr bool is_signed = std::is_signed<IntType>::value;
constexpr int kMaxLength = (size_in_bits + 6) / 7;
static_assert(byte_index < kMaxLength, "invalid template instantiation");
constexpr int shift = byte_index * 7;
constexpr bool is_last_byte = byte_index == kMaxLength - 1;
- const bool at_end = validate && pc >= end_;
+ const bool at_end = ValidationTag::validate && pc >= end_;
byte b = 0;
if (V8_LIKELY(!at_end)) {
DCHECK_LT(pc, end_);
@@ -485,13 +517,13 @@ class Decoder {
// Compilers are not smart enough to figure out statically that the
// following call is unreachable if is_last_byte is false.
constexpr int next_byte_index = byte_index + (is_last_byte ? 0 : 1);
- return read_leb_tail<IntType, validate, trace, size_in_bits,
+ return read_leb_tail<IntType, ValidationTag, trace, size_in_bits,
next_byte_index>(pc + 1, length, name, result);
}
*length = byte_index + (at_end ? 0 : 1);
- if (validate && V8_UNLIKELY(at_end || (b & 0x80))) {
+ if (ValidationTag::validate && V8_UNLIKELY(at_end || (b & 0x80))) {
TRACE_IF(trace, at_end ? "<end> " : "<length overflow> ");
- if (validate == kFullValidation) {
+ if constexpr (ValidationTag::full_validation) {
errorf(pc, "expected %s", name);
} else {
MarkError();
@@ -499,7 +531,7 @@ class Decoder {
result = 0;
*length = 0;
}
- if (is_last_byte) {
+ if constexpr (is_last_byte) {
// A signed-LEB128 must sign-extend the final byte, excluding its
// most-significant bit; e.g. for a 32-bit LEB128:
// kExtraBits = 4 (== 32 - (5-1) * 7)
@@ -513,10 +545,10 @@ class Decoder {
const bool valid_extra_bits =
checked_bits == 0 ||
(is_signed && checked_bits == kSignExtendedExtraBits);
- if (!validate) {
+ if (!ValidationTag::validate) {
DCHECK(valid_extra_bits);
} else if (V8_UNLIKELY(!valid_extra_bits)) {
- if (validate == kFullValidation) {
+ if (ValidationTag::full_validation) {
error(pc, "extra bits in varint");
} else {
MarkError();
diff --git a/deps/v8/src/wasm/function-body-decoder-impl.h b/deps/v8/src/wasm/function-body-decoder-impl.h
index a8a173e0a5..d2df57e83a 100644
--- a/deps/v8/src/wasm/function-body-decoder-impl.h
+++ b/deps/v8/src/wasm/function-body-decoder-impl.h
@@ -44,12 +44,12 @@ struct WasmTag;
#define TRACE_INST_FORMAT " @%-8d #%-30s|"
-// Return the evaluation of `condition` if validate==true, DCHECK that it's
-// true and always return true otherwise.
-#define VALIDATE(condition) \
- (validate ? V8_LIKELY(condition) : [&] { \
- DCHECK(condition); \
- return true; \
+// Return the evaluation of {condition} if {ValidationTag::validate} is true,
+// DCHECK that it is true and always return true otherwise.
+#define VALIDATE(condition) \
+ (ValidationTag::validate ? V8_LIKELY(condition) : [&] { \
+ DCHECK(condition); \
+ return true; \
}())
#define CHECK_PROTOTYPE_OPCODE(feat) \
@@ -167,53 +167,49 @@ static constexpr StoreType GetStoreType(WasmOpcode opcode) {
V(I64AtomicStore32U, Uint32)
// Decoder error with explicit PC and format arguments.
-template <Decoder::ValidateFlag validate, typename... Args>
+template <typename ValidationTag, typename... Args>
void DecodeError(Decoder* decoder, const byte* pc, const char* str,
Args&&... args) {
- CHECK(validate == Decoder::kFullValidation ||
- validate == Decoder::kBooleanValidation);
+ if constexpr (!ValidationTag::validate) UNREACHABLE();
static_assert(sizeof...(Args) > 0);
- if (validate == Decoder::kBooleanValidation) {
- decoder->MarkError();
- } else {
+ if constexpr (ValidationTag::full_validation) {
decoder->errorf(pc, str, std::forward<Args>(args)...);
+ } else {
+ decoder->MarkError();
}
}
// Decoder error with explicit PC and no format arguments.
-template <Decoder::ValidateFlag validate>
+template <typename ValidationTag>
void DecodeError(Decoder* decoder, const byte* pc, const char* str) {
- CHECK(validate == Decoder::kFullValidation ||
- validate == Decoder::kBooleanValidation);
- if (validate == Decoder::kBooleanValidation) {
- decoder->MarkError();
- } else {
+ if constexpr (!ValidationTag::validate) UNREACHABLE();
+ if constexpr (ValidationTag::full_validation) {
decoder->error(pc, str);
+ } else {
+ decoder->MarkError();
}
}
// Decoder error without explicit PC, but with format arguments.
-template <Decoder::ValidateFlag validate, typename... Args>
+template <typename ValidationTag, typename... Args>
void DecodeError(Decoder* decoder, const char* str, Args&&... args) {
- CHECK(validate == Decoder::kFullValidation ||
- validate == Decoder::kBooleanValidation);
+ if constexpr (!ValidationTag::validate) UNREACHABLE();
static_assert(sizeof...(Args) > 0);
- if (validate == Decoder::kBooleanValidation) {
- decoder->MarkError();
- } else {
+ if constexpr (ValidationTag::full_validation) {
decoder->errorf(str, std::forward<Args>(args)...);
+ } else {
+ decoder->MarkError();
}
}
// Decoder error without explicit PC and without format arguments.
-template <Decoder::ValidateFlag validate>
+template <typename ValidationTag>
void DecodeError(Decoder* decoder, const char* str) {
- CHECK(validate == Decoder::kFullValidation ||
- validate == Decoder::kBooleanValidation);
- if (validate == Decoder::kBooleanValidation) {
- decoder->MarkError();
- } else {
+ if constexpr (!ValidationTag::validate) UNREACHABLE();
+ if constexpr (ValidationTag::full_validation) {
decoder->error(str);
+ } else {
+ decoder->MarkError();
}
}
@@ -221,16 +217,16 @@ namespace value_type_reader {
// If {module} is not null, the read index will be checked against the module's
// type capacity.
-template <Decoder::ValidateFlag validate>
+template <typename ValidationTag>
HeapType read_heap_type(Decoder* decoder, const byte* pc,
- uint32_t* const length, const WasmModule* module,
- const WasmFeatures& enabled) {
- int64_t heap_index = decoder->read_i33v<validate>(pc, length, "heap type");
+ uint32_t* const length, const WasmFeatures& enabled) {
+ int64_t heap_index =
+ decoder->read_i33v<ValidationTag>(pc, length, "heap type");
if (heap_index < 0) {
int64_t min_1_byte_leb128 = -64;
if (!VALIDATE(heap_index >= min_1_byte_leb128)) {
- DecodeError<validate>(decoder, pc, "Unknown heap type %" PRId64,
- heap_index);
+ DecodeError<ValidationTag>(decoder, pc, "Unknown heap type %" PRId64,
+ heap_index);
return HeapType(HeapType::kBottom);
}
uint8_t uint_7_mask = 0x7F;
@@ -238,14 +234,14 @@ HeapType read_heap_type(Decoder* decoder, const byte* pc,
switch (code) {
case kEqRefCode:
case kI31RefCode:
- case kDataRefCode:
+ case kStructRefCode:
case kArrayRefCode:
case kAnyRefCode:
case kNoneCode:
case kNoExternCode:
case kNoFuncCode:
if (!VALIDATE(enabled.has_gc())) {
- DecodeError<validate>(
+ DecodeError<ValidationTag>(
decoder, pc,
"invalid heap type '%s', enable with --experimental-wasm-gc",
HeapType::from_code(code).name().c_str());
@@ -259,37 +255,32 @@ HeapType read_heap_type(Decoder* decoder, const byte* pc,
case kStringViewWtf16Code:
case kStringViewIterCode:
if (!VALIDATE(enabled.has_stringref())) {
- DecodeError<validate>(decoder, pc,
- "invalid heap type '%s', enable with "
- "--experimental-wasm-stringref",
- HeapType::from_code(code).name().c_str());
+ DecodeError<ValidationTag>(decoder, pc,
+ "invalid heap type '%s', enable with "
+ "--experimental-wasm-stringref",
+ HeapType::from_code(code).name().c_str());
}
return HeapType::from_code(code);
default:
- DecodeError<validate>(decoder, pc, "Unknown heap type %" PRId64,
- heap_index);
+ DecodeError<ValidationTag>(decoder, pc, "Unknown heap type %" PRId64,
+ heap_index);
return HeapType(HeapType::kBottom);
}
} else {
if (!VALIDATE(enabled.has_typed_funcref())) {
- DecodeError<validate>(decoder, pc,
- "Invalid indexed heap type, enable with "
- "--experimental-wasm-typed-funcref");
+ DecodeError<ValidationTag>(decoder, pc,
+ "Invalid indexed heap type, enable with "
+ "--experimental-wasm-typed-funcref");
}
uint32_t type_index = static_cast<uint32_t>(heap_index);
if (!VALIDATE(type_index < kV8MaxWasmTypes)) {
- DecodeError<validate>(
+ DecodeError<ValidationTag>(
decoder, pc,
"Type index %u is greater than the maximum number %zu "
"of type definitions supported by V8",
type_index, kV8MaxWasmTypes);
return HeapType(HeapType::kBottom);
}
- // We use capacity over size so this works mid-DecodeTypeSection.
- if (!VALIDATE(module == nullptr || type_index < module->types.capacity())) {
- DecodeError<validate>(decoder, pc, "Type index %u is out of bounds",
- type_index);
- }
return HeapType(type_index);
}
}
@@ -297,15 +288,14 @@ HeapType read_heap_type(Decoder* decoder, const byte* pc,
// Read a value type starting at address {pc} using {decoder}.
// No bytes are consumed.
// The length of the read value type is written in {length}.
-// Registers an error for an invalid type only if {validate} is not
-// kNoValidate.
-template <Decoder::ValidateFlag validate>
+// Registers an error for an invalid type only if {ValidationTag::validate} is
+// true.
+template <typename ValidationTag>
ValueType read_value_type(Decoder* decoder, const byte* pc,
- uint32_t* const length, const WasmModule* module,
- const WasmFeatures& enabled) {
+ uint32_t* const length, const WasmFeatures& enabled) {
*length = 1;
- byte val = decoder->read_u8<validate>(pc, "value type opcode");
- if (decoder->failed()) {
+ byte val = decoder->read_u8<ValidationTag>(pc, "value type opcode");
+ if (!VALIDATE(decoder->ok())) {
*length = 0;
return kWasmBottom;
}
@@ -313,14 +303,14 @@ ValueType read_value_type(Decoder* decoder, const byte* pc,
switch (code) {
case kEqRefCode:
case kI31RefCode:
- case kDataRefCode:
+ case kStructRefCode:
case kArrayRefCode:
case kAnyRefCode:
case kNoneCode:
case kNoExternCode:
case kNoFuncCode:
if (!VALIDATE(enabled.has_gc())) {
- DecodeError<validate>(
+ DecodeError<ValidationTag>(
decoder, pc,
"invalid value type '%sref', enable with --experimental-wasm-gc",
HeapType::from_code(code).name().c_str());
@@ -335,10 +325,10 @@ ValueType read_value_type(Decoder* decoder, const byte* pc,
case kStringViewWtf16Code:
case kStringViewIterCode: {
if (!VALIDATE(enabled.has_stringref())) {
- DecodeError<validate>(decoder, pc,
- "invalid value type '%sref', enable with "
- "--experimental-wasm-stringref",
- HeapType::from_code(code).name().c_str());
+ DecodeError<ValidationTag>(decoder, pc,
+ "invalid value type '%sref', enable with "
+ "--experimental-wasm-stringref",
+ HeapType::from_code(code).name().c_str());
return kWasmBottom;
}
return ValueType::RefNull(HeapType::from_code(code));
@@ -355,28 +345,23 @@ ValueType read_value_type(Decoder* decoder, const byte* pc,
case kRefNullCode: {
Nullability nullability = code == kRefNullCode ? kNullable : kNonNullable;
if (!VALIDATE(enabled.has_typed_funcref())) {
- DecodeError<validate>(decoder, pc,
- "Invalid type '(ref%s <heaptype>)', enable with "
- "--experimental-wasm-typed-funcref",
- nullability == kNullable ? " null" : "");
+ DecodeError<ValidationTag>(
+ decoder, pc,
+ "Invalid type '(ref%s <heaptype>)', enable with "
+ "--experimental-wasm-typed-funcref",
+ nullability == kNullable ? " null" : "");
return kWasmBottom;
}
HeapType heap_type =
- read_heap_type<validate>(decoder, pc + 1, length, module, enabled);
+ read_heap_type<ValidationTag>(decoder, pc + 1, length, enabled);
*length += 1;
return heap_type.is_bottom()
? kWasmBottom
: ValueType::RefMaybeNull(heap_type, nullability);
}
case kS128Code: {
- if (!VALIDATE(enabled.has_simd())) {
- DecodeError<validate>(
- decoder, pc,
- "invalid value type 's128', enable with --experimental-wasm-simd");
- return kWasmBottom;
- }
if (!VALIDATE(CheckHardwareSupportsSimd())) {
- DecodeError<validate>(decoder, pc, "Wasm SIMD unsupported");
+ DecodeError<ValidationTag>(decoder, pc, "Wasm SIMD unsupported");
return kWasmBottom;
}
return kWasmS128;
@@ -387,179 +372,216 @@ ValueType read_value_type(Decoder* decoder, const byte* pc,
case kVoidCode:
case kI8Code:
case kI16Code:
- if (validate) {
- DecodeError<validate>(decoder, pc, "invalid value type 0x%x", code);
+ if (ValidationTag::validate) {
+ DecodeError<ValidationTag>(decoder, pc, "invalid value type 0x%x",
+ code);
}
return kWasmBottom;
}
// Anything that doesn't match an enumeration value is an invalid type code.
- if (validate) {
- DecodeError<validate>(decoder, pc, "invalid value type 0x%x", code);
+ if (ValidationTag::validate) {
+ DecodeError<ValidationTag>(decoder, pc, "invalid value type 0x%x", code);
}
return kWasmBottom;
}
+
+template <typename ValidationTag>
+bool ValidateHeapType(Decoder* decoder, const byte* pc,
+ const WasmModule* module, HeapType type) {
+ if (!type.is_index()) return true;
+ // A {nullptr} module is accepted if we are not validating anyway (e.g. for
+ // opcode length computation).
+ if (!ValidationTag::validate && module == nullptr) return true;
+ // We use capacity over size so this works mid-DecodeTypeSection.
+ if (!VALIDATE(type.ref_index() < module->types.capacity())) {
+ DecodeError<ValidationTag>(decoder, pc, "Type index %u is out of bounds",
+ type.ref_index());
+ return false;
+ }
+ return true;
+}
+
+template <typename ValidationTag>
+bool ValidateValueType(Decoder* decoder, const byte* pc,
+ const WasmModule* module, ValueType type) {
+ if (V8_LIKELY(!type.is_object_reference())) return true;
+ return ValidateHeapType<ValidationTag>(decoder, pc, module, type.heap_type());
+}
+
} // namespace value_type_reader
enum DecodingMode { kFunctionBody, kConstantExpression };
// Helpers for decoding different kinds of immediates which follow bytecodes.
-template <Decoder::ValidateFlag validate>
struct ImmI32Immediate {
int32_t value;
uint32_t length;
- ImmI32Immediate(Decoder* decoder, const byte* pc) {
- value = decoder->read_i32v<validate>(pc, &length, "immi32");
+
+ template <typename ValidationTag>
+ ImmI32Immediate(Decoder* decoder, const byte* pc, ValidationTag = {}) {
+ value = decoder->read_i32v<ValidationTag>(pc, &length, "immi32");
}
};
-template <Decoder::ValidateFlag validate>
struct ImmI64Immediate {
int64_t value;
uint32_t length;
- ImmI64Immediate(Decoder* decoder, const byte* pc) {
- value = decoder->read_i64v<validate>(pc, &length, "immi64");
+
+ template <typename ValidationTag>
+ ImmI64Immediate(Decoder* decoder, const byte* pc, ValidationTag = {}) {
+ value = decoder->read_i64v<ValidationTag>(pc, &length, "immi64");
}
};
-template <Decoder::ValidateFlag validate>
struct ImmF32Immediate {
float value;
uint32_t length = 4;
- ImmF32Immediate(Decoder* decoder, const byte* pc) {
+
+ template <typename ValidationTag>
+ ImmF32Immediate(Decoder* decoder, const byte* pc, ValidationTag = {}) {
// We can't use base::bit_cast here because calling any helper function
// that returns a float would potentially flip NaN bits per C++ semantics,
// so we have to inline the memcpy call directly.
- uint32_t tmp = decoder->read_u32<validate>(pc, "immf32");
+ uint32_t tmp = decoder->read_u32<ValidationTag>(pc, "immf32");
memcpy(&value, &tmp, sizeof(value));
}
};
-template <Decoder::ValidateFlag validate>
struct ImmF64Immediate {
double value;
uint32_t length = 8;
- ImmF64Immediate(Decoder* decoder, const byte* pc) {
+
+ template <typename ValidationTag>
+ ImmF64Immediate(Decoder* decoder, const byte* pc, ValidationTag = {}) {
// Avoid base::bit_cast because it might not preserve the signalling bit
// of a NaN.
- uint64_t tmp = decoder->read_u64<validate>(pc, "immf64");
+ uint64_t tmp = decoder->read_u64<ValidationTag>(pc, "immf64");
memcpy(&value, &tmp, sizeof(value));
}
};
-// This is different than IndexImmediate because {index} is a byte.
-template <Decoder::ValidateFlag validate>
struct MemoryIndexImmediate {
uint8_t index = 0;
uint32_t length = 1;
- MemoryIndexImmediate(Decoder* decoder, const byte* pc) {
- index = decoder->read_u8<validate>(pc, "memory index");
+
+ template <typename ValidationTag>
+ MemoryIndexImmediate(Decoder* decoder, const byte* pc, ValidationTag = {}) {
+ index = decoder->read_u8<ValidationTag>(pc, "memory index");
}
};
// Parent class for all Immediates which read a u32v index value in their
// constructor.
-template <Decoder::ValidateFlag validate>
struct IndexImmediate {
uint32_t index;
uint32_t length;
- IndexImmediate(Decoder* decoder, const byte* pc, const char* name) {
- index = decoder->read_u32v<validate>(pc, &length, name);
+ template <typename ValidationTag>
+ IndexImmediate(Decoder* decoder, const byte* pc, const char* name,
+ ValidationTag = {}) {
+ index = decoder->read_u32v<ValidationTag>(pc, &length, name);
}
};
-template <Decoder::ValidateFlag validate>
-struct TagIndexImmediate : public IndexImmediate<validate> {
+struct TagIndexImmediate : public IndexImmediate {
const WasmTag* tag = nullptr;
- TagIndexImmediate(Decoder* decoder, const byte* pc)
- : IndexImmediate<validate>(decoder, pc, "tag index") {}
+ template <typename ValidationTag>
+ TagIndexImmediate(Decoder* decoder, const byte* pc,
+ ValidationTag validate = {})
+ : IndexImmediate(decoder, pc, "tag index", validate) {}
};
-template <Decoder::ValidateFlag validate>
-struct GlobalIndexImmediate : public IndexImmediate<validate> {
+struct GlobalIndexImmediate : public IndexImmediate {
const WasmGlobal* global = nullptr;
- GlobalIndexImmediate(Decoder* decoder, const byte* pc)
- : IndexImmediate<validate>(decoder, pc, "global index") {}
+ template <typename ValidationTag>
+ GlobalIndexImmediate(Decoder* decoder, const byte* pc,
+ ValidationTag validate = {})
+ : IndexImmediate(decoder, pc, "global index", validate) {}
};
-template <Decoder::ValidateFlag validate>
-struct SigIndexImmediate : public IndexImmediate<validate> {
+struct SigIndexImmediate : public IndexImmediate {
const FunctionSig* sig = nullptr;
- SigIndexImmediate(Decoder* decoder, const byte* pc)
- : IndexImmediate<validate>(decoder, pc, "signature index") {}
+ template <typename ValidationTag>
+ SigIndexImmediate(Decoder* decoder, const byte* pc,
+ ValidationTag validate = {})
+ : IndexImmediate(decoder, pc, "signature index", validate) {}
};
-template <Decoder::ValidateFlag validate>
-struct StructIndexImmediate : public IndexImmediate<validate> {
+struct StructIndexImmediate : public IndexImmediate {
const StructType* struct_type = nullptr;
- StructIndexImmediate(Decoder* decoder, const byte* pc)
- : IndexImmediate<validate>(decoder, pc, "struct index") {}
+ template <typename ValidationTag>
+ StructIndexImmediate(Decoder* decoder, const byte* pc,
+ ValidationTag validate = {})
+ : IndexImmediate(decoder, pc, "struct index", validate) {}
};
-template <Decoder::ValidateFlag validate>
-struct ArrayIndexImmediate : public IndexImmediate<validate> {
+struct ArrayIndexImmediate : public IndexImmediate {
const ArrayType* array_type = nullptr;
- ArrayIndexImmediate(Decoder* decoder, const byte* pc)
- : IndexImmediate<validate>(decoder, pc, "array index") {}
+ template <typename ValidationTag>
+ ArrayIndexImmediate(Decoder* decoder, const byte* pc,
+ ValidationTag validate = {})
+ : IndexImmediate(decoder, pc, "array index", validate) {}
};
-template <Decoder::ValidateFlag validate>
-struct CallFunctionImmediate : public IndexImmediate<validate> {
+
+struct CallFunctionImmediate : public IndexImmediate {
const FunctionSig* sig = nullptr;
- CallFunctionImmediate(Decoder* decoder, const byte* pc)
- : IndexImmediate<validate>(decoder, pc, "function index") {}
+ template <typename ValidationTag>
+ CallFunctionImmediate(Decoder* decoder, const byte* pc,
+ ValidationTag validate = {})
+ : IndexImmediate(decoder, pc, "function index", validate) {}
};
-template <Decoder::ValidateFlag validate>
struct SelectTypeImmediate {
uint32_t length;
ValueType type;
+ template <typename ValidationTag>
SelectTypeImmediate(const WasmFeatures& enabled, Decoder* decoder,
- const byte* pc, const WasmModule* module) {
- uint8_t num_types =
- decoder->read_u32v<validate>(pc, &length, "number of select types");
+ const byte* pc, ValidationTag = {}) {
+ uint8_t num_types = decoder->read_u32v<ValidationTag>(
+ pc, &length, "number of select types");
if (!VALIDATE(num_types == 1)) {
- DecodeError<validate>(
+ DecodeError<ValidationTag>(
decoder, pc,
"Invalid number of types. Select accepts exactly one type");
return;
}
uint32_t type_length;
- type = value_type_reader::read_value_type<validate>(
- decoder, pc + length, &type_length, module, enabled);
+ type = value_type_reader::read_value_type<ValidationTag>(
+ decoder, pc + length, &type_length, enabled);
length += type_length;
}
};
-template <Decoder::ValidateFlag validate>
struct BlockTypeImmediate {
uint32_t length = 1;
ValueType type = kWasmVoid;
uint32_t sig_index = 0;
const FunctionSig* sig = nullptr;
+ template <typename ValidationTag>
BlockTypeImmediate(const WasmFeatures& enabled, Decoder* decoder,
- const byte* pc, const WasmModule* module) {
+ const byte* pc, ValidationTag = {}) {
int64_t block_type =
- decoder->read_i33v<validate>(pc, &length, "block type");
+ decoder->read_i33v<ValidationTag>(pc, &length, "block type");
if (block_type < 0) {
// All valid negative types are 1 byte in length, so we check against the
// minimum 1-byte LEB128 value.
constexpr int64_t min_1_byte_leb128 = -64;
if (!VALIDATE(block_type >= min_1_byte_leb128)) {
- DecodeError<validate>(decoder, pc, "invalid block type %" PRId64,
- block_type);
+ DecodeError<ValidationTag>(decoder, pc, "invalid block type %" PRId64,
+ block_type);
return;
}
if (static_cast<ValueTypeCode>(block_type & 0x7F) == kVoidCode) return;
- type = value_type_reader::read_value_type<validate>(decoder, pc, &length,
- module, enabled);
+ type = value_type_reader::read_value_type<ValidationTag>(
+ decoder, pc, &length, enabled);
} else {
type = kWasmBottom;
sig_index = static_cast<uint32_t>(block_type);
@@ -587,53 +609,58 @@ struct BlockTypeImmediate {
}
};
-template <Decoder::ValidateFlag validate>
struct BranchDepthImmediate {
uint32_t depth;
uint32_t length;
- BranchDepthImmediate(Decoder* decoder, const byte* pc) {
- depth = decoder->read_u32v<validate>(pc, &length, "branch depth");
+
+ template <typename ValidationTag>
+ BranchDepthImmediate(Decoder* decoder, const byte* pc, ValidationTag = {}) {
+ depth = decoder->read_u32v<ValidationTag>(pc, &length, "branch depth");
}
};
-template <Decoder::ValidateFlag validate>
struct FieldImmediate {
- StructIndexImmediate<validate> struct_imm;
- IndexImmediate<validate> field_imm;
+ StructIndexImmediate struct_imm;
+ IndexImmediate field_imm;
uint32_t length;
- FieldImmediate(Decoder* decoder, const byte* pc)
- : struct_imm(decoder, pc),
- field_imm(decoder, pc + struct_imm.length, "field index"),
+
+ template <typename ValidationTag>
+ FieldImmediate(Decoder* decoder, const byte* pc, ValidationTag validate = {})
+ : struct_imm(decoder, pc, validate),
+ field_imm(decoder, pc + struct_imm.length, "field index", validate),
length(struct_imm.length + field_imm.length) {}
};
-template <Decoder::ValidateFlag validate>
struct CallIndirectImmediate {
- IndexImmediate<validate> sig_imm;
- IndexImmediate<validate> table_imm;
+ IndexImmediate sig_imm;
+ IndexImmediate table_imm;
uint32_t length;
const FunctionSig* sig = nullptr;
- CallIndirectImmediate(Decoder* decoder, const byte* pc)
- : sig_imm(decoder, pc, "singature index"),
- table_imm(decoder, pc + sig_imm.length, "table index"),
+
+ template <typename ValidationTag>
+ CallIndirectImmediate(Decoder* decoder, const byte* pc,
+ ValidationTag validate = {})
+ : sig_imm(decoder, pc, "signature index", validate),
+ table_imm(decoder, pc + sig_imm.length, "table index", validate),
length(sig_imm.length + table_imm.length) {}
};
-template <Decoder::ValidateFlag validate>
struct BranchTableImmediate {
uint32_t table_count;
const byte* start;
const byte* table;
- BranchTableImmediate(Decoder* decoder, const byte* pc) {
+
+ template <typename ValidationTag>
+ BranchTableImmediate(Decoder* decoder, const byte* pc, ValidationTag = {}) {
start = pc;
uint32_t len = 0;
- table_count = decoder->read_u32v<validate>(pc, &len, "table count");
+ table_count = decoder->read_u32v<ValidationTag>(pc, &len, "table count");
table = pc + len;
}
};
// A helper to iterate over a branch table.
-template <Decoder::ValidateFlag validate>
+template <typename ValidationTag>
class BranchTableIterator {
public:
uint32_t cur_index() { return index_; }
@@ -643,7 +670,7 @@ class BranchTableIterator {
index_++;
uint32_t length;
uint32_t result =
- decoder_->read_u32v<validate>(pc_, &length, "branch table entry");
+ decoder_->read_u32v<ValidationTag>(pc_, &length, "branch table entry");
pc_ += length;
return result;
}
@@ -655,8 +682,7 @@ class BranchTableIterator {
}
const byte* pc() { return pc_; }
- BranchTableIterator(Decoder* decoder,
- const BranchTableImmediate<validate>& imm)
+ BranchTableIterator(Decoder* decoder, const BranchTableImmediate& imm)
: decoder_(decoder),
start_(imm.start),
pc_(imm.table),
@@ -670,136 +696,141 @@ class BranchTableIterator {
const uint32_t table_count_; // the count of entries, not including default.
};
-template <Decoder::ValidateFlag validate,
- DecodingMode decoding_mode = kFunctionBody>
-class WasmDecoder;
-
-template <Decoder::ValidateFlag validate>
struct MemoryAccessImmediate {
uint32_t alignment;
uint64_t offset;
uint32_t length = 0;
+
+ template <typename ValidationTag>
MemoryAccessImmediate(Decoder* decoder, const byte* pc,
- uint32_t max_alignment, bool is_memory64) {
+ uint32_t max_alignment, bool is_memory64,
+ ValidationTag = {}) {
uint32_t alignment_length;
alignment =
- decoder->read_u32v<validate>(pc, &alignment_length, "alignment");
+ decoder->read_u32v<ValidationTag>(pc, &alignment_length, "alignment");
if (!VALIDATE(alignment <= max_alignment)) {
- DecodeError<validate>(
+ DecodeError<ValidationTag>(
decoder, pc,
"invalid alignment; expected maximum alignment is %u, "
"actual alignment is %u",
max_alignment, alignment);
}
uint32_t offset_length;
- offset = is_memory64 ? decoder->read_u64v<validate>(
+ offset = is_memory64 ? decoder->read_u64v<ValidationTag>(
pc + alignment_length, &offset_length, "offset")
- : decoder->read_u32v<validate>(
+ : decoder->read_u32v<ValidationTag>(
pc + alignment_length, &offset_length, "offset");
length = alignment_length + offset_length;
}
};
// Immediate for SIMD lane operations.
-template <Decoder::ValidateFlag validate>
struct SimdLaneImmediate {
uint8_t lane;
uint32_t length = 1;
- SimdLaneImmediate(Decoder* decoder, const byte* pc) {
- lane = decoder->read_u8<validate>(pc, "lane");
+ template <typename ValidationTag>
+ SimdLaneImmediate(Decoder* decoder, const byte* pc, ValidationTag = {}) {
+ lane = decoder->read_u8<ValidationTag>(pc, "lane");
}
};
// Immediate for SIMD S8x16 shuffle operations.
-template <Decoder::ValidateFlag validate>
struct Simd128Immediate {
uint8_t value[kSimd128Size] = {0};
- Simd128Immediate(Decoder* decoder, const byte* pc) {
+ template <typename ValidationTag>
+ Simd128Immediate(Decoder* decoder, const byte* pc, ValidationTag = {}) {
for (uint32_t i = 0; i < kSimd128Size; ++i) {
- value[i] = decoder->read_u8<validate>(pc + i, "value");
+ value[i] = decoder->read_u8<ValidationTag>(pc + i, "value");
}
}
};
-template <Decoder::ValidateFlag validate>
struct MemoryInitImmediate {
- IndexImmediate<validate> data_segment;
- MemoryIndexImmediate<validate> memory;
+ IndexImmediate data_segment;
+ MemoryIndexImmediate memory;
uint32_t length;
- MemoryInitImmediate(Decoder* decoder, const byte* pc)
- : data_segment(decoder, pc, "data segment index"),
- memory(decoder, pc + data_segment.length),
+ template <typename ValidationTag>
+ MemoryInitImmediate(Decoder* decoder, const byte* pc,
+ ValidationTag validate = {})
+ : data_segment(decoder, pc, "data segment index", validate),
+ memory(decoder, pc + data_segment.length, validate),
length(data_segment.length + memory.length) {}
};
-template <Decoder::ValidateFlag validate>
struct MemoryCopyImmediate {
- MemoryIndexImmediate<validate> memory_src;
- MemoryIndexImmediate<validate> memory_dst;
+ MemoryIndexImmediate memory_src;
+ MemoryIndexImmediate memory_dst;
uint32_t length;
- MemoryCopyImmediate(Decoder* decoder, const byte* pc)
- : memory_src(decoder, pc),
- memory_dst(decoder, pc + memory_src.length),
+ template <typename ValidationTag>
+ MemoryCopyImmediate(Decoder* decoder, const byte* pc,
+ ValidationTag validate = {})
+ : memory_src(decoder, pc, validate),
+ memory_dst(decoder, pc + memory_src.length, validate),
length(memory_src.length + memory_dst.length) {}
};
-template <Decoder::ValidateFlag validate>
struct TableInitImmediate {
- IndexImmediate<validate> element_segment;
- IndexImmediate<validate> table;
+ IndexImmediate element_segment;
+ IndexImmediate table;
uint32_t length;
- TableInitImmediate(Decoder* decoder, const byte* pc)
- : element_segment(decoder, pc, "element segment index"),
- table(decoder, pc + element_segment.length, "table index"),
+ template <typename ValidationTag>
+ TableInitImmediate(Decoder* decoder, const byte* pc,
+ ValidationTag validate = {})
+ : element_segment(decoder, pc, "element segment index", validate),
+ table(decoder, pc + element_segment.length, "table index", validate),
length(element_segment.length + table.length) {}
};
-template <Decoder::ValidateFlag validate>
struct TableCopyImmediate {
- IndexImmediate<validate> table_dst;
- IndexImmediate<validate> table_src;
+ IndexImmediate table_dst;
+ IndexImmediate table_src;
uint32_t length;
- TableCopyImmediate(Decoder* decoder, const byte* pc)
- : table_dst(decoder, pc, "table index"),
- table_src(decoder, pc + table_dst.length, "table index"),
+ template <typename ValidationTag>
+ TableCopyImmediate(Decoder* decoder, const byte* pc,
+ ValidationTag validate = {})
+ : table_dst(decoder, pc, "table index", validate),
+ table_src(decoder, pc + table_dst.length, "table index", validate),
length(table_src.length + table_dst.length) {}
};
-template <Decoder::ValidateFlag validate>
struct HeapTypeImmediate {
uint32_t length = 1;
HeapType type;
+
+ template <typename ValidationTag>
HeapTypeImmediate(const WasmFeatures& enabled, Decoder* decoder,
- const byte* pc, const WasmModule* module)
- : type(value_type_reader::read_heap_type<validate>(decoder, pc, &length,
- module, enabled)) {}
+ const byte* pc, ValidationTag = {})
+ : type(value_type_reader::read_heap_type<ValidationTag>(
+ decoder, pc, &length, enabled)) {}
};
-template <Decoder::ValidateFlag validate>
struct StringConstImmediate {
uint32_t index;
uint32_t length;
- StringConstImmediate(Decoder* decoder, const byte* pc) {
- index =
- decoder->read_u32v<validate>(pc, &length, "stringref literal index");
+
+ template <typename ValidationTag>
+ StringConstImmediate(Decoder* decoder, const byte* pc, ValidationTag = {}) {
+ index = decoder->read_u32v<ValidationTag>(pc, &length,
+ "stringref literal index");
}
};
-template <Decoder::ValidateFlag validate>
+template <bool full_validation>
struct PcForErrors {
+ static_assert(full_validation == false);
explicit PcForErrors(const byte* /* pc */) {}
const byte* pc() const { return nullptr; }
};
template <>
-struct PcForErrors<Decoder::kFullValidation> {
+struct PcForErrors<true> {
const byte* pc_for_errors = nullptr;
explicit PcForErrors(const byte* pc) : pc_for_errors(pc) {}
@@ -808,12 +839,12 @@ struct PcForErrors<Decoder::kFullValidation> {
};
// An entry on the value stack.
-template <Decoder::ValidateFlag validate>
-struct ValueBase : public PcForErrors<validate> {
+template <typename ValidationTag>
+struct ValueBase : public PcForErrors<ValidationTag::full_validation> {
ValueType type = kWasmVoid;
ValueBase(const byte* pc, ValueType type)
- : PcForErrors<validate>(pc), type(type) {}
+ : PcForErrors<ValidationTag::full_validation>(pc), type(type) {}
};
template <typename Value>
@@ -856,8 +887,8 @@ enum Reachability : uint8_t {
};
// An entry on the control stack (i.e. if, block, loop, or try).
-template <typename Value, Decoder::ValidateFlag validate>
-struct ControlBase : public PcForErrors<validate> {
+template <typename Value, typename ValidationTag>
+struct ControlBase : public PcForErrors<ValidationTag::full_validation> {
ControlKind kind = kControlBlock;
Reachability reachability = kReachable;
uint32_t stack_depth = 0; // Stack height at the beginning of the construct.
@@ -874,7 +905,7 @@ struct ControlBase : public PcForErrors<validate> {
ControlBase(ControlKind kind, uint32_t stack_depth, uint32_t init_stack_depth,
const uint8_t* pc, Reachability reachability)
- : PcForErrors<validate>(pc),
+ : PcForErrors<ValidationTag::full_validation>(pc),
kind(kind),
reachability(reachability),
stack_depth(stack_depth),
@@ -933,29 +964,29 @@ struct ControlBase : public PcForErrors<validate> {
F(I64Const, Value* result, int64_t value) \
F(F32Const, Value* result, float value) \
F(F64Const, Value* result, double value) \
- F(S128Const, Simd128Immediate<validate>& imm, Value* result) \
- F(GlobalGet, Value* result, const GlobalIndexImmediate<validate>& imm) \
+ F(S128Const, Simd128Immediate& imm, Value* result) \
+ F(GlobalGet, Value* result, const GlobalIndexImmediate& imm) \
F(DoReturn, uint32_t drop_values) \
F(BinOp, WasmOpcode opcode, const Value& lhs, const Value& rhs, \
Value* result) \
F(RefNull, ValueType type, Value* result) \
F(RefFunc, uint32_t function_index, Value* result) \
- F(StructNew, const StructIndexImmediate<validate>& imm, const Value& rtt, \
+ F(StructNew, const StructIndexImmediate& imm, const Value& rtt, \
const Value args[], Value* result) \
- F(StructNewDefault, const StructIndexImmediate<validate>& imm, \
- const Value& rtt, Value* result) \
- F(ArrayNew, const ArrayIndexImmediate<validate>& imm, const Value& length, \
+ F(StructNewDefault, const StructIndexImmediate& imm, const Value& rtt, \
+ Value* result) \
+ F(ArrayNew, const ArrayIndexImmediate& imm, const Value& length, \
const Value& initial_value, const Value& rtt, Value* result) \
- F(ArrayNewDefault, const ArrayIndexImmediate<validate>& imm, \
- const Value& length, const Value& rtt, Value* result) \
- F(ArrayNewFixed, const ArrayIndexImmediate<validate>& imm, \
+ F(ArrayNewDefault, const ArrayIndexImmediate& imm, const Value& length, \
+ const Value& rtt, Value* result) \
+ F(ArrayNewFixed, const ArrayIndexImmediate& imm, \
const base::Vector<Value>& elements, const Value& rtt, Value* result) \
- F(ArrayNewSegment, const ArrayIndexImmediate<validate>& array_imm, \
- const IndexImmediate<validate>& data_segment, const Value& offset, \
+ F(ArrayNewSegment, const ArrayIndexImmediate& array_imm, \
+ const IndexImmediate& data_segment, const Value& offset, \
const Value& length, const Value& rtt, Value* result) \
F(I31New, const Value& input, Value* result) \
F(RttCanon, uint32_t type_index, Value* result) \
- F(StringConst, const StringConstImmediate<validate>& imm, Value* result)
+ F(StringConst, const StringConstImmediate& imm, Value* result)
#define INTERFACE_NON_CONSTANT_FUNCTIONS(F) /* force 80 columns */ \
/* Control: */ \
@@ -969,15 +1000,13 @@ struct ControlBase : public PcForErrors<validate> {
F(UnOp, WasmOpcode opcode, const Value& value, Value* result) \
F(RefAsNonNull, const Value& arg, Value* result) \
F(Drop) \
- F(LocalGet, Value* result, const IndexImmediate<validate>& imm) \
- F(LocalSet, const Value& value, const IndexImmediate<validate>& imm) \
- F(LocalTee, const Value& value, Value* result, \
- const IndexImmediate<validate>& imm) \
- F(GlobalSet, const Value& value, const GlobalIndexImmediate<validate>& imm) \
- F(TableGet, const Value& index, Value* result, \
- const IndexImmediate<validate>& imm) \
+ F(LocalGet, Value* result, const IndexImmediate& imm) \
+ F(LocalSet, const Value& value, const IndexImmediate& imm) \
+ F(LocalTee, const Value& value, Value* result, const IndexImmediate& imm) \
+ F(GlobalSet, const Value& value, const GlobalIndexImmediate& imm) \
+ F(TableGet, const Value& index, Value* result, const IndexImmediate& imm) \
F(TableSet, const Value& index, const Value& value, \
- const IndexImmediate<validate>& imm) \
+ const IndexImmediate& imm) \
F(Trap, TrapReason reason) \
F(NopForTestingUnsupportedInLiftoff) \
F(Forward, const Value& from, Value* to) \
@@ -985,82 +1014,73 @@ struct ControlBase : public PcForErrors<validate> {
Value* result) \
F(BrOrRet, uint32_t depth, uint32_t drop_values) \
F(BrIf, const Value& cond, uint32_t depth) \
- F(BrTable, const BranchTableImmediate<validate>& imm, const Value& key) \
+ F(BrTable, const BranchTableImmediate& imm, const Value& key) \
F(Else, Control* if_block) \
- F(LoadMem, LoadType type, const MemoryAccessImmediate<validate>& imm, \
+ F(LoadMem, LoadType type, const MemoryAccessImmediate& imm, \
const Value& index, Value* result) \
F(LoadTransform, LoadType type, LoadTransformationKind transform, \
- const MemoryAccessImmediate<validate>& imm, const Value& index, \
- Value* result) \
+ const MemoryAccessImmediate& imm, const Value& index, Value* result) \
F(LoadLane, LoadType type, const Value& value, const Value& index, \
- const MemoryAccessImmediate<validate>& imm, const uint8_t laneidx, \
- Value* result) \
- F(StoreMem, StoreType type, const MemoryAccessImmediate<validate>& imm, \
+ const MemoryAccessImmediate& imm, const uint8_t laneidx, Value* result) \
+ F(StoreMem, StoreType type, const MemoryAccessImmediate& imm, \
const Value& index, const Value& value) \
- F(StoreLane, StoreType type, const MemoryAccessImmediate<validate>& imm, \
+ F(StoreLane, StoreType type, const MemoryAccessImmediate& imm, \
const Value& index, const Value& value, const uint8_t laneidx) \
F(CurrentMemoryPages, Value* result) \
F(MemoryGrow, const Value& value, Value* result) \
- F(CallDirect, const CallFunctionImmediate<validate>& imm, \
- const Value args[], Value returns[]) \
- F(CallIndirect, const Value& index, \
- const CallIndirectImmediate<validate>& imm, const Value args[], \
+ F(CallDirect, const CallFunctionImmediate& imm, const Value args[], \
Value returns[]) \
+ F(CallIndirect, const Value& index, const CallIndirectImmediate& imm, \
+ const Value args[], Value returns[]) \
F(CallRef, const Value& func_ref, const FunctionSig* sig, \
uint32_t sig_index, const Value args[], const Value returns[]) \
F(ReturnCallRef, const Value& func_ref, const FunctionSig* sig, \
uint32_t sig_index, const Value args[]) \
- F(ReturnCall, const CallFunctionImmediate<validate>& imm, \
+ F(ReturnCall, const CallFunctionImmediate& imm, const Value args[]) \
+ F(ReturnCallIndirect, const Value& index, const CallIndirectImmediate& imm, \
const Value args[]) \
- F(ReturnCallIndirect, const Value& index, \
- const CallIndirectImmediate<validate>& imm, const Value args[]) \
F(BrOnNull, const Value& ref_object, uint32_t depth, \
bool pass_null_along_branch, Value* result_on_fallthrough) \
F(BrOnNonNull, const Value& ref_object, Value* result, uint32_t depth, \
bool drop_null_on_fallthrough) \
F(SimdOp, WasmOpcode opcode, base::Vector<Value> args, Value* result) \
- F(SimdLaneOp, WasmOpcode opcode, const SimdLaneImmediate<validate>& imm, \
+ F(SimdLaneOp, WasmOpcode opcode, const SimdLaneImmediate& imm, \
const base::Vector<Value> inputs, Value* result) \
- F(S128Const, const Simd128Immediate<validate>& imm, Value* result) \
- F(Simd8x16ShuffleOp, const Simd128Immediate<validate>& imm, \
- const Value& input0, const Value& input1, Value* result) \
- F(Throw, const TagIndexImmediate<validate>& imm, \
- const base::Vector<Value>& args) \
+ F(S128Const, const Simd128Immediate& imm, Value* result) \
+ F(Simd8x16ShuffleOp, const Simd128Immediate& imm, const Value& input0, \
+ const Value& input1, Value* result) \
+ F(Throw, const TagIndexImmediate& imm, const base::Vector<Value>& args) \
F(Rethrow, Control* block) \
- F(CatchException, const TagIndexImmediate<validate>& imm, Control* block, \
+ F(CatchException, const TagIndexImmediate& imm, Control* block, \
base::Vector<Value> caught_values) \
F(Delegate, uint32_t depth, Control* block) \
F(CatchAll, Control* block) \
F(AtomicOp, WasmOpcode opcode, base::Vector<Value> args, \
- const MemoryAccessImmediate<validate>& imm, Value* result) \
+ const MemoryAccessImmediate& imm, Value* result) \
F(AtomicFence) \
- F(MemoryInit, const MemoryInitImmediate<validate>& imm, const Value& dst, \
+ F(MemoryInit, const MemoryInitImmediate& imm, const Value& dst, \
const Value& src, const Value& size) \
- F(DataDrop, const IndexImmediate<validate>& imm) \
- F(MemoryCopy, const MemoryCopyImmediate<validate>& imm, const Value& dst, \
+ F(DataDrop, const IndexImmediate& imm) \
+ F(MemoryCopy, const MemoryCopyImmediate& imm, const Value& dst, \
const Value& src, const Value& size) \
- F(MemoryFill, const MemoryIndexImmediate<validate>& imm, const Value& dst, \
+ F(MemoryFill, const MemoryIndexImmediate& imm, const Value& dst, \
const Value& value, const Value& size) \
- F(TableInit, const TableInitImmediate<validate>& imm, \
- base::Vector<Value> args) \
- F(ElemDrop, const IndexImmediate<validate>& imm) \
- F(TableCopy, const TableCopyImmediate<validate>& imm, \
- base::Vector<Value> args) \
- F(TableGrow, const IndexImmediate<validate>& imm, const Value& value, \
+ F(TableInit, const TableInitImmediate& imm, base::Vector<Value> args) \
+ F(ElemDrop, const IndexImmediate& imm) \
+ F(TableCopy, const TableCopyImmediate& imm, base::Vector<Value> args) \
+ F(TableGrow, const IndexImmediate& imm, const Value& value, \
const Value& delta, Value* result) \
- F(TableSize, const IndexImmediate<validate>& imm, Value* result) \
- F(TableFill, const IndexImmediate<validate>& imm, const Value& start, \
+ F(TableSize, const IndexImmediate& imm, Value* result) \
+ F(TableFill, const IndexImmediate& imm, const Value& start, \
const Value& value, const Value& count) \
- F(StructGet, const Value& struct_object, \
- const FieldImmediate<validate>& field, bool is_signed, Value* result) \
- F(StructSet, const Value& struct_object, \
- const FieldImmediate<validate>& field, const Value& field_value) \
- F(ArrayGet, const Value& array_obj, \
- const ArrayIndexImmediate<validate>& imm, const Value& index, \
+ F(StructGet, const Value& struct_object, const FieldImmediate& field, \
bool is_signed, Value* result) \
- F(ArraySet, const Value& array_obj, \
- const ArrayIndexImmediate<validate>& imm, const Value& index, \
- const Value& value) \
+ F(StructSet, const Value& struct_object, const FieldImmediate& field, \
+ const Value& field_value) \
+ F(ArrayGet, const Value& array_obj, const ArrayIndexImmediate& imm, \
+ const Value& index, bool is_signed, Value* result) \
+ F(ArraySet, const Value& array_obj, const ArrayIndexImmediate& imm, \
+ const Value& index, const Value& value) \
F(ArrayLen, const Value& array_obj, Value* result) \
F(ArrayCopy, const Value& src, const Value& src_index, const Value& dst, \
const Value& dst_index, const Value& length) \
@@ -1070,47 +1090,52 @@ struct ControlBase : public PcForErrors<validate> {
bool null_succeeds) \
F(RefTestAbstract, const Value& obj, HeapType type, Value* result, \
bool null_succeeds) \
- F(RefCast, const Value& obj, const Value& rtt, Value* result) \
+ F(RefCast, const Value& obj, const Value& rtt, Value* result, \
+ bool null_succeeds) \
+ F(RefCastAbstract, const Value& obj, HeapType type, Value* result, \
+ bool null_succeeds) \
F(AssertNull, const Value& obj, Value* result) \
+ F(AssertNotNull, const Value& obj, Value* result) \
F(BrOnCast, const Value& obj, const Value& rtt, Value* result_on_branch, \
uint32_t depth) \
F(BrOnCastFail, const Value& obj, const Value& rtt, \
Value* result_on_fallthrough, uint32_t depth) \
- F(RefIsData, const Value& object, Value* result) \
+ F(RefIsStruct, const Value& object, Value* result) \
F(RefIsEq, const Value& object, Value* result) \
F(RefIsI31, const Value& object, Value* result) \
F(RefIsArray, const Value& object, Value* result) \
- F(RefAsData, const Value& object, Value* result) \
+ F(RefAsStruct, const Value& object, Value* result) \
F(RefAsI31, const Value& object, Value* result) \
F(RefAsArray, const Value& object, Value* result) \
- F(BrOnData, const Value& object, Value* value_on_branch, uint32_t br_depth) \
+ F(BrOnStruct, const Value& object, Value* value_on_branch, \
+ uint32_t br_depth) \
F(BrOnI31, const Value& object, Value* value_on_branch, uint32_t br_depth) \
F(BrOnArray, const Value& object, Value* value_on_branch, uint32_t br_depth) \
- F(BrOnNonData, const Value& object, Value* value_on_fallthrough, \
+ F(BrOnNonStruct, const Value& object, Value* value_on_fallthrough, \
uint32_t br_depth) \
F(BrOnNonI31, const Value& object, Value* value_on_fallthrough, \
uint32_t br_depth) \
F(BrOnNonArray, const Value& object, Value* value_on_fallthrough, \
uint32_t br_depth) \
- F(StringNewWtf8, const MemoryIndexImmediate<validate>& memory, \
+ F(StringNewWtf8, const MemoryIndexImmediate& memory, \
const unibrow::Utf8Variant variant, const Value& offset, \
const Value& size, Value* result) \
F(StringNewWtf8Array, const unibrow::Utf8Variant variant, \
const Value& array, const Value& start, const Value& end, Value* result) \
- F(StringNewWtf16, const MemoryIndexImmediate<validate>& memory, \
- const Value& offset, const Value& size, Value* result) \
+ F(StringNewWtf16, const MemoryIndexImmediate& memory, const Value& offset, \
+ const Value& size, Value* result) \
F(StringNewWtf16Array, const Value& array, const Value& start, \
const Value& end, Value* result) \
F(StringMeasureWtf8, const unibrow::Utf8Variant variant, const Value& str, \
Value* result) \
F(StringMeasureWtf16, const Value& str, Value* result) \
- F(StringEncodeWtf8, const MemoryIndexImmediate<validate>& memory, \
+ F(StringEncodeWtf8, const MemoryIndexImmediate& memory, \
const unibrow::Utf8Variant variant, const Value& str, \
const Value& address, Value* result) \
F(StringEncodeWtf8Array, const unibrow::Utf8Variant variant, \
const Value& str, const Value& array, const Value& start, Value* result) \
- F(StringEncodeWtf16, const MemoryIndexImmediate<validate>& memory, \
- const Value& str, const Value& address, Value* result) \
+ F(StringEncodeWtf16, const MemoryIndexImmediate& memory, const Value& str, \
+ const Value& address, Value* result) \
F(StringEncodeWtf16Array, const Value& str, const Value& array, \
const Value& start, Value* result) \
F(StringConcat, const Value& head, const Value& tail, Value* result) \
@@ -1119,7 +1144,7 @@ struct ControlBase : public PcForErrors<validate> {
F(StringAsWtf8, const Value& str, Value* result) \
F(StringViewWtf8Advance, const Value& view, const Value& pos, \
const Value& bytes, Value* result) \
- F(StringViewWtf8Encode, const MemoryIndexImmediate<validate>& memory, \
+ F(StringViewWtf8Encode, const MemoryIndexImmediate& memory, \
const unibrow::Utf8Variant variant, const Value& view, const Value& addr, \
const Value& pos, const Value& bytes, Value* next_pos, \
Value* bytes_written) \
@@ -1128,7 +1153,7 @@ struct ControlBase : public PcForErrors<validate> {
F(StringAsWtf16, const Value& str, Value* result) \
F(StringViewWtf16GetCodeUnit, const Value& view, const Value& pos, \
Value* result) \
- F(StringViewWtf16Encode, const MemoryIndexImmediate<validate>& memory, \
+ F(StringViewWtf16Encode, const MemoryIndexImmediate& memory, \
const Value& view, const Value& addr, const Value& pos, \
const Value& codeunits, Value* result) \
F(StringViewWtf16Slice, const Value& view, const Value& start, \
@@ -1146,12 +1171,119 @@ struct ControlBase : public PcForErrors<validate> {
// the current instruction trace pointer in the default case
const std::pair<uint32_t, uint32_t> invalid_instruction_trace = {0, 0};
+// A fast vector implementation, without implicit bounds checks (see
+// https://crbug.com/1358853).
+template <typename T>
+class FastZoneVector {
+ public:
+#ifdef DEBUG
+ ~FastZoneVector() {
+ // Check that {Reset} was called on this vector.
+ DCHECK_NULL(begin_);
+ }
+#endif
+
+ void Reset(Zone* zone) {
+ if (begin_ == nullptr) return;
+ if constexpr (!std::is_trivially_destructible_v<T>) {
+ for (T* ptr = begin_; ptr != end_; ++ptr) {
+ ptr->~T();
+ }
+ }
+ zone->DeleteArray(begin_, capacity_end_ - begin_);
+ begin_ = nullptr;
+ end_ = nullptr;
+ capacity_end_ = nullptr;
+ }
+
+ T* begin() const { return begin_; }
+ T* end() const { return end_; }
+
+ T& front() {
+ DCHECK(!empty());
+ return begin_[0];
+ }
+
+ T& back() {
+ DCHECK(!empty());
+ return end_[-1];
+ }
+
+ uint32_t size() const { return static_cast<uint32_t>(end_ - begin_); }
+
+ bool empty() const { return begin_ == end_; }
+
+ T& operator[](uint32_t index) {
+ DCHECK_GE(size(), index);
+ return begin_[index];
+ }
+
+ void shrink_to(uint32_t new_size) {
+ DCHECK_GE(size(), new_size);
+ end_ = begin_ + new_size;
+ }
+
+ void pop(uint32_t num = 1) {
+ DCHECK_GE(size(), num);
+ for (T* new_end = end_ - num; end_ != new_end;) {
+ --end_;
+ end_->~T();
+ }
+ }
+
+ void push(T value) {
+ DCHECK_GT(capacity_end_, end_);
+ *end_ = std::move(value);
+ ++end_;
+ }
+
+ template <typename... Args>
+ void emplace_back(Args&&... args) {
+ DCHECK_GT(capacity_end_, end_);
+ new (end_) T{std::forward<Args>(args)...};
+ ++end_;
+ }
+
+ V8_INLINE void EnsureMoreCapacity(int slots_needed, Zone* zone) {
+ if (V8_LIKELY(capacity_end_ - end_ >= slots_needed)) return;
+ Grow(slots_needed, zone);
+ }
+
+ private:
+ V8_NOINLINE void Grow(int slots_needed, Zone* zone) {
+ size_t new_capacity = std::max(
+ size_t{8}, base::bits::RoundUpToPowerOfTwo(size() + slots_needed));
+ CHECK_GE(kMaxUInt32, new_capacity);
+ DCHECK_LT(capacity_end_ - begin_, new_capacity);
+ T* new_begin = zone->template NewArray<T>(new_capacity);
+ if (begin_) {
+ for (T *ptr = begin_, *new_ptr = new_begin; ptr != end_;
+ ++ptr, ++new_ptr) {
+ new (new_ptr) T{std::move(*ptr)};
+ ptr->~T();
+ }
+ zone->DeleteArray(begin_, capacity_end_ - begin_);
+ }
+ end_ = new_begin + (end_ - begin_);
+ begin_ = new_begin;
+ capacity_end_ = new_begin + new_capacity;
+ }
+
+ // The array is zone-allocated inside {EnsureMoreCapacity}.
+ T* begin_ = nullptr;
+ T* end_ = nullptr;
+ T* capacity_end_ = nullptr;
+};
+
// Generic Wasm bytecode decoder with utilities for decoding immediates,
// lengths, etc.
-template <Decoder::ValidateFlag validate, DecodingMode decoding_mode>
+template <typename ValidationTag, DecodingMode decoding_mode = kFunctionBody>
class WasmDecoder : public Decoder {
+ // {full_validation} implies {validate}.
+ static_assert(!ValidationTag::full_validation || ValidationTag::validate);
+
public:
- WasmDecoder(Zone* zone, const WasmModule* module, const WasmFeatures& enabled,
+ WasmDecoder(Zone* zone, const WasmModule* module, WasmFeatures enabled,
WasmFeatures* detected, const FunctionSig* sig, const byte* start,
const byte* end, uint32_t buffer_offset = 0)
: Decoder(start, end, buffer_offset),
@@ -1200,13 +1332,22 @@ class WasmDecoder : public Decoder {
*total_length = 0;
// Decode local declarations, if any.
- uint32_t entries = read_u32v<validate>(pc, &length, "local decls count");
+ uint32_t entries =
+ read_u32v<ValidationTag>(pc, &length, "local decls count");
if (!VALIDATE(ok())) {
return DecodeError(pc + *total_length, "invalid local decls count");
}
*total_length += length;
TRACE("local decls count: %u\n", entries);
+ // Do an early validity check, to avoid allocating too much memory below.
+ // Every entry needs at least two bytes (count plus type); if that many are
+ // not available any more, flag that as an error.
+ if (available_bytes() / 2 < entries) {
+ return DecodeError(
+ pc, "local decls count bigger than remaining function size");
+ }
+
struct DecodedLocalEntry {
uint32_t count;
ValueType type;
@@ -1219,7 +1360,7 @@ class WasmDecoder : public Decoder {
}
uint32_t count =
- read_u32v<validate>(pc + *total_length, &length, "local count");
+ read_u32v<ValidationTag>(pc + *total_length, &length, "local count");
if (!VALIDATE(ok())) {
return DecodeError(pc + *total_length, "invalid local count");
}
@@ -1229,8 +1370,9 @@ class WasmDecoder : public Decoder {
}
*total_length += length;
- ValueType type = value_type_reader::read_value_type<validate>(
- this, pc + *total_length, &length, this->module_, enabled_);
+ ValueType type = value_type_reader::read_value_type<ValidationTag>(
+ this, pc + *total_length, &length, enabled_);
+ ValidateValueType(pc + *total_length, type);
if (!VALIDATE(ok())) return;
*total_length += length;
@@ -1259,10 +1401,10 @@ class WasmDecoder : public Decoder {
}
// Shorthand that forwards to the {DecodeError} functions above, passing our
- // {validate} flag.
+ // {ValidationTag}.
template <typename... Args>
void DecodeError(Args... args) {
- wasm::DecodeError<validate>(this, std::forward<Args>(args)...);
+ wasm::DecodeError<ValidationTag>(this, std::forward<Args>(args)...);
}
// Returns a BitVector of length {locals_count + 1} representing the set of
@@ -1290,7 +1432,7 @@ class WasmDecoder : public Decoder {
break;
case kExprLocalSet:
case kExprLocalTee: {
- IndexImmediate<validate> imm(decoder, pc + 1, "local index");
+ IndexImmediate imm(decoder, pc + 1, "local index", validate);
// Unverified code might have an out-of-bounds index.
if (imm.index < locals_count) assigned->Add(imm.index);
break;
@@ -1315,7 +1457,7 @@ class WasmDecoder : public Decoder {
return VALIDATE(decoder->ok()) ? assigned : nullptr;
}
- bool Validate(const byte* pc, TagIndexImmediate<validate>& imm) {
+ bool Validate(const byte* pc, TagIndexImmediate& imm) {
if (!VALIDATE(imm.index < module_->tags.size())) {
DecodeError(pc, "Invalid tag index: %u", imm.index);
return false;
@@ -1324,7 +1466,7 @@ class WasmDecoder : public Decoder {
return true;
}
- bool Validate(const byte* pc, GlobalIndexImmediate<validate>& imm) {
+ bool Validate(const byte* pc, GlobalIndexImmediate& imm) {
// We compare with the current size of the globals vector. This is important
// if we are decoding a constant expression in the global section.
if (!VALIDATE(imm.index < module_->globals.size())) {
@@ -1350,7 +1492,7 @@ class WasmDecoder : public Decoder {
return true;
}
- bool Validate(const byte* pc, SigIndexImmediate<validate>& imm) {
+ bool Validate(const byte* pc, SigIndexImmediate& imm) {
if (!VALIDATE(module_->has_signature(imm.index))) {
DecodeError(pc, "invalid signature index: %u", imm.index);
return false;
@@ -1359,7 +1501,7 @@ class WasmDecoder : public Decoder {
return true;
}
- bool Validate(const byte* pc, StructIndexImmediate<validate>& imm) {
+ bool Validate(const byte* pc, StructIndexImmediate& imm) {
if (!VALIDATE(module_->has_struct(imm.index))) {
DecodeError(pc, "invalid struct index: %u", imm.index);
return false;
@@ -1368,7 +1510,7 @@ class WasmDecoder : public Decoder {
return true;
}
- bool Validate(const byte* pc, FieldImmediate<validate>& imm) {
+ bool Validate(const byte* pc, FieldImmediate& imm) {
if (!Validate(pc, imm.struct_imm)) return false;
if (!VALIDATE(imm.field_imm.index <
imm.struct_imm.struct_type->field_count())) {
@@ -1379,7 +1521,7 @@ class WasmDecoder : public Decoder {
return true;
}
- bool Validate(const byte* pc, ArrayIndexImmediate<validate>& imm) {
+ bool Validate(const byte* pc, ArrayIndexImmediate& imm) {
if (!VALIDATE(module_->has_array(imm.index))) {
DecodeError(pc, "invalid array index: %u", imm.index);
return false;
@@ -1397,7 +1539,7 @@ class WasmDecoder : public Decoder {
return true;
}
- bool Validate(const byte* pc, CallFunctionImmediate<validate>& imm) {
+ bool Validate(const byte* pc, CallFunctionImmediate& imm) {
if (!VALIDATE(imm.index < module_->functions.size())) {
DecodeError(pc, "function index #%u is out of bounds", imm.index);
return false;
@@ -1406,7 +1548,7 @@ class WasmDecoder : public Decoder {
return true;
}
- bool Validate(const byte* pc, CallIndirectImmediate<validate>& imm) {
+ bool Validate(const byte* pc, CallIndirectImmediate& imm) {
if (!ValidateSignature(pc, imm.sig_imm)) return false;
if (!ValidateTable(pc + imm.sig_imm.length, imm.table_imm)) {
return false;
@@ -1434,7 +1576,7 @@ class WasmDecoder : public Decoder {
return true;
}
- bool Validate(const byte* pc, BranchDepthImmediate<validate>& imm,
+ bool Validate(const byte* pc, BranchDepthImmediate& imm,
size_t control_depth) {
if (!VALIDATE(imm.depth < control_depth)) {
DecodeError(pc, "invalid branch depth: %u", imm.depth);
@@ -1443,8 +1585,7 @@ class WasmDecoder : public Decoder {
return true;
}
- bool Validate(const byte* pc, BranchTableImmediate<validate>& imm,
- size_t block_depth) {
+ bool Validate(const byte* pc, BranchTableImmediate& imm, size_t block_depth) {
if (!VALIDATE(imm.table_count <= kV8MaxWasmFunctionBrTableSize)) {
DecodeError(pc, "invalid table count (> max br_table size): %u",
imm.table_count);
@@ -1453,8 +1594,7 @@ class WasmDecoder : public Decoder {
return checkAvailable(imm.table_count);
}
- bool Validate(const byte* pc, WasmOpcode opcode,
- SimdLaneImmediate<validate>& imm) {
+ bool Validate(const byte* pc, WasmOpcode opcode, SimdLaneImmediate& imm) {
uint8_t num_lanes = 0;
switch (opcode) {
case kExprF64x2ExtractLane:
@@ -1499,7 +1639,7 @@ class WasmDecoder : public Decoder {
}
}
- bool Validate(const byte* pc, Simd128Immediate<validate>& imm) {
+ bool Validate(const byte* pc, Simd128Immediate& imm) {
uint8_t max_lane = 0;
for (uint32_t i = 0; i < kSimd128Size; ++i) {
max_lane = std::max(max_lane, imm.value[i]);
@@ -1512,18 +1652,20 @@ class WasmDecoder : public Decoder {
return true;
}
- bool Validate(const byte* pc, BlockTypeImmediate<validate>& imm) {
- if (imm.type != kWasmBottom) return true;
- if (!VALIDATE(module_->has_signature(imm.sig_index))) {
- DecodeError(pc, "block type index %u is not a signature definition",
- imm.sig_index);
- return false;
+ bool Validate(const byte* pc, BlockTypeImmediate& imm) {
+ if (!ValidateValueType(pc, imm.type)) return false;
+ if (imm.type == kWasmBottom) {
+ if (!VALIDATE(module_->has_signature(imm.sig_index))) {
+ DecodeError(pc, "block type index %u is not a signature definition",
+ imm.sig_index);
+ return false;
+ }
+ imm.sig = module_->signature(imm.sig_index);
}
- imm.sig = module_->signature(imm.sig_index);
return true;
}
- bool Validate(const byte* pc, MemoryIndexImmediate<validate>& imm) {
+ bool Validate(const byte* pc, MemoryIndexImmediate& imm) {
if (!VALIDATE(this->module_->has_memory)) {
this->DecodeError(pc, "memory instruction with no memory");
return false;
@@ -1535,7 +1677,7 @@ class WasmDecoder : public Decoder {
return true;
}
- bool Validate(const byte* pc, MemoryAccessImmediate<validate>& imm) {
+ bool Validate(const byte* pc, MemoryAccessImmediate& imm) {
if (!VALIDATE(this->module_->has_memory)) {
this->DecodeError(pc, "memory instruction with no memory");
return false;
@@ -1543,17 +1685,17 @@ class WasmDecoder : public Decoder {
return true;
}
- bool Validate(const byte* pc, MemoryInitImmediate<validate>& imm) {
+ bool Validate(const byte* pc, MemoryInitImmediate& imm) {
return ValidateDataSegment(pc, imm.data_segment) &&
Validate(pc + imm.data_segment.length, imm.memory);
}
- bool Validate(const byte* pc, MemoryCopyImmediate<validate>& imm) {
+ bool Validate(const byte* pc, MemoryCopyImmediate& imm) {
return Validate(pc, imm.memory_src) &&
Validate(pc + imm.memory_src.length, imm.memory_dst);
}
- bool Validate(const byte* pc, TableInitImmediate<validate>& imm) {
+ bool Validate(const byte* pc, TableInitImmediate& imm) {
if (!ValidateElementSegment(pc, imm.element_segment)) return false;
if (!ValidateTable(pc + imm.element_segment.length, imm.table)) {
return false;
@@ -1569,7 +1711,7 @@ class WasmDecoder : public Decoder {
return true;
}
- bool Validate(const byte* pc, TableCopyImmediate<validate>& imm) {
+ bool Validate(const byte* pc, TableCopyImmediate& imm) {
if (!ValidateTable(pc, imm.table_src)) return false;
if (!ValidateTable(pc + imm.table_src.length, imm.table_dst)) return false;
ValueType src_type = module_->tables[imm.table_src.index].type;
@@ -1582,7 +1724,7 @@ class WasmDecoder : public Decoder {
return true;
}
- bool Validate(const byte* pc, StringConstImmediate<validate>& imm) {
+ bool Validate(const byte* pc, StringConstImmediate& imm) {
if (!VALIDATE(imm.index < module_->stringref_literals.size())) {
DecodeError(pc, "Invalid string literal index: %u", imm.index);
return false;
@@ -1592,7 +1734,7 @@ class WasmDecoder : public Decoder {
// The following Validate* functions all validate an IndexImmediate, albeit
// differently according to context.
- bool ValidateTable(const byte* pc, IndexImmediate<validate>& imm) {
+ bool ValidateTable(const byte* pc, IndexImmediate& imm) {
if (imm.index > 0 || imm.length > 1) {
this->detected_->Add(kFeature_reftypes);
}
@@ -1603,7 +1745,7 @@ class WasmDecoder : public Decoder {
return true;
}
- bool ValidateElementSegment(const byte* pc, IndexImmediate<validate>& imm) {
+ bool ValidateElementSegment(const byte* pc, IndexImmediate& imm) {
if (!VALIDATE(imm.index < module_->elem_segments.size())) {
DecodeError(pc, "invalid element segment index: %u", imm.index);
return false;
@@ -1611,7 +1753,7 @@ class WasmDecoder : public Decoder {
return true;
}
- bool ValidateLocal(const byte* pc, IndexImmediate<validate>& imm) {
+ bool ValidateLocal(const byte* pc, IndexImmediate& imm) {
if (!VALIDATE(imm.index < num_locals())) {
DecodeError(pc, "invalid local index: %u", imm.index);
return false;
@@ -1619,7 +1761,7 @@ class WasmDecoder : public Decoder {
return true;
}
- bool ValidateType(const byte* pc, IndexImmediate<validate>& imm) {
+ bool ValidateType(const byte* pc, IndexImmediate& imm) {
if (!VALIDATE(module_->has_type(imm.index))) {
DecodeError(pc, "invalid type index: %u", imm.index);
return false;
@@ -1627,7 +1769,7 @@ class WasmDecoder : public Decoder {
return true;
}
- bool ValidateSignature(const byte* pc, IndexImmediate<validate>& imm) {
+ bool ValidateSignature(const byte* pc, IndexImmediate& imm) {
if (!VALIDATE(module_->has_signature(imm.index))) {
DecodeError(pc, "invalid signature index: %u", imm.index);
return false;
@@ -1635,7 +1777,7 @@ class WasmDecoder : public Decoder {
return true;
}
- bool ValidateFunction(const byte* pc, IndexImmediate<validate>& imm) {
+ bool ValidateFunction(const byte* pc, IndexImmediate& imm) {
if (!VALIDATE(imm.index < module_->functions.size())) {
DecodeError(pc, "function index #%u is out of bounds", imm.index);
return false;
@@ -1648,7 +1790,7 @@ class WasmDecoder : public Decoder {
return true;
}
- bool ValidateDataSegment(const byte* pc, IndexImmediate<validate>& imm) {
+ bool ValidateDataSegment(const byte* pc, IndexImmediate& imm) {
if (!VALIDATE(imm.index < module_->num_declared_data_segments)) {
DecodeError(pc, "invalid data segment index: %u", imm.index);
return false;
@@ -1656,48 +1798,28 @@ class WasmDecoder : public Decoder {
return true;
}
- class EmptyImmediateObserver {
- public:
- void BlockType(BlockTypeImmediate<validate>& imm) {}
- void HeapType(HeapTypeImmediate<validate>& imm) {}
- void BranchDepth(BranchDepthImmediate<validate>& imm) {}
- void BranchTable(BranchTableImmediate<validate>& imm) {}
- void CallIndirect(CallIndirectImmediate<validate>& imm) {}
- void SelectType(SelectTypeImmediate<validate>& imm) {}
- void MemoryAccess(MemoryAccessImmediate<validate>& imm) {}
- void SimdLane(SimdLaneImmediate<validate>& imm) {}
- void Field(FieldImmediate<validate>& imm) {}
- void Length(IndexImmediate<validate>& imm) {}
-
- void TagIndex(TagIndexImmediate<validate>& imm) {}
- void FunctionIndex(IndexImmediate<validate>& imm) {}
- void TypeIndex(IndexImmediate<validate>& imm) {}
- void LocalIndex(IndexImmediate<validate>& imm) {}
- void GlobalIndex(IndexImmediate<validate>& imm) {}
- void TableIndex(IndexImmediate<validate>& imm) {}
- void MemoryIndex(MemoryIndexImmediate<validate>& imm) {}
- void DataSegmentIndex(IndexImmediate<validate>& imm) {}
- void ElemSegmentIndex(IndexImmediate<validate>& imm) {}
-
- void I32Const(ImmI32Immediate<validate>& imm) {}
- void I64Const(ImmI64Immediate<validate>& imm) {}
- void F32Const(ImmF32Immediate<validate>& imm) {}
- void F64Const(ImmF64Immediate<validate>& imm) {}
- void S128Const(Simd128Immediate<validate>& imm) {}
- void StringConst(StringConstImmediate<validate>& imm) {}
-
- void MemoryInit(MemoryInitImmediate<validate>& imm) {}
- void MemoryCopy(MemoryCopyImmediate<validate>& imm) {}
- void TableInit(TableInitImmediate<validate>& imm) {}
- void TableCopy(TableCopyImmediate<validate>& imm) {}
- void ArrayCopy(IndexImmediate<validate>& dst,
- IndexImmediate<validate>& src) {}
- };
+ bool Validate(const byte* pc, SelectTypeImmediate& imm) {
+ return ValidateValueType(pc, imm.type);
+ }
+
+ bool Validate(const byte* pc, HeapTypeImmediate& imm) {
+ return ValidateHeapType(pc, imm.type);
+ }
+
+ bool ValidateValueType(const byte* pc, ValueType type) {
+ return value_type_reader::ValidateValueType<ValidationTag>(this, pc,
+ module_, type);
+ }
+
+ bool ValidateHeapType(const byte* pc, HeapType type) {
+ return value_type_reader::ValidateHeapType<ValidationTag>(this, pc, module_,
+ type);
+ }
// Returns the length of the opcode under {pc}.
- template <class ImmediateObserver = EmptyImmediateObserver>
+ template <typename... ImmediateObservers>
static uint32_t OpcodeLength(WasmDecoder* decoder, const byte* pc,
- ImmediateObserver* io = nullptr) {
+ ImmediateObservers&... ios) {
WasmOpcode opcode = static_cast<WasmOpcode>(*pc);
// We don't have information about the module here, so we just assume that
// memory64 is enabled when parsing memory access immediates. This is
@@ -1717,9 +1839,8 @@ class WasmDecoder : public Decoder {
case kExprIf:
case kExprLoop:
case kExprBlock: {
- BlockTypeImmediate<validate> imm(WasmFeatures::All(), decoder, pc + 1,
- nullptr);
- if (io) io->BlockType(imm);
+ BlockTypeImmediate imm(WasmFeatures::All(), decoder, pc + 1, validate);
+ (ios.BlockType(imm), ...);
return 1 + imm.length;
}
case kExprRethrow:
@@ -1728,107 +1849,105 @@ class WasmDecoder : public Decoder {
case kExprBrOnNull:
case kExprBrOnNonNull:
case kExprDelegate: {
- BranchDepthImmediate<validate> imm(decoder, pc + 1);
- if (io) io->BranchDepth(imm);
+ BranchDepthImmediate imm(decoder, pc + 1, validate);
+ (ios.BranchDepth(imm), ...);
return 1 + imm.length;
}
case kExprBrTable: {
- BranchTableImmediate<validate> imm(decoder, pc + 1);
- if (io) io->BranchTable(imm);
- BranchTableIterator<validate> iterator(decoder, imm);
+ BranchTableImmediate imm(decoder, pc + 1, validate);
+ (ios.BranchTable(imm), ...);
+ BranchTableIterator<ValidationTag> iterator(decoder, imm);
return 1 + iterator.length();
}
case kExprThrow:
case kExprCatch: {
- TagIndexImmediate<validate> imm(decoder, pc + 1);
- if (io) io->TagIndex(imm);
+ TagIndexImmediate imm(decoder, pc + 1, validate);
+ (ios.TagIndex(imm), ...);
return 1 + imm.length;
}
/********** Misc opcodes **********/
case kExprCallFunction:
case kExprReturnCall: {
- CallFunctionImmediate<validate> imm(decoder, pc + 1);
- if (io) io->FunctionIndex(imm);
+ CallFunctionImmediate imm(decoder, pc + 1, validate);
+ (ios.FunctionIndex(imm), ...);
return 1 + imm.length;
}
case kExprCallIndirect:
case kExprReturnCallIndirect: {
- CallIndirectImmediate<validate> imm(decoder, pc + 1);
- if (io) io->CallIndirect(imm);
+ CallIndirectImmediate imm(decoder, pc + 1, validate);
+ (ios.CallIndirect(imm), ...);
return 1 + imm.length;
}
+ case kExprCallRefDeprecated: // TODO(7748): Drop after grace period.
case kExprCallRef:
case kExprReturnCallRef: {
- SigIndexImmediate<validate> imm(decoder, pc + 1);
- if (io) io->TypeIndex(imm);
+ SigIndexImmediate imm(decoder, pc + 1, validate);
+ (ios.TypeIndex(imm), ...);
return 1 + imm.length;
}
- case kExprCallRefDeprecated: // TODO(7748): Drop after grace period.
case kExprDrop:
case kExprSelect:
case kExprCatchAll:
return 1;
case kExprSelectWithType: {
- SelectTypeImmediate<validate> imm(WasmFeatures::All(), decoder, pc + 1,
- nullptr);
- if (io) io->SelectType(imm);
+ SelectTypeImmediate imm(WasmFeatures::All(), decoder, pc + 1, validate);
+ (ios.SelectType(imm), ...);
return 1 + imm.length;
}
case kExprLocalGet:
case kExprLocalSet:
case kExprLocalTee: {
- IndexImmediate<validate> imm(decoder, pc + 1, "local index");
- if (io) io->LocalIndex(imm);
+ IndexImmediate imm(decoder, pc + 1, "local index", validate);
+ (ios.LocalIndex(imm), ...);
return 1 + imm.length;
}
case kExprGlobalGet:
case kExprGlobalSet: {
- GlobalIndexImmediate<validate> imm(decoder, pc + 1);
- if (io) io->GlobalIndex(imm);
+ GlobalIndexImmediate imm(decoder, pc + 1, validate);
+ (ios.GlobalIndex(imm), ...);
return 1 + imm.length;
}
case kExprTableGet:
case kExprTableSet: {
- IndexImmediate<validate> imm(decoder, pc + 1, "table index");
- if (io) io->TableIndex(imm);
+ IndexImmediate imm(decoder, pc + 1, "table index", validate);
+ (ios.TableIndex(imm), ...);
return 1 + imm.length;
}
case kExprI32Const: {
- ImmI32Immediate<validate> imm(decoder, pc + 1);
- if (io) io->I32Const(imm);
+ ImmI32Immediate imm(decoder, pc + 1, validate);
+ (ios.I32Const(imm), ...);
return 1 + imm.length;
}
case kExprI64Const: {
- ImmI64Immediate<validate> imm(decoder, pc + 1);
- if (io) io->I64Const(imm);
+ ImmI64Immediate imm(decoder, pc + 1, validate);
+ (ios.I64Const(imm), ...);
return 1 + imm.length;
}
case kExprF32Const:
- if (io) {
- ImmF32Immediate<validate> imm(decoder, pc + 1);
- io->F32Const(imm);
+ if (sizeof...(ios) > 0) {
+ ImmF32Immediate imm(decoder, pc + 1, validate);
+ (ios.F32Const(imm), ...);
}
return 5;
case kExprF64Const:
- if (io) {
- ImmF64Immediate<validate> imm(decoder, pc + 1);
- io->F64Const(imm);
+ if (sizeof...(ios) > 0) {
+ ImmF64Immediate imm(decoder, pc + 1, validate);
+ (ios.F64Const(imm), ...);
}
return 9;
case kExprRefNull: {
- HeapTypeImmediate<validate> imm(WasmFeatures::All(), decoder, pc + 1,
- nullptr);
- if (io) io->HeapType(imm);
+ HeapTypeImmediate imm(WasmFeatures::All(), decoder, pc + 1, validate);
+ (ios.HeapType(imm), ...);
return 1 + imm.length;
}
case kExprRefIsNull:
case kExprRefAsNonNull:
return 1;
case kExprRefFunc: {
- IndexImmediate<validate> imm(decoder, pc + 1, "function index");
- if (io) io->FunctionIndex(imm);
+ IndexImmediate imm(decoder, pc + 1, "function index", validate);
+ (ios.FunctionIndex(imm), ...);
return 1 + imm.length;
}
@@ -1840,23 +1959,23 @@ class WasmDecoder : public Decoder {
return 1;
FOREACH_LOAD_MEM_OPCODE(DECLARE_OPCODE_CASE)
FOREACH_STORE_MEM_OPCODE(DECLARE_OPCODE_CASE) {
- MemoryAccessImmediate<validate> imm(decoder, pc + 1, UINT32_MAX,
- kConservativelyAssumeMemory64);
- if (io) io->MemoryAccess(imm);
+ MemoryAccessImmediate imm(decoder, pc + 1, UINT32_MAX,
+ kConservativelyAssumeMemory64, validate);
+ (ios.MemoryAccess(imm), ...);
return 1 + imm.length;
}
// clang-format on
case kExprMemoryGrow:
case kExprMemorySize: {
- MemoryIndexImmediate<validate> imm(decoder, pc + 1);
- if (io) io->MemoryIndex(imm);
+ MemoryIndexImmediate imm(decoder, pc + 1, validate);
+ (ios.MemoryIndex(imm), ...);
return 1 + imm.length;
}
/********** Prefixed opcodes **********/
case kNumericPrefix: {
uint32_t length = 0;
- opcode = decoder->read_prefixed_opcode<validate>(pc, &length);
+ opcode = decoder->read_prefixed_opcode<ValidationTag>(pc, &length);
switch (opcode) {
case kExprI32SConvertSatF32:
case kExprI32UConvertSatF32:
@@ -1868,51 +1987,51 @@ class WasmDecoder : public Decoder {
case kExprI64UConvertSatF64:
return length;
case kExprMemoryInit: {
- MemoryInitImmediate<validate> imm(decoder, pc + length);
- if (io) io->MemoryInit(imm);
+ MemoryInitImmediate imm(decoder, pc + length, validate);
+ (ios.MemoryInit(imm), ...);
return length + imm.length;
}
case kExprDataDrop: {
- IndexImmediate<validate> imm(decoder, pc + length,
- "data segment index");
- if (io) io->DataSegmentIndex(imm);
+ IndexImmediate imm(decoder, pc + length, "data segment index",
+ validate);
+ (ios.DataSegmentIndex(imm), ...);
return length + imm.length;
}
case kExprMemoryCopy: {
- MemoryCopyImmediate<validate> imm(decoder, pc + length);
- if (io) io->MemoryCopy(imm);
+ MemoryCopyImmediate imm(decoder, pc + length, validate);
+ (ios.MemoryCopy(imm), ...);
return length + imm.length;
}
case kExprMemoryFill: {
- MemoryIndexImmediate<validate> imm(decoder, pc + length);
- if (io) io->MemoryIndex(imm);
+ MemoryIndexImmediate imm(decoder, pc + length, validate);
+ (ios.MemoryIndex(imm), ...);
return length + imm.length;
}
case kExprTableInit: {
- TableInitImmediate<validate> imm(decoder, pc + length);
- if (io) io->TableInit(imm);
+ TableInitImmediate imm(decoder, pc + length, validate);
+ (ios.TableInit(imm), ...);
return length + imm.length;
}
case kExprElemDrop: {
- IndexImmediate<validate> imm(decoder, pc + length,
- "element segment index");
- if (io) io->ElemSegmentIndex(imm);
+ IndexImmediate imm(decoder, pc + length, "element segment index",
+ validate);
+ (ios.ElemSegmentIndex(imm), ...);
return length + imm.length;
}
case kExprTableCopy: {
- TableCopyImmediate<validate> imm(decoder, pc + length);
- if (io) io->TableCopy(imm);
+ TableCopyImmediate imm(decoder, pc + length, validate);
+ (ios.TableCopy(imm), ...);
return length + imm.length;
}
case kExprTableGrow:
case kExprTableSize:
case kExprTableFill: {
- IndexImmediate<validate> imm(decoder, pc + length, "table index");
- if (io) io->TableIndex(imm);
+ IndexImmediate imm(decoder, pc + length, "table index", validate);
+ (ios.TableIndex(imm), ...);
return length + imm.length;
}
default:
- if (validate) {
+ if (ValidationTag::validate) {
decoder->DecodeError(pc, "invalid numeric opcode");
}
return length;
@@ -1920,33 +2039,32 @@ class WasmDecoder : public Decoder {
}
case kSimdPrefix: {
uint32_t length = 0;
- opcode = decoder->read_prefixed_opcode<validate>(pc, &length);
+ opcode = decoder->read_prefixed_opcode<ValidationTag>(pc, &length);
switch (opcode) {
// clang-format off
FOREACH_SIMD_0_OPERAND_OPCODE(DECLARE_OPCODE_CASE)
return length;
FOREACH_SIMD_1_OPERAND_OPCODE(DECLARE_OPCODE_CASE)
- if (io) {
- SimdLaneImmediate<validate> lane_imm(decoder, pc + length);
- io->SimdLane(lane_imm);
+ if (sizeof...(ios) > 0) {
+ SimdLaneImmediate lane_imm(decoder, pc + length, validate);
+ (ios.SimdLane(lane_imm), ...);
}
return length + 1;
FOREACH_SIMD_MEM_OPCODE(DECLARE_OPCODE_CASE) {
- MemoryAccessImmediate<validate> imm(decoder, pc + length,
- UINT32_MAX,
- kConservativelyAssumeMemory64);
- if (io) io->MemoryAccess(imm);
+ MemoryAccessImmediate imm(decoder, pc + length, UINT32_MAX,
+ kConservativelyAssumeMemory64, validate);
+ (ios.MemoryAccess(imm), ...);
return length + imm.length;
}
FOREACH_SIMD_MEM_1_OPERAND_OPCODE(DECLARE_OPCODE_CASE) {
- MemoryAccessImmediate<validate> imm(
+ MemoryAccessImmediate imm(
decoder, pc + length, UINT32_MAX,
- kConservativelyAssumeMemory64);
- if (io) {
- SimdLaneImmediate<validate> lane_imm(decoder,
- pc + length + imm.length);
- io->MemoryAccess(imm);
- io->SimdLane(lane_imm);
+ kConservativelyAssumeMemory64, validate);
+ if (sizeof...(ios) > 0) {
+ SimdLaneImmediate lane_imm(decoder,
+ pc + length + imm.length, validate);
+ (ios.MemoryAccess(imm), ...);
+ (ios.SimdLane(lane_imm), ...);
}
// 1 more byte for lane index immediate.
return length + imm.length + 1;
@@ -1955,13 +2073,13 @@ class WasmDecoder : public Decoder {
// Shuffles require a byte per lane, or 16 immediate bytes.
case kExprS128Const:
case kExprI8x16Shuffle:
- if (io) {
- Simd128Immediate<validate> imm(decoder, pc + length);
- io->S128Const(imm);
+ if (sizeof...(ios) > 0) {
+ Simd128Immediate imm(decoder, pc + length, validate);
+ (ios.S128Const(imm), ...);
}
return length + kSimd128Size;
default:
- if (validate) {
+ if (ValidationTag::validate) {
decoder->DecodeError(pc, "invalid SIMD opcode");
}
return length;
@@ -1969,14 +2087,13 @@ class WasmDecoder : public Decoder {
}
case kAtomicPrefix: {
uint32_t length = 0;
- opcode = decoder->read_prefixed_opcode<validate>(pc, &length,
- "atomic_index");
+ opcode = decoder->read_prefixed_opcode<ValidationTag>(pc, &length,
+ "atomic_index");
switch (opcode) {
FOREACH_ATOMIC_OPCODE(DECLARE_OPCODE_CASE) {
- MemoryAccessImmediate<validate> imm(decoder, pc + length,
- UINT32_MAX,
- kConservativelyAssumeMemory64);
- if (io) io->MemoryAccess(imm);
+ MemoryAccessImmediate imm(decoder, pc + length, UINT32_MAX,
+ kConservativelyAssumeMemory64, validate);
+ (ios.MemoryAccess(imm), ...);
return length + imm.length;
}
FOREACH_ATOMIC_0_OPERAND_OPCODE(DECLARE_OPCODE_CASE) {
@@ -1984,7 +2101,7 @@ class WasmDecoder : public Decoder {
return length + 1;
}
default:
- if (validate) {
+ if (ValidationTag::validate) {
decoder->DecodeError(pc, "invalid Atomics opcode");
}
return length;
@@ -1992,21 +2109,21 @@ class WasmDecoder : public Decoder {
}
case kGCPrefix: {
uint32_t length = 0;
- opcode =
- decoder->read_prefixed_opcode<validate>(pc, &length, "gc_index");
+ opcode = decoder->read_prefixed_opcode<ValidationTag>(pc, &length,
+ "gc_index");
switch (opcode) {
case kExprStructNew:
case kExprStructNewDefault: {
- StructIndexImmediate<validate> imm(decoder, pc + length);
- if (io) io->TypeIndex(imm);
+ StructIndexImmediate imm(decoder, pc + length, validate);
+ (ios.TypeIndex(imm), ...);
return length + imm.length;
}
case kExprStructGet:
case kExprStructGetS:
case kExprStructGetU:
case kExprStructSet: {
- FieldImmediate<validate> imm(decoder, pc + length);
- if (io) io->Field(imm);
+ FieldImmediate imm(decoder, pc + length, validate);
+ (ios.Field(imm), ...);
return length + imm.length;
}
case kExprArrayNew:
@@ -2016,75 +2133,77 @@ class WasmDecoder : public Decoder {
case kExprArrayGetU:
case kExprArraySet:
case kExprArrayLenDeprecated: {
- ArrayIndexImmediate<validate> imm(decoder, pc + length);
- if (io) io->TypeIndex(imm);
+ ArrayIndexImmediate imm(decoder, pc + length, validate);
+ (ios.TypeIndex(imm), ...);
return length + imm.length;
}
case kExprArrayNewFixed: {
- ArrayIndexImmediate<validate> array_imm(decoder, pc + length);
- IndexImmediate<validate> length_imm(
- decoder, pc + length + array_imm.length, "array length");
- if (io) io->TypeIndex(array_imm);
- if (io) io->Length(length_imm);
+ ArrayIndexImmediate array_imm(decoder, pc + length, validate);
+ IndexImmediate length_imm(decoder, pc + length + array_imm.length,
+ "array length", validate);
+ (ios.TypeIndex(array_imm), ...);
+ (ios.Length(length_imm), ...);
return length + array_imm.length + length_imm.length;
}
case kExprArrayCopy: {
- ArrayIndexImmediate<validate> dst_imm(decoder, pc + length);
- ArrayIndexImmediate<validate> src_imm(decoder,
- pc + length + dst_imm.length);
- if (io) io->ArrayCopy(dst_imm, src_imm);
+ ArrayIndexImmediate dst_imm(decoder, pc + length, validate);
+ ArrayIndexImmediate src_imm(decoder, pc + length + dst_imm.length,
+ validate);
+ (ios.ArrayCopy(dst_imm, src_imm), ...);
return length + dst_imm.length + src_imm.length;
}
case kExprArrayNewData:
case kExprArrayNewElem: {
- ArrayIndexImmediate<validate> array_imm(decoder, pc + length);
- IndexImmediate<validate> data_imm(
- decoder, pc + length + array_imm.length, "segment index");
- if (io) io->TypeIndex(array_imm);
- if (io) io->DataSegmentIndex(data_imm);
+ ArrayIndexImmediate array_imm(decoder, pc + length, validate);
+ IndexImmediate data_imm(decoder, pc + length + array_imm.length,
+ "segment index", validate);
+ (ios.TypeIndex(array_imm), ...);
+ (ios.DataSegmentIndex(data_imm), ...);
return length + array_imm.length + data_imm.length;
}
case kExprBrOnArray:
- case kExprBrOnData:
+ case kExprBrOnStruct:
case kExprBrOnI31:
case kExprBrOnNonArray:
- case kExprBrOnNonData:
+ case kExprBrOnNonStruct:
case kExprBrOnNonI31: {
- BranchDepthImmediate<validate> imm(decoder, pc + length);
- if (io) io->BranchDepth(imm);
+ BranchDepthImmediate imm(decoder, pc + length, validate);
+ (ios.BranchDepth(imm), ...);
return length + imm.length;
}
+ case kExprRefCast:
+ case kExprRefCastNull:
case kExprRefTest:
case kExprRefTestNull: {
- HeapTypeImmediate<validate> imm(WasmFeatures::All(), decoder,
- pc + length, nullptr);
- if (io) io->HeapType(imm);
+ HeapTypeImmediate imm(WasmFeatures::All(), decoder, pc + length,
+ validate);
+ (ios.HeapType(imm), ...);
return length + imm.length;
}
case kExprRefTestDeprecated:
- case kExprRefCast:
+ case kExprRefCastDeprecated:
case kExprRefCastNop: {
- IndexImmediate<validate> imm(decoder, pc + length, "type index");
- if (io) io->TypeIndex(imm);
+ IndexImmediate imm(decoder, pc + length, "type index", validate);
+ (ios.TypeIndex(imm), ...);
return length + imm.length;
}
case kExprBrOnCast:
case kExprBrOnCastFail: {
- BranchDepthImmediate<validate> branch(decoder, pc + length);
- IndexImmediate<validate> index(decoder, pc + length + branch.length,
- "type index");
- if (io) io->BranchDepth(branch);
- if (io) io->TypeIndex(index);
+ BranchDepthImmediate branch(decoder, pc + length, validate);
+ IndexImmediate index(decoder, pc + length + branch.length,
+ "type index", validate);
+ (ios.BranchDepth(branch), ...);
+ (ios.TypeIndex(index), ...);
return length + branch.length + index.length;
}
case kExprI31New:
case kExprI31GetS:
case kExprI31GetU:
case kExprRefAsArray:
- case kExprRefAsData:
+ case kExprRefAsStruct:
case kExprRefAsI31:
case kExprRefIsArray:
- case kExprRefIsData:
+ case kExprRefIsStruct:
case kExprRefIsI31:
case kExprExternInternalize:
case kExprExternExternalize:
@@ -2102,13 +2221,13 @@ class WasmDecoder : public Decoder {
case kExprStringNewWtf16:
case kExprStringEncodeWtf16:
case kExprStringViewWtf16Encode: {
- MemoryIndexImmediate<validate> imm(decoder, pc + length);
- if (io) io->MemoryIndex(imm);
+ MemoryIndexImmediate imm(decoder, pc + length, validate);
+ (ios.MemoryIndex(imm), ...);
return length + imm.length;
}
case kExprStringConst: {
- StringConstImmediate<validate> imm(decoder, pc + length);
- if (io) io->StringConst(imm);
+ StringConstImmediate imm(decoder, pc + length, validate);
+ (ios.StringConst(imm), ...);
return length + imm.length;
}
case kExprStringMeasureUtf8:
@@ -2140,7 +2259,7 @@ class WasmDecoder : public Decoder {
return length;
default:
// This is unreachable except for malformed modules.
- if (validate) {
+ if (ValidationTag::validate) {
decoder->DecodeError(pc, "invalid gc opcode");
}
return length;
@@ -2164,14 +2283,14 @@ class WasmDecoder : public Decoder {
#undef DECLARE_OPCODE_CASE
}
// Invalid modules will reach this point.
- if (validate) {
+ if (ValidationTag::validate) {
decoder->DecodeError(pc, "invalid opcode");
}
return 1;
}
// TODO(clemensb): This is only used by the interpreter; move there.
- std::pair<uint32_t, uint32_t> StackEffect(const byte* pc) {
+ V8_EXPORT_PRIVATE std::pair<uint32_t, uint32_t> StackEffect(const byte* pc) {
WasmOpcode opcode = static_cast<WasmOpcode>(*pc);
// Handle "simple" opcodes with a fixed signature first.
const FunctionSig* sig = WasmOpcodes::Signature(opcode);
@@ -2214,19 +2333,19 @@ class WasmDecoder : public Decoder {
case kExprMemorySize:
return {0, 1};
case kExprCallFunction: {
- CallFunctionImmediate<validate> imm(this, pc + 1);
+ CallFunctionImmediate imm(this, pc + 1, validate);
CHECK(Validate(pc + 1, imm));
return {imm.sig->parameter_count(), imm.sig->return_count()};
}
case kExprCallIndirect: {
- CallIndirectImmediate<validate> imm(this, pc + 1);
+ CallIndirectImmediate imm(this, pc + 1, validate);
CHECK(Validate(pc + 1, imm));
// Indirect calls pop an additional argument for the table index.
return {imm.sig->parameter_count() + 1,
imm.sig->return_count()};
}
case kExprThrow: {
- TagIndexImmediate<validate> imm(this, pc + 1);
+ TagIndexImmediate imm(this, pc + 1, validate);
CHECK(Validate(pc + 1, imm));
DCHECK_EQ(0, imm.tag->sig->return_count());
return {imm.tag->sig->parameter_count(), 0};
@@ -2251,7 +2370,7 @@ class WasmDecoder : public Decoder {
case kNumericPrefix:
case kAtomicPrefix:
case kSimdPrefix: {
- opcode = this->read_prefixed_opcode<validate>(pc);
+ opcode = this->read_prefixed_opcode<ValidationTag>(pc);
switch (opcode) {
FOREACH_SIMD_1_OPERAND_1_PARAM_OPCODE(DECLARE_OPCODE_CASE)
return {1, 1};
@@ -2278,7 +2397,7 @@ class WasmDecoder : public Decoder {
}
case kGCPrefix: {
uint32_t unused_length;
- opcode = this->read_prefixed_opcode<validate>(pc, &unused_length);
+ opcode = this->read_prefixed_opcode<ValidationTag>(pc, &unused_length);
switch (opcode) {
case kExprStructGet:
case kExprStructGetS:
@@ -2293,6 +2412,8 @@ class WasmDecoder : public Decoder {
case kExprRefTestNull:
case kExprRefTestDeprecated:
case kExprRefCast:
+ case kExprRefCastNull:
+ case kExprRefCastDeprecated:
case kExprRefCastNop:
case kExprBrOnCast:
case kExprBrOnCastFail:
@@ -2313,14 +2434,14 @@ class WasmDecoder : public Decoder {
case kExprStructNewDefault:
return {0, 1};
case kExprStructNew: {
- StructIndexImmediate<validate> imm(this, pc + 2);
+ StructIndexImmediate imm(this, pc + 2, validate);
CHECK(Validate(pc + 2, imm));
return {imm.struct_type->field_count(), 1};
}
case kExprArrayNewFixed: {
- ArrayIndexImmediate<validate> array_imm(this, pc + 2);
- IndexImmediate<validate> length_imm(this, pc + 2 + array_imm.length,
- "array length");
+ ArrayIndexImmediate array_imm(this, pc + 2, validate);
+ IndexImmediate length_imm(this, pc + 2 + array_imm.length,
+ "array length", validate);
return {length_imm.index, 1};
}
case kExprStringConst:
@@ -2381,6 +2502,8 @@ class WasmDecoder : public Decoder {
// clang-format on
}
+ static constexpr ValidationTag validate = {};
+
Zone* const compilation_zone_;
ValueType* local_types_ = nullptr;
@@ -2421,9 +2544,9 @@ class WasmDecoder : public Decoder {
} \
} while (false)
-template <Decoder::ValidateFlag validate, typename Interface,
+template <typename ValidationTag, typename Interface,
DecodingMode decoding_mode = kFunctionBody>
-class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
+class WasmFullDecoder : public WasmDecoder<ValidationTag, decoding_mode> {
using Value = typename Interface::Value;
using Control = typename Interface::Control;
using ArgVector = base::Vector<Value>;
@@ -2438,18 +2561,21 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
WasmFullDecoder(Zone* zone, const WasmModule* module,
const WasmFeatures& enabled, WasmFeatures* detected,
const FunctionBody& body, InterfaceArgs&&... interface_args)
- : WasmDecoder<validate, decoding_mode>(zone, module, enabled, detected,
- body.sig, body.start, body.end,
- body.offset),
- interface_(std::forward<InterfaceArgs>(interface_args)...),
- initialized_locals_(zone),
- locals_initializers_stack_(zone),
- control_(zone) {}
+ : WasmDecoder<ValidationTag, decoding_mode>(
+ zone, module, enabled, detected, body.sig, body.start, body.end,
+ body.offset),
+ interface_(std::forward<InterfaceArgs>(interface_args)...) {}
+
+ ~WasmFullDecoder() {
+ control_.Reset(this->compilation_zone_);
+ stack_.Reset(this->compilation_zone_);
+ locals_initializers_stack_.Reset(this->compilation_zone_);
+ }
Interface& interface() { return interface_; }
bool Decode() {
- DCHECK_EQ(stack_end_, stack_);
+ DCHECK(stack_.empty());
DCHECK(control_.empty());
DCHECK_LE(this->pc_, this->end_);
DCHECK_EQ(this->num_locals(), 0);
@@ -2457,7 +2583,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
locals_offset_ = this->pc_offset();
uint32_t locals_length;
this->DecodeLocals(this->pc(), &locals_length);
- if (this->failed()) return TraceFailed();
+ if (!VALIDATE(this->ok())) return TraceFailed();
this->consume_bytes(locals_length);
int non_defaultable = 0;
uint32_t params_count =
@@ -2470,7 +2596,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
// Cannot use CALL_INTERFACE_* macros because control is empty.
interface().StartFunction(this);
DecodeFunctionBody();
- if (this->failed()) return TraceFailed();
+ if (!VALIDATE(this->ok())) return TraceFailed();
if (!VALIDATE(control_.empty())) {
if (control_.size() > 1) {
@@ -2483,13 +2609,14 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
// Cannot use CALL_INTERFACE_* macros because control is empty.
interface().FinishFunction(this);
- if (this->failed()) return TraceFailed();
+ if (!VALIDATE(this->ok())) return TraceFailed();
TRACE("wasm-decode ok\n\n");
return true;
}
bool TraceFailed() {
+ if constexpr (!ValidationTag::validate) UNREACHABLE();
if (this->error_.offset()) {
TRACE("wasm-error module+%-6d func+%d: %s\n\n", this->error_.offset(),
this->GetBufferRelativeOffset(this->error_.offset()),
@@ -2507,7 +2634,8 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
if (!WasmOpcodes::IsPrefixOpcode(opcode)) {
return WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(opcode));
}
- opcode = this->template read_prefixed_opcode<Decoder::kFullValidation>(pc);
+ opcode =
+ this->template read_prefixed_opcode<Decoder::FullValidationTag>(pc);
return WasmOpcodes::OpcodeName(opcode);
}
@@ -2523,19 +2651,15 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
Control* control_at(uint32_t depth) {
DCHECK_GT(control_.size(), depth);
- return &control_.back() - depth;
+ return control_.end() - 1 - depth;
}
- uint32_t stack_size() const {
- DCHECK_GE(stack_end_, stack_);
- DCHECK_GE(kMaxUInt32, stack_end_ - stack_);
- return static_cast<uint32_t>(stack_end_ - stack_);
- }
+ uint32_t stack_size() const { return stack_.size(); }
Value* stack_value(uint32_t depth) const {
DCHECK_LT(0, depth);
- DCHECK_GE(stack_size(), depth);
- return stack_end_ - depth;
+ DCHECK_GE(stack_.size(), depth);
+ return stack_.end() - depth;
}
int32_t current_catch() const { return current_catch_; }
@@ -2557,17 +2681,19 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
bool is_local_initialized(uint32_t local_index) {
+ DCHECK_GT(this->num_locals_, local_index);
if (!has_nondefaultable_locals_) return true;
return initialized_locals_[local_index];
}
void set_local_initialized(uint32_t local_index) {
+ DCHECK_GT(this->num_locals_, local_index);
if (!has_nondefaultable_locals_) return;
// This implicitly covers defaultable locals too (which are always
// initialized).
if (is_local_initialized(local_index)) return;
initialized_locals_[local_index] = true;
- locals_initializers_stack_.push_back(local_index);
+ locals_initializers_stack_.push(local_index);
}
uint32_t locals_initialization_stack_depth() const {
@@ -2579,7 +2705,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
uint32_t previous_stack_height = c->init_stack_depth;
while (locals_initializers_stack_.size() > previous_stack_height) {
uint32_t local_index = locals_initializers_stack_.back();
- locals_initializers_stack_.pop_back();
+ locals_initializers_stack_.pop();
initialized_locals_[local_index] = false;
}
}
@@ -2587,18 +2713,18 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
void InitializeInitializedLocalsTracking(int non_defaultable_locals) {
has_nondefaultable_locals_ = non_defaultable_locals > 0;
if (!has_nondefaultable_locals_) return;
- initialized_locals_.assign(this->num_locals_, false);
- // Parameters count as initialized...
+ initialized_locals_ =
+ this->compilation_zone_->template NewArray<bool>(this->num_locals_);
+ // Parameters are always initialized.
const size_t num_params = this->sig_->parameter_count();
- for (size_t i = 0; i < num_params; i++) {
- initialized_locals_[i] = true;
- }
- // ...and so do defaultable locals.
+ std::fill_n(initialized_locals_, num_params, true);
+ // Locals are initialized if they are defaultable.
for (size_t i = num_params; i < this->num_locals_; i++) {
- if (this->local_types_[i].is_defaultable()) initialized_locals_[i] = true;
+ initialized_locals_[i] = this->local_types_[i].is_defaultable();
}
- if (non_defaultable_locals == 0) return;
- locals_initializers_stack_.reserve(non_defaultable_locals);
+ DCHECK(locals_initializers_stack_.empty());
+ locals_initializers_stack_.EnsureMoreCapacity(non_defaultable_locals,
+ this->compilation_zone_);
}
void DecodeFunctionBody() {
@@ -2611,6 +2737,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
DCHECK(control_.empty());
constexpr uint32_t kStackDepth = 0;
constexpr uint32_t kInitStackDepth = 0;
+ control_.EnsureMoreCapacity(1, this->compilation_zone_);
control_.emplace_back(kControlBlock, kStackDepth, kInitStackDepth,
this->pc_, kReachable);
Control* c = &control_.back();
@@ -2638,7 +2765,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
// and binary operations, local.get, constants, ...). Thus check that
// there is enough space for those operations centrally, and avoid any
// bounds checks in those operations.
- EnsureStackSpace(1);
+ stack_.EnsureMoreCapacity(1, this->compilation_zone_);
uint8_t first_byte = *this->pc_;
WasmOpcode opcode = static_cast<WasmOpcode>(first_byte);
CALL_INTERFACE_IF_OK_AND_REACHABLE(NextInstruction, opcode);
@@ -2676,7 +2803,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
// and binary operations, local.get, constants, ...). Thus check that
// there is enough space for those operations centrally, and avoid any
// bounds checks in those operations.
- EnsureStackSpace(1);
+ stack_.EnsureMoreCapacity(1, this->compilation_zone_);
uint8_t first_byte = *this->pc_;
WasmOpcode opcode = static_cast<WasmOpcode>(first_byte);
CALL_INTERFACE_IF_OK_AND_REACHABLE(NextInstruction, opcode);
@@ -2696,24 +2823,21 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
Interface interface_;
// The value stack, stored as individual pointers for maximum performance.
- Value* stack_ = nullptr;
- Value* stack_end_ = nullptr;
- Value* stack_capacity_end_ = nullptr;
- ASSERT_TRIVIALLY_COPYABLE(Value);
+ FastZoneVector<Value> stack_;
// Indicates whether the local with the given index is currently initialized.
- // Entries for defaultable locals are meaningless; we have a bit for each
+ // Entries for defaultable locals are meaningless; we have a byte for each
// local because we expect that the effort required to densify this bit
// vector would more than offset the memory savings.
- ZoneVector<bool> initialized_locals_;
+ bool* initialized_locals_;
// Keeps track of initializing assignments to non-defaultable locals that
// happened, so they can be discarded at the end of the current block.
// Contains no duplicates, so the size of this stack is bounded (and pre-
// allocated) to the number of non-defaultable locals in the function.
- ZoneVector<uint32_t> locals_initializers_stack_;
+ FastZoneVector<uint32_t> locals_initializers_stack_;
- // stack of blocks, loops, and ifs.
- ZoneVector<Control> control_;
+ // Control stack (blocks, loops, ifs, ...).
+ FastZoneVector<Control> control_;
// Controls whether code should be generated for the current block (basically
// a cache for {ok() && control_.back().reachable()}).
@@ -2741,10 +2865,10 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
return true;
}
- MemoryAccessImmediate<validate> MakeMemoryAccessImmediate(
- uint32_t pc_offset, uint32_t max_alignment) {
- return MemoryAccessImmediate<validate>(
- this, this->pc_ + pc_offset, max_alignment, this->module_->is_memory64);
+ MemoryAccessImmediate MakeMemoryAccessImmediate(uint32_t pc_offset,
+ uint32_t max_alignment) {
+ return MemoryAccessImmediate(this, this->pc_ + pc_offset, max_alignment,
+ this->module_->is_memory64, validate);
}
#ifdef DEBUG
@@ -2813,7 +2937,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
if (!c.reachable()) Append("%c", c.unreachable() ? '*' : '#');
}
Append(" | ");
- for (size_t i = 0; i < decoder_->stack_size(); ++i) {
+ for (uint32_t i = 0; i < decoder_->stack_.size(); ++i) {
Value& val = decoder_->stack_[i];
Append(" %c", val.type.short_name());
}
@@ -2874,8 +2998,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
#undef BUILD_SIMPLE_OPCODE
DECODE(Block) {
- BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_ + 1,
- this->module_);
+ BlockTypeImmediate imm(this->enabled_, this, this->pc_ + 1, validate);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
ArgVector args = PeekArgs(imm.sig);
Control* block = PushControl(kControlBlock, args.length());
@@ -2887,8 +3010,8 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
DECODE(Rethrow) {
- CHECK_PROTOTYPE_OPCODE(eh);
- BranchDepthImmediate<validate> imm(this, this->pc_ + 1);
+ this->detected_->Add(kFeature_eh);
+ BranchDepthImmediate imm(this, this->pc_ + 1, validate);
if (!this->Validate(this->pc_ + 1, imm, control_.size())) return 0;
Control* c = control_at(imm.depth);
if (!VALIDATE(c->is_try_catchall() || c->is_try_catch())) {
@@ -2901,8 +3024,8 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
DECODE(Throw) {
- CHECK_PROTOTYPE_OPCODE(eh);
- TagIndexImmediate<validate> imm(this, this->pc_ + 1);
+ this->detected_->Add(kFeature_eh);
+ TagIndexImmediate imm(this, this->pc_ + 1, validate);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
ArgVector args = PeekArgs(imm.tag->ToFunctionSig());
CALL_INTERFACE_IF_OK_AND_REACHABLE(Throw, imm, base::VectorOf(args));
@@ -2912,9 +3035,8 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
DECODE(Try) {
- CHECK_PROTOTYPE_OPCODE(eh);
- BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_ + 1,
- this->module_);
+ this->detected_->Add(kFeature_eh);
+ BlockTypeImmediate imm(this->enabled_, this, this->pc_ + 1, validate);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
ArgVector args = PeekArgs(imm.sig);
Control* try_block = PushControl(kControlTry, args.length());
@@ -2928,8 +3050,8 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
DECODE(Catch) {
- CHECK_PROTOTYPE_OPCODE(eh);
- TagIndexImmediate<validate> imm(this, this->pc_ + 1);
+ this->detected_->Add(kFeature_eh);
+ TagIndexImmediate imm(this, this->pc_ + 1, validate);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
DCHECK(!control_.empty());
Control* c = &control_.back();
@@ -2945,23 +3067,24 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
c->kind = kControlTryCatch;
// TODO(jkummerow): Consider moving the stack manipulation after the
// INTERFACE call for consistency.
- DCHECK_LE(stack_ + c->stack_depth, stack_end_);
- stack_end_ = stack_ + c->stack_depth;
+ stack_.shrink_to(c->stack_depth);
c->reachability = control_at(1)->innerReachability();
RollbackLocalsInitialization(c);
const WasmTagSig* sig = imm.tag->sig;
- EnsureStackSpace(static_cast<int>(sig->parameter_count()));
+ stack_.EnsureMoreCapacity(static_cast<int>(sig->parameter_count()),
+ this->compilation_zone_);
for (ValueType type : sig->parameters()) Push(CreateValue(type));
- base::Vector<Value> values(stack_ + c->stack_depth, sig->parameter_count());
+ base::Vector<Value> values(stack_.begin() + c->stack_depth,
+ sig->parameter_count());
current_catch_ = c->previous_catch; // Pop try scope.
CALL_INTERFACE_IF_OK_AND_PARENT_REACHABLE(CatchException, imm, c, values);
- current_code_reachable_and_ok_ = this->ok() && c->reachable();
+ current_code_reachable_and_ok_ = VALIDATE(this->ok()) && c->reachable();
return 1 + imm.length;
}
DECODE(Delegate) {
- CHECK_PROTOTYPE_OPCODE(eh);
- BranchDepthImmediate<validate> imm(this, this->pc_ + 1);
+ this->detected_->Add(kFeature_eh);
+ BranchDepthImmediate imm(this, this->pc_ + 1, validate);
// -1 because the current try block is not included in the count.
if (!this->Validate(this->pc_ + 1, imm, control_depth() - 1)) return 0;
Control* c = &control_.back();
@@ -2986,7 +3109,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
DECODE(CatchAll) {
- CHECK_PROTOTYPE_OPCODE(eh);
+ this->detected_->Add(kFeature_eh);
DCHECK(!control_.empty());
Control* c = &control_.back();
if (!VALIDATE(c->is_try())) {
@@ -3003,14 +3126,14 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
RollbackLocalsInitialization(c);
current_catch_ = c->previous_catch; // Pop try scope.
CALL_INTERFACE_IF_OK_AND_PARENT_REACHABLE(CatchAll, c);
- stack_end_ = stack_ + c->stack_depth;
- current_code_reachable_and_ok_ = this->ok() && c->reachable();
+ stack_.shrink_to(c->stack_depth);
+ current_code_reachable_and_ok_ = VALIDATE(this->ok()) && c->reachable();
return 1;
}
DECODE(BrOnNull) {
CHECK_PROTOTYPE_OPCODE(typed_funcref);
- BranchDepthImmediate<validate> imm(this, this->pc_ + 1);
+ BranchDepthImmediate imm(this, this->pc_ + 1, validate);
if (!this->Validate(this->pc_ + 1, imm, control_.size())) return 0;
Value ref_object = Peek(0);
Control* c = control_at(imm.depth);
@@ -3047,7 +3170,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
DECODE(BrOnNonNull) {
CHECK_PROTOTYPE_OPCODE(gc);
- BranchDepthImmediate<validate> imm(this, this->pc_ + 1);
+ BranchDepthImmediate imm(this, this->pc_ + 1, validate);
if (!this->Validate(this->pc_ + 1, imm, control_.size())) return 0;
Value ref_object = Peek(0);
if (!VALIDATE(ref_object.type.is_object_reference() ||
@@ -3100,8 +3223,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
DECODE(Loop) {
- BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_ + 1,
- this->module_);
+ BlockTypeImmediate imm(this->enabled_, this, this->pc_ + 1, validate);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
ArgVector args = PeekArgs(imm.sig);
Control* block = PushControl(kControlLoop, args.length());
@@ -3113,8 +3235,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
DECODE(If) {
- BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_ + 1,
- this->module_);
+ BlockTypeImmediate imm(this->enabled_, this, this->pc_ + 1, validate);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
Value cond = Peek(0, 0, kWasmI32);
ArgVector args = PeekArgs(imm.sig, 1);
@@ -3146,7 +3267,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
RollbackLocalsInitialization(c);
PushMergeValues(c, &c->start_merge);
c->reachability = control_at(1)->innerReachability();
- current_code_reachable_and_ok_ = this->ok() && c->reachable();
+ current_code_reachable_and_ok_ = VALIDATE(this->ok()) && c->reachable();
return 1;
}
@@ -3165,7 +3286,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
c->reachability = control_at(1)->innerReachability();
CALL_INTERFACE_IF_OK_AND_PARENT_REACHABLE(CatchAll, c);
current_code_reachable_and_ok_ =
- this->ok() && control_.back().reachable();
+ VALIDATE(this->ok()) && control_.back().reachable();
CALL_INTERFACE_IF_OK_AND_REACHABLE(Rethrow, c);
EndControl();
PopControl();
@@ -3190,7 +3311,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
// The result of the block is the return value.
trace_msg->Append("\n" TRACE_INST_FORMAT, startrel(this->pc_),
"(implicit) return");
- control_.clear();
+ control_.pop();
return 1;
}
@@ -3218,9 +3339,9 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
DECODE(SelectWithType) {
this->detected_->Add(kFeature_reftypes);
- SelectTypeImmediate<validate> imm(this->enabled_, this, this->pc_ + 1,
- this->module_);
- if (this->failed()) return 0;
+ SelectTypeImmediate imm(this->enabled_, this, this->pc_ + 1, validate);
+ this->Validate(this->pc_ + 1, imm);
+ if (!VALIDATE(this->ok())) return 0;
Value cond = Peek(0, 2, kWasmI32);
Value fval = Peek(1, 1, imm.type);
Value tval = Peek(2, 0, imm.type);
@@ -3232,7 +3353,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
DECODE(Br) {
- BranchDepthImmediate<validate> imm(this, this->pc_ + 1);
+ BranchDepthImmediate imm(this, this->pc_ + 1, validate);
if (!this->Validate(this->pc_ + 1, imm, control_.size())) return 0;
Control* c = control_at(imm.depth);
if (!VALIDATE(TypeCheckBranch<false>(c, 0))) return 0;
@@ -3245,7 +3366,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
DECODE(BrIf) {
- BranchDepthImmediate<validate> imm(this, this->pc_ + 1);
+ BranchDepthImmediate imm(this, this->pc_ + 1, validate);
if (!this->Validate(this->pc_ + 1, imm, control_.size())) return 0;
Value cond = Peek(0, 0, kWasmI32);
Control* c = control_at(imm.depth);
@@ -3259,10 +3380,10 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
DECODE(BrTable) {
- BranchTableImmediate<validate> imm(this, this->pc_ + 1);
- BranchTableIterator<validate> iterator(this, imm);
+ BranchTableImmediate imm(this, this->pc_ + 1, validate);
+ BranchTableIterator<ValidationTag> iterator(this, imm);
Value key = Peek(0, 0, kWasmI32);
- if (this->failed()) return 0;
+ if (!VALIDATE(this->ok())) return 0;
if (!this->Validate(this->pc_ + 1, imm, control_.size())) return 0;
// Cache the branch targets during the iteration, so that we can set
@@ -3283,7 +3404,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
if (br_targets[target]) continue;
br_targets[target] = true;
- if (validate) {
+ if (ValidationTag::validate) {
if (index == 0) {
arity = control_at(target)->br_merge()->arity;
} else if (!VALIDATE(control_at(target)->br_merge()->arity == arity)) {
@@ -3319,7 +3440,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
DECODE(I32Const) {
- ImmI32Immediate<validate> imm(this, this->pc_ + 1);
+ ImmI32Immediate imm(this, this->pc_ + 1, validate);
Value value = CreateValue(kWasmI32);
CALL_INTERFACE_IF_OK_AND_REACHABLE(I32Const, &value, imm.value);
Push(value);
@@ -3327,7 +3448,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
DECODE(I64Const) {
- ImmI64Immediate<validate> imm(this, this->pc_ + 1);
+ ImmI64Immediate imm(this, this->pc_ + 1, validate);
Value value = CreateValue(kWasmI64);
CALL_INTERFACE_IF_OK_AND_REACHABLE(I64Const, &value, imm.value);
Push(value);
@@ -3335,7 +3456,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
DECODE(F32Const) {
- ImmF32Immediate<validate> imm(this, this->pc_ + 1);
+ ImmF32Immediate imm(this, this->pc_ + 1, validate);
Value value = CreateValue(kWasmF32);
CALL_INTERFACE_IF_OK_AND_REACHABLE(F32Const, &value, imm.value);
Push(value);
@@ -3343,7 +3464,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
DECODE(F64Const) {
- ImmF64Immediate<validate> imm(this, this->pc_ + 1);
+ ImmF64Immediate imm(this, this->pc_ + 1, validate);
Value value = CreateValue(kWasmF64);
CALL_INTERFACE_IF_OK_AND_REACHABLE(F64Const, &value, imm.value);
Push(value);
@@ -3352,8 +3473,8 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
DECODE(RefNull) {
this->detected_->Add(kFeature_reftypes);
- HeapTypeImmediate<validate> imm(this->enabled_, this, this->pc_ + 1,
- this->module_);
+ HeapTypeImmediate imm(this->enabled_, this, this->pc_ + 1, validate);
+ this->Validate(this->pc_ + 1, imm);
if (!VALIDATE(this->ok())) return 0;
ValueType type = ValueType::RefNull(imm.type);
Value value = CreateValue(type);
@@ -3383,7 +3504,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
Push(result);
return 1;
default:
- if (validate) {
+ if (ValidationTag::validate) {
PopTypeError(0, value, "reference type");
return 0;
}
@@ -3393,7 +3514,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
DECODE(RefFunc) {
this->detected_->Add(kFeature_reftypes);
- IndexImmediate<validate> imm(this, this->pc_ + 1, "function index");
+ IndexImmediate imm(this, this->pc_ + 1, "function index", validate);
if (!this->ValidateFunction(this->pc_ + 1, imm)) return 0;
HeapType heap_type(this->enabled_.has_typed_funcref()
? this->module_->functions[imm.index].sig_index
@@ -3421,7 +3542,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
return 1;
}
default:
- if (validate) {
+ if (ValidationTag::validate) {
PopTypeError(0, value, "reference type");
}
return 0;
@@ -3429,7 +3550,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
V8_INLINE DECODE(LocalGet) {
- IndexImmediate<validate> imm(this, this->pc_ + 1, "local index");
+ IndexImmediate imm(this, this->pc_ + 1, "local index", validate);
if (!this->ValidateLocal(this->pc_ + 1, imm)) return 0;
if (!VALIDATE(this->is_local_initialized(imm.index))) {
this->DecodeError(this->pc_, "uninitialized non-defaultable local: %u",
@@ -3443,7 +3564,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
DECODE(LocalSet) {
- IndexImmediate<validate> imm(this, this->pc_ + 1, "local index");
+ IndexImmediate imm(this, this->pc_ + 1, "local index", validate);
if (!this->ValidateLocal(this->pc_ + 1, imm)) return 0;
Value value = Peek(0, 0, this->local_type(imm.index));
CALL_INTERFACE_IF_OK_AND_REACHABLE(LocalSet, value, imm);
@@ -3453,7 +3574,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
DECODE(LocalTee) {
- IndexImmediate<validate> imm(this, this->pc_ + 1, "local index");
+ IndexImmediate imm(this, this->pc_ + 1, "local index", validate);
if (!this->ValidateLocal(this->pc_ + 1, imm)) return 0;
ValueType local_type = this->local_type(imm.index);
Value value = Peek(0, 0, local_type);
@@ -3473,7 +3594,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
DECODE(GlobalGet) {
- GlobalIndexImmediate<validate> imm(this, this->pc_ + 1);
+ GlobalIndexImmediate imm(this, this->pc_ + 1, validate);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
Value result = CreateValue(imm.global->type);
CALL_INTERFACE_IF_OK_AND_REACHABLE(GlobalGet, &result, imm);
@@ -3482,7 +3603,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
DECODE(GlobalSet) {
- GlobalIndexImmediate<validate> imm(this, this->pc_ + 1);
+ GlobalIndexImmediate imm(this, this->pc_ + 1, validate);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
if (!VALIDATE(imm.global->mutability)) {
this->DecodeError("immutable global #%u cannot be assigned", imm.index);
@@ -3496,7 +3617,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
DECODE(TableGet) {
this->detected_->Add(kFeature_reftypes);
- IndexImmediate<validate> imm(this, this->pc_ + 1, "table index");
+ IndexImmediate imm(this, this->pc_ + 1, "table index", validate);
if (!this->ValidateTable(this->pc_ + 1, imm)) return 0;
Value index = Peek(0, 0, kWasmI32);
Value result = CreateValue(this->module_->tables[imm.index].type);
@@ -3508,7 +3629,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
DECODE(TableSet) {
this->detected_->Add(kFeature_reftypes);
- IndexImmediate<validate> imm(this, this->pc_ + 1, "table index");
+ IndexImmediate imm(this, this->pc_ + 1, "table index", validate);
if (!this->ValidateTable(this->pc_ + 1, imm)) return 0;
Value value = Peek(0, 1, this->module_->tables[imm.index].type);
Value index = Peek(1, 0, kWasmI32);
@@ -3522,7 +3643,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
DECODE(StoreMem) { return DecodeStoreMem(GetStoreType(opcode)); }
DECODE(MemoryGrow) {
- MemoryIndexImmediate<validate> imm(this, this->pc_ + 1);
+ MemoryIndexImmediate imm(this, this->pc_ + 1, validate);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
// This opcode will not be emitted by the asm translator.
DCHECK_EQ(kWasmOrigin, this->module_->origin);
@@ -3536,7 +3657,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
DECODE(MemorySize) {
- MemoryIndexImmediate<validate> imm(this, this->pc_ + 1);
+ MemoryIndexImmediate imm(this, this->pc_ + 1, validate);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
ValueType result_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
Value result = CreateValue(result_type);
@@ -3546,7 +3667,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
DECODE(CallFunction) {
- CallFunctionImmediate<validate> imm(this, this->pc_ + 1);
+ CallFunctionImmediate imm(this, this->pc_ + 1, validate);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
ArgVector args = PeekArgs(imm.sig);
ReturnVector returns = CreateReturnValues(imm.sig);
@@ -3558,7 +3679,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
DECODE(CallIndirect) {
- CallIndirectImmediate<validate> imm(this, this->pc_ + 1);
+ CallIndirectImmediate imm(this, this->pc_ + 1, validate);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
Value index =
Peek(0, static_cast<int>(imm.sig->parameter_count()), kWasmI32);
@@ -3574,7 +3695,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
DECODE(ReturnCall) {
CHECK_PROTOTYPE_OPCODE(return_call);
- CallFunctionImmediate<validate> imm(this, this->pc_ + 1);
+ CallFunctionImmediate imm(this, this->pc_ + 1, validate);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
if (!VALIDATE(this->CanReturnCall(imm.sig))) {
this->DecodeError("%s: %s", WasmOpcodes::OpcodeName(kExprReturnCall),
@@ -3590,7 +3711,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
DECODE(ReturnCallIndirect) {
CHECK_PROTOTYPE_OPCODE(return_call);
- CallIndirectImmediate<validate> imm(this, this->pc_ + 1);
+ CallIndirectImmediate imm(this, this->pc_ + 1, validate);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
if (!VALIDATE(this->CanReturnCall(imm.sig))) {
this->DecodeError("%s: %s",
@@ -3611,32 +3732,22 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
// TODO(7748): After a certain grace period, drop this in favor of "CallRef".
DECODE(CallRefDeprecated) {
CHECK_PROTOTYPE_OPCODE(typed_funcref);
- Value func_ref = Peek(0);
- ValueType func_type = func_ref.type;
- if (func_type == kWasmBottom) {
- // We are in unreachable code, maintain the polymorphic stack.
- return 1;
- }
- if (!VALIDATE(func_type.is_object_reference() && func_type.has_index() &&
- this->module_->has_signature(func_type.ref_index()))) {
- PopTypeError(0, func_ref, "function reference");
- return 0;
- }
- const FunctionSig* sig = this->module_->signature(func_type.ref_index());
- ArgVector args = PeekArgs(sig, 1);
- ReturnVector returns = CreateReturnValues(sig);
- CALL_INTERFACE_IF_OK_AND_REACHABLE(CallRef, func_ref, sig,
- func_type.ref_index(), args.begin(),
- returns.begin());
+ SigIndexImmediate imm(this, this->pc_ + 1, validate);
+ if (!this->Validate(this->pc_ + 1, imm)) return 0;
+ Value func_ref = Peek(0, 0, ValueType::RefNull(imm.index));
+ ArgVector args = PeekArgs(imm.sig, 1);
+ ReturnVector returns = CreateReturnValues(imm.sig);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(CallRef, func_ref, imm.sig, imm.index,
+ args.begin(), returns.begin());
Drop(func_ref);
- DropArgs(sig);
+ DropArgs(imm.sig);
PushReturns(returns);
- return 1;
+ return 1 + imm.length;
}
DECODE(CallRef) {
CHECK_PROTOTYPE_OPCODE(typed_funcref);
- SigIndexImmediate<validate> imm(this, this->pc_ + 1);
+ SigIndexImmediate imm(this, this->pc_ + 1, validate);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
Value func_ref = Peek(0, 0, ValueType::RefNull(imm.index));
ArgVector args = PeekArgs(imm.sig, 1);
@@ -3652,7 +3763,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
DECODE(ReturnCallRef) {
CHECK_PROTOTYPE_OPCODE(typed_funcref);
CHECK_PROTOTYPE_OPCODE(return_call);
- SigIndexImmediate<validate> imm(this, this->pc_ + 1);
+ SigIndexImmediate imm(this, this->pc_ + 1, validate);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
Value func_ref = Peek(0, 0, ValueType::RefNull(imm.index));
ArgVector args = PeekArgs(imm.sig, 1);
@@ -3666,7 +3777,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
DECODE(Numeric) {
uint32_t opcode_length = 0;
- WasmOpcode full_opcode = this->template read_prefixed_opcode<validate>(
+ WasmOpcode full_opcode = this->template read_prefixed_opcode<ValidationTag>(
this->pc_, &opcode_length, "numeric index");
if (full_opcode == kExprTableGrow || full_opcode == kExprTableSize ||
full_opcode == kExprTableFill) {
@@ -3677,7 +3788,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
DECODE(Simd) {
- CHECK_PROTOTYPE_OPCODE(simd);
+ this->detected_->Add(kFeature_simd);
if (!CheckHardwareSupportsSimd()) {
if (v8_flags.correctness_fuzzer_suppressions) {
FATAL("Aborting on missing Wasm SIMD support");
@@ -3686,7 +3797,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
return 0;
}
uint32_t opcode_length = 0;
- WasmOpcode full_opcode = this->template read_prefixed_opcode<validate>(
+ WasmOpcode full_opcode = this->template read_prefixed_opcode<ValidationTag>(
this->pc_, &opcode_length);
if (!VALIDATE(this->ok())) return 0;
trace_msg->AppendOpcode(full_opcode);
@@ -3697,9 +3808,9 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
DECODE(Atomic) {
- CHECK_PROTOTYPE_OPCODE(threads);
+ this->detected_->Add(kFeature_threads);
uint32_t opcode_length = 0;
- WasmOpcode full_opcode = this->template read_prefixed_opcode<validate>(
+ WasmOpcode full_opcode = this->template read_prefixed_opcode<ValidationTag>(
this->pc_, &opcode_length, "atomic index");
trace_msg->AppendOpcode(full_opcode);
return DecodeAtomicOpcode(full_opcode, opcode_length);
@@ -3707,7 +3818,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
DECODE(GC) {
uint32_t opcode_length = 0;
- WasmOpcode full_opcode = this->template read_prefixed_opcode<validate>(
+ WasmOpcode full_opcode = this->template read_prefixed_opcode<ValidationTag>(
this->pc_, &opcode_length, "gc index");
trace_msg->AppendOpcode(full_opcode);
if (full_opcode >= kExprStringNewUtf8) {
@@ -3846,8 +3957,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
void EndControl() {
DCHECK(!control_.empty());
Control* current = &control_.back();
- DCHECK_LE(stack_ + current->stack_depth, stack_end_);
- stack_end_ = stack_ + current->stack_depth;
+ stack_.shrink_to(current->stack_depth);
current->reachability = kUnreachable;
current_code_reachable_and_ok_ = false;
}
@@ -3867,8 +3977,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
// Initializes start- and end-merges of {c} with values according to the
// in- and out-types of {c} respectively.
- void SetBlockType(Control* c, BlockTypeImmediate<validate>& imm,
- Value* args) {
+ void SetBlockType(Control* c, BlockTypeImmediate& imm, Value* args) {
const byte* pc = this->pc_;
InitMerge(&c->end_merge, imm.out_arity(), [pc, &imm](uint32_t i) {
return Value{pc, imm.out_type(i)};
@@ -3890,29 +3999,34 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
// to the difference, and return that number.
V8_INLINE int EnsureStackArguments(int count) {
uint32_t limit = control_.back().stack_depth;
- if (V8_LIKELY(stack_size() >= count + limit)) return 0;
+ if (V8_LIKELY(stack_.size() >= count + limit)) return 0;
return EnsureStackArguments_Slow(count, limit);
}
V8_NOINLINE int EnsureStackArguments_Slow(int count, uint32_t limit) {
if (!VALIDATE(control_.back().unreachable())) {
- NotEnoughArgumentsError(count, stack_size() - limit);
+ NotEnoughArgumentsError(count, stack_.size() - limit);
}
// Silently create unreachable values out of thin air underneath the
// existing stack values. To do so, we have to move existing stack values
// upwards in the stack, then instantiate the new Values as
// {UnreachableValue}.
- int current_values = stack_size() - limit;
+ int current_values = stack_.size() - limit;
int additional_values = count - current_values;
DCHECK_GT(additional_values, 0);
- EnsureStackSpace(additional_values);
- stack_end_ += additional_values;
- Value* stack_base = stack_value(current_values + additional_values);
- for (int i = current_values - 1; i >= 0; i--) {
- stack_base[additional_values + i] = stack_base[i];
- }
- for (int i = 0; i < additional_values; i++) {
- stack_base[i] = UnreachableValue(this->pc_);
+ stack_.EnsureMoreCapacity(additional_values, this->compilation_zone_);
+ Value unreachable_value = UnreachableValue(this->pc_);
+ for (int i = 0; i < additional_values; ++i) stack_.push(unreachable_value);
+ if (current_values > 0) {
+ // Move the current values up to the end of the stack, and create
+ // unreachable values below.
+ Value* stack_base = stack_value(current_values + additional_values);
+ for (int i = current_values - 1; i >= 0; i--) {
+ stack_base[additional_values + i] = stack_base[i];
+ }
+ for (int i = 0; i < additional_values; i++) {
+ stack_base[i] = UnreachableValue(this->pc_);
+ }
}
return additional_values;
}
@@ -3961,11 +4075,6 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
return args;
}
- ValueType GetReturnType(const FunctionSig* sig) {
- DCHECK_GE(1, sig->return_count());
- return sig->return_count() == 0 ? kWasmVoid : sig->GetReturn();
- }
-
// TODO(jkummerow): Consider refactoring control stack management so
// that {drop_values} is never needed. That would require decoupling
// creation of the Control object from setting of its stack depth.
@@ -3974,12 +4083,14 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
Reachability reachability = control_.back().innerReachability();
// In unreachable code, we may run out of stack.
uint32_t stack_depth =
- stack_size() >= drop_values ? stack_size() - drop_values : 0;
+ stack_.size() >= drop_values ? stack_.size() - drop_values : 0;
stack_depth = std::max(stack_depth, control_.back().stack_depth);
uint32_t init_stack_depth = this->locals_initialization_stack_depth();
+ control_.EnsureMoreCapacity(1, this->compilation_zone_);
control_.emplace_back(kind, stack_depth, init_stack_depth, this->pc_,
reachability);
- current_code_reachable_and_ok_ = this->ok() && reachability == kReachable;
+ current_code_reachable_and_ok_ =
+ VALIDATE(this->ok()) && reachability == kReachable;
return &control_.back();
}
@@ -3987,7 +4098,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
// This cannot be the outermost control block.
DCHECK_LT(1, control_.size());
Control* c = &control_.back();
- DCHECK_LE(stack_ + c->stack_depth, stack_end_);
+ DCHECK_LE(c->stack_depth, stack_.size());
CALL_INTERFACE_IF_OK_AND_PARENT_REACHABLE(PopControl, c);
@@ -4003,15 +4114,16 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
bool parent_reached =
c->reachable() || c->end_merge.reached || c->is_onearmed_if();
- control_.pop_back();
+ control_.pop();
// If the parent block was reachable before, but the popped control does not
// return to here, this block becomes "spec only reachable".
if (!parent_reached) SetSucceedingCodeDynamicallyUnreachable();
- current_code_reachable_and_ok_ = this->ok() && control_.back().reachable();
+ current_code_reachable_and_ok_ =
+ VALIDATE(this->ok()) && control_.back().reachable();
}
int DecodeLoadMem(LoadType type, int prefix_len = 1) {
- MemoryAccessImmediate<validate> imm =
+ MemoryAccessImmediate imm =
MakeMemoryAccessImmediate(prefix_len, type.size_log_2());
if (!this->Validate(this->pc_ + prefix_len, imm)) return 0;
ValueType index_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
@@ -4028,7 +4140,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
// Load extends always load 64-bits.
uint32_t max_alignment =
transform == LoadTransformationKind::kExtend ? 3 : type.size_log_2();
- MemoryAccessImmediate<validate> imm =
+ MemoryAccessImmediate imm =
MakeMemoryAccessImmediate(opcode_length, max_alignment);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
ValueType index_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
@@ -4042,11 +4154,11 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
int DecodeLoadLane(WasmOpcode opcode, LoadType type, uint32_t opcode_length) {
- MemoryAccessImmediate<validate> mem_imm =
+ MemoryAccessImmediate mem_imm =
MakeMemoryAccessImmediate(opcode_length, type.size_log_2());
if (!this->Validate(this->pc_ + opcode_length, mem_imm)) return 0;
- SimdLaneImmediate<validate> lane_imm(
- this, this->pc_ + opcode_length + mem_imm.length);
+ SimdLaneImmediate lane_imm(this, this->pc_ + opcode_length + mem_imm.length,
+ validate);
if (!this->Validate(this->pc_ + opcode_length, opcode, lane_imm)) return 0;
Value v128 = Peek(0, 1, kWasmS128);
Value index = Peek(1, 0, kWasmI32);
@@ -4061,11 +4173,11 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
int DecodeStoreLane(WasmOpcode opcode, StoreType type,
uint32_t opcode_length) {
- MemoryAccessImmediate<validate> mem_imm =
+ MemoryAccessImmediate mem_imm =
MakeMemoryAccessImmediate(opcode_length, type.size_log_2());
if (!this->Validate(this->pc_ + opcode_length, mem_imm)) return 0;
- SimdLaneImmediate<validate> lane_imm(
- this, this->pc_ + opcode_length + mem_imm.length);
+ SimdLaneImmediate lane_imm(this, this->pc_ + opcode_length + mem_imm.length,
+ validate);
if (!this->Validate(this->pc_ + opcode_length, opcode, lane_imm)) return 0;
Value v128 = Peek(0, 1, kWasmS128);
Value index = Peek(1, 0, kWasmI32);
@@ -4077,7 +4189,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
int DecodeStoreMem(StoreType store, int prefix_len = 1) {
- MemoryAccessImmediate<validate> imm =
+ MemoryAccessImmediate imm =
MakeMemoryAccessImmediate(prefix_len, store.size_log_2());
if (!this->Validate(this->pc_ + prefix_len, imm)) return 0;
Value value = Peek(0, 1, store.value_type());
@@ -4089,7 +4201,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
uint32_t SimdConstOp(uint32_t opcode_length) {
- Simd128Immediate<validate> imm(this, this->pc_ + opcode_length);
+ Simd128Immediate imm(this, this->pc_ + opcode_length, validate);
Value result = CreateValue(kWasmS128);
CALL_INTERFACE_IF_OK_AND_REACHABLE(S128Const, imm, &result);
Push(result);
@@ -4098,7 +4210,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
uint32_t SimdExtractLane(WasmOpcode opcode, ValueType type,
uint32_t opcode_length) {
- SimdLaneImmediate<validate> imm(this, this->pc_ + opcode_length);
+ SimdLaneImmediate imm(this, this->pc_ + opcode_length, validate);
if (this->Validate(this->pc_ + opcode_length, opcode, imm)) {
Value inputs[] = {Peek(0, 0, kWasmS128)};
Value result = CreateValue(type);
@@ -4112,7 +4224,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
uint32_t SimdReplaceLane(WasmOpcode opcode, ValueType type,
uint32_t opcode_length) {
- SimdLaneImmediate<validate> imm(this, this->pc_ + opcode_length);
+ SimdLaneImmediate imm(this, this->pc_ + opcode_length, validate);
if (this->Validate(this->pc_ + opcode_length, opcode, imm)) {
Value inputs[2] = {Peek(1, 0, kWasmS128), Peek(0, 1, type)};
Value result = CreateValue(kWasmS128);
@@ -4125,7 +4237,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
uint32_t Simd8x16ShuffleOp(uint32_t opcode_length) {
- Simd128Immediate<validate> imm(this, this->pc_ + opcode_length);
+ Simd128Immediate imm(this, this->pc_ + opcode_length, validate);
if (this->Validate(this->pc_ + opcode_length, imm)) {
Value input1 = Peek(0, 1, kWasmS128);
Value input0 = Peek(1, 0, kWasmS128);
@@ -4315,9 +4427,11 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
int DecodeGCOpcode(WasmOpcode opcode, uint32_t opcode_length) {
+ // This assumption might help the big switch below.
+ V8_ASSUME(opcode >> 8 == kGCPrefix);
switch (opcode) {
case kExprStructNew: {
- StructIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ StructIndexImmediate imm(this, this->pc_ + opcode_length, validate);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
ValueType rtt_type = ValueType::Rtt(imm.index);
Value rtt = CreateValue(rtt_type);
@@ -4333,15 +4447,10 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
return opcode_length + imm.length;
}
case kExprStructNewDefault: {
- StructIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ StructIndexImmediate imm(this, this->pc_ + opcode_length, validate);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
- if (validate) {
+ if (ValidationTag::validate) {
for (uint32_t i = 0; i < imm.struct_type->field_count(); i++) {
- if (!VALIDATE(imm.struct_type->mutability(i))) {
- this->DecodeError("%s: struct_type %d has immutable field %d",
- WasmOpcodes::OpcodeName(opcode), imm.index, i);
- return 0;
- }
ValueType ftype = imm.struct_type->field(i);
if (!VALIDATE(ftype.is_defaultable())) {
this->DecodeError(
@@ -4364,7 +4473,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
case kExprStructGet: {
NON_CONST_ONLY
- FieldImmediate<validate> field(this, this->pc_ + opcode_length);
+ FieldImmediate field(this, this->pc_ + opcode_length, validate);
if (!this->Validate(this->pc_ + opcode_length, field)) return 0;
ValueType field_type =
field.struct_imm.struct_type->field(field.field_imm.index);
@@ -4388,7 +4497,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
case kExprStructGetU:
case kExprStructGetS: {
NON_CONST_ONLY
- FieldImmediate<validate> field(this, this->pc_ + opcode_length);
+ FieldImmediate field(this, this->pc_ + opcode_length, validate);
if (!this->Validate(this->pc_ + opcode_length, field)) return 0;
ValueType field_type =
field.struct_imm.struct_type->field(field.field_imm.index);
@@ -4411,7 +4520,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
case kExprStructSet: {
NON_CONST_ONLY
- FieldImmediate<validate> field(this, this->pc_ + opcode_length);
+ FieldImmediate field(this, this->pc_ + opcode_length, validate);
if (!this->Validate(this->pc_ + opcode_length, field)) return 0;
const StructType* struct_type = field.struct_imm.struct_type;
if (!VALIDATE(struct_type->mutability(field.field_imm.index))) {
@@ -4429,7 +4538,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
return opcode_length + field.length;
}
case kExprArrayNew: {
- ArrayIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ ArrayIndexImmediate imm(this, this->pc_ + opcode_length, validate);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
ValueType rtt_type = ValueType::Rtt(imm.index);
Value rtt = CreateValue(rtt_type);
@@ -4446,13 +4555,8 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
return opcode_length + imm.length;
}
case kExprArrayNewDefault: {
- ArrayIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ ArrayIndexImmediate imm(this, this->pc_ + opcode_length, validate);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
- if (!VALIDATE(imm.array_type->mutability())) {
- this->DecodeError("%s: array type %d is immutable",
- WasmOpcodes::OpcodeName(opcode), imm.index);
- return 0;
- }
if (!VALIDATE(imm.array_type->element_type().is_defaultable())) {
this->DecodeError(
"%s: array type %d has non-defaultable element type %s",
@@ -4473,8 +4577,8 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
return opcode_length + imm.length;
}
case kExprArrayNewData: {
- ArrayIndexImmediate<validate> array_imm(this,
- this->pc_ + opcode_length);
+ ArrayIndexImmediate array_imm(this, this->pc_ + opcode_length,
+ validate);
if (!this->Validate(this->pc_ + opcode_length, array_imm)) return 0;
ValueType element_type = array_imm.array_type->element_type();
if (element_type.is_reference()) {
@@ -4494,8 +4598,8 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
#endif
const byte* data_index_pc =
this->pc_ + opcode_length + array_imm.length;
- IndexImmediate<validate> data_segment(this, data_index_pc,
- "data segment");
+ IndexImmediate data_segment(this, data_index_pc, "data segment",
+ validate);
if (!this->ValidateDataSegment(data_index_pc, data_segment)) return 0;
ValueType rtt_type = ValueType::Rtt(array_imm.index);
@@ -4515,8 +4619,8 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
return opcode_length + array_imm.length + data_segment.length;
}
case kExprArrayNewElem: {
- ArrayIndexImmediate<validate> array_imm(this,
- this->pc_ + opcode_length);
+ ArrayIndexImmediate array_imm(this, this->pc_ + opcode_length,
+ validate);
if (!this->Validate(this->pc_ + opcode_length, array_imm)) return 0;
ValueType element_type = array_imm.array_type->element_type();
if (element_type.is_numeric()) {
@@ -4528,8 +4632,8 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
const byte* elem_index_pc =
this->pc_ + opcode_length + array_imm.length;
- IndexImmediate<validate> elem_segment(this, elem_index_pc,
- "data segment");
+ IndexImmediate elem_segment(this, elem_index_pc, "data segment",
+ validate);
if (!this->ValidateElementSegment(elem_index_pc, elem_segment)) {
return 0;
}
@@ -4563,7 +4667,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
case kExprArrayGetS:
case kExprArrayGetU: {
NON_CONST_ONLY
- ArrayIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ ArrayIndexImmediate imm(this, this->pc_ + opcode_length, validate);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
if (!VALIDATE(imm.array_type->element_type().is_packed())) {
this->DecodeError(
@@ -4584,7 +4688,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
case kExprArrayGet: {
NON_CONST_ONLY
- ArrayIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ ArrayIndexImmediate imm(this, this->pc_ + opcode_length, validate);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
if (!VALIDATE(!imm.array_type->element_type().is_packed())) {
this->DecodeError(
@@ -4604,7 +4708,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
case kExprArraySet: {
NON_CONST_ONLY
- ArrayIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ ArrayIndexImmediate imm(this, this->pc_ + opcode_length, validate);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
if (!VALIDATE(imm.array_type->mutability())) {
this->DecodeError("array.set: immediate array type %d is immutable",
@@ -4632,7 +4736,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
NON_CONST_ONLY
// Read but ignore an immediate array type index.
// TODO(7748): Remove this once we are ready to make breaking changes.
- ArrayIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ ArrayIndexImmediate imm(this, this->pc_ + opcode_length, validate);
Value array_obj = Peek(0, 0, kWasmArrayRef);
Value value = CreateValue(kWasmI32);
CALL_INTERFACE_IF_OK_AND_REACHABLE(ArrayLen, array_obj, &value);
@@ -4642,7 +4746,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
case kExprArrayCopy: {
NON_CONST_ONLY
- ArrayIndexImmediate<validate> dst_imm(this, this->pc_ + opcode_length);
+ ArrayIndexImmediate dst_imm(this, this->pc_ + opcode_length, validate);
if (!this->Validate(this->pc_ + opcode_length, dst_imm)) return 0;
if (!VALIDATE(dst_imm.array_type->mutability())) {
this->DecodeError(
@@ -4650,8 +4754,8 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
dst_imm.index);
return 0;
}
- ArrayIndexImmediate<validate> src_imm(
- this, this->pc_ + opcode_length + dst_imm.length);
+ ArrayIndexImmediate src_imm(
+ this, this->pc_ + opcode_length + dst_imm.length, validate);
if (!this->Validate(this->pc_ + opcode_length + dst_imm.length,
src_imm)) {
return 0;
@@ -4676,12 +4780,12 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
return opcode_length + dst_imm.length + src_imm.length;
}
case kExprArrayNewFixed: {
- ArrayIndexImmediate<validate> array_imm(this,
- this->pc_ + opcode_length);
+ ArrayIndexImmediate array_imm(this, this->pc_ + opcode_length,
+ validate);
if (!this->Validate(this->pc_ + opcode_length, array_imm)) return 0;
- IndexImmediate<validate> length_imm(
- this, this->pc_ + opcode_length + array_imm.length,
- "array.new_fixed length");
+ IndexImmediate length_imm(this,
+ this->pc_ + opcode_length + array_imm.length,
+ "array.new_fixed length", validate);
uint32_t elem_count = length_imm.index;
if (!VALIDATE(elem_count <= kV8MaxWasmArrayNewFixedLength)) {
this->DecodeError(
@@ -4731,11 +4835,92 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
Push(value);
return opcode_length;
}
+ case kExprRefCast:
+ case kExprRefCastNull: {
+ NON_CONST_ONLY
+ HeapTypeImmediate imm(this->enabled_, this, this->pc_ + opcode_length,
+ validate);
+ this->Validate(this->pc_ + opcode_length, imm);
+ if (!VALIDATE(this->ok())) return 0;
+ opcode_length += imm.length;
+
+ std::optional<Value> rtt;
+ HeapType target_type = imm.type;
+ if (imm.type.is_index()) {
+ rtt = CreateValue(ValueType::Rtt(imm.type.ref_index()));
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, imm.type.ref_index(),
+ &rtt.value());
+ Push(rtt.value());
+ }
+
+ Value obj = Peek(rtt.has_value() ? 1 : 0);
+ if (!VALIDATE((obj.type.is_object_reference() &&
+ IsSameTypeHierarchy(obj.type.heap_type(), target_type,
+ this->module_)) ||
+ obj.type.is_bottom())) {
+ this->DecodeError(
+ obj.pc(),
+ "Invalid types for ref.cast: %s of type %s has to "
+ "be in the same reference type hierarchy as (ref %s)",
+ SafeOpcodeNameAt(obj.pc()), obj.type.name().c_str(),
+ target_type.name().c_str());
+ return 0;
+ }
+
+ bool null_succeeds = opcode == kExprRefCastNull;
+ Value value = CreateValue(ValueType::RefMaybeNull(
+ imm.type, (obj.type.is_bottom() || !null_succeeds)
+ ? kNonNullable
+ : obj.type.nullability()));
+ if (current_code_reachable_and_ok_) {
+ // This logic ensures that code generation can assume that functions
+ // can only be cast to function types, and data objects to data types.
+ if (V8_UNLIKELY(TypeCheckAlwaysSucceeds(obj, target_type))) {
+ // Drop the rtt from the stack, then forward the object value to the
+ // result.
+ if (rtt.has_value()) {
+ CALL_INTERFACE(Drop);
+ }
+ if (obj.type.is_nullable() && !null_succeeds) {
+ CALL_INTERFACE(AssertNotNull, obj, &value);
+ } else {
+ CALL_INTERFACE(Forward, obj, &value);
+ }
+ } else if (V8_UNLIKELY(TypeCheckAlwaysFails(obj, target_type,
+ null_succeeds))) {
+ if (rtt.has_value()) {
+ CALL_INTERFACE(Drop);
+ }
+ // Unrelated types. The only way this will not trap is if the object
+ // is null.
+ if (obj.type.is_nullable() && null_succeeds) {
+ // Drop rtt from the stack, then assert that obj is null.
+ CALL_INTERFACE(AssertNull, obj, &value);
+ } else {
+ CALL_INTERFACE(Trap, TrapReason::kTrapIllegalCast);
+ // We know that the following code is not reachable, but according
+ // to the spec it technically is. Set it to spec-only reachable.
+ SetSucceedingCodeDynamicallyUnreachable();
+ }
+ } else {
+ if (rtt.has_value()) {
+ CALL_INTERFACE(RefCast, obj, rtt.value(), &value, null_succeeds);
+ } else {
+ CALL_INTERFACE(RefCastAbstract, obj, target_type, &value,
+ null_succeeds);
+ }
+ }
+ }
+ Drop(1 + rtt.has_value());
+ Push(value);
+ return opcode_length;
+ }
case kExprRefTestNull:
case kExprRefTest: {
NON_CONST_ONLY
- HeapTypeImmediate<validate> imm(
- this->enabled_, this, this->pc_ + opcode_length, this->module_);
+ HeapTypeImmediate imm(this->enabled_, this, this->pc_ + opcode_length,
+ validate);
+ this->Validate(this->pc_ + opcode_length, imm);
if (!VALIDATE(this->ok())) return 0;
opcode_length += imm.length;
@@ -4806,8 +4991,8 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
case kExprRefTestDeprecated: {
NON_CONST_ONLY
- IndexImmediate<validate> imm(this, this->pc_ + opcode_length,
- "type index");
+ IndexImmediate imm(this, this->pc_ + opcode_length, "type index",
+ validate);
if (!this->ValidateType(this->pc_ + opcode_length, imm)) return 0;
opcode_length += imm.length;
Value rtt = CreateValue(ValueType::Rtt(imm.index));
@@ -4816,9 +5001,12 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
Value obj = Peek(1);
Value value = CreateValue(kWasmI32);
if (!VALIDATE(IsSubtypeOf(obj.type, kWasmFuncRef, this->module_) ||
- IsSubtypeOf(obj.type, kWasmDataRef, this->module_) ||
+ IsSubtypeOf(obj.type, kWasmStructRef, this->module_) ||
+ IsSubtypeOf(obj.type, kWasmArrayRef, this->module_) ||
obj.type.is_bottom())) {
- PopTypeError(0, obj, "subtype of (ref null func) or (ref null data)");
+ PopTypeError(0, obj,
+ "subtype of (ref null func), (ref null struct) or (ref "
+ "null array)");
return 0;
}
if (current_code_reachable_and_ok_) {
@@ -4856,15 +5044,18 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
"--experimental-wasm-ref-cast-nop)");
return 0;
}
- IndexImmediate<validate> imm(this, this->pc_ + opcode_length,
- "type index");
+ IndexImmediate imm(this, this->pc_ + opcode_length, "type index",
+ validate);
if (!this->ValidateType(this->pc_ + opcode_length, imm)) return 0;
opcode_length += imm.length;
Value obj = Peek(0);
if (!VALIDATE(IsSubtypeOf(obj.type, kWasmFuncRef, this->module_) ||
- IsSubtypeOf(obj.type, kWasmDataRef, this->module_) ||
+ IsSubtypeOf(obj.type, kWasmStructRef, this->module_) ||
+ IsSubtypeOf(obj.type, kWasmArrayRef, this->module_) ||
obj.type.is_bottom())) {
- PopTypeError(0, obj, "subtype of (ref null func) or (ref null data)");
+ PopTypeError(0, obj,
+ "subtype of (ref null func), (ref null struct) or (ref "
+ "null array)");
return 0;
}
Value value = CreateValue(ValueType::RefMaybeNull(
@@ -4875,10 +5066,10 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
Push(value);
return opcode_length;
}
- case kExprRefCast: {
+ case kExprRefCastDeprecated: {
NON_CONST_ONLY
- IndexImmediate<validate> imm(this, this->pc_ + opcode_length,
- "type index");
+ IndexImmediate imm(this, this->pc_ + opcode_length, "type index",
+ validate);
if (!this->ValidateType(this->pc_ + opcode_length, imm)) return 0;
opcode_length += imm.length;
Value rtt = CreateValue(ValueType::Rtt(imm.index));
@@ -4886,9 +5077,12 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
Push(rtt);
Value obj = Peek(1);
if (!VALIDATE(IsSubtypeOf(obj.type, kWasmFuncRef, this->module_) ||
- IsSubtypeOf(obj.type, kWasmDataRef, this->module_) ||
+ IsSubtypeOf(obj.type, kWasmStructRef, this->module_) ||
+ IsSubtypeOf(obj.type, kWasmArrayRef, this->module_) ||
obj.type.is_bottom())) {
- PopTypeError(0, obj, "subtype of (ref null func) or (ref null data)");
+ PopTypeError(0, obj,
+ "subtype of (ref null func), (ref null struct) or (ref "
+ "null array)");
return 0;
}
// If either value is bottom, we emit the most specific type possible.
@@ -4918,7 +5112,8 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
SetSucceedingCodeDynamicallyUnreachable();
}
} else {
- CALL_INTERFACE(RefCast, obj, rtt, &value);
+ bool null_succeeds = true;
+ CALL_INTERFACE(RefCast, obj, rtt, &value, null_succeeds);
}
}
Drop(2);
@@ -4927,14 +5122,14 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
case kExprBrOnCast: {
NON_CONST_ONLY
- BranchDepthImmediate<validate> branch_depth(this,
- this->pc_ + opcode_length);
+ BranchDepthImmediate branch_depth(this, this->pc_ + opcode_length,
+ validate);
if (!this->Validate(this->pc_ + opcode_length, branch_depth,
control_.size())) {
return 0;
}
uint32_t pc_offset = opcode_length + branch_depth.length;
- IndexImmediate<validate> imm(this, this->pc_ + pc_offset, "type index");
+ IndexImmediate imm(this, this->pc_ + pc_offset, "type index", validate);
if (!this->ValidateType(this->pc_ + opcode_length, imm)) return 0;
pc_offset += imm.length;
Value rtt = CreateValue(ValueType::Rtt(imm.index));
@@ -4943,9 +5138,12 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
// anyway.
Value obj = Peek(0);
if (!VALIDATE(IsSubtypeOf(obj.type, kWasmFuncRef, this->module_) ||
- IsSubtypeOf(obj.type, kWasmDataRef, this->module_) ||
+ IsSubtypeOf(obj.type, kWasmStructRef, this->module_) ||
+ IsSubtypeOf(obj.type, kWasmArrayRef, this->module_) ||
obj.type.is_bottom())) {
- PopTypeError(0, obj, "subtype of (ref null func) or (ref null data)");
+ PopTypeError(0, obj,
+ "subtype of (ref null func), (ref null struct) or (ref "
+ "null array)");
return 0;
}
Control* c = control_at(branch_depth.depth);
@@ -4996,23 +5194,26 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
case kExprBrOnCastFail: {
NON_CONST_ONLY
- BranchDepthImmediate<validate> branch_depth(this,
- this->pc_ + opcode_length);
+ BranchDepthImmediate branch_depth(this, this->pc_ + opcode_length,
+ validate);
if (!this->Validate(this->pc_ + opcode_length, branch_depth,
control_.size())) {
return 0;
}
uint32_t pc_offset = opcode_length + branch_depth.length;
- IndexImmediate<validate> imm(this, this->pc_ + pc_offset, "type index");
+ IndexImmediate imm(this, this->pc_ + pc_offset, "type index", validate);
if (!this->ValidateType(this->pc_ + opcode_length, imm)) return 0;
pc_offset += imm.length;
Value rtt = CreateValue(ValueType::Rtt(imm.index));
CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, imm.index, &rtt);
Value obj = Peek(0);
if (!VALIDATE(IsSubtypeOf(obj.type, kWasmFuncRef, this->module_) ||
- IsSubtypeOf(obj.type, kWasmDataRef, this->module_) ||
+ IsSubtypeOf(obj.type, kWasmStructRef, this->module_) ||
+ IsSubtypeOf(obj.type, kWasmArrayRef, this->module_) ||
obj.type.is_bottom())) {
- PopTypeError(0, obj, "subtype of (ref null func) or (ref null data)");
+ PopTypeError(0, obj,
+ "subtype of (ref null func), (ref null struct) or (ref "
+ "null array)");
return 0;
}
Control* c = control_at(branch_depth.depth);
@@ -5067,7 +5268,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
case kExprRefIs##h_type: { \
NON_CONST_ONLY \
Value arg = Peek(0, 0, kWasmAnyRef); \
- if (this->failed()) return 0; \
+ if (!VALIDATE(this->ok())) return 0; \
Value result = CreateValue(kWasmI32); \
if (V8_LIKELY(current_code_reachable_and_ok_)) { \
if (IsHeapSubtypeOf(arg.type.heap_type(), HeapType(HeapType::k##h_type), \
@@ -5092,7 +5293,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
Push(result); \
return opcode_length; \
}
- ABSTRACT_TYPE_CHECK(Data)
+ ABSTRACT_TYPE_CHECK(Struct)
ABSTRACT_TYPE_CHECK(I31)
ABSTRACT_TYPE_CHECK(Array)
#undef ABSTRACT_TYPE_CHECK
@@ -5126,17 +5327,17 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
Push(result); \
return opcode_length; \
}
- ABSTRACT_TYPE_CAST(Data)
+ ABSTRACT_TYPE_CAST(Struct)
ABSTRACT_TYPE_CAST(I31)
ABSTRACT_TYPE_CAST(Array)
#undef ABSTRACT_TYPE_CAST
- case kExprBrOnData:
+ case kExprBrOnStruct:
case kExprBrOnArray:
case kExprBrOnI31: {
NON_CONST_ONLY
- BranchDepthImmediate<validate> branch_depth(this,
- this->pc_ + opcode_length);
+ BranchDepthImmediate branch_depth(this, this->pc_ + opcode_length,
+ validate);
if (!this->Validate(this->pc_ + opcode_length, branch_depth,
control_.size())) {
return 0;
@@ -5157,7 +5358,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
Value obj = Peek(0, 0, kWasmAnyRef);
Drop(obj);
HeapType::Representation heap_type =
- opcode == kExprBrOnData ? HeapType::kData
+ opcode == kExprBrOnStruct ? HeapType::kStruct
: opcode == kExprBrOnArray ? HeapType::kArray
: HeapType::kI31;
Value result_on_branch = CreateValue(ValueType::Ref(heap_type));
@@ -5168,8 +5369,9 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
// {result_on_branch} which was passed-by-value to {Push}.
Value* value_on_branch = stack_value(1);
if (V8_LIKELY(current_code_reachable_and_ok_)) {
- if (opcode == kExprBrOnData) {
- CALL_INTERFACE(BrOnData, obj, value_on_branch, branch_depth.depth);
+ if (opcode == kExprBrOnStruct) {
+ CALL_INTERFACE(BrOnStruct, obj, value_on_branch,
+ branch_depth.depth);
} else if (opcode == kExprBrOnArray) {
CALL_INTERFACE(BrOnArray, obj, value_on_branch, branch_depth.depth);
} else {
@@ -5181,12 +5383,12 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
Push(obj); // Restore stack state on fallthrough.
return opcode_length + branch_depth.length;
}
- case kExprBrOnNonData:
+ case kExprBrOnNonStruct:
case kExprBrOnNonArray:
case kExprBrOnNonI31: {
NON_CONST_ONLY
- BranchDepthImmediate<validate> branch_depth(this,
- this->pc_ + opcode_length);
+ BranchDepthImmediate branch_depth(this, this->pc_ + opcode_length,
+ validate);
if (!this->Validate(this->pc_ + opcode_length, branch_depth,
control_.size())) {
return 0;
@@ -5202,14 +5404,14 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
Value obj = Peek(0, 0, kWasmAnyRef);
HeapType::Representation heap_type =
- opcode == kExprBrOnNonData ? HeapType::kData
+ opcode == kExprBrOnNonStruct ? HeapType::kStruct
: opcode == kExprBrOnNonArray ? HeapType::kArray
: HeapType::kI31;
Value value_on_fallthrough = CreateValue(ValueType::Ref(heap_type));
if (V8_LIKELY(current_code_reachable_and_ok_)) {
- if (opcode == kExprBrOnNonData) {
- CALL_INTERFACE(BrOnNonData, obj, &value_on_fallthrough,
+ if (opcode == kExprBrOnNonStruct) {
+ CALL_INTERFACE(BrOnNonStruct, obj, &value_on_fallthrough,
branch_depth.depth);
} else if (opcode == kExprBrOnNonArray) {
CALL_INTERFACE(BrOnNonArray, obj, &value_on_fallthrough,
@@ -5257,7 +5459,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
int DecodeStringNewWtf8(unibrow::Utf8Variant variant,
uint32_t opcode_length) {
NON_CONST_ONLY
- MemoryIndexImmediate<validate> memory(this, this->pc_ + opcode_length);
+ MemoryIndexImmediate memory(this, this->pc_ + opcode_length, validate);
if (!this->Validate(this->pc_ + opcode_length, memory)) return 0;
ValueType addr_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
Value offset = Peek(1, 0, addr_type);
@@ -5285,7 +5487,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
int DecodeStringEncodeWtf8(unibrow::Utf8Variant variant,
uint32_t opcode_length) {
NON_CONST_ONLY
- MemoryIndexImmediate<validate> memory(this, this->pc_ + opcode_length);
+ MemoryIndexImmediate memory(this, this->pc_ + opcode_length, validate);
if (!this->Validate(this->pc_ + opcode_length, memory)) return 0;
ValueType addr_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
Value str = Peek(1, 0, kWasmStringRef);
@@ -5301,7 +5503,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
int DecodeStringViewWtf8Encode(unibrow::Utf8Variant variant,
uint32_t opcode_length) {
NON_CONST_ONLY
- MemoryIndexImmediate<validate> memory(this, this->pc_ + opcode_length);
+ MemoryIndexImmediate memory(this, this->pc_ + opcode_length, validate);
if (!this->Validate(this->pc_ + opcode_length, memory)) return 0;
ValueType addr_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
Value view = Peek(3, 0, kWasmStringViewWtf8);
@@ -5348,6 +5550,8 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
int DecodeStringRefOpcode(WasmOpcode opcode, uint32_t opcode_length) {
+ // This assumption might help the big switch below.
+ V8_ASSUME(opcode >> 8 == kGCPrefix);
switch (opcode) {
case kExprStringNewUtf8:
return DecodeStringNewWtf8(unibrow::Utf8Variant::kUtf8, opcode_length);
@@ -5358,7 +5562,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
return DecodeStringNewWtf8(unibrow::Utf8Variant::kWtf8, opcode_length);
case kExprStringNewWtf16: {
NON_CONST_ONLY
- MemoryIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ MemoryIndexImmediate imm(this, this->pc_ + opcode_length, validate);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
ValueType addr_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
Value offset = Peek(1, 0, addr_type);
@@ -5371,7 +5575,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
return opcode_length + imm.length;
}
case kExprStringConst: {
- StringConstImmediate<validate> imm(this, this->pc_ + opcode_length);
+ StringConstImmediate imm(this, this->pc_ + opcode_length, validate);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
Value result = CreateValue(ValueType::Ref(HeapType::kString));
CALL_INTERFACE_IF_OK_AND_REACHABLE(StringConst, imm, &result);
@@ -5404,7 +5608,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
opcode_length);
case kExprStringEncodeWtf16: {
NON_CONST_ONLY
- MemoryIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ MemoryIndexImmediate imm(this, this->pc_ + opcode_length, validate);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
ValueType addr_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
Value str = Peek(1, 0, kWasmStringRef);
@@ -5518,7 +5722,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
case kExprStringViewWtf16Encode: {
NON_CONST_ONLY
- MemoryIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ MemoryIndexImmediate imm(this, this->pc_ + opcode_length, validate);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
ValueType addr_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
Value view = Peek(3, 0, kWasmStringViewWtf16);
@@ -5653,18 +5857,17 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
#undef NON_CONST_ONLY
uint32_t DecodeAtomicOpcode(WasmOpcode opcode, uint32_t opcode_length) {
- ValueType ret_type;
- const FunctionSig* sig = WasmOpcodes::Signature(opcode);
- if (!VALIDATE(sig != nullptr)) {
- this->DecodeError("invalid atomic opcode");
+ // Fast check for out-of-range opcodes (only allow 0xfeXX).
+ if (!VALIDATE((opcode >> 8) == kAtomicPrefix)) {
+ this->DecodeError("invalid atomic opcode: 0x%x", opcode);
return 0;
}
+
MachineType memtype;
switch (opcode) {
#define CASE_ATOMIC_STORE_OP(Name, Type) \
case kExpr##Name: { \
memtype = MachineType::Type(); \
- ret_type = kWasmVoid; \
break; /* to generic mem access code below */ \
}
ATOMIC_STORE_OP_LIST(CASE_ATOMIC_STORE_OP)
@@ -5672,14 +5875,13 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
#define CASE_ATOMIC_OP(Name, Type) \
case kExpr##Name: { \
memtype = MachineType::Type(); \
- ret_type = GetReturnType(sig); \
break; /* to generic mem access code below */ \
}
ATOMIC_OP_LIST(CASE_ATOMIC_OP)
#undef CASE_ATOMIC_OP
case kExprAtomicFence: {
- byte zero =
- this->template read_u8<validate>(this->pc_ + opcode_length, "zero");
+ byte zero = this->template read_u8<ValidationTag>(
+ this->pc_ + opcode_length, "zero");
if (!VALIDATE(zero == 0)) {
this->DecodeError(this->pc_ + opcode_length,
"invalid atomic operand");
@@ -5689,11 +5891,14 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
return 1 + opcode_length;
}
default:
- this->DecodeError("invalid atomic opcode");
+ this->DecodeError("invalid atomic opcode: 0x%x", opcode);
return 0;
}
- MemoryAccessImmediate<validate> imm = MakeMemoryAccessImmediate(
+ const FunctionSig* sig = WasmOpcodes::Signature(opcode);
+ V8_ASSUME(sig != nullptr);
+
+ MemoryAccessImmediate imm = MakeMemoryAccessImmediate(
opcode_length, ElementSizeLog2Of(memtype.representation()));
if (!this->Validate(this->pc_ + opcode_length, imm)) return false;
@@ -5701,12 +5906,13 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
// then).
CHECK(!this->module_->is_memory64);
ArgVector args = PeekArgs(sig);
- if (ret_type == kWasmVoid) {
+ if (sig->return_count() == 0) {
CALL_INTERFACE_IF_OK_AND_REACHABLE(AtomicOp, opcode, base::VectorOf(args),
imm, nullptr);
DropArgs(sig);
} else {
- Value result = CreateValue(GetReturnType(sig));
+ DCHECK_EQ(1, sig->return_count());
+ Value result = CreateValue(sig->GetReturn());
CALL_INTERFACE_IF_OK_AND_REACHABLE(AtomicOp, opcode, base::VectorOf(args),
imm, &result);
DropArgs(sig);
@@ -5716,6 +5922,14 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
unsigned DecodeNumericOpcode(WasmOpcode opcode, uint32_t opcode_length) {
+ // Fast check for out-of-range opcodes (only allow 0xfcXX).
+ // This avoids a dynamic check in signature lookup, and might also help the
+ // big switch below.
+ if (!VALIDATE((opcode >> 8) == kNumericPrefix)) {
+ this->DecodeError("invalid numeric opcode: 0x%x", opcode);
+ return 0;
+ }
+
const FunctionSig* sig = WasmOpcodes::Signature(opcode);
switch (opcode) {
case kExprI32SConvertSatF32:
@@ -5730,7 +5944,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
return opcode_length;
}
case kExprMemoryInit: {
- MemoryInitImmediate<validate> imm(this, this->pc_ + opcode_length);
+ MemoryInitImmediate imm(this, this->pc_ + opcode_length, validate);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
ValueType mem_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
Value size = Peek(0, 2, kWasmI32);
@@ -5741,8 +5955,8 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
return opcode_length + imm.length;
}
case kExprDataDrop: {
- IndexImmediate<validate> imm(this, this->pc_ + opcode_length,
- "data segment index");
+ IndexImmediate imm(this, this->pc_ + opcode_length,
+ "data segment index", validate);
if (!this->ValidateDataSegment(this->pc_ + opcode_length, imm)) {
return 0;
}
@@ -5750,7 +5964,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
return opcode_length + imm.length;
}
case kExprMemoryCopy: {
- MemoryCopyImmediate<validate> imm(this, this->pc_ + opcode_length);
+ MemoryCopyImmediate imm(this, this->pc_ + opcode_length, validate);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
ValueType mem_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
Value size = Peek(0, 2, mem_type);
@@ -5761,7 +5975,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
return opcode_length + imm.length;
}
case kExprMemoryFill: {
- MemoryIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ MemoryIndexImmediate imm(this, this->pc_ + opcode_length, validate);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
ValueType mem_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
Value size = Peek(0, 2, mem_type);
@@ -5772,7 +5986,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
return opcode_length + imm.length;
}
case kExprTableInit: {
- TableInitImmediate<validate> imm(this, this->pc_ + opcode_length);
+ TableInitImmediate imm(this, this->pc_ + opcode_length, validate);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
ArgVector args = PeekArgs(sig);
CALL_INTERFACE_IF_OK_AND_REACHABLE(TableInit, imm,
@@ -5781,8 +5995,8 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
return opcode_length + imm.length;
}
case kExprElemDrop: {
- IndexImmediate<validate> imm(this, this->pc_ + opcode_length,
- "element segment index");
+ IndexImmediate imm(this, this->pc_ + opcode_length,
+ "element segment index", validate);
if (!this->ValidateElementSegment(this->pc_ + opcode_length, imm)) {
return 0;
}
@@ -5790,7 +6004,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
return opcode_length + imm.length;
}
case kExprTableCopy: {
- TableCopyImmediate<validate> imm(this, this->pc_ + opcode_length);
+ TableCopyImmediate imm(this, this->pc_ + opcode_length, validate);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
ArgVector args = PeekArgs(sig);
CALL_INTERFACE_IF_OK_AND_REACHABLE(TableCopy, imm,
@@ -5799,8 +6013,8 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
return opcode_length + imm.length;
}
case kExprTableGrow: {
- IndexImmediate<validate> imm(this, this->pc_ + opcode_length,
- "table index");
+ IndexImmediate imm(this, this->pc_ + opcode_length, "table index",
+ validate);
if (!this->ValidateTable(this->pc_ + opcode_length, imm)) return 0;
Value delta = Peek(0, 1, kWasmI32);
Value value = Peek(1, 0, this->module_->tables[imm.index].type);
@@ -5812,8 +6026,8 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
return opcode_length + imm.length;
}
case kExprTableSize: {
- IndexImmediate<validate> imm(this, this->pc_ + opcode_length,
- "table index");
+ IndexImmediate imm(this, this->pc_ + opcode_length, "table index",
+ validate);
if (!this->ValidateTable(this->pc_ + opcode_length, imm)) return 0;
Value result = CreateValue(kWasmI32);
CALL_INTERFACE_IF_OK_AND_REACHABLE(TableSize, imm, &result);
@@ -5821,8 +6035,8 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
return opcode_length + imm.length;
}
case kExprTableFill: {
- IndexImmediate<validate> imm(this, this->pc_ + opcode_length,
- "table index");
+ IndexImmediate imm(this, this->pc_ + opcode_length, "table index",
+ validate);
if (!this->ValidateTable(this->pc_ + opcode_length, imm)) return 0;
Value count = Peek(0, 2, kWasmI32);
Value value = Peek(1, 1, this->module_->tables[imm.index].type);
@@ -5832,59 +6046,36 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
return opcode_length + imm.length;
}
default:
- this->DecodeError("invalid numeric opcode");
+ this->DecodeError("invalid numeric opcode: 0x%x", opcode);
return 0;
}
}
- V8_INLINE void EnsureStackSpace(int slots_needed) {
- if (V8_LIKELY(stack_capacity_end_ - stack_end_ >= slots_needed)) return;
- GrowStackSpace(slots_needed);
- }
-
- V8_NOINLINE void GrowStackSpace(int slots_needed) {
- size_t new_stack_capacity =
- std::max(size_t{8},
- base::bits::RoundUpToPowerOfTwo(stack_size() + slots_needed));
- Value* new_stack =
- this->zone()->template NewArray<Value>(new_stack_capacity);
- if (stack_) {
- std::copy(stack_, stack_end_, new_stack);
- this->zone()->DeleteArray(stack_, stack_capacity_end_ - stack_);
- }
- stack_end_ = new_stack + (stack_end_ - stack_);
- stack_ = new_stack;
- stack_capacity_end_ = new_stack + new_stack_capacity;
- }
-
V8_INLINE Value CreateValue(ValueType type) { return Value{this->pc_, type}; }
V8_INLINE void Push(Value value) {
DCHECK_NE(kWasmVoid, value.type);
- // {EnsureStackSpace} should have been called before, either in the central
- // decoding loop, or individually if more than one element is pushed.
- DCHECK_GT(stack_capacity_end_, stack_end_);
- *stack_end_ = value;
- ++stack_end_;
+ // {stack_.EnsureMoreCapacity} should have been called before, either in the
+ // central decoding loop, or individually if more than one element is
+ // pushed.
+ stack_.push(value);
}
void PushMergeValues(Control* c, Merge<Value>* merge) {
if (decoding_mode == kConstantExpression) return;
DCHECK_EQ(c, &control_.back());
DCHECK(merge == &c->start_merge || merge == &c->end_merge);
- DCHECK_LE(stack_ + c->stack_depth, stack_end_);
- stack_end_ = stack_ + c->stack_depth;
+ stack_.shrink_to(c->stack_depth);
if (merge->arity == 1) {
- // {EnsureStackSpace} should have been called before in the central
- // decoding loop.
- DCHECK_GT(stack_capacity_end_, stack_end_);
- *stack_end_++ = merge->vals.first;
+ // {stack_.EnsureMoreCapacity} should have been called before in the
+ // central decoding loop.
+ stack_.push(merge->vals.first);
} else {
- EnsureStackSpace(merge->arity);
+ stack_.EnsureMoreCapacity(merge->arity, this->compilation_zone_);
for (uint32_t i = 0; i < merge->arity; i++) {
- *stack_end_++ = merge->vals.array[i];
+ stack_.push(merge->vals.array[i]);
}
}
- DCHECK_EQ(c->stack_depth + merge->arity, stack_size());
+ DCHECK_EQ(c->stack_depth + merge->arity, stack_.size());
}
V8_INLINE ReturnVector CreateReturnValues(const FunctionSig* sig) {
@@ -5895,7 +6086,8 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
return values;
}
V8_INLINE void PushReturns(ReturnVector values) {
- EnsureStackSpace(static_cast<int>(values.size()));
+ stack_.EnsureMoreCapacity(static_cast<int>(values.size()),
+ this->compilation_zone_);
for (Value& value : values) Push(value);
}
@@ -5937,16 +6129,16 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
V8_INLINE Value Peek(int depth) {
DCHECK(!control_.empty());
uint32_t limit = control_.back().stack_depth;
- if (V8_UNLIKELY(stack_size() <= limit + depth)) {
+ if (V8_UNLIKELY(stack_.size() <= limit + depth)) {
// Peeking past the current control start in reachable code.
if (!VALIDATE(decoding_mode == kFunctionBody &&
control_.back().unreachable())) {
- NotEnoughArgumentsError(depth + 1, stack_size() - limit);
+ NotEnoughArgumentsError(depth + 1, stack_.size() - limit);
}
return UnreachableValue(this->pc_);
}
- DCHECK_LE(stack_, stack_end_ - depth - 1);
- return *(stack_end_ - depth - 1);
+ DCHECK_LT(depth, stack_.size());
+ return *(stack_.end() - depth - 1);
}
Value PeekPackedArray(uint32_t stack_depth, uint32_t operand_index,
@@ -5991,12 +6183,11 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
V8_INLINE void Drop(int count = 1) {
DCHECK(!control_.empty());
uint32_t limit = control_.back().stack_depth;
- if (V8_UNLIKELY(stack_size() < limit + count)) {
+ if (V8_UNLIKELY(stack_.size() < limit + count)) {
// Pop what we can.
- count = std::min(count, static_cast<int>(stack_size() - limit));
+ count = std::min(count, static_cast<int>(stack_.size() - limit));
}
- DCHECK_LE(stack_, stack_end_ - count);
- stack_end_ -= count;
+ stack_.pop(count);
}
// Drop the top stack element if present. Takes a Value input for more
// descriptive call sites.
@@ -6030,14 +6221,13 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
template <StackElementsCountMode strict_count, bool push_branch_values,
MergeType merge_type>
bool TypeCheckStackAgainstMerge(uint32_t drop_values, Merge<Value>* merge) {
- static_assert(validate, "Call this function only within VALIDATE");
constexpr const char* merge_description =
merge_type == kBranchMerge ? "branch"
: merge_type == kReturnMerge ? "return"
: merge_type == kInitExprMerge ? "constant expression"
: "fallthru";
uint32_t arity = merge->arity;
- uint32_t actual = stack_size() - control_.back().stack_depth;
+ uint32_t actual = stack_.size() - control_.back().stack_depth;
// Here we have to check for !unreachable(), because we need to typecheck as
// if the current code is reachable even if it is spec-only reachable.
if (V8_LIKELY(decoding_mode == kConstantExpression ||
@@ -6050,7 +6240,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
return false;
}
// Typecheck the topmost {merge->arity} values on the stack.
- Value* stack_values = stack_end_ - (arity + drop_values);
+ Value* stack_values = stack_.end() - (arity + drop_values);
for (uint32_t i = 0; i < arity; ++i) {
Value& val = stack_values[i];
Value& old = (*merge)[i];
@@ -6078,9 +6268,10 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
uint32_t inserted_value_count =
static_cast<uint32_t>(EnsureStackArguments(drop_values + arity));
if (inserted_value_count > 0) {
- // EnsureStackSpace may have inserted unreachable values into the bottom
- // of the stack. If so, mark them with the correct type. If drop values
- // were also inserted, disregard them, as they will be dropped anyway.
+ // stack_.EnsureMoreCapacity() may have inserted unreachable values into
+ // the bottom of the stack. If so, mark them with the correct type. If
+ // drop values were also inserted, disregard them, as they will be
+ // dropped anyway.
Value* stack_base = stack_value(drop_values + arity);
for (uint32_t i = 0; i < std::min(arity, inserted_value_count); i++) {
if (stack_base[i].type == kWasmBottom) {
@@ -6089,7 +6280,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
}
}
- return this->ok();
+ return VALIDATE(this->ok());
}
template <StackElementsCountMode strict_count, MergeType merge_type>
@@ -6099,7 +6290,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
return false;
}
DCHECK_IMPLIES(current_code_reachable_and_ok_,
- stack_size() >= this->sig_->return_count());
+ stack_.size() >= this->sig_->return_count());
CALL_INTERFACE_IF_OK_AND_REACHABLE(DoReturn, 0);
EndControl();
return true;
@@ -6116,7 +6307,6 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
bool TypeCheckOneArmedIf(Control* c) {
- static_assert(validate, "Call this function only within VALIDATE");
DCHECK(c->is_onearmed_if());
if (c->end_merge.arity != c->start_merge.arity) {
this->DecodeError(c->pc(),
@@ -6136,7 +6326,6 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
bool TypeCheckFallThru() {
- static_assert(validate, "Call this function only within VALIDATE");
return TypeCheckStackAgainstMerge<kStrictCounting, true, kFallthroughMerge>(
0, &control_.back().end_merge);
}
@@ -6153,7 +6342,6 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
// (index) and br_on_null (reference), and 0 for all other branches.
template <bool push_branch_values>
bool TypeCheckBranch(Control* c, uint32_t drop_values) {
- static_assert(validate, "Call this function only within VALIDATE");
return TypeCheckStackAgainstMerge<kNonStrictCounting, push_branch_values,
kBranchMerge>(drop_values, c->br_merge());
}
@@ -6221,15 +6409,17 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
FOREACH_SIGNATURE(DEFINE_SIMPLE_SIG_OPERATOR)
#undef DEFINE_SIMPLE_SIG_OPERATOR
+
+ static constexpr ValidationTag validate = {};
};
class EmptyInterface {
public:
- static constexpr Decoder::ValidateFlag validate = Decoder::kFullValidation;
+ using ValidationTag = Decoder::FullValidationTag;
static constexpr DecodingMode decoding_mode = kFunctionBody;
- using Value = ValueBase<validate>;
- using Control = ControlBase<Value, validate>;
- using FullDecoder = WasmFullDecoder<validate, EmptyInterface>;
+ using Value = ValueBase<ValidationTag>;
+ using Control = ControlBase<Value, ValidationTag>;
+ using FullDecoder = WasmFullDecoder<ValidationTag, EmptyInterface>;
#define DEFINE_EMPTY_CALLBACK(name, ...) \
void name(FullDecoder* decoder, ##__VA_ARGS__) {}
diff --git a/deps/v8/src/wasm/function-body-decoder.cc b/deps/v8/src/wasm/function-body-decoder.cc
index 2d25af7e27..62d1cd552c 100644
--- a/deps/v8/src/wasm/function-body-decoder.cc
+++ b/deps/v8/src/wasm/function-body-decoder.cc
@@ -16,16 +16,18 @@ namespace v8 {
namespace internal {
namespace wasm {
-bool DecodeLocalDecls(const WasmFeatures& enabled, BodyLocalDecls* decls,
+template <typename ValidationTag>
+bool DecodeLocalDecls(WasmFeatures enabled, BodyLocalDecls* decls,
const WasmModule* module, const byte* start,
const byte* end, Zone* zone) {
+ if constexpr (ValidationTag::validate) DCHECK_NOT_NULL(module);
WasmFeatures no_features = WasmFeatures::None();
constexpr FixedSizeSignature<ValueType, 0, 0> kNoSig;
- WasmDecoder<Decoder::kFullValidation> decoder(
- zone, module, enabled, &no_features, &kNoSig, start, end, 0);
+ WasmDecoder<ValidationTag> decoder(zone, module, enabled, &no_features,
+ &kNoSig, start, end);
uint32_t length;
decoder.DecodeLocals(decoder.pc(), &length);
- if (decoder.failed()) {
+ if (ValidationTag::validate && decoder.failed()) {
decls->encoded_size = 0;
return false;
}
@@ -38,6 +40,22 @@ bool DecodeLocalDecls(const WasmFeatures& enabled, BodyLocalDecls* decls,
return true;
}
+void DecodeLocalDecls(WasmFeatures enabled, BodyLocalDecls* decls,
+ const byte* start, const byte* end, Zone* zone) {
+ constexpr WasmModule* kNoModule = nullptr;
+ DecodeLocalDecls<Decoder::NoValidationTag>(enabled, decls, kNoModule, start,
+ end, zone);
+}
+
+bool ValidateAndDecodeLocalDeclsForTesting(WasmFeatures enabled,
+ BodyLocalDecls* decls,
+ const WasmModule* module,
+ const byte* start, const byte* end,
+ Zone* zone) {
+ return DecodeLocalDecls<Decoder::BooleanValidationTag>(enabled, decls, module,
+ start, end, zone);
+}
+
BytecodeIterator::BytecodeIterator(const byte* start, const byte* end)
: Decoder(start, end) {}
@@ -46,10 +64,9 @@ BytecodeIterator::BytecodeIterator(const byte* start, const byte* end,
: Decoder(start, end) {
DCHECK_NOT_NULL(decls);
DCHECK_NOT_NULL(zone);
- if (DecodeLocalDecls(WasmFeatures::All(), decls, nullptr, start, end, zone)) {
- pc_ += decls->encoded_size;
- if (pc_ > end_) pc_ = end_;
- }
+ DecodeLocalDecls(WasmFeatures::All(), decls, start, end, zone);
+ pc_ += decls->encoded_size;
+ if (pc_ > end_) pc_ = end_;
}
DecodeResult ValidateFunctionBody(AccountingAllocator* allocator,
@@ -58,7 +75,7 @@ DecodeResult ValidateFunctionBody(AccountingAllocator* allocator,
WasmFeatures* detected,
const FunctionBody& body) {
Zone zone(allocator, ZONE_NAME);
- WasmFullDecoder<Decoder::kFullValidation, EmptyInterface> decoder(
+ WasmFullDecoder<Decoder::FullValidationTag, EmptyInterface> decoder(
&zone, module, enabled, detected, body);
decoder.Decode();
return decoder.toResult(nullptr);
@@ -69,10 +86,10 @@ unsigned OpcodeLength(const byte* pc, const byte* end) {
Zone* no_zone = nullptr;
WasmModule* no_module = nullptr;
FunctionSig* no_sig = nullptr;
- WasmDecoder<Decoder::kNoValidation> decoder(
+ WasmDecoder<Decoder::NoValidationTag> decoder(
no_zone, no_module, WasmFeatures::All(), &unused_detected_features,
no_sig, pc, end, 0);
- return WasmDecoder<Decoder::kNoValidation>::OpcodeLength(&decoder, pc);
+ return WasmDecoder<Decoder::NoValidationTag>::OpcodeLength(&decoder, pc);
}
bool CheckHardwareSupportsSimd() { return CpuFeatures::SupportsWasmSimd128(); }
@@ -82,7 +99,7 @@ std::pair<uint32_t, uint32_t> StackEffect(const WasmModule* module,
const byte* pc, const byte* end) {
WasmFeatures unused_detected_features = WasmFeatures::None();
Zone* no_zone = nullptr;
- WasmDecoder<Decoder::kNoValidation> decoder(
+ WasmDecoder<Decoder::NoValidationTag> decoder(
no_zone, module, WasmFeatures::All(), &unused_detected_features, sig, pc,
end);
return decoder.StackEffect(pc);
@@ -131,7 +148,7 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
std::ostream& os, std::vector<int>* line_numbers) {
Zone zone(allocator, ZONE_NAME);
WasmFeatures unused_detected_features = WasmFeatures::None();
- WasmDecoder<Decoder::kNoValidation> decoder(
+ WasmDecoder<Decoder::NoValidationTag> decoder(
&zone, module, WasmFeatures::All(), &unused_detected_features, body.sig,
body.start, body.end);
int line_nr = 0;
@@ -181,7 +198,7 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
unsigned control_depth = 0;
for (; i.has_next(); i.next()) {
unsigned length =
- WasmDecoder<Decoder::kNoValidation>::OpcodeLength(&decoder, i.pc());
+ WasmDecoder<Decoder::NoValidationTag>::OpcodeLength(&decoder, i.pc());
unsigned offset = 1;
WasmOpcode opcode = i.current();
@@ -216,9 +233,8 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
if (i.pc()[1] & 0x80) {
uint32_t temp_length;
ValueType type =
- value_type_reader::read_value_type<Decoder::kNoValidation>(
- &decoder, i.pc() + 1, &temp_length, module,
- WasmFeatures::All());
+ value_type_reader::read_value_type<Decoder::NoValidationTag>(
+ &decoder, i.pc() + 1, &temp_length, WasmFeatures::All());
if (temp_length == 1) {
os << type.name() << ",";
} else {
@@ -251,8 +267,8 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
case kExprIf:
case kExprBlock:
case kExprTry: {
- BlockTypeImmediate<Decoder::kNoValidation> imm(WasmFeatures::All(), &i,
- i.pc() + 1, module);
+ BlockTypeImmediate imm(WasmFeatures::All(), &i, i.pc() + 1,
+ Decoder::kNoValidation);
os << " @" << i.pc_offset();
CHECK(decoder.Validate(i.pc() + 1, imm));
for (uint32_t j = 0; j < imm.out_arity(); j++) {
@@ -266,29 +282,29 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
control_depth--;
break;
case kExprBr: {
- BranchDepthImmediate<Decoder::kNoValidation> imm(&i, i.pc() + 1);
+ BranchDepthImmediate imm(&i, i.pc() + 1, Decoder::kNoValidation);
os << " depth=" << imm.depth;
break;
}
case kExprBrIf: {
- BranchDepthImmediate<Decoder::kNoValidation> imm(&i, i.pc() + 1);
+ BranchDepthImmediate imm(&i, i.pc() + 1, Decoder::kNoValidation);
os << " depth=" << imm.depth;
break;
}
case kExprBrTable: {
- BranchTableImmediate<Decoder::kNoValidation> imm(&i, i.pc() + 1);
+ BranchTableImmediate imm(&i, i.pc() + 1, Decoder::kNoValidation);
os << " entries=" << imm.table_count;
break;
}
case kExprCallIndirect: {
- CallIndirectImmediate<Decoder::kNoValidation> imm(&i, i.pc() + 1);
+ CallIndirectImmediate imm(&i, i.pc() + 1, Decoder::kNoValidation);
os << " sig #" << imm.sig_imm.index;
CHECK(decoder.Validate(i.pc() + 1, imm));
os << ": " << *imm.sig;
break;
}
case kExprCallFunction: {
- CallFunctionImmediate<Decoder::kNoValidation> imm(&i, i.pc() + 1);
+ CallFunctionImmediate imm(&i, i.pc() + 1, Decoder::kNoValidation);
os << " function #" << imm.index;
CHECK(decoder.Validate(i.pc() + 1, imm));
os << ": " << *imm.sig;
@@ -309,9 +325,9 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
BitVector* AnalyzeLoopAssignmentForTesting(Zone* zone, uint32_t num_locals,
const byte* start, const byte* end) {
WasmFeatures no_features = WasmFeatures::None();
- WasmDecoder<Decoder::kFullValidation> decoder(
+ WasmDecoder<Decoder::FullValidationTag> decoder(
zone, nullptr, no_features, &no_features, nullptr, start, end, 0);
- return WasmDecoder<Decoder::kFullValidation>::AnalyzeLoopAssignment(
+ return WasmDecoder<Decoder::FullValidationTag>::AnalyzeLoopAssignment(
&decoder, start, num_locals, zone);
}
diff --git a/deps/v8/src/wasm/function-body-decoder.h b/deps/v8/src/wasm/function-body-decoder.h
index d6fb2cfd99..0b2073e2a9 100644
--- a/deps/v8/src/wasm/function-body-decoder.h
+++ b/deps/v8/src/wasm/function-body-decoder.h
@@ -68,12 +68,17 @@ struct BodyLocalDecls {
ValueType* local_types = nullptr;
};
-V8_EXPORT_PRIVATE bool DecodeLocalDecls(const WasmFeatures& enabled,
+// Decode locals; validation is not performed.
+V8_EXPORT_PRIVATE void DecodeLocalDecls(WasmFeatures enabled,
BodyLocalDecls* decls,
- const WasmModule* module,
const byte* start, const byte* end,
Zone* zone);
+// Decode locals, including validation.
+V8_EXPORT_PRIVATE bool ValidateAndDecodeLocalDeclsForTesting(
+ WasmFeatures enabled, BodyLocalDecls* decls, const WasmModule* module,
+ const byte* start, const byte* end, Zone* zone);
+
V8_EXPORT_PRIVATE BitVector* AnalyzeLoopAssignmentForTesting(
Zone* zone, uint32_t num_locals, const byte* start, const byte* end);
@@ -170,7 +175,7 @@ class V8_EXPORT_PRIVATE BytecodeIterator : public NON_EXPORTED_BASE(Decoder) {
WasmOpcode current() {
return static_cast<WasmOpcode>(
- read_u8<Decoder::kNoValidation>(pc_, "expected bytecode"));
+ read_u8<Decoder::NoValidationTag>(pc_, "expected bytecode"));
}
void next() {
@@ -183,7 +188,7 @@ class V8_EXPORT_PRIVATE BytecodeIterator : public NON_EXPORTED_BASE(Decoder) {
bool has_next() { return pc_ < end_; }
WasmOpcode prefixed_opcode() {
- return read_prefixed_opcode<Decoder::kNoValidation>(pc_);
+ return read_prefixed_opcode<Decoder::NoValidationTag>(pc_);
}
};
diff --git a/deps/v8/src/wasm/function-compiler.cc b/deps/v8/src/wasm/function-compiler.cc
index 46a4f545b3..51eb935110 100644
--- a/deps/v8/src/wasm/function-compiler.cc
+++ b/deps/v8/src/wasm/function-compiler.cc
@@ -130,6 +130,17 @@ WasmCompilationResult WasmCompilationUnit::ExecuteFunctionCompilation(
V8_FALLTHROUGH;
case ExecutionTier::kTurbofan:
+ // Before executing TurboFan compilation, make sure that the function was
+ // validated (because TurboFan compilation assumes valid input).
+ if (V8_UNLIKELY(!env->module->function_was_validated(func_index_))) {
+ AccountingAllocator allocator;
+ if (ValidateFunctionBody(&allocator, env->enabled_features, env->module,
+ detected, func_body)
+ .failed()) {
+ return {};
+ }
+ env->module->set_function_validated(func_index_);
+ }
result = compiler::ExecuteTurbofanWasmCompilation(
env, wire_bytes_storage, func_body, func_index_, counters,
buffer_cache, detected);
diff --git a/deps/v8/src/wasm/graph-builder-interface.cc b/deps/v8/src/wasm/graph-builder-interface.cc
index fb1f19c2b1..95de1e88f7 100644
--- a/deps/v8/src/wasm/graph-builder-interface.cc
+++ b/deps/v8/src/wasm/graph-builder-interface.cc
@@ -71,11 +71,12 @@ struct SsaEnv : public ZoneObject {
class WasmGraphBuildingInterface {
public:
- static constexpr Decoder::ValidateFlag validate = Decoder::kFullValidation;
- using FullDecoder = WasmFullDecoder<validate, WasmGraphBuildingInterface>;
+ using ValidationTag = Decoder::NoValidationTag;
+ using FullDecoder =
+ WasmFullDecoder<ValidationTag, WasmGraphBuildingInterface>;
using CheckForNull = compiler::WasmGraphBuilder::CheckForNull;
- struct Value : public ValueBase<validate> {
+ struct Value : public ValueBase<ValidationTag> {
TFNode* node = nullptr;
template <typename... Args>
@@ -88,7 +89,6 @@ class WasmGraphBuildingInterface {
struct TryInfo : public ZoneObject {
SsaEnv* catch_env;
TFNode* exception = nullptr;
- bool first_catch = true;
bool might_throw() const { return exception != nullptr; }
@@ -97,7 +97,7 @@ class WasmGraphBuildingInterface {
explicit TryInfo(SsaEnv* c) : catch_env(c) {}
};
- struct Control : public ControlBase<Value, validate> {
+ struct Control : public ControlBase<Value, ValidationTag> {
SsaEnv* merge_env = nullptr; // merge environment for the construct.
SsaEnv* false_env = nullptr; // false environment (only for if).
TryInfo* try_info = nullptr; // information about try statements.
@@ -252,7 +252,7 @@ class WasmGraphBuildingInterface {
builder_->TerminateLoop(effect(), control());
// Doing a preprocessing pass to analyze loop assignments seems to pay off
// compared to reallocating Nodes when rearranging Phis in Goto.
- BitVector* assigned = WasmDecoder<validate>::AnalyzeLoopAssignment(
+ BitVector* assigned = WasmDecoder<ValidationTag>::AnalyzeLoopAssignment(
decoder, decoder->pc(), decoder->num_locals(), decoder->zone());
if (decoder->failed()) return;
int instance_cache_index = decoder->num_locals();
@@ -408,7 +408,7 @@ class WasmGraphBuildingInterface {
SetAndTypeNode(result, builder_->Float64Constant(value));
}
- void S128Const(FullDecoder* decoder, const Simd128Immediate<validate>& imm,
+ void S128Const(FullDecoder* decoder, const Simd128Immediate& imm,
Value* result) {
SetAndTypeNode(result, builder_->Simd128Constant(imm.value));
}
@@ -422,49 +422,46 @@ class WasmGraphBuildingInterface {
}
void RefAsNonNull(FullDecoder* decoder, const Value& arg, Value* result) {
- TFNode* cast_node =
- v8_flags.experimental_wasm_skip_null_checks
- ? builder_->TypeGuard(arg.node, result->type)
- : builder_->RefAsNonNull(arg.node, decoder->position());
+ TFNode* cast_node = builder_->AssertNotNull(arg.node, decoder->position());
SetAndTypeNode(result, cast_node);
}
void Drop(FullDecoder* decoder) {}
void LocalGet(FullDecoder* decoder, Value* result,
- const IndexImmediate<validate>& imm) {
+ const IndexImmediate& imm) {
result->node = ssa_env_->locals[imm.index];
}
void LocalSet(FullDecoder* decoder, const Value& value,
- const IndexImmediate<validate>& imm) {
+ const IndexImmediate& imm) {
ssa_env_->locals[imm.index] = value.node;
}
void LocalTee(FullDecoder* decoder, const Value& value, Value* result,
- const IndexImmediate<validate>& imm) {
+ const IndexImmediate& imm) {
result->node = value.node;
ssa_env_->locals[imm.index] = value.node;
}
void GlobalGet(FullDecoder* decoder, Value* result,
- const GlobalIndexImmediate<validate>& imm) {
+ const GlobalIndexImmediate& imm) {
SetAndTypeNode(result, builder_->GlobalGet(imm.index));
}
void GlobalSet(FullDecoder* decoder, const Value& value,
- const GlobalIndexImmediate<validate>& imm) {
+ const GlobalIndexImmediate& imm) {
builder_->GlobalSet(imm.index, value.node);
}
void TableGet(FullDecoder* decoder, const Value& index, Value* result,
- const IndexImmediate<validate>& imm) {
+ const IndexImmediate& imm) {
SetAndTypeNode(
result, builder_->TableGet(imm.index, index.node, decoder->position()));
}
void TableSet(FullDecoder* decoder, const Value& index, const Value& value,
- const IndexImmediate<validate>& imm) {
+ const IndexImmediate& imm) {
builder_->TableSet(imm.index, index.node, value.node, decoder->position());
}
@@ -478,6 +475,12 @@ class WasmGraphBuildingInterface {
Forward(decoder, obj, result);
}
+ void AssertNotNull(FullDecoder* decoder, const Value& obj, Value* result) {
+ builder_->TrapIfTrue(wasm::TrapReason::kTrapIllegalCast,
+ builder_->IsNull(obj.node), decoder->position());
+ Forward(decoder, obj, result);
+ }
+
void NopForTestingUnsupportedInLiftoff(FullDecoder* decoder) {}
void Select(FullDecoder* decoder, const Value& cond, const Value& fval,
@@ -567,11 +570,11 @@ class WasmGraphBuildingInterface {
SetEnv(fenv);
}
- void BrTable(FullDecoder* decoder, const BranchTableImmediate<validate>& imm,
+ void BrTable(FullDecoder* decoder, const BranchTableImmediate& imm,
const Value& key) {
if (imm.table_count == 0) {
// Only a default target. Do the equivalent of br.
- uint32_t target = BranchTableIterator<validate>(decoder, imm).next();
+ uint32_t target = BranchTableIterator<ValidationTag>(decoder, imm).next();
BrOrRet(decoder, target, 1);
return;
}
@@ -582,7 +585,7 @@ class WasmGraphBuildingInterface {
SsaEnv* copy = Steal(decoder->zone(), branch_env);
SetEnv(copy);
- BranchTableIterator<validate> iterator(decoder, imm);
+ BranchTableIterator<ValidationTag> iterator(decoder, imm);
while (iterator.has_next()) {
uint32_t i = iterator.cur_index();
uint32_t target = iterator.next();
@@ -604,7 +607,7 @@ class WasmGraphBuildingInterface {
}
void LoadMem(FullDecoder* decoder, LoadType type,
- const MemoryAccessImmediate<validate>& imm, const Value& index,
+ const MemoryAccessImmediate& imm, const Value& index,
Value* result) {
SetAndTypeNode(result, builder_->LoadMem(
type.value_type(), type.mem_type(), index.node,
@@ -613,8 +616,8 @@ class WasmGraphBuildingInterface {
void LoadTransform(FullDecoder* decoder, LoadType type,
LoadTransformationKind transform,
- const MemoryAccessImmediate<validate>& imm,
- const Value& index, Value* result) {
+ const MemoryAccessImmediate& imm, const Value& index,
+ Value* result) {
SetAndTypeNode(result,
builder_->LoadTransform(type.value_type(), type.mem_type(),
transform, index.node, imm.offset,
@@ -622,7 +625,7 @@ class WasmGraphBuildingInterface {
}
void LoadLane(FullDecoder* decoder, LoadType type, const Value& value,
- const Value& index, const MemoryAccessImmediate<validate>& imm,
+ const Value& index, const MemoryAccessImmediate& imm,
const uint8_t laneidx, Value* result) {
SetAndTypeNode(
result, builder_->LoadLane(
@@ -631,14 +634,14 @@ class WasmGraphBuildingInterface {
}
void StoreMem(FullDecoder* decoder, StoreType type,
- const MemoryAccessImmediate<validate>& imm, const Value& index,
+ const MemoryAccessImmediate& imm, const Value& index,
const Value& value) {
builder_->StoreMem(type.mem_rep(), index.node, imm.offset, imm.alignment,
value.node, decoder->position(), type.value_type());
}
void StoreLane(FullDecoder* decoder, StoreType type,
- const MemoryAccessImmediate<validate>& imm, const Value& index,
+ const MemoryAccessImmediate& imm, const Value& index,
const Value& value, const uint8_t laneidx) {
builder_->StoreLane(type.mem_rep(), index.node, imm.offset, imm.alignment,
value.node, laneidx, decoder->position(),
@@ -655,8 +658,7 @@ class WasmGraphBuildingInterface {
LoadContextIntoSsa(ssa_env_, decoder);
}
- void CallDirect(FullDecoder* decoder,
- const CallFunctionImmediate<validate>& imm,
+ void CallDirect(FullDecoder* decoder, const CallFunctionImmediate& imm,
const Value args[], Value returns[]) {
int maybe_call_count = -1;
if (v8_flags.wasm_speculative_inlining && type_feedback_.size() > 0) {
@@ -668,8 +670,7 @@ class WasmGraphBuildingInterface {
args, returns);
}
- void ReturnCall(FullDecoder* decoder,
- const CallFunctionImmediate<validate>& imm,
+ void ReturnCall(FullDecoder* decoder, const CallFunctionImmediate& imm,
const Value args[]) {
int maybe_call_count = -1;
if (v8_flags.wasm_speculative_inlining && type_feedback_.size() > 0) {
@@ -682,8 +683,8 @@ class WasmGraphBuildingInterface {
}
void CallIndirect(FullDecoder* decoder, const Value& index,
- const CallIndirectImmediate<validate>& imm,
- const Value args[], Value returns[]) {
+ const CallIndirectImmediate& imm, const Value args[],
+ Value returns[]) {
DoCall(
decoder,
CallInfo::CallIndirect(index, imm.table_imm.index, imm.sig_imm.index),
@@ -691,7 +692,7 @@ class WasmGraphBuildingInterface {
}
void ReturnCallIndirect(FullDecoder* decoder, const Value& index,
- const CallIndirectImmediate<validate>& imm,
+ const CallIndirectImmediate& imm,
const Value args[]) {
DoReturnCall(
decoder,
@@ -886,23 +887,22 @@ class WasmGraphBuildingInterface {
}
void SimdLaneOp(FullDecoder* decoder, WasmOpcode opcode,
- const SimdLaneImmediate<validate>& imm,
- base::Vector<Value> inputs, Value* result) {
+ const SimdLaneImmediate& imm, base::Vector<Value> inputs,
+ Value* result) {
NodeVector nodes(inputs.size());
GetNodes(nodes.begin(), inputs);
SetAndTypeNode(result,
builder_->SimdLaneOp(opcode, imm.lane, nodes.begin()));
}
- void Simd8x16ShuffleOp(FullDecoder* decoder,
- const Simd128Immediate<validate>& imm,
+ void Simd8x16ShuffleOp(FullDecoder* decoder, const Simd128Immediate& imm,
const Value& input0, const Value& input1,
Value* result) {
TFNode* input_nodes[] = {input0.node, input1.node};
SetAndTypeNode(result, builder_->Simd8x16ShuffleOp(imm.value, input_nodes));
}
- void Throw(FullDecoder* decoder, const TagIndexImmediate<validate>& imm,
+ void Throw(FullDecoder* decoder, const TagIndexImmediate& imm,
const base::Vector<Value>& value_args) {
int count = value_args.length();
ZoneVector<TFNode*> args(count, decoder->zone());
@@ -911,7 +911,8 @@ class WasmGraphBuildingInterface {
}
CheckForException(decoder,
builder_->Throw(imm.index, imm.tag, base::VectorOf(args),
- decoder->position()));
+ decoder->position()),
+ kDontReloadContext);
builder_->TerminateThrow(effect(), control());
}
@@ -919,13 +920,13 @@ class WasmGraphBuildingInterface {
DCHECK(block->is_try_catchall() || block->is_try_catch());
TFNode* exception = block->try_info->exception;
DCHECK_NOT_NULL(exception);
- CheckForException(decoder, builder_->Rethrow(exception));
+ CheckForException(decoder, builder_->Rethrow(exception),
+ kDontReloadContext);
builder_->TerminateThrow(effect(), control());
}
- void CatchException(FullDecoder* decoder,
- const TagIndexImmediate<validate>& imm, Control* block,
- base::Vector<Value> values) {
+ void CatchException(FullDecoder* decoder, const TagIndexImmediate& imm,
+ Control* block, base::Vector<Value> values) {
DCHECK(block->is_try_catch());
// The catch block is unreachable if no possible throws in the try block
// exist. We only build a landing pad if some node in the try block can
@@ -937,10 +938,6 @@ class WasmGraphBuildingInterface {
TFNode* exception = block->try_info->exception;
SetEnv(block->try_info->catch_env);
- if (block->try_info->first_catch) {
- LoadContextIntoSsa(ssa_env_, decoder);
- block->try_info->first_catch = false;
- }
TFNode* if_catch = nullptr;
TFNode* if_no_catch = nullptr;
@@ -1018,14 +1015,11 @@ class WasmGraphBuildingInterface {
}
SetEnv(block->try_info->catch_env);
- if (block->try_info->first_catch) {
- LoadContextIntoSsa(ssa_env_, decoder);
- }
}
void AtomicOp(FullDecoder* decoder, WasmOpcode opcode,
- base::Vector<Value> args,
- const MemoryAccessImmediate<validate>& imm, Value* result) {
+ base::Vector<Value> args, const MemoryAccessImmediate& imm,
+ Value* result) {
NodeVector inputs(args.size());
GetNodes(inputs.begin(), args);
TFNode* node = builder_->AtomicOp(opcode, inputs.begin(), imm.alignment,
@@ -1035,65 +1029,61 @@ class WasmGraphBuildingInterface {
void AtomicFence(FullDecoder* decoder) { builder_->AtomicFence(); }
- void MemoryInit(FullDecoder* decoder,
- const MemoryInitImmediate<validate>& imm, const Value& dst,
- const Value& src, const Value& size) {
+ void MemoryInit(FullDecoder* decoder, const MemoryInitImmediate& imm,
+ const Value& dst, const Value& src, const Value& size) {
builder_->MemoryInit(imm.data_segment.index, dst.node, src.node, size.node,
decoder->position());
}
- void DataDrop(FullDecoder* decoder, const IndexImmediate<validate>& imm) {
+ void DataDrop(FullDecoder* decoder, const IndexImmediate& imm) {
builder_->DataDrop(imm.index, decoder->position());
}
- void MemoryCopy(FullDecoder* decoder,
- const MemoryCopyImmediate<validate>& imm, const Value& dst,
- const Value& src, const Value& size) {
+ void MemoryCopy(FullDecoder* decoder, const MemoryCopyImmediate& imm,
+ const Value& dst, const Value& src, const Value& size) {
builder_->MemoryCopy(dst.node, src.node, size.node, decoder->position());
}
- void MemoryFill(FullDecoder* decoder,
- const MemoryIndexImmediate<validate>& imm, const Value& dst,
- const Value& value, const Value& size) {
+ void MemoryFill(FullDecoder* decoder, const MemoryIndexImmediate& imm,
+ const Value& dst, const Value& value, const Value& size) {
builder_->MemoryFill(dst.node, value.node, size.node, decoder->position());
}
- void TableInit(FullDecoder* decoder, const TableInitImmediate<validate>& imm,
+ void TableInit(FullDecoder* decoder, const TableInitImmediate& imm,
base::Vector<Value> args) {
builder_->TableInit(imm.table.index, imm.element_segment.index,
args[0].node, args[1].node, args[2].node,
decoder->position());
}
- void ElemDrop(FullDecoder* decoder, const IndexImmediate<validate>& imm) {
+ void ElemDrop(FullDecoder* decoder, const IndexImmediate& imm) {
builder_->ElemDrop(imm.index, decoder->position());
}
- void TableCopy(FullDecoder* decoder, const TableCopyImmediate<validate>& imm,
+ void TableCopy(FullDecoder* decoder, const TableCopyImmediate& imm,
base::Vector<Value> args) {
builder_->TableCopy(imm.table_dst.index, imm.table_src.index, args[0].node,
args[1].node, args[2].node, decoder->position());
}
- void TableGrow(FullDecoder* decoder, const IndexImmediate<validate>& imm,
+ void TableGrow(FullDecoder* decoder, const IndexImmediate& imm,
const Value& value, const Value& delta, Value* result) {
SetAndTypeNode(result,
builder_->TableGrow(imm.index, value.node, delta.node));
}
- void TableSize(FullDecoder* decoder, const IndexImmediate<validate>& imm,
+ void TableSize(FullDecoder* decoder, const IndexImmediate& imm,
Value* result) {
SetAndTypeNode(result, builder_->TableSize(imm.index));
}
- void TableFill(FullDecoder* decoder, const IndexImmediate<validate>& imm,
+ void TableFill(FullDecoder* decoder, const IndexImmediate& imm,
const Value& start, const Value& value, const Value& count) {
builder_->TableFill(imm.index, start.node, value.node, count.node);
}
- void StructNew(FullDecoder* decoder,
- const StructIndexImmediate<validate>& imm, const Value& rtt,
- const Value args[], Value* result) {
+ void StructNew(FullDecoder* decoder, const StructIndexImmediate& imm,
+ const Value& rtt, const Value args[], Value* result) {
uint32_t field_count = imm.struct_type->field_count();
NodeVector arg_nodes(field_count);
for (uint32_t i = 0; i < field_count; i++) {
@@ -1103,8 +1093,7 @@ class WasmGraphBuildingInterface {
builder_->StructNew(imm.index, imm.struct_type, rtt.node,
base::VectorOf(arg_nodes)));
}
- void StructNewDefault(FullDecoder* decoder,
- const StructIndexImmediate<validate>& imm,
+ void StructNewDefault(FullDecoder* decoder, const StructIndexImmediate& imm,
const Value& rtt, Value* result) {
uint32_t field_count = imm.struct_type->field_count();
NodeVector arg_nodes(field_count);
@@ -1119,8 +1108,7 @@ class WasmGraphBuildingInterface {
}
void StructGet(FullDecoder* decoder, const Value& struct_object,
- const FieldImmediate<validate>& field, bool is_signed,
- Value* result) {
+ const FieldImmediate& field, bool is_signed, Value* result) {
SetAndTypeNode(result, builder_->StructGet(struct_object.node,
field.struct_imm.struct_type,
field.field_imm.index,
@@ -1129,14 +1117,13 @@ class WasmGraphBuildingInterface {
}
void StructSet(FullDecoder* decoder, const Value& struct_object,
- const FieldImmediate<validate>& field,
- const Value& field_value) {
+ const FieldImmediate& field, const Value& field_value) {
builder_->StructSet(struct_object.node, field.struct_imm.struct_type,
field.field_imm.index, field_value.node,
NullCheckFor(struct_object.type), decoder->position());
}
- void ArrayNew(FullDecoder* decoder, const ArrayIndexImmediate<validate>& imm,
+ void ArrayNew(FullDecoder* decoder, const ArrayIndexImmediate& imm,
const Value& length, const Value& initial_value,
const Value& rtt, Value* result) {
SetAndTypeNode(result, builder_->ArrayNew(imm.index, imm.array_type,
@@ -1147,8 +1134,7 @@ class WasmGraphBuildingInterface {
if (!loop_infos_.empty()) loop_infos_.back().can_be_innermost = false;
}
- void ArrayNewDefault(FullDecoder* decoder,
- const ArrayIndexImmediate<validate>& imm,
+ void ArrayNewDefault(FullDecoder* decoder, const ArrayIndexImmediate& imm,
const Value& length, const Value& rtt, Value* result) {
// This will be set in {builder_}.
TFNode* initial_value = nullptr;
@@ -1158,7 +1144,7 @@ class WasmGraphBuildingInterface {
}
void ArrayGet(FullDecoder* decoder, const Value& array_obj,
- const ArrayIndexImmediate<validate>& imm, const Value& index,
+ const ArrayIndexImmediate& imm, const Value& index,
bool is_signed, Value* result) {
SetAndTypeNode(
result, builder_->ArrayGet(array_obj.node, imm.array_type, index.node,
@@ -1167,7 +1153,7 @@ class WasmGraphBuildingInterface {
}
void ArraySet(FullDecoder* decoder, const Value& array_obj,
- const ArrayIndexImmediate<validate>& imm, const Value& index,
+ const ArrayIndexImmediate& imm, const Value& index,
const Value& value) {
builder_->ArraySet(array_obj.node, imm.array_type, index.node, value.node,
NullCheckFor(array_obj.type), decoder->position());
@@ -1187,8 +1173,7 @@ class WasmGraphBuildingInterface {
length.node, decoder->position());
}
- void ArrayNewFixed(FullDecoder* decoder,
- const ArrayIndexImmediate<validate>& imm,
+ void ArrayNewFixed(FullDecoder* decoder, const ArrayIndexImmediate& imm,
const base::Vector<Value>& elements, const Value& rtt,
Value* result) {
NodeVector element_nodes(elements.size());
@@ -1200,10 +1185,9 @@ class WasmGraphBuildingInterface {
}
void ArrayNewSegment(FullDecoder* decoder,
- const ArrayIndexImmediate<validate>& array_imm,
- const IndexImmediate<validate>& data_segment,
- const Value& offset, const Value& length,
- const Value& rtt, Value* result) {
+ const ArrayIndexImmediate& array_imm,
+ const IndexImmediate& data_segment, const Value& offset,
+ const Value& length, const Value& rtt, Value* result) {
SetAndTypeNode(result,
builder_->ArrayNewSegment(
array_imm.array_type, data_segment.index, offset.node,
@@ -1232,26 +1216,12 @@ class WasmGraphBuildingInterface {
using WasmTypeCheckConfig = v8::internal::compiler::WasmTypeCheckConfig;
- WasmTypeCheckConfig ComputeWasmTypeCheckConfig(ValueType object_type,
- ValueType rtt_type,
- const WasmModule* module,
- bool null_succeeds) {
- WasmTypeCheckConfig result;
- result.object_can_be_null = object_type.is_nullable();
- DCHECK(object_type.is_object_reference()); // Checked by validation.
- result.null_succeeds = null_succeeds;
- // In the bottom case, the result is irrelevant.
- result.rtt_depth = rtt_type.is_bottom()
- ? 0 /* unused */
- : static_cast<uint8_t>(GetSubtypingDepth(
- module, rtt_type.ref_index()));
- return result;
- }
-
void RefTest(FullDecoder* decoder, const Value& object, const Value& rtt,
Value* result, bool null_succeeds) {
- WasmTypeCheckConfig config = ComputeWasmTypeCheckConfig(
- object.type, rtt.type, decoder->module_, null_succeeds);
+ WasmTypeCheckConfig config = {
+ object.type,
+ ValueType::RefMaybeNull(rtt.type.ref_index(),
+ null_succeeds ? kNullable : kNonNullable)};
SetAndTypeNode(result, builder_->RefTest(object.node, rtt.node, config));
}
@@ -1262,11 +1232,11 @@ class WasmGraphBuildingInterface {
}
void RefCast(FullDecoder* decoder, const Value& object, const Value& rtt,
- Value* result) {
- // TODO(mliedtke): Should be a parameter for generic ref.cast instructions.
- const bool null_succeeds = false;
- WasmTypeCheckConfig config = ComputeWasmTypeCheckConfig(
- object.type, rtt.type, decoder->module_, null_succeeds);
+ Value* result, bool null_succeeds) {
+ WasmTypeCheckConfig config = {
+ object.type,
+ ValueType::RefMaybeNull(rtt.type.ref_index(),
+ null_succeeds ? kNullable : kNonNullable)};
TFNode* cast_node = v8_flags.experimental_wasm_assume_ref_cast_succeeds
? builder_->TypeGuard(object.node, result->type)
: builder_->RefCast(object.node, rtt.node, config,
@@ -1274,17 +1244,27 @@ class WasmGraphBuildingInterface {
SetAndTypeNode(result, cast_node);
}
+ void RefCastAbstract(FullDecoder* decoder, const Value& object,
+ wasm::HeapType type, Value* result, bool null_succeeds) {
+ TFNode* node = object.node;
+ if (!v8_flags.experimental_wasm_assume_ref_cast_succeeds) {
+ node = builder_->RefCastAbstract(object.node, type, decoder->position(),
+ null_succeeds);
+ }
+ SetAndTypeNode(result, builder_->TypeGuard(node, result->type));
+ }
+
template <void (compiler::WasmGraphBuilder::*branch_function)(
TFNode*, TFNode*, WasmTypeCheckConfig, TFNode**, TFNode**, TFNode**,
TFNode**)>
void BrOnCastAbs(FullDecoder* decoder, const Value& object, const Value& rtt,
Value* forwarding_value, uint32_t br_depth,
bool branch_on_match) {
- // TODO(mliedtke): Should be a parameter for generic br_on_cast
- // instructions.
- const bool null_succeeds = false;
- WasmTypeCheckConfig config = ComputeWasmTypeCheckConfig(
- object.type, rtt.type, decoder->module_, null_succeeds);
+ // TODO(mliedtke): Add generic br_on_cast instructions where null succeeds.
+ WasmTypeCheckConfig config = {object.type,
+ !rtt.type.is_bottom()
+ ? ValueType::Ref(rtt.type.ref_index())
+ : kWasmBottom};
SsaEnv* branch_env = Split(decoder->zone(), ssa_env_);
SsaEnv* no_branch_env = Steal(decoder->zone(), ssa_env_);
no_branch_env->SetNotMerged();
@@ -1322,30 +1302,32 @@ class WasmGraphBuildingInterface {
null_succeeds));
}
- void RefIsData(FullDecoder* decoder, const Value& object, Value* result) {
+ void RefIsStruct(FullDecoder* decoder, const Value& object, Value* result) {
bool null_succeeds = false;
SetAndTypeNode(result,
- builder_->RefIsData(object.node, object.type.is_nullable(),
- null_succeeds));
+ builder_->RefIsStruct(object.node, object.type.is_nullable(),
+ null_succeeds));
}
- void RefAsData(FullDecoder* decoder, const Value& object, Value* result) {
- TFNode* cast_object = builder_->RefAsData(
- object.node, object.type.is_nullable(), decoder->position());
+ void RefAsStruct(FullDecoder* decoder, const Value& object, Value* result) {
+ bool null_succeeds = false;
+ TFNode* cast_object =
+ builder_->RefAsStruct(object.node, object.type.is_nullable(),
+ decoder->position(), null_succeeds);
TFNode* rename = builder_->TypeGuard(cast_object, result->type);
SetAndTypeNode(result, rename);
}
- void BrOnData(FullDecoder* decoder, const Value& object,
- Value* value_on_branch, uint32_t br_depth) {
- BrOnCastAbs<&compiler::WasmGraphBuilder::BrOnData>(
+ void BrOnStruct(FullDecoder* decoder, const Value& object,
+ Value* value_on_branch, uint32_t br_depth) {
+ BrOnCastAbs<&compiler::WasmGraphBuilder::BrOnStruct>(
decoder, object, Value{nullptr, kWasmBottom}, value_on_branch, br_depth,
true);
}
- void BrOnNonData(FullDecoder* decoder, const Value& object,
- Value* value_on_fallthrough, uint32_t br_depth) {
- BrOnCastAbs<&compiler::WasmGraphBuilder::BrOnData>(
+ void BrOnNonStruct(FullDecoder* decoder, const Value& object,
+ Value* value_on_fallthrough, uint32_t br_depth) {
+ BrOnCastAbs<&compiler::WasmGraphBuilder::BrOnStruct>(
decoder, object, Value{nullptr, kWasmBottom}, value_on_fallthrough,
br_depth, false);
}
@@ -1358,8 +1340,10 @@ class WasmGraphBuildingInterface {
}
void RefAsArray(FullDecoder* decoder, const Value& object, Value* result) {
- TFNode* cast_object = builder_->RefAsArray(
- object.node, object.type.is_nullable(), decoder->position());
+ bool null_succeeds = false;
+ TFNode* cast_object =
+ builder_->RefAsArray(object.node, object.type.is_nullable(),
+ decoder->position(), null_succeeds);
TFNode* rename = builder_->TypeGuard(cast_object, result->type);
SetAndTypeNode(result, rename);
}
@@ -1384,7 +1368,9 @@ class WasmGraphBuildingInterface {
}
void RefAsI31(FullDecoder* decoder, const Value& object, Value* result) {
- TFNode* cast_object = builder_->RefAsI31(object.node, decoder->position());
+ bool null_succeeds = false;
+ TFNode* cast_object =
+ builder_->RefAsI31(object.node, decoder->position(), null_succeeds);
TFNode* rename = builder_->TypeGuard(cast_object, result->type);
SetAndTypeNode(result, rename);
}
@@ -1403,8 +1389,7 @@ class WasmGraphBuildingInterface {
br_depth, false);
}
- void StringNewWtf8(FullDecoder* decoder,
- const MemoryIndexImmediate<validate>& memory,
+ void StringNewWtf8(FullDecoder* decoder, const MemoryIndexImmediate& memory,
const unibrow::Utf8Variant variant, const Value& offset,
const Value& size, Value* result) {
SetAndTypeNode(result, builder_->StringNewWtf8(memory.index, variant,
@@ -1419,8 +1404,7 @@ class WasmGraphBuildingInterface {
start.node, end.node));
}
- void StringNewWtf16(FullDecoder* decoder,
- const MemoryIndexImmediate<validate>& imm,
+ void StringNewWtf16(FullDecoder* decoder, const MemoryIndexImmediate& imm,
const Value& offset, const Value& size, Value* result) {
SetAndTypeNode(result,
builder_->StringNewWtf16(imm.index, offset.node, size.node));
@@ -1433,8 +1417,8 @@ class WasmGraphBuildingInterface {
end.node));
}
- void StringConst(FullDecoder* decoder,
- const StringConstImmediate<validate>& imm, Value* result) {
+ void StringConst(FullDecoder* decoder, const StringConstImmediate& imm,
+ Value* result) {
SetAndTypeNode(result, builder_->StringConst(imm.index));
}
@@ -1461,7 +1445,7 @@ class WasmGraphBuildingInterface {
}
void StringEncodeWtf8(FullDecoder* decoder,
- const MemoryIndexImmediate<validate>& memory,
+ const MemoryIndexImmediate& memory,
const unibrow::Utf8Variant variant, const Value& str,
const Value& offset, Value* result) {
result->node = builder_->StringEncodeWtf8(memory.index, variant, str.node,
@@ -1478,8 +1462,7 @@ class WasmGraphBuildingInterface {
NullCheckFor(array.type), start.node, decoder->position());
}
- void StringEncodeWtf16(FullDecoder* decoder,
- const MemoryIndexImmediate<validate>& imm,
+ void StringEncodeWtf16(FullDecoder* decoder, const MemoryIndexImmediate& imm,
const Value& str, const Value& offset, Value* result) {
result->node =
builder_->StringEncodeWtf16(imm.index, str.node, NullCheckFor(str.type),
@@ -1529,7 +1512,7 @@ class WasmGraphBuildingInterface {
}
void StringViewWtf8Encode(FullDecoder* decoder,
- const MemoryIndexImmediate<validate>& memory,
+ const MemoryIndexImmediate& memory,
const unibrow::Utf8Variant variant,
const Value& view, const Value& addr,
const Value& pos, const Value& bytes,
@@ -1552,9 +1535,9 @@ class WasmGraphBuildingInterface {
// Since we implement stringview_wtf16 as string, that's the type we'll
// use for the Node. (The decoder's Value type must be stringview_wtf16
// because static type validation relies on it.)
- result->node =
- builder_->SetType(builder_->RefAsNonNull(str.node, decoder->position()),
- ValueType::Ref(HeapType::kString));
+ result->node = builder_->SetType(
+ builder_->AssertNotNull(str.node, decoder->position()),
+ ValueType::Ref(HeapType::kString));
}
void StringViewWtf16GetCodeUnit(FullDecoder* decoder, const Value& view,
@@ -1564,10 +1547,9 @@ class WasmGraphBuildingInterface {
}
void StringViewWtf16Encode(FullDecoder* decoder,
- const MemoryIndexImmediate<validate>& imm,
- const Value& view, const Value& offset,
- const Value& pos, const Value& codeunits,
- Value* result) {
+ const MemoryIndexImmediate& imm, const Value& view,
+ const Value& offset, const Value& pos,
+ const Value& codeunits, Value* result) {
result->node = builder_->StringViewWtf16Encode(
imm.index, view.node, NullCheckFor(view.type), offset.node, pos.node,
codeunits.node, decoder->position());
@@ -1704,7 +1686,10 @@ class WasmGraphBuildingInterface {
builder_->set_instance_cache(&env->instance_cache);
}
- TFNode* CheckForException(FullDecoder* decoder, TFNode* node) {
+ enum ReloadContextAfterException { kDontReloadContext, kReloadContext };
+
+ TFNode* CheckForException(FullDecoder* decoder, TFNode* node,
+ ReloadContextAfterException reload_mode) {
DCHECK_NOT_NULL(node);
// We need to emit IfSuccess/IfException nodes if this node throws and has
@@ -1730,6 +1715,13 @@ class WasmGraphBuildingInterface {
exception_env->effect = if_exception;
SetEnv(exception_env);
+ // If the exceptional operation could have modified memory size, we need to
+ // reload the memory context into the exceptional control path.
+ if (reload_mode == kReloadContext &&
+ decoder->module_->initial_pages != decoder->module_->maximum_pages) {
+ LoadContextIntoSsa(ssa_env_, decoder);
+ }
+
if (emit_loop_exits()) {
ValueVector values;
BuildNestedLoopExits(decoder,
@@ -1972,28 +1964,29 @@ class WasmGraphBuildingInterface {
arg_nodes[i + 1] = args[i].node;
}
switch (call_info.call_mode()) {
- case CallInfo::kCallIndirect:
- CheckForException(
- decoder, builder_->CallIndirect(
- call_info.table_index(), call_info.sig_index(),
- base::VectorOf(arg_nodes),
- base::VectorOf(return_nodes), decoder->position()));
+ case CallInfo::kCallIndirect: {
+ TFNode* call = builder_->CallIndirect(
+ call_info.table_index(), call_info.sig_index(),
+ base::VectorOf(arg_nodes), base::VectorOf(return_nodes),
+ decoder->position());
+ CheckForException(decoder, call, kReloadContext);
break;
+ }
case CallInfo::kCallDirect: {
TFNode* call = builder_->CallDirect(
call_info.callee_index(), base::VectorOf(arg_nodes),
base::VectorOf(return_nodes), decoder->position());
builder_->StoreCallCount(call, call_info.call_count());
- CheckForException(decoder, call);
+ CheckForException(decoder, call, kReloadContext);
break;
}
- case CallInfo::kCallRef:
- CheckForException(
- decoder,
- builder_->CallRef(sig, base::VectorOf(arg_nodes),
- base::VectorOf(return_nodes),
- call_info.null_check(), decoder->position()));
+ case CallInfo::kCallRef: {
+ TFNode* call = builder_->CallRef(
+ sig, base::VectorOf(arg_nodes), base::VectorOf(return_nodes),
+ call_info.null_check(), decoder->position());
+ CheckForException(decoder, call, kReloadContext);
break;
+ }
}
for (size_t i = 0; i < return_count; ++i) {
SetAndTypeNode(&returns[i], return_nodes[i]);
@@ -2116,9 +2109,8 @@ class WasmGraphBuildingInterface {
CheckForNull NullCheckFor(ValueType type) {
DCHECK(type.is_object_reference());
- return (!v8_flags.experimental_wasm_skip_null_checks && type.is_nullable())
- ? CheckForNull::kWithNullCheck
- : CheckForNull::kWithoutNullCheck;
+ return type.is_nullable() ? CheckForNull::kWithNullCheck
+ : CheckForNull::kWithoutNullCheck;
}
void SetAndTypeNode(Value* value, TFNode* node) {
@@ -2138,7 +2130,7 @@ DecodeResult BuildTFGraph(AccountingAllocator* allocator,
compiler::NodeOriginTable* node_origins,
int func_index, InlinedStatus inlined_status) {
Zone zone(allocator, ZONE_NAME);
- WasmFullDecoder<Decoder::kFullValidation, WasmGraphBuildingInterface> decoder(
+ WasmFullDecoder<Decoder::NoValidationTag, WasmGraphBuildingInterface> decoder(
&zone, module, enabled, detected, body, builder, func_index,
inlined_status);
if (node_origins) {
diff --git a/deps/v8/src/wasm/module-compiler.cc b/deps/v8/src/wasm/module-compiler.cc
index 10575a7e31..ab8b49027e 100644
--- a/deps/v8/src/wasm/module-compiler.cc
+++ b/deps/v8/src/wasm/module-compiler.cc
@@ -5,9 +5,7 @@
#include "src/wasm/module-compiler.h"
#include <algorithm>
-#include <mutex> // NOLINT(build/c++11)
#include <queue>
-#include <shared_mutex>
#include "src/api/api-inl.h"
#include "src/base/enum-set.h"
@@ -25,6 +23,7 @@
#include "src/wasm/assembler-buffer-cache.h"
#include "src/wasm/code-space-access.h"
#include "src/wasm/module-decoder.h"
+#include "src/wasm/pgo.h"
#include "src/wasm/streaming-decoder.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-engine.h"
@@ -138,14 +137,14 @@ class CompilationUnitQueues {
Queue* GetQueueForTask(int task_id) {
int required_queues = task_id + 1;
{
- std::shared_lock<std::shared_mutex> queues_guard{queues_mutex_};
+ base::SharedMutexGuard<base::kShared> queues_guard{&queues_mutex_};
if (V8_LIKELY(static_cast<int>(queues_.size()) >= required_queues)) {
return queues_[task_id].get();
}
}
// Otherwise increase the number of queues.
- std::unique_lock<std::shared_mutex> queues_guard{queues_mutex_};
+ base::SharedMutexGuard<base::kExclusive> queues_guard{&queues_mutex_};
int num_queues = static_cast<int>(queues_.size());
while (num_queues < required_queues) {
int steal_from = num_queues + 1;
@@ -200,7 +199,7 @@ class CompilationUnitQueues {
QueueImpl* queue;
{
int queue_to_add = next_queue_to_add.load(std::memory_order_relaxed);
- std::shared_lock<std::shared_mutex> queues_guard{queues_mutex_};
+ base::SharedMutexGuard<base::kShared> queues_guard{&queues_mutex_};
while (!next_queue_to_add.compare_exchange_weak(
queue_to_add, next_task_id(queue_to_add, queues_.size()),
std::memory_order_relaxed)) {
@@ -234,7 +233,7 @@ class CompilationUnitQueues {
}
void AddTopTierPriorityUnit(WasmCompilationUnit unit, size_t priority) {
- std::shared_lock<std::shared_mutex> queues_guard{queues_mutex_};
+ base::SharedMutexGuard<base::kShared> queues_guard{&queues_mutex_};
// Add to the individual queues in a round-robin fashion. No special care is
// taken to balance them; they will be balanced by work stealing.
// Priorities should only be seen as a hint here; without balancing, we
@@ -382,7 +381,7 @@ class CompilationUnitQueues {
// Try to steal from all other queues. If this succeeds, return one of the
// stolen units.
{
- std::shared_lock<std::shared_mutex> guard{queues_mutex_};
+ base::SharedMutexGuard<base::kShared> guard{&queues_mutex_};
for (size_t steal_trials = 0; steal_trials < queues_.size();
++steal_trials, ++steal_task_id) {
if (steal_task_id >= static_cast<int>(queues_.size())) {
@@ -439,7 +438,7 @@ class CompilationUnitQueues {
// Try to steal from all other queues. If this succeeds, return one of the
// stolen units.
{
- std::shared_lock<std::shared_mutex> guard{queues_mutex_};
+ base::SharedMutexGuard<base::kShared> guard{&queues_mutex_};
for (size_t steal_trials = 0; steal_trials < queues_.size();
++steal_trials, ++steal_task_id) {
if (steal_task_id >= static_cast<int>(queues_.size())) {
@@ -514,7 +513,7 @@ class CompilationUnitQueues {
}
// {queues_mutex_} protectes {queues_};
- std::shared_mutex queues_mutex_;
+ base::SharedMutex queues_mutex_;
std::vector<std::unique_ptr<QueueImpl>> queues_;
const int num_declared_functions_;
@@ -565,11 +564,16 @@ class CompilationStateImpl {
void ApplyCompilationHintToInitialProgress(const WasmCompilationHint& hint,
size_t hint_idx);
+ // Use PGO information to choose a better initial compilation progress
+ // (tiering decisions).
+ void ApplyPgoInfoToInitialProgress(ProfileInformation* pgo_info);
+
// Initialize compilation progress. Set compilation tiers to expect for
// baseline and top tier compilation. Must be set before
// {CommitCompilationUnits} is invoked which triggers background compilation.
void InitializeCompilationProgress(int num_import_wrappers,
- int num_export_wrappers);
+ int num_export_wrappers,
+ ProfileInformation* pgo_info);
// Initialize the compilation progress after deserialization. This is needed
// for recompilation (e.g. for tier down) to work later.
@@ -1109,12 +1113,13 @@ enum OnlyLazyFunctions : bool {
void ValidateSequentially(
const WasmModule* module, NativeModule* native_module, Counters* counters,
- AccountingAllocator* allocator, ErrorThrower* thrower, bool lazy_module,
+ AccountingAllocator* allocator, ErrorThrower* thrower,
OnlyLazyFunctions only_lazy_functions = kAllFunctions) {
DCHECK(!thrower->error());
uint32_t start = module->num_imported_functions;
uint32_t end = start + module->num_declared_functions;
auto enabled_features = native_module->enabled_features();
+ bool lazy_module = v8_flags.wasm_lazy_compilation;
for (uint32_t func_index = start; func_index < end; func_index++) {
// Skip non-lazy functions if requested.
if (only_lazy_functions) {
@@ -1163,10 +1168,11 @@ class CompileLazyTimingScope {
} // namespace
-bool CompileLazy(Isolate* isolate, Handle<WasmInstanceObject> instance,
+bool CompileLazy(Isolate* isolate, WasmInstanceObject instance,
int func_index) {
- Handle<WasmModuleObject> module_object(instance->module_object(), isolate);
- NativeModule* native_module = module_object->native_module();
+ DisallowGarbageCollection no_gc;
+ WasmModuleObject module_object = instance.module_object();
+ NativeModule* native_module = module_object.native_module();
Counters* counters = isolate->counters();
// Put the timer scope around everything, including the {CodeSpaceWriteScope}
@@ -1225,12 +1231,11 @@ bool CompileLazy(Isolate* isolate, Handle<WasmInstanceObject> instance,
DCHECK_EQ(func_index, code->index());
if (WasmCode::ShouldBeLogged(isolate)) {
- DisallowGarbageCollection no_gc;
- Object url_obj = module_object->script().name();
+ Object url_obj = module_object.script().name();
DCHECK(url_obj.IsString() || url_obj.IsUndefined());
std::unique_ptr<char[]> url =
url_obj.IsString() ? String::cast(url_obj).ToCString() : nullptr;
- code->LogCode(isolate, url.get(), module_object->script().id());
+ code->LogCode(isolate, url.get(), module_object.script().id());
}
counters->wasm_lazily_compiled_functions()->Increment();
@@ -1243,17 +1248,6 @@ bool CompileLazy(Isolate* isolate, Handle<WasmInstanceObject> instance,
WasmCompilationUnit tiering_unit{func_index, tiers.top_tier, kNoDebugging};
compilation_state->CommitTopTierCompilationUnit(tiering_unit);
}
-
- // Allocate feedback vector if needed.
- int feedback_vector_slots = NumFeedbackSlots(module, func_index);
- if (feedback_vector_slots > 0) {
- DCHECK(v8_flags.wasm_speculative_inlining);
- Handle<FixedArray> vector =
- isolate->factory()->NewFixedArrayWithZeroes(feedback_vector_slots);
- instance->feedback_vectors().set(
- declared_function_index(module, func_index), *vector);
- }
-
return true;
}
@@ -1790,7 +1784,8 @@ void InitializeLazyCompilation(NativeModule* native_module) {
}
std::unique_ptr<CompilationUnitBuilder> InitializeCompilation(
- Isolate* isolate, NativeModule* native_module) {
+ Isolate* isolate, NativeModule* native_module,
+ ProfileInformation* pgo_info) {
InitializeLazyCompilation(native_module);
CompilationStateImpl* compilation_state =
Impl(native_module->compilation_state());
@@ -1798,21 +1793,21 @@ std::unique_ptr<CompilationUnitBuilder> InitializeCompilation(
int num_import_wrappers = AddImportWrapperUnits(native_module, builder.get());
int num_export_wrappers =
AddExportWrapperUnits(isolate, native_module, builder.get());
- compilation_state->InitializeCompilationProgress(num_import_wrappers,
- num_export_wrappers);
+ compilation_state->InitializeCompilationProgress(
+ num_import_wrappers, num_export_wrappers, pgo_info);
return builder;
}
bool MayCompriseLazyFunctions(const WasmModule* module,
- const WasmFeatures& enabled_features,
- bool lazy_module) {
- if (lazy_module || enabled_features.has_compilation_hints()) return true;
+ const WasmFeatures& enabled_features) {
+ if (IsLazyModule(module)) return true;
+ if (enabled_features.has_compilation_hints()) return true;
#ifdef ENABLE_SLOW_DCHECKS
int start = module->num_imported_functions;
int end = start + module->num_declared_functions;
for (int func_index = start; func_index < end; func_index++) {
SLOW_DCHECK(GetCompileStrategy(module, enabled_features, func_index,
- lazy_module) != CompileStrategy::kLazy);
+ false) != CompileStrategy::kLazy);
}
#endif
return false;
@@ -1893,19 +1888,18 @@ class CompilationTimeCallback : public CompilationEventCallback {
void CompileNativeModule(Isolate* isolate,
v8::metrics::Recorder::ContextId context_id,
ErrorThrower* thrower, const WasmModule* wasm_module,
- std::shared_ptr<NativeModule> native_module) {
+ std::shared_ptr<NativeModule> native_module,
+ ProfileInformation* pgo_info) {
CHECK(!v8_flags.jitless);
ModuleWireBytes wire_bytes(native_module->wire_bytes());
- const bool lazy_module = IsLazyModule(wasm_module);
if (!v8_flags.wasm_lazy_validation && wasm_module->origin == kWasmOrigin &&
- MayCompriseLazyFunctions(wasm_module, native_module->enabled_features(),
- lazy_module)) {
+ MayCompriseLazyFunctions(wasm_module,
+ native_module->enabled_features())) {
// Validate wasm modules for lazy compilation if requested. Never validate
// asm.js modules as these are valid by construction (additionally a CHECK
// will catch this during lazy compilation).
ValidateSequentially(wasm_module, native_module.get(), isolate->counters(),
- isolate->allocator(), thrower, lazy_module,
- kOnlyLazyFunctions);
+ isolate->allocator(), thrower, kOnlyLazyFunctions);
// On error: Return and leave the module in an unexecutable state.
if (thrower->error()) return;
}
@@ -1922,21 +1916,21 @@ void CompileNativeModule(Isolate* isolate,
// Initialize the compilation units and kick off background compile tasks.
std::unique_ptr<CompilationUnitBuilder> builder =
- InitializeCompilation(isolate, native_module.get());
+ InitializeCompilation(isolate, native_module.get(), pgo_info);
compilation_state->InitializeCompilationUnits(std::move(builder));
compilation_state->WaitForCompilationEvent(
CompilationEvent::kFinishedExportWrappers);
if (compilation_state->failed()) {
- DCHECK_IMPLIES(lazy_module, !v8_flags.wasm_lazy_validation);
+ DCHECK_IMPLIES(IsLazyModule(wasm_module), !v8_flags.wasm_lazy_validation);
ValidateSequentially(wasm_module, native_module.get(), isolate->counters(),
- isolate->allocator(), thrower, lazy_module);
+ isolate->allocator(), thrower);
CHECK(thrower->error());
return;
}
- compilation_state->FinalizeJSToWasmWrappers(isolate, native_module->module());
+ compilation_state->FinalizeJSToWasmWrappers(isolate, wasm_module);
compilation_state->WaitForCompilationEvent(
CompilationEvent::kFinishedBaselineCompilation);
@@ -1944,9 +1938,9 @@ void CompileNativeModule(Isolate* isolate,
compilation_state->PublishDetectedFeatures(isolate);
if (compilation_state->failed()) {
- DCHECK_IMPLIES(lazy_module, !v8_flags.wasm_lazy_validation);
+ DCHECK_IMPLIES(IsLazyModule(wasm_module), !v8_flags.wasm_lazy_validation);
ValidateSequentially(wasm_module, native_module.get(), isolate->counters(),
- isolate->allocator(), thrower, lazy_module);
+ isolate->allocator(), thrower);
CHECK(thrower->error());
}
}
@@ -1988,7 +1982,8 @@ class BackgroundCompileJob final : public JobTask {
std::shared_ptr<NativeModule> CompileToNativeModule(
Isolate* isolate, const WasmFeatures& enabled, ErrorThrower* thrower,
std::shared_ptr<const WasmModule> module, const ModuleWireBytes& wire_bytes,
- int compilation_id, v8::metrics::Recorder::ContextId context_id) {
+ int compilation_id, v8::metrics::Recorder::ContextId context_id,
+ ProfileInformation* pgo_info) {
const WasmModule* wasm_module = module.get();
WasmEngine* engine = GetWasmEngine();
base::OwnedVector<uint8_t> wire_bytes_copy =
@@ -2029,7 +2024,8 @@ std::shared_ptr<NativeModule> CompileToNativeModule(
// Sync compilation is user blocking, so we increase the priority.
native_module->compilation_state()->SetHighPriority();
- CompileNativeModule(isolate, context_id, thrower, wasm_module, native_module);
+ CompileNativeModule(isolate, context_id, thrower, wasm_module, native_module,
+ pgo_info);
bool cache_hit = !engine->UpdateNativeModuleCache(thrower->error(),
&native_module, isolate);
if (thrower->error()) return {};
@@ -2092,7 +2088,6 @@ AsyncCompileJob::AsyncCompileJob(
api_method_name_(api_method_name),
enabled_features_(enabled),
dynamic_tiering_(DynamicTiering{v8_flags.wasm_dynamic_tiering.value()}),
- wasm_lazy_compilation_(v8_flags.wasm_lazy_compilation),
start_time_(base::TimeTicks::Now()),
bytes_copy_(std::move(bytes_copy)),
wire_bytes_(bytes_copy_.get(), bytes_copy_.get() + length),
@@ -2282,7 +2277,7 @@ void AsyncCompileJob::FinishCompile(bool is_after_cache_hit) {
true, // streamed
is_after_cache_hit, // cached
is_after_deserialization, // deserialized
- wasm_lazy_compilation_, // lazy
+ v8_flags.wasm_lazy_compilation, // lazy
!compilation_state->failed(), // success
native_module_->turbofan_code_size(), // code_size_in_bytes
native_module_->liftoff_bailout_count(), // liftoff_bailout_count
@@ -2351,10 +2346,8 @@ void AsyncCompileJob::DecodeFailed(const WasmError& error) {
void AsyncCompileJob::AsyncCompileFailed() {
ErrorThrower thrower(isolate_, api_method_name_);
DCHECK_EQ(native_module_->module()->origin, kWasmOrigin);
- const bool lazy_module = wasm_lazy_compilation_;
ValidateSequentially(native_module_->module(), native_module_.get(),
- isolate_->counters(), isolate_->allocator(), &thrower,
- lazy_module);
+ isolate_->counters(), isolate_->allocator(), &thrower);
DCHECK(thrower.error());
// {job} keeps the {this} pointer alive.
std::shared_ptr<AsyncCompileJob> job =
@@ -2556,6 +2549,38 @@ void AsyncCompileJob::NextStep(Args&&... args) {
step_.reset(new Step(std::forward<Args>(args)...));
}
+WasmError ValidateLazilyCompiledFunctions(const WasmModule* module,
+ ModuleWireBytes wire_bytes,
+ WasmFeatures enabled_features) {
+ if (v8_flags.wasm_lazy_validation) return {};
+ if (!MayCompriseLazyFunctions(module, enabled_features)) return {};
+
+ auto allocator = GetWasmEngine()->allocator();
+
+ // TODO(clemensb): Parallelize this.
+ const bool is_lazy_module = IsLazyModule(module);
+ for (const WasmFunction& function : module->declared_functions()) {
+ if (module->function_was_validated(function.func_index)) continue;
+ base::Vector<const uint8_t> code = wire_bytes.GetFunctionBytes(&function);
+
+ CompileStrategy strategy = GetCompileStrategy(
+ module, enabled_features, function.func_index, is_lazy_module);
+ if (strategy != CompileStrategy::kLazy &&
+ strategy != CompileStrategy::kLazyBaselineEagerTopTier) {
+ continue;
+ }
+ DecodeResult function_result = ValidateSingleFunction(
+ module, function.func_index, code, allocator, enabled_features);
+ if (function_result.failed()) {
+ WasmError error = std::move(function_result).error();
+ return GetWasmErrorWithName(wire_bytes, &function, module,
+ std::move(error));
+ }
+ module->set_function_validated(function.func_index);
+ }
+ return {};
+}
+
//==========================================================================
// Step 1: (async) Decode the module.
//==========================================================================
@@ -2581,37 +2606,12 @@ class AsyncCompileJob::DecodeModule : public AsyncCompileJob::CompileStep {
DecodingMethod::kAsync, GetWasmEngine()->allocator());
// Validate lazy functions here if requested.
- if (!v8_flags.wasm_lazy_validation && result.ok()) {
+ if (result.ok()) {
const WasmModule* module = result.value().get();
- DCHECK_EQ(module->origin, kWasmOrigin);
- const bool lazy_module = job->wasm_lazy_compilation_;
- if (MayCompriseLazyFunctions(module, enabled_features, lazy_module)) {
- auto allocator = GetWasmEngine()->allocator();
- int start = module->num_imported_functions;
- int end = start + module->num_declared_functions;
-
- for (int func_index = start; func_index < end; func_index++) {
- const WasmFunction* func = &module->functions[func_index];
- base::Vector<const uint8_t> code =
- job->wire_bytes_.GetFunctionBytes(func);
-
- CompileStrategy strategy = GetCompileStrategy(
- module, enabled_features, func_index, lazy_module);
- bool validate_lazily_compiled_function =
- strategy == CompileStrategy::kLazy ||
- strategy == CompileStrategy::kLazyBaselineEagerTopTier;
- if (validate_lazily_compiled_function) {
- DecodeResult function_result = ValidateSingleFunction(
- module, func_index, code, allocator, enabled_features);
- if (function_result.failed()) {
- WasmError error = function_result.error();
- WasmError error_with_name = GetWasmErrorWithName(
- job->wire_bytes_, func, module, std::move(error));
- result = ModuleResult(std::move(error_with_name));
- break;
- }
- }
- }
+ WasmError validation_error = ValidateLazilyCompiledFunctions(
+ module, job->wire_bytes_, job->enabled_features_);
+ if (validation_error.has_error()) {
+ result = ModuleResult{std::move(validation_error)};
}
}
}
@@ -2675,6 +2675,25 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
code_size_estimate_)) {
job->FinishCompile(true);
return;
+ } else {
+ // If we are not streaming and did not get a cache hit, we might have hit
+ // the path where the streaming decoder got a prefix cache hit, but the
+ // module then turned out to be invalid, and we are running it through
+ // non-streaming decoding again. In this case, function bodies have not
+ // been validated yet (would have happened in the {DecodeModule} phase
+ // if we would not come via the non-streaming path). Thus do this now.
+ // Note that we only need to validate lazily compiled functions, others
+ // will be validated during eager compilation.
+ DCHECK(start_compilation_);
+ if (ValidateLazilyCompiledFunctions(
+ module_.get(), ModuleWireBytes{job->native_module_->wire_bytes()},
+ job->native_module_->enabled_features())
+ .has_error()) {
+ // TODO(clemensb): Use the error message instead of re-validation in
+ // {AsyncCompileFailed}.
+ job->AsyncCompileFailed();
+ return;
+ }
}
// Make sure all compilation tasks stopped running. Decoding (async step)
@@ -2695,12 +2714,14 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
}
if (start_compilation_) {
- std::unique_ptr<CompilationUnitBuilder> builder =
- InitializeCompilation(job->isolate(), job->native_module_.get());
+ // TODO(13209): Use PGO for async compilation, if available.
+ constexpr ProfileInformation* kNoProfileInformation = nullptr;
+ std::unique_ptr<CompilationUnitBuilder> builder = InitializeCompilation(
+ job->isolate(), job->native_module_.get(), kNoProfileInformation);
compilation_state->InitializeCompilationUnits(std::move(builder));
- // We are in single-threaded mode, so there are no worker tasks that will
- // do the compilation. We call {WaitForCompilationEvent} here so that the
- // main thread paticipates and finishes the compilation.
+ // In single-threaded mode there are no worker tasks that will do the
+ // compilation. We call {WaitForCompilationEvent} here so that the main
+ // thread participates and finishes the compilation.
if (v8_flags.wasm_num_compilation_tasks == 0) {
compilation_state->WaitForCompilationEvent(
CompilationEvent::kFinishedBaselineCompilation);
@@ -2915,8 +2936,10 @@ bool AsyncStreamingProcessor::ProcessCodeSectionHeader(
// Set outstanding_finishers_ to 2, because both the AsyncCompileJob and the
// AsyncStreamingProcessor have to finish.
job_->outstanding_finishers_ = 2;
- compilation_unit_builder_ =
- InitializeCompilation(job_->isolate(), job_->native_module_.get());
+ // TODO(13209): Use PGO for streaming compilation, if available.
+ constexpr ProfileInformation* kNoProfileInformation = nullptr;
+ compilation_unit_builder_ = InitializeCompilation(
+ job_->isolate(), job_->native_module_.get(), kNoProfileInformation);
return true;
}
@@ -2930,7 +2953,7 @@ void AsyncStreamingProcessor::ProcessFunctionBody(
// In case of {prefix_cache_hit} we still need the function body to be
// decoded. Otherwise a later cache miss cannot be handled.
decoder_.DecodeFunctionBody(func_index, static_cast<uint32_t>(bytes.length()),
- offset, false);
+ offset);
if (prefix_cache_hit_) {
// Don't compile yet if we might have a cache hit.
@@ -2948,7 +2971,7 @@ void AsyncStreamingProcessor::ProcessFunctionBody(
const WasmModule* module = decoder_.module();
auto enabled_features = job_->enabled_features_;
DCHECK_EQ(module->origin, kWasmOrigin);
- const bool lazy_module = job_->wasm_lazy_compilation_;
+ const bool lazy_module = v8_flags.wasm_lazy_compilation;
CompileStrategy strategy =
GetCompileStrategy(module, enabled_features, func_index, lazy_module);
bool validate_lazily_compiled_function =
@@ -3181,8 +3204,49 @@ void CompilationStateImpl::ApplyCompilationHintToInitialProgress(
(old_baseline_tier != ExecutionTier::kNone);
}
+void CompilationStateImpl::ApplyPgoInfoToInitialProgress(
+ ProfileInformation* pgo_info) {
+ // Functions that were executed in the profiling run are eagerly compiled to
+ // Liftoff.
+ const WasmModule* module = native_module_->module();
+ for (int func_index : pgo_info->executed_functions()) {
+ uint8_t& progress =
+ compilation_progress_[declared_function_index(module, func_index)];
+ ExecutionTier old_baseline_tier =
+ RequiredBaselineTierField::decode(progress);
+ // If the function is already marked for eager compilation, we are good.
+ if (old_baseline_tier != ExecutionTier::kNone) continue;
+
+ // Set the baseline tier to Liftoff, so we eagerly compile to Liftoff.
+ // TODO(13288): Compile Liftoff code in the background, if lazy compilation
+ // is enabled.
+ progress =
+ RequiredBaselineTierField::update(progress, ExecutionTier::kLiftoff);
+ ++outstanding_baseline_units_;
+ }
+
+ // Functions that were tiered up during PGO generation are eagerly compiled to
+ // TurboFan (in the background, not blocking instantiation).
+ for (int func_index : pgo_info->tiered_up_functions()) {
+ uint8_t& progress =
+ compilation_progress_[declared_function_index(module, func_index)];
+ ExecutionTier old_baseline_tier =
+ RequiredBaselineTierField::decode(progress);
+ ExecutionTier old_top_tier = RequiredTopTierField::decode(progress);
+ // If the function is already marked for eager or background compilation to
+ // TurboFan, we are good.
+ if (old_baseline_tier == ExecutionTier::kTurbofan) continue;
+ if (old_top_tier == ExecutionTier::kTurbofan) continue;
+
+ // Set top tier to TurboFan, so we eagerly trigger compilation in the
+ // background.
+ progress = RequiredTopTierField::update(progress, ExecutionTier::kTurbofan);
+ }
+}
+
void CompilationStateImpl::InitializeCompilationProgress(
- int num_import_wrappers, int num_export_wrappers) {
+ int num_import_wrappers, int num_export_wrappers,
+ ProfileInformation* pgo_info) {
DCHECK(!failed());
auto* module = native_module_->module();
@@ -3213,6 +3277,9 @@ void CompilationStateImpl::InitializeCompilationProgress(
}
}
+ // Apply PGO information, if available.
+ if (pgo_info) ApplyPgoInfoToInitialProgress(pgo_info);
+
// Account for outstanding wrapper compilation.
outstanding_baseline_units_ += num_import_wrappers;
outstanding_export_wrappers_ = num_export_wrappers;
diff --git a/deps/v8/src/wasm/module-compiler.h b/deps/v8/src/wasm/module-compiler.h
index 4c3b350046..0e1c33b257 100644
--- a/deps/v8/src/wasm/module-compiler.h
+++ b/deps/v8/src/wasm/module-compiler.h
@@ -49,6 +49,7 @@ class CompilationResultResolver;
class ErrorThrower;
class ModuleCompiler;
class NativeModule;
+class ProfileInformation;
class StreamingDecoder;
class WasmCode;
struct WasmModule;
@@ -57,7 +58,8 @@ V8_EXPORT_PRIVATE
std::shared_ptr<NativeModule> CompileToNativeModule(
Isolate* isolate, const WasmFeatures& enabled, ErrorThrower* thrower,
std::shared_ptr<const WasmModule> module, const ModuleWireBytes& wire_bytes,
- int compilation_id, v8::metrics::Recorder::ContextId context_id);
+ int compilation_id, v8::metrics::Recorder::ContextId context_id,
+ ProfileInformation* pgo_info);
void RecompileNativeModule(NativeModule* native_module,
TieringState new_tiering_state);
@@ -78,7 +80,7 @@ WasmCode* CompileImportWrapper(
// Triggered by the WasmCompileLazy builtin. The return value indicates whether
// compilation was successful. Lazy compilation can fail only if validation is
// also lazy.
-bool CompileLazy(Isolate*, Handle<WasmInstanceObject>, int func_index);
+bool CompileLazy(Isolate*, WasmInstanceObject, int func_index);
// Throws the compilation error after failed lazy compilation.
void ThrowLazyCompilationError(Isolate* isolate,
@@ -258,7 +260,6 @@ class AsyncCompileJob {
const char* const api_method_name_;
const WasmFeatures enabled_features_;
const DynamicTiering dynamic_tiering_;
- const bool wasm_lazy_compilation_;
base::TimeTicks start_time_;
// Copy of the module wire bytes, moved into the {native_module_} on its
// creation.
diff --git a/deps/v8/src/wasm/module-decoder-impl.h b/deps/v8/src/wasm/module-decoder-impl.h
index aeebf2633e..9cecb23da3 100644
--- a/deps/v8/src/wasm/module-decoder-impl.h
+++ b/deps/v8/src/wasm/module-decoder-impl.h
@@ -470,8 +470,7 @@ class ModuleDecoderTemplate : public Decoder {
}
void DecodeSection(SectionCode section_code,
- base::Vector<const uint8_t> bytes, uint32_t offset,
- bool validate_functions = true) {
+ base::Vector<const uint8_t> bytes, uint32_t offset) {
if (failed()) return;
Reset(bytes, offset);
TRACE("Section: %s\n", SectionName(section_code));
@@ -507,7 +506,7 @@ class ModuleDecoderTemplate : public Decoder {
DecodeStartSection();
break;
case kCodeSectionCode:
- DecodeCodeSection(validate_functions);
+ DecodeCodeSection();
break;
case kElementSectionCode:
DecodeElementSection();
@@ -562,13 +561,7 @@ class ModuleDecoderTemplate : public Decoder {
DecodeDataCountSection();
break;
case kTagSectionCode:
- if (enabled_features_.has_eh()) {
- DecodeTagSection();
- } else {
- errorf(pc(),
- "unexpected section <%s> (enable with --experimental-wasm-eh)",
- SectionName(section_code));
- }
+ DecodeTagSection();
break;
case kStringRefSectionCode:
if (enabled_features_.has_stringref()) {
@@ -640,7 +633,7 @@ class ModuleDecoderTemplate : public Decoder {
TypeDefinition consume_subtype_definition() {
DCHECK(enabled_features_.has_gc());
- uint8_t kind = read_u8<Decoder::kFullValidation>(pc(), "type kind");
+ uint8_t kind = read_u8<Decoder::FullValidationTag>(pc(), "type kind");
if (kind == kWasmSubtypeCode) {
consume_bytes(1, " subtype, ", tracer_);
constexpr uint32_t kMaximumSupertypes = 1;
@@ -672,7 +665,8 @@ class ModuleDecoderTemplate : public Decoder {
for (uint32_t i = 0; i < types_count; ++i) {
TRACE("DecodeSignature[%d] module+%d\n", i,
static_cast<int>(pc_ - start_));
- uint8_t opcode = read_u8<kFullValidation>(pc(), "signature definition");
+ uint8_t opcode =
+ read_u8<FullValidationTag>(pc(), "signature definition");
tracer_.Bytes(pc_, 1);
tracer_.TypeOffset(pc_offset());
tracer_.Description(" kind: ");
@@ -706,7 +700,7 @@ class ModuleDecoderTemplate : public Decoder {
for (uint32_t i = 0; ok() && i < types_count; ++i) {
TRACE("DecodeType[%d] module+%d\n", i, static_cast<int>(pc_ - start_));
- uint8_t kind = read_u8<Decoder::kFullValidation>(pc(), "type kind");
+ uint8_t kind = read_u8<Decoder::FullValidationTag>(pc(), "type kind");
if (kind == kWasmRecursiveTypeGroupCode) {
consume_bytes(1, "rec. group definition", tracer_);
tracer_.NextLine();
@@ -809,30 +803,32 @@ class ModuleDecoderTemplate : public Decoder {
break;
}
table->type = type;
- uint8_t flags = validate_table_flags("element count");
+ consume_table_flags("element count", &table->has_maximum_size);
consume_resizable_limits(
"element count", "elements", std::numeric_limits<uint32_t>::max(),
- &table->initial_size, &table->has_maximum_size,
+ &table->initial_size, table->has_maximum_size,
std::numeric_limits<uint32_t>::max(), &table->maximum_size,
- flags);
+ k32BitLimits);
break;
}
case kExternalMemory: {
// ===== Imported memory =============================================
if (!AddMemory(module_.get())) break;
- uint8_t flags = validate_memory_flags(&module_->has_shared_memory,
- &module_->is_memory64);
+ consume_memory_flags(&module_->has_shared_memory,
+ &module_->is_memory64,
+ &module_->has_maximum_pages);
uint32_t max_pages = module_->is_memory64 ? kSpecMaxMemory64Pages
: kSpecMaxMemory32Pages;
- consume_resizable_limits("memory", "pages", max_pages,
- &module_->initial_pages,
- &module_->has_maximum_pages, max_pages,
- &module_->maximum_pages, flags);
+ consume_resizable_limits(
+ "memory", "pages", max_pages, &module_->initial_pages,
+ module_->has_maximum_pages, max_pages, &module_->maximum_pages,
+ module_->is_memory64 ? k64BitLimits : k32BitLimits);
break;
}
case kExternalGlobal: {
// ===== Imported global =============================================
import->index = static_cast<uint32_t>(module_->globals.size());
+ module_->num_imported_globals++;
module_->globals.push_back({kWasmVoid, false, {}, {0}, true, false});
WasmGlobal* global = &module_->globals.back();
global->type = consume_value_type();
@@ -845,11 +841,8 @@ class ModuleDecoderTemplate : public Decoder {
}
case kExternalTag: {
// ===== Imported tag ================================================
- if (!enabled_features_.has_eh()) {
- errorf(pos, "unknown import kind 0x%02x", import->kind);
- break;
- }
import->index = static_cast<uint32_t>(module_->tags.size());
+ module_->num_imported_tags++;
const WasmTagSig* tag_sig = nullptr;
consume_exception_attribute(); // Attribute ignored for now.
consume_tag_sig_index(module_.get(), &tag_sig);
@@ -875,23 +868,19 @@ class ModuleDecoderTemplate : public Decoder {
DCHECK_EQ(module_->functions.size(), module_->num_imported_functions);
uint32_t total_function_count =
module_->num_imported_functions + functions_count;
- module_->functions.reserve(total_function_count);
+ module_->functions.resize(total_function_count);
module_->num_declared_functions = functions_count;
- for (uint32_t i = 0; i < functions_count; ++i) {
- uint32_t func_index = static_cast<uint32_t>(module_->functions.size());
- module_->functions.push_back({nullptr, // sig
- func_index, // func_index
- 0, // sig_index
- {0, 0}, // code
- false, // imported
- false, // exported
- false}); // declared
- WasmFunction* function = &module_->functions.back();
- tracer_.FunctionName(module_->num_imported_functions + i);
+ DCHECK_NULL(module_->validated_functions);
+ module_->validated_functions =
+ std::make_unique<std::atomic<uint8_t>[]>((functions_count + 7) / 8);
+ for (uint32_t func_index = module_->num_imported_functions;
+ func_index < total_function_count; ++func_index) {
+ WasmFunction* function = &module_->functions[func_index];
+ function->func_index = func_index;
+ tracer_.FunctionName(func_index);
function->sig_index = consume_sig_index(module_.get(), &function->sig);
if (!ok()) return;
}
- DCHECK_EQ(module_->functions.size(), total_function_count);
}
void DecodeTableSection() {
@@ -905,7 +894,7 @@ class ModuleDecoderTemplate : public Decoder {
bool has_initializer = false;
if (enabled_features_.has_typed_funcref() &&
- read_u8<Decoder::kFullValidation>(
+ read_u8<Decoder::FullValidationTag>(
pc(), "table-with-initializer byte") == 0x40) {
consume_bytes(1, "table-with-initializer byte");
has_initializer = true;
@@ -924,11 +913,12 @@ class ModuleDecoderTemplate : public Decoder {
}
table->type = table_type;
- uint8_t flags = validate_table_flags("table elements");
- consume_resizable_limits(
- "table elements", "elements", std::numeric_limits<uint32_t>::max(),
- &table->initial_size, &table->has_maximum_size,
- std::numeric_limits<uint32_t>::max(), &table->maximum_size, flags);
+ consume_table_flags("table elements", &table->has_maximum_size);
+ consume_resizable_limits("table elements", "elements",
+ std::numeric_limits<uint32_t>::max(),
+ &table->initial_size, table->has_maximum_size,
+ std::numeric_limits<uint32_t>::max(),
+ &table->maximum_size, k32BitLimits);
if (has_initializer) {
table->initial_value = consume_init_expr(module_.get(), table_type);
@@ -942,14 +932,14 @@ class ModuleDecoderTemplate : public Decoder {
for (uint32_t i = 0; ok() && i < memory_count; i++) {
tracer_.MemoryOffset(pc_offset());
if (!AddMemory(module_.get())) break;
- uint8_t flags = validate_memory_flags(&module_->has_shared_memory,
- &module_->is_memory64);
+ consume_memory_flags(&module_->has_shared_memory, &module_->is_memory64,
+ &module_->has_maximum_pages);
uint32_t max_pages =
module_->is_memory64 ? kSpecMaxMemory64Pages : kSpecMaxMemory32Pages;
- consume_resizable_limits("memory", "pages", max_pages,
- &module_->initial_pages,
- &module_->has_maximum_pages, max_pages,
- &module_->maximum_pages, flags);
+ consume_resizable_limits(
+ "memory", "pages", max_pages, &module_->initial_pages,
+ module_->has_maximum_pages, max_pages, &module_->maximum_pages,
+ module_->is_memory64 ? k64BitLimits : k32BitLimits);
}
}
@@ -1034,10 +1024,6 @@ class ModuleDecoderTemplate : public Decoder {
break;
}
case kExternalTag: {
- if (!enabled_features_.has_eh()) {
- errorf(pos, "invalid export kind 0x%02x", exp->kind);
- break;
- }
WasmTag* tag = nullptr;
exp->index = consume_tag_index(module_.get(), &tag);
break;
@@ -1118,7 +1104,7 @@ class ModuleDecoderTemplate : public Decoder {
}
}
- void DecodeCodeSection(bool validate_functions) {
+ void DecodeCodeSection() {
// Make sure global offset were calculated before they get accessed during
// function compilation.
CalculateGlobalOffsets(module_.get());
@@ -1148,7 +1134,7 @@ class ModuleDecoderTemplate : public Decoder {
uint32_t offset = pc_offset();
consume_bytes(size, "function body");
if (failed()) break;
- DecodeFunctionBody(function_index, size, offset, validate_functions);
+ DecodeFunctionBody(function_index, size, offset);
// Now that the function has been decoded, we can compute module offsets.
for (; inst_traces_it != this->inst_traces_.end() &&
@@ -1191,17 +1177,11 @@ class ModuleDecoderTemplate : public Decoder {
return true;
}
- void DecodeFunctionBody(uint32_t index, uint32_t length, uint32_t offset,
- bool validate_functions) {
- WasmFunction* function = &module_->functions[index];
+ void DecodeFunctionBody(uint32_t func_index, uint32_t length,
+ uint32_t offset) {
+ WasmFunction* function = &module_->functions[func_index];
function->code = {offset, length};
tracer_.FunctionBody(function, pc_ - (pc_offset() - offset));
- if (validate_functions) {
- ModuleWireBytes bytes(module_start_, module_end_);
- ValidateFunctionBody(module_->signature_zone->allocator(),
- index + module_->num_imported_functions, bytes,
- module_.get(), function);
- }
}
bool CheckDataSegmentsCount(uint32_t data_segments_count) {
@@ -1639,9 +1619,20 @@ class ModuleDecoderTemplate : public Decoder {
return toResult(std::move(module_));
}
+ void ValidateAllFunctions() {
+ DCHECK(ok());
+
+ // Spawn a {ValidateFunctionsTask} and join it. The earliest error found
+ // will be set on this decoder.
+ std::unique_ptr<JobHandle> job_handle = V8::GetCurrentPlatform()->CreateJob(
+ TaskPriority::kUserVisible,
+ std::make_unique<ValidateFunctionsTask>(this));
+ job_handle->Join();
+ }
+
// Decodes an entire module.
ModuleResult DecodeModule(Counters* counters, AccountingAllocator* allocator,
- bool validate_functions = true) {
+ bool validate_functions) {
StartDecoding(counters, allocator);
uint32_t offset = 0;
base::Vector<const byte> orig_bytes(start(), end() - start());
@@ -1660,7 +1651,7 @@ class ModuleDecoderTemplate : public Decoder {
offset += section_iter.payload_start() - section_iter.section_start();
if (section_iter.section_code() != SectionCode::kUnknownSectionCode) {
DecodeSection(section_iter.section_code(), section_iter.payload(),
- offset, validate_functions);
+ offset);
}
// Shift the offset by the remaining section payload
offset += section_iter.payload_length();
@@ -1668,6 +1659,11 @@ class ModuleDecoderTemplate : public Decoder {
section_iter.advance(true);
}
+ if (ok() && validate_functions) {
+ Reset(orig_bytes);
+ ValidateAllFunctions();
+ }
+
if (v8_flags.dump_wasm_module) DumpModule(orig_bytes);
if (decoder.failed()) {
@@ -1680,16 +1676,25 @@ class ModuleDecoderTemplate : public Decoder {
// Decodes a single anonymous function starting at {start_}.
FunctionResult DecodeSingleFunctionForTesting(
Zone* zone, const ModuleWireBytes& wire_bytes, const WasmModule* module) {
+ DCHECK(ok());
pc_ = start_;
expect_u8("type form", kWasmFunctionTypeCode);
WasmFunction function;
function.sig = consume_sig(zone);
function.code = {off(pc_), static_cast<uint32_t>(end_ - pc_)};
- if (!ok()) return FunctionResult{std::move(error_)};
- ValidateFunctionBody(zone->allocator(), 0, wire_bytes, module, &function);
if (!ok()) return FunctionResult{std::move(error_)};
+ AccountingAllocator* allocator = zone->allocator();
+
+ FunctionBody body{function.sig, off(pc_), pc_, end_};
+
+ WasmFeatures unused_detected_features;
+ DecodeResult result = ValidateFunctionBody(
+ allocator, enabled_features_, module, &unused_detected_features, body);
+
+ if (result.failed()) return FunctionResult{std::move(result).error()};
+
return FunctionResult{std::make_unique<WasmFunction>(function)};
}
@@ -1803,37 +1808,6 @@ class ModuleDecoderTemplate : public Decoder {
module->tagged_globals_buffer_size = tagged_offset;
}
- // Verifies the body (code) of a given function.
- void ValidateFunctionBody(AccountingAllocator* allocator, uint32_t func_num,
- const ModuleWireBytes& wire_bytes,
- const WasmModule* module, WasmFunction* function) {
- if (v8_flags.trace_wasm_decoder) {
- WasmFunctionName func_name(function,
- wire_bytes.GetNameOrNull(function, module));
- StdoutStream{} << "Verifying wasm function " << func_name << std::endl;
- }
- FunctionBody body = {
- function->sig, function->code.offset(),
- start_ + GetBufferRelativeOffset(function->code.offset()),
- start_ + GetBufferRelativeOffset(function->code.end_offset())};
-
- WasmFeatures unused_detected_features = WasmFeatures::None();
- DecodeResult result = wasm::ValidateFunctionBody(
- allocator, enabled_features_, module, &unused_detected_features, body);
-
- // If the decode failed and this is the first error, set error code and
- // location.
- if (result.failed() && error_.empty()) {
- // Wrap the error message from the function decoder.
- WasmFunctionName func_name(function,
- wire_bytes.GetNameOrNull(function, module));
- std::ostringstream error_msg;
- error_msg << "in function " << func_name << ": "
- << result.error().message();
- error_ = WasmError{result.error().offset(), error_msg.str()};
- }
- }
-
uint32_t consume_sig_index(WasmModule* module, const FunctionSig** sig) {
const byte* pos = pc_;
uint32_t sig_index = consume_u32v("signature index");
@@ -1909,74 +1883,65 @@ class ModuleDecoderTemplate : public Decoder {
return index;
}
- uint8_t validate_table_flags(const char* name) {
+ void consume_table_flags(const char* name, bool* has_maximum_out) {
tracer_.Bytes(pc_, 1);
uint8_t flags = consume_u8("table limits flags");
tracer_.Description(flags == kNoMaximum ? " no maximum" : " with maximum");
tracer_.NextLine();
- static_assert(kNoMaximum < kWithMaximum);
+ static_assert(kNoMaximum == 0 && kWithMaximum == 1);
+ *has_maximum_out = flags == kWithMaximum;
if (V8_UNLIKELY(flags > kWithMaximum)) {
errorf(pc() - 1, "invalid %s limits flags", name);
}
- return flags;
}
- uint8_t validate_memory_flags(bool* has_shared_memory, bool* is_memory64) {
+ void consume_memory_flags(bool* is_shared_out, bool* is_memory64_out,
+ bool* has_maximum_out) {
tracer_.Bytes(pc_, 1);
uint8_t flags = consume_u8("memory limits flags");
- *has_shared_memory = false;
- switch (flags) {
- case kNoMaximum:
- case kWithMaximum:
- break;
- case kSharedNoMaximum:
- case kSharedWithMaximum:
- if (!enabled_features_.has_threads()) {
- errorf(pc() - 1,
- "invalid memory limits flags 0x%x (enable via "
- "--experimental-wasm-threads)",
- flags);
- }
- *has_shared_memory = true;
- // V8 does not support shared memory without a maximum.
- if (flags == kSharedNoMaximum) {
- errorf(pc() - 1,
- "memory limits flags must have maximum defined if shared is "
- "true");
- }
- break;
- case kMemory64NoMaximum:
- case kMemory64WithMaximum:
- if (!enabled_features_.has_memory64()) {
- errorf(pc() - 1,
- "invalid memory limits flags 0x%x (enable via "
- "--experimental-wasm-memory64)",
- flags);
- }
- *is_memory64 = true;
- break;
- default:
- errorf(pc() - 1, "invalid memory limits flags 0x%x", flags);
- break;
- }
- if (*has_shared_memory) tracer_.Description(" shared");
- if (*is_memory64) tracer_.Description(" mem64");
- tracer_.Description((flags & 1) ? " with maximum" : " no maximum");
+ // Flags 0..7 are valid (3 bits).
+ if (flags & ~0x7) {
+ errorf(pc() - 1, "invalid memory limits flags 0x%x", flags);
+ }
+ // Decode the three bits.
+ bool has_maximum = flags & 0x1;
+ bool is_shared = flags & 0x2;
+ bool is_memory64 = flags & 0x4;
+ // Store into output parameters.
+ *has_maximum_out = has_maximum;
+ *is_shared_out = is_shared;
+ *is_memory64_out = is_memory64;
+
+ // V8 does not support shared memory without a maximum.
+ if (is_shared && !has_maximum) {
+ errorf(pc() - 1, "shared memory must have a maximum defined");
+ }
+
+ if (is_memory64 && !enabled_features_.has_memory64()) {
+ errorf(pc() - 1,
+ "invalid memory limits flags 0x%x (enable via "
+ "--experimental-wasm-memory64)",
+ flags);
+ }
+
+ // Tracing.
+ if (is_shared) tracer_.Description(" shared");
+ if (is_memory64) tracer_.Description(" mem64");
+ tracer_.Description(has_maximum ? " with maximum" : " no maximum");
tracer_.NextLine();
- return flags;
}
+ enum ResizableLimitsType : bool { k32BitLimits, k64BitLimits };
void consume_resizable_limits(const char* name, const char* units,
uint32_t max_initial, uint32_t* initial,
- bool* has_max, uint32_t max_maximum,
- uint32_t* maximum, uint8_t flags) {
+ bool has_maximum, uint32_t max_maximum,
+ uint32_t* maximum, ResizableLimitsType type) {
const byte* pos = pc();
- // For memory64 we need to read the numbers as LEB-encoded 64-bit unsigned
- // integer. All V8 limits are still within uint32_t range though.
- const bool is_memory64 =
- flags == kMemory64NoMaximum || flags == kMemory64WithMaximum;
- uint64_t initial_64 = is_memory64 ? consume_u64v("initial size", tracer_)
- : consume_u32v("initial size", tracer_);
+ // Note that even if we read the values as 64-bit value, all V8 limits are
+ // still within uint32_t range.
+ uint64_t initial_64 = type == k64BitLimits
+ ? consume_u64v("initial size", tracer_)
+ : consume_u32v("initial size", tracer_);
if (initial_64 > max_initial) {
errorf(pos,
"initial %s size (%" PRIu64
@@ -1986,11 +1951,11 @@ class ModuleDecoderTemplate : public Decoder {
*initial = static_cast<uint32_t>(initial_64);
tracer_.Description(*initial);
tracer_.NextLine();
- if (flags & 1) {
- *has_max = true;
+ if (has_maximum) {
pos = pc();
- uint64_t maximum_64 = is_memory64 ? consume_u64v("maximum size", tracer_)
- : consume_u32v("maximum size", tracer_);
+ uint64_t maximum_64 = type == k64BitLimits
+ ? consume_u64v("maximum size", tracer_)
+ : consume_u32v("maximum size", tracer_);
if (maximum_64 > max_maximum) {
errorf(pos,
"maximum %s size (%" PRIu64
@@ -2006,7 +1971,6 @@ class ModuleDecoderTemplate : public Decoder {
tracer_.Description(*maximum);
tracer_.NextLine();
} else {
- *has_max = false;
*maximum = max_initial;
}
}
@@ -2048,7 +2012,7 @@ class ModuleDecoderTemplate : public Decoder {
switch (static_cast<WasmOpcode>(*pc())) {
case kExprI32Const: {
int32_t value =
- read_i32v<kFullValidation>(pc() + 1, &length, "i32.const");
+ read_i32v<FullValidationTag>(pc() + 1, &length, "i32.const");
if (V8_UNLIKELY(failed())) return {};
if (V8_LIKELY(lookahead(1 + length, kExprEnd))) {
TYPE_CHECK(kWasmI32)
@@ -2060,7 +2024,7 @@ class ModuleDecoderTemplate : public Decoder {
}
case kExprRefFunc: {
uint32_t index =
- read_u32v<kFullValidation>(pc() + 1, &length, "ref.func");
+ read_u32v<FullValidationTag>(pc() + 1, &length, "ref.func");
if (V8_UNLIKELY(failed())) return {};
if (V8_LIKELY(lookahead(1 + length, kExprEnd))) {
if (V8_UNLIKELY(index >= module_->functions.size())) {
@@ -2080,8 +2044,10 @@ class ModuleDecoderTemplate : public Decoder {
break;
}
case kExprRefNull: {
- HeapType type = value_type_reader::read_heap_type<kFullValidation>(
- this, pc() + 1, &length, module_.get(), enabled_features_);
+ HeapType type = value_type_reader::read_heap_type<FullValidationTag>(
+ this, pc() + 1, &length, enabled_features_);
+ value_type_reader::ValidateHeapType<FullValidationTag>(
+ this, pc_, module_.get(), type);
if (V8_UNLIKELY(failed())) return {};
if (V8_LIKELY(lookahead(1 + length, kExprEnd))) {
TYPE_CHECK(ValueType::RefNull(type))
@@ -2100,7 +2066,7 @@ class ModuleDecoderTemplate : public Decoder {
auto sig = FixedSizeSignature<ValueType>::Returns(expected);
FunctionBody body(&sig, buffer_offset_, pc_, end_);
WasmFeatures detected;
- WasmFullDecoder<Decoder::kFullValidation, ConstantExpressionInterface,
+ WasmFullDecoder<Decoder::FullValidationTag, ConstantExpressionInterface,
kConstantExpression>
decoder(&init_expr_zone_, module, enabled_features_, &detected, body,
module);
@@ -2139,9 +2105,11 @@ class ModuleDecoderTemplate : public Decoder {
ValueType consume_value_type() {
uint32_t type_length;
- ValueType result = value_type_reader::read_value_type<kFullValidation>(
- this, pc_, &type_length, module_.get(),
+ ValueType result = value_type_reader::read_value_type<FullValidationTag>(
+ this, pc_, &type_length,
origin_ == kWasmOrigin ? enabled_features_ : WasmFeatures::None());
+ value_type_reader::ValidateValueType<FullValidationTag>(
+ this, pc_, module_.get(), result);
tracer_.Bytes(pc_, type_length);
tracer_.Description(result);
consume_bytes(type_length, "value type");
@@ -2150,8 +2118,10 @@ class ModuleDecoderTemplate : public Decoder {
HeapType consume_super_type() {
uint32_t type_length;
- HeapType result = value_type_reader::read_heap_type<kFullValidation>(
- this, pc_, &type_length, module_.get(), enabled_features_);
+ HeapType result = value_type_reader::read_heap_type<FullValidationTag>(
+ this, pc_, &type_length, enabled_features_);
+ value_type_reader::ValidateValueType<FullValidationTag>(
+ this, pc_, module_.get(), result);
tracer_.Bytes(pc_, type_length);
tracer_.Description(result);
consume_bytes(type_length, "heap type");
@@ -2159,7 +2129,7 @@ class ModuleDecoderTemplate : public Decoder {
}
ValueType consume_storage_type() {
- uint8_t opcode = read_u8<kFullValidation>(this->pc());
+ uint8_t opcode = read_u8<FullValidationTag>(this->pc());
switch (opcode) {
case kI8Code:
consume_bytes(1, " i8", tracer_);
@@ -2428,6 +2398,86 @@ class ModuleDecoderTemplate : public Decoder {
func->declared = true;
return index;
}
+
+ // A task that validates multiple functions in parallel, storing the earliest
+ // validation error in {this} decoder.
+ class ValidateFunctionsTask : public JobTask {
+ public:
+ ValidateFunctionsTask(ModuleDecoderTemplate* decoder)
+ : decoder_(decoder),
+ next_function_(decoder->module_->num_imported_functions),
+ after_last_function_(next_function_ +
+ decoder->module_->num_declared_functions) {}
+
+ void Run(JobDelegate* delegate) override {
+ AccountingAllocator* allocator = decoder_->module_->allocator();
+ do {
+ // Get the index of the next function to validate.
+ // {fetch_add} might overrun {after_last_function_} by a bit. Since the
+ // number of functions is limited to a value much smaller than the
+ // integer range, this is highly unlikely.
+ static_assert(kV8MaxWasmFunctions < kMaxInt / 2);
+ int func_index = next_function_.fetch_add(1, std::memory_order_relaxed);
+ if (V8_UNLIKELY(func_index >= after_last_function_)) return;
+ DCHECK_LE(0, func_index);
+
+ if (!ValidateFunction(allocator, func_index)) {
+ // No need to validate any more functions.
+ next_function_.store(after_last_function_, std::memory_order_relaxed);
+ return;
+ }
+ } while (!delegate->ShouldYield());
+ }
+
+ size_t GetMaxConcurrency(size_t /* worker_count */) const override {
+ int next_func = next_function_.load(std::memory_order_relaxed);
+ return std::max(0, after_last_function_ - next_func);
+ }
+
+ private:
+ // Validate a single function; use {SetError} on errors.
+ bool ValidateFunction(AccountingAllocator* allocator, int func_index) {
+ DCHECK(!decoder_->module_->function_was_validated(func_index));
+ WasmFeatures unused_detected_features;
+ const WasmFunction& function = decoder_->module_->functions[func_index];
+ FunctionBody body{function.sig, function.code.offset(),
+ decoder_->start_ + function.code.offset(),
+ decoder_->start_ + function.code.end_offset()};
+ DecodeResult validation_result = ValidateFunctionBody(
+ allocator, decoder_->enabled_features_, decoder_->module_.get(),
+ &unused_detected_features, body);
+ if (V8_UNLIKELY(validation_result.failed())) {
+ SetError(func_index, std::move(validation_result).error());
+ return false;
+ }
+ decoder_->module_->set_function_validated(func_index);
+ return true;
+ }
+
+ // Set the error from the argument if it's earlier than the error we already
+ // have (or if we have none yet). Thread-safe.
+ void SetError(int func_index, WasmError error) {
+ base::MutexGuard mutex_guard{&set_error_mutex_};
+ if (decoder_->error_.empty() ||
+ decoder_->error_.offset() > error.offset()) {
+ // Wrap the error message from the function decoder.
+ const WasmFunction& function = decoder_->module_->functions[func_index];
+ WasmFunctionName func_name{
+ &function,
+ ModuleWireBytes{decoder_->start_, decoder_->end_}.GetNameOrNull(
+ &function, decoder_->module_.get())};
+ std::ostringstream error_msg;
+ error_msg << "in function " << func_name << ": " << error.message();
+ decoder_->error_ = WasmError{error.offset(), error_msg.str()};
+ }
+ DCHECK(!decoder_->ok());
+ }
+
+ ModuleDecoderTemplate* decoder_;
+ base::Mutex set_error_mutex_;
+ std::atomic<int> next_function_;
+ const int after_last_function_;
+ };
};
} // namespace wasm
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index 2c70e2068d..21a5bf2f2f 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -177,9 +177,8 @@ void ModuleDecoder::DecodeSection(SectionCode section_code,
}
void ModuleDecoder::DecodeFunctionBody(uint32_t index, uint32_t length,
- uint32_t offset,
- bool validate_functions) {
- impl_->DecodeFunctionBody(index, length, offset, validate_functions);
+ uint32_t offset) {
+ impl_->DecodeFunctionBody(index, length, offset);
}
void ModuleDecoder::StartCodeSection(WireBytesRef section_bytes) {
diff --git a/deps/v8/src/wasm/module-decoder.h b/deps/v8/src/wasm/module-decoder.h
index c32fafa301..5123dc75be 100644
--- a/deps/v8/src/wasm/module-decoder.h
+++ b/deps/v8/src/wasm/module-decoder.h
@@ -155,8 +155,7 @@ class ModuleDecoder {
bool CheckFunctionsCount(uint32_t functions_count, uint32_t error_offset);
- void DecodeFunctionBody(uint32_t index, uint32_t size, uint32_t offset,
- bool verify_functions = true);
+ void DecodeFunctionBody(uint32_t index, uint32_t size, uint32_t offset);
ModuleResult FinishDecoding();
diff --git a/deps/v8/src/wasm/module-instantiate.cc b/deps/v8/src/wasm/module-instantiate.cc
index 2b6cd36ebb..d3dcda955c 100644
--- a/deps/v8/src/wasm/module-instantiate.cc
+++ b/deps/v8/src/wasm/module-instantiate.cc
@@ -732,25 +732,15 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
}
//--------------------------------------------------------------------------
- // Allocate type feedback vectors for functions.
+ // Allocate the array that will hold type feedback vectors.
//--------------------------------------------------------------------------
if (v8_flags.wasm_speculative_inlining) {
int num_functions = static_cast<int>(module_->num_declared_functions);
- Handle<FixedArray> vectors =
- isolate_->factory()->NewFixedArray(num_functions, AllocationType::kOld);
+ // Zero-fill the array so we can do a quick Smi-check to test if a given
+ // slot was initialized.
+ Handle<FixedArray> vectors = isolate_->factory()->NewFixedArrayWithZeroes(
+ num_functions, AllocationType::kOld);
instance->set_feedback_vectors(*vectors);
- for (int i = 0; i < num_functions; i++) {
- int func_index = module_->num_imported_functions + i;
- int slots = NumFeedbackSlots(module_, func_index);
- if (slots == 0) continue;
- if (v8_flags.trace_wasm_speculative_inlining) {
- PrintF("[Function %d (declared %d): allocating %d feedback slots]\n",
- func_index, i, slots);
- }
- Handle<FixedArray> feedback =
- isolate_->factory()->NewFixedArrayWithZeroes(slots);
- vectors->set(i, *feedback);
- }
}
//--------------------------------------------------------------------------
@@ -1128,7 +1118,7 @@ bool InstanceBuilder::ProcessImportedFunction(
// well as functions constructed via other means (e.g. WebAssembly.Function).
if (WasmExternalFunction::IsWasmExternalFunction(*value)) {
WasmInstanceObject::SetWasmInternalFunction(
- isolate_, instance, func_index,
+ instance, func_index,
WasmInternalFunction::FromExternal(
Handle<WasmExternalFunction>::cast(value), isolate_)
.ToHandleChecked());
@@ -1747,9 +1737,8 @@ bool InstanceBuilder::AllocateMemory() {
int maximum_pages = module_->has_maximum_pages
? static_cast<int>(module_->maximum_pages)
: WasmMemoryObject::kNoMaximum;
- auto shared = (module_->has_shared_memory && enabled_.has_threads())
- ? SharedFlag::kShared
- : SharedFlag::kNotShared;
+ auto shared =
+ module_->has_shared_memory ? SharedFlag::kShared : SharedFlag::kNotShared;
auto mem_type = module_->is_memory64 ? WasmMemoryFlag::kWasmMemory64
: WasmMemoryFlag::kWasmMemory32;
@@ -1781,7 +1770,7 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
Handle<Object> value = sanitized_imports_[index].value;
if (WasmExternalFunction::IsWasmExternalFunction(*value)) {
WasmInstanceObject::SetWasmInternalFunction(
- isolate_, instance, import.index,
+ instance, import.index,
WasmInternalFunction::FromExternal(
Handle<WasmExternalFunction>::cast(value), isolate_)
.ToHandleChecked());
diff --git a/deps/v8/src/wasm/names-provider.cc b/deps/v8/src/wasm/names-provider.cc
index ce80824eea..a5ee411ca4 100644
--- a/deps/v8/src/wasm/names-provider.cc
+++ b/deps/v8/src/wasm/names-provider.cc
@@ -319,26 +319,30 @@ void NamesProvider::PrintGlobalName(StringBuilder& out, uint32_t global_index,
}
void NamesProvider::PrintElementSegmentName(StringBuilder& out,
- uint32_t element_segment_index) {
+ uint32_t element_segment_index,
+ IndexAsComment index_as_comment) {
DecodeNamesIfNotYetDone();
WireBytesRef ref =
Get(name_section_names_->element_segment_names_, element_segment_index);
if (ref.is_set()) {
out << '$';
WriteRef(out, ref);
+ MaybeAddComment(out, element_segment_index, index_as_comment);
} else {
out << "$elem" << element_segment_index;
}
}
void NamesProvider::PrintDataSegmentName(StringBuilder& out,
- uint32_t data_segment_index) {
+ uint32_t data_segment_index,
+ IndexAsComment index_as_comment) {
DecodeNamesIfNotYetDone();
WireBytesRef ref =
Get(name_section_names_->data_segment_names_, data_segment_index);
if (ref.is_set()) {
out << '$';
WriteRef(out, ref);
+ MaybeAddComment(out, data_segment_index, index_as_comment);
} else {
out << "$data" << data_segment_index;
}
diff --git a/deps/v8/src/wasm/names-provider.h b/deps/v8/src/wasm/names-provider.h
index 14ba899cd8..eea9dda75c 100644
--- a/deps/v8/src/wasm/names-provider.h
+++ b/deps/v8/src/wasm/names-provider.h
@@ -56,9 +56,11 @@ class V8_EXPORT_PRIVATE NamesProvider {
IndexAsComment index_as_comment = kDontPrintIndex);
void PrintGlobalName(StringBuilder& out, uint32_t global_index,
IndexAsComment index_as_comment = kDontPrintIndex);
- void PrintElementSegmentName(StringBuilder& out,
- uint32_t element_segment_index);
- void PrintDataSegmentName(StringBuilder& out, uint32_t data_segment_index);
+ void PrintElementSegmentName(
+ StringBuilder& out, uint32_t element_segment_index,
+ IndexAsComment index_as_comment = kDontPrintIndex);
+ void PrintDataSegmentName(StringBuilder& out, uint32_t data_segment_index,
+ IndexAsComment index_as_comment = kDontPrintIndex);
void PrintFieldName(StringBuilder& out, uint32_t struct_index,
uint32_t field_index,
IndexAsComment index_as_comment = kDontPrintIndex);
diff --git a/deps/v8/src/wasm/pgo.cc b/deps/v8/src/wasm/pgo.cc
index 8d9069bee2..5f17cf1b15 100644
--- a/deps/v8/src/wasm/pgo.cc
+++ b/deps/v8/src/wasm/pgo.cc
@@ -9,17 +9,22 @@
namespace v8::internal::wasm {
+constexpr uint8_t kFunctionExecutedBit = 1 << 0;
+constexpr uint8_t kFunctionTieredUpBit = 1 << 1;
+
class ProfileGenerator {
public:
- ProfileGenerator(const WasmModule* module)
+ ProfileGenerator(const WasmModule* module,
+ const uint32_t* tiering_budget_array)
: module_(module),
- type_feedback_mutex_guard_(&module->type_feedback.mutex) {}
+ type_feedback_mutex_guard_(&module->type_feedback.mutex),
+ tiering_budget_array_(tiering_budget_array) {}
base::OwnedVector<uint8_t> GetProfileData() {
ZoneBuffer buffer{&zone_};
SerializeTypeFeedback(buffer);
- // TODO(13209): Serialize tiering information.
+ SerializeTieringInfo(buffer);
return base::OwnedVector<uint8_t>::Of(buffer);
}
@@ -34,7 +39,7 @@ class ProfileGenerator {
std::vector<uint32_t> ordered_function_indexes;
ordered_function_indexes.reserve(feedback_for_function.size());
for (const auto& entry : feedback_for_function) {
- // Skip functions for which we have to feedback.
+ // Skip functions for which we have no feedback.
if (entry.second.feedback_vector.empty()) continue;
ordered_function_indexes.push_back(entry.first);
}
@@ -64,11 +69,36 @@ class ProfileGenerator {
}
}
+ void SerializeTieringInfo(ZoneBuffer& buffer) {
+ std::unordered_map<uint32_t, FunctionTypeFeedback>& feedback_for_function =
+ module_->type_feedback.feedback_for_function;
+ const uint32_t initial_budget = v8_flags.wasm_tiering_budget;
+ for (uint32_t declared_index = 0;
+ declared_index < module_->num_declared_functions; ++declared_index) {
+ uint32_t func_index = declared_index + module_->num_imported_functions;
+ auto feedback_it = feedback_for_function.find(func_index);
+ int prio = feedback_it == feedback_for_function.end()
+ ? 0
+ : feedback_it->second.tierup_priority;
+ DCHECK_LE(0, prio);
+ uint32_t remaining_budget = tiering_budget_array_[declared_index];
+ DCHECK_GE(initial_budget, remaining_budget);
+
+ bool was_tiered_up = prio > 0;
+ bool was_executed = was_tiered_up || remaining_budget != initial_budget;
+
+ // TODO(13209): Make this less V8-specific for productionization.
+ buffer.write_u8((was_executed ? kFunctionExecutedBit : 0) |
+ (was_tiered_up ? kFunctionTieredUpBit : 0));
+ }
+ }
+
private:
const WasmModule* module_;
AccountingAllocator allocator_;
Zone zone_{&allocator_, "wasm::ProfileGenerator"};
base::MutexGuard type_feedback_mutex_guard_;
+ const uint32_t* const tiering_budget_array_;
};
void DeserializeTypeFeedback(Decoder& decoder, WasmModule* module) {
@@ -113,18 +143,42 @@ void DeserializeTypeFeedback(Decoder& decoder, WasmModule* module) {
}
}
-void RestoreProfileData(WasmModule* module,
- base::Vector<uint8_t> profile_data) {
+std::unique_ptr<ProfileInformation> DeserializeTieringInformation(
+ Decoder& decoder, WasmModule* module) {
+ std::vector<uint32_t> executed_functions;
+ std::vector<uint32_t> tiered_up_functions;
+ uint32_t start = module->num_imported_functions;
+ uint32_t end = start + module->num_declared_functions;
+ for (uint32_t func_index = start; func_index < end; ++func_index) {
+ uint8_t tiering_info = decoder.consume_u8("tiering info");
+ CHECK_EQ(0, tiering_info & ~3);
+ bool was_executed = tiering_info & kFunctionExecutedBit;
+ bool was_tiered_up = tiering_info & kFunctionTieredUpBit;
+ if (was_tiered_up) tiered_up_functions.push_back(func_index);
+ if (was_executed) executed_functions.push_back(func_index);
+ }
+
+ return std::make_unique<ProfileInformation>(std::move(executed_functions),
+ std::move(tiered_up_functions));
+}
+
+std::unique_ptr<ProfileInformation> RestoreProfileData(
+ WasmModule* module, base::Vector<uint8_t> profile_data) {
Decoder decoder{profile_data.begin(), profile_data.end()};
DeserializeTypeFeedback(decoder, module);
+ std::unique_ptr<ProfileInformation> pgo_info =
+ DeserializeTieringInformation(decoder, module);
CHECK(decoder.ok());
CHECK_EQ(decoder.pc(), decoder.end());
+
+ return pgo_info;
}
void DumpProfileToFile(const WasmModule* module,
- base::Vector<const uint8_t> wire_bytes) {
+ base::Vector<const uint8_t> wire_bytes,
+ uint32_t* tiering_budget_array) {
CHECK(!wire_bytes.empty());
// File are named `profile-wasm-<hash>`.
// We use the same hash as for reported scripts, to make it easier to
@@ -133,7 +187,7 @@ void DumpProfileToFile(const WasmModule* module,
base::EmbeddedVector<char, 32> filename;
SNPrintF(filename, "profile-wasm-%08x", hash);
- ProfileGenerator profile_generator{module};
+ ProfileGenerator profile_generator{module, tiering_budget_array};
base::OwnedVector<uint8_t> profile_data = profile_generator.GetProfileData();
PrintF("Dumping Wasm PGO data to file '%s' (%zu bytes)\n", filename.begin(),
@@ -145,8 +199,8 @@ void DumpProfileToFile(const WasmModule* module,
}
}
-void LoadProfileFromFile(WasmModule* module,
- base::Vector<const uint8_t> wire_bytes) {
+std::unique_ptr<ProfileInformation> LoadProfileFromFile(
+ WasmModule* module, base::Vector<const uint8_t> wire_bytes) {
CHECK(!wire_bytes.empty());
// File are named `profile-wasm-<hash>`.
// We use the same hash as for reported scripts, to make it easier to
@@ -158,7 +212,7 @@ void LoadProfileFromFile(WasmModule* module,
FILE* file = base::OS::FOpen(filename.begin(), "rb");
if (!file) {
PrintF("No Wasm PGO data found: Cannot open file '%s'\n", filename.begin());
- return;
+ return {};
}
fseek(file, 0, SEEK_END);
@@ -176,11 +230,7 @@ void LoadProfileFromFile(WasmModule* module,
base::Fclose(file);
- RestoreProfileData(module, profile_data.as_vector());
-
- // Check that the generated profile is deterministic.
- DCHECK_EQ(profile_data.as_vector(),
- ProfileGenerator{module}.GetProfileData().as_vector());
+ return RestoreProfileData(module, profile_data.as_vector());
}
} // namespace v8::internal::wasm
diff --git a/deps/v8/src/wasm/pgo.h b/deps/v8/src/wasm/pgo.h
index 31a2269896..082a08747a 100644
--- a/deps/v8/src/wasm/pgo.h
+++ b/deps/v8/src/wasm/pgo.h
@@ -9,17 +9,43 @@
#ifndef V8_WASM_PGO_H_
#define V8_WASM_PGO_H_
+#include <vector>
+
#include "src/base/vector.h"
namespace v8::internal::wasm {
struct WasmModule;
+class ProfileInformation {
+ public:
+ ProfileInformation(std::vector<uint32_t> executed_functions,
+ std::vector<uint32_t> tiered_up_functions)
+ : executed_functions_(std::move(executed_functions)),
+ tiered_up_functions_(std::move(tiered_up_functions)) {}
+
+ // Disallow copying (not needed, so most probably a bug).
+ ProfileInformation(const ProfileInformation&) = delete;
+ ProfileInformation& operator=(const ProfileInformation&) = delete;
+
+ base::Vector<const uint32_t> executed_functions() const {
+ return base::VectorOf(executed_functions_);
+ }
+ base::Vector<const uint32_t> tiered_up_functions() const {
+ return base::VectorOf(tiered_up_functions_);
+ }
+
+ private:
+ const std::vector<uint32_t> executed_functions_;
+ const std::vector<uint32_t> tiered_up_functions_;
+};
+
void DumpProfileToFile(const WasmModule* module,
- base::Vector<const uint8_t> wire_bytes);
+ base::Vector<const uint8_t> wire_bytes,
+ uint32_t* tiering_budget_array);
-void LoadProfileFromFile(WasmModule* module,
- base::Vector<const uint8_t> wire_bytes);
+V8_WARN_UNUSED_RESULT std::unique_ptr<ProfileInformation> LoadProfileFromFile(
+ WasmModule* module, base::Vector<const uint8_t> wire_bytes);
} // namespace v8::internal::wasm
diff --git a/deps/v8/src/wasm/string-builder-multiline.h b/deps/v8/src/wasm/string-builder-multiline.h
index 5a8704a0b5..619b8bdfdb 100644
--- a/deps/v8/src/wasm/string-builder-multiline.h
+++ b/deps/v8/src/wasm/string-builder-multiline.h
@@ -92,7 +92,7 @@ class MultiLineStringBuilder : public StringBuilder {
// Write the unfinished line into its new location.
start_here();
char* new_location = allocate(unfinished_length);
- memcpy(new_location, unfinished_start, unfinished_length);
+ memmove(new_location, unfinished_start, unfinished_length);
if (label_source >= unfinished_start &&
label_source < unfinished_start + unfinished_length) {
label_source = new_location + (label_source - unfinished_start);
@@ -137,6 +137,8 @@ class MultiLineStringBuilder : public StringBuilder {
out.write(last_start, len);
}
+ size_t ApproximateSizeMB() { return approximate_size_mb(); }
+
private:
struct Line {
Line(const char* d, size_t length, uint32_t bytecode_offset)
diff --git a/deps/v8/src/wasm/string-builder.h b/deps/v8/src/wasm/string-builder.h
index 903414076b..804494d078 100644
--- a/deps/v8/src/wasm/string-builder.h
+++ b/deps/v8/src/wasm/string-builder.h
@@ -70,6 +70,11 @@ class StringBuilder {
explicit StringBuilder(OnGrowth on_growth) : on_growth_(on_growth) {}
void start_here() { start_ = cursor_; }
+ size_t approximate_size_mb() {
+ static_assert(kChunkSize == size_t{MB});
+ return chunks_.size();
+ }
+
private:
void Grow(size_t requested) {
size_t used = length();
diff --git a/deps/v8/src/wasm/value-type.h b/deps/v8/src/wasm/value-type.h
index da001f037a..22281a0e6f 100644
--- a/deps/v8/src/wasm/value-type.h
+++ b/deps/v8/src/wasm/value-type.h
@@ -62,7 +62,7 @@ class HeapType {
kFunc = kV8MaxWasmTypes, // shorthand: c
kEq, // shorthand: q
kI31, // shorthand: j
- kData, // shorthand: o
+ kStruct, // shorthand: o
kArray, // shorthand: g
kAny, //
kExtern, // shorthand: a.
@@ -90,8 +90,8 @@ class HeapType {
return HeapType(kAny);
case ValueTypeCode::kExternRefCode:
return HeapType(kExtern);
- case ValueTypeCode::kDataRefCode:
- return HeapType(kData);
+ case ValueTypeCode::kStructRefCode:
+ return HeapType(kStruct);
case ValueTypeCode::kArrayRefCode:
return HeapType(kArray);
case ValueTypeCode::kStringRefCode:
@@ -156,8 +156,8 @@ class HeapType {
return std::string("eq");
case kI31:
return std::string("i31");
- case kData:
- return std::string("data");
+ case kStruct:
+ return std::string("struct");
case kArray:
return std::string("array");
case kExtern:
@@ -195,8 +195,8 @@ class HeapType {
return mask | kEqRefCode;
case kI31:
return mask | kI31RefCode;
- case kData:
- return mask | kDataRefCode;
+ case kStruct:
+ return mask | kStructRefCode;
case kArray:
return mask | kArrayRefCode;
case kExtern:
@@ -402,6 +402,7 @@ class ValueType {
}
/******************************** Type checks *******************************/
+ // Includes s128.
constexpr bool is_numeric() const { return wasm::is_numeric(kind()); }
constexpr bool is_reference() const { return wasm::is_reference(kind()); }
@@ -428,6 +429,14 @@ class ValueType {
constexpr bool is_bottom() const { return kind() == kBottom; }
+ // These can occur as the result of type propagation, but never in
+ // reachable control flow.
+ constexpr bool is_uninhabited() const {
+ return is_non_nullable() && (is_reference_to(HeapType::kNone) ||
+ is_reference_to(HeapType::kNoExtern) ||
+ is_reference_to(HeapType::kNoFunc));
+ }
+
constexpr bool is_packed() const { return wasm::is_packed(kind()); }
constexpr ValueType Unpacked() const {
@@ -436,12 +445,6 @@ class ValueType {
// If {this} is (ref null $t), returns (ref $t). Otherwise, returns {this}.
constexpr ValueType AsNonNull() const {
- if (is_reference_to(HeapType::kNone) ||
- is_reference_to(HeapType::kNoExtern) ||
- is_reference_to(HeapType::kNoFunc)) {
- // Non-null none type is not a valid type.
- return ValueType::Primitive(kBottom);
- }
return is_nullable() ? Ref(heap_type()) : *this;
}
@@ -550,8 +553,8 @@ class ValueType {
return kAnyRefCode;
case HeapType::kI31:
return kI31RefCode;
- case HeapType::kData:
- return kDataRefCode;
+ case HeapType::kStruct:
+ return kStructRefCode;
case HeapType::kArray:
return kArrayRefCode;
case HeapType::kString:
@@ -702,7 +705,7 @@ constexpr ValueType kWasmAnyRef = ValueType::RefNull(HeapType::kAny);
constexpr ValueType kWasmExternRef = ValueType::RefNull(HeapType::kExtern);
constexpr ValueType kWasmEqRef = ValueType::RefNull(HeapType::kEq);
constexpr ValueType kWasmI31Ref = ValueType::RefNull(HeapType::kI31);
-constexpr ValueType kWasmDataRef = ValueType::RefNull(HeapType::kData);
+constexpr ValueType kWasmStructRef = ValueType::RefNull(HeapType::kStruct);
constexpr ValueType kWasmArrayRef = ValueType::RefNull(HeapType::kArray);
constexpr ValueType kWasmStringRef = ValueType::RefNull(HeapType::kString);
constexpr ValueType kWasmStringViewWtf8 =
diff --git a/deps/v8/src/wasm/wasm-arguments.h b/deps/v8/src/wasm/wasm-arguments.h
index 305d7cc361..b123287ed8 100644
--- a/deps/v8/src/wasm/wasm-arguments.h
+++ b/deps/v8/src/wasm/wasm-arguments.h
@@ -52,11 +52,11 @@ class CWasmArgumentsPacker {
static int TotalSize(const FunctionSig* sig) {
int return_size = 0;
for (ValueType t : sig->returns()) {
- return_size += t.value_kind_size();
+ return_size += t.value_kind_full_size();
}
int param_size = 0;
for (ValueType t : sig->parameters()) {
- param_size += t.value_kind_size();
+ param_size += t.value_kind_full_size();
}
return std::max(return_size, param_size);
}
diff --git a/deps/v8/src/wasm/wasm-code-manager.cc b/deps/v8/src/wasm/wasm-code-manager.cc
index 6f9bd745a9..845384606d 100644
--- a/deps/v8/src/wasm/wasm-code-manager.cc
+++ b/deps/v8/src/wasm/wasm-code-manager.cc
@@ -785,7 +785,6 @@ base::Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
}
}
DCHECK(IsAligned(code_space.begin(), kCodeAlignment));
- allocated_code_space_.Merge(code_space);
generated_code_size_.fetch_add(code_space.size(), std::memory_order_relaxed);
TRACE_HEAP("Code alloc for %p: 0x%" PRIxPTR ",+%zu\n", this,
@@ -1887,8 +1886,8 @@ NativeModule::~NativeModule() {
import_wrapper_cache_.reset();
// If experimental PGO support is enabled, serialize the PGO data now.
- if (V8_UNLIKELY(FLAG_experimental_wasm_pgo_to_file)) {
- DumpProfileToFile(module_.get(), wire_bytes());
+ if (V8_UNLIKELY(v8_flags.experimental_wasm_pgo_to_file)) {
+ DumpProfileToFile(module_.get(), wire_bytes(), tiering_budgets_.get());
}
}
diff --git a/deps/v8/src/wasm/wasm-code-manager.h b/deps/v8/src/wasm/wasm-code-manager.h
index 4b64300a8a..5dc13960f1 100644
--- a/deps/v8/src/wasm/wasm-code-manager.h
+++ b/deps/v8/src/wasm/wasm-code-manager.h
@@ -54,6 +54,7 @@ struct WasmModule;
FOREACH_WASM_TRAPREASON(VTRAP) \
V(WasmCompileLazy) \
V(WasmTriggerTierUp) \
+ V(WasmLiftoffFrameSetup) \
V(WasmDebugBreak) \
V(WasmInt32ToHeapNumber) \
V(WasmTaggedNonSmiToInt32) \
@@ -62,10 +63,8 @@ struct WasmModule;
V(WasmTaggedToFloat64) \
V(WasmAllocateJSArray) \
V(WasmAtomicNotify) \
- V(WasmI32AtomicWait32) \
- V(WasmI32AtomicWait64) \
- V(WasmI64AtomicWait32) \
- V(WasmI64AtomicWait64) \
+ V(WasmI32AtomicWait) \
+ V(WasmI64AtomicWait) \
V(WasmGetOwnProperty) \
V(WasmRefFunc) \
V(WasmMemoryGrow) \
@@ -596,8 +595,6 @@ class WasmCodeAllocator {
// Code space that was reserved and is available for allocations (subset of
// {owned_code_space_}).
DisjointAllocationPool free_code_space_;
- // Code space that was allocated for code (subset of {owned_code_space_}).
- DisjointAllocationPool allocated_code_space_;
// Code space that was allocated before but is dead now. Full pages within
// this region are discarded. It's still a subset of {owned_code_space_}.
DisjointAllocationPool freed_code_space_;
diff --git a/deps/v8/src/wasm/wasm-constants.h b/deps/v8/src/wasm/wasm-constants.h
index 316694ec59..8d40b4646f 100644
--- a/deps/v8/src/wasm/wasm-constants.h
+++ b/deps/v8/src/wasm/wasm-constants.h
@@ -48,7 +48,7 @@ enum ValueTypeCode : uint8_t {
kRefNullCode = 0x6c,
kRefCode = 0x6b,
kI31RefCode = 0x6a,
- kDataRefCode = 0x67,
+ kStructRefCode = 0x67,
kArrayRefCode = 0x66,
kNoneCode = 0x65,
kStringRefCode = 0x64,
@@ -74,12 +74,14 @@ enum ImportExportKindCode : uint8_t {
};
enum LimitsFlags : uint8_t {
- kNoMaximum = 0x00, // Also valid for table limits.
- kWithMaximum = 0x01, // Also valid for table limits.
- kSharedNoMaximum = 0x02, // Only valid for memory limits.
- kSharedWithMaximum = 0x03, // Only valid for memory limits.
- kMemory64NoMaximum = 0x04, // Only valid for memory limits.
- kMemory64WithMaximum = 0x05 // Only valid for memory limits.
+ kNoMaximum = 0x00, // Also valid for table limits.
+ kWithMaximum = 0x01, // Also valid for table limits.
+ kSharedNoMaximum = 0x02, // Only valid for memory limits.
+ kSharedWithMaximum = 0x03, // Only valid for memory limits.
+ kMemory64NoMaximum = 0x04, // Only valid for memory limits.
+ kMemory64WithMaximum = 0x05, // Only valid for memory limits.
+ kMemory64SharedNoMaximum = 0x06, // Only valid for memory limits.
+ kMemory64SharedWithMaximum = 0x07 // Only valid for memory limits.
};
// Flags for data and element segments.
diff --git a/deps/v8/src/wasm/wasm-disassembler-impl.h b/deps/v8/src/wasm/wasm-disassembler-impl.h
index 3530969218..4b84171aec 100644
--- a/deps/v8/src/wasm/wasm-disassembler-impl.h
+++ b/deps/v8/src/wasm/wasm-disassembler-impl.h
@@ -21,7 +21,7 @@ namespace v8 {
namespace internal {
namespace wasm {
-template <Decoder::ValidateFlag validate>
+template <typename ValidationTag>
class ImmediatesPrinter;
using IndexAsComment = NamesProvider::IndexAsComment;
@@ -74,9 +74,9 @@ class OffsetsProvider;
// FunctionBodyDisassembler.
class V8_EXPORT_PRIVATE FunctionBodyDisassembler
- : public WasmDecoder<Decoder::kFullValidation> {
+ : public WasmDecoder<Decoder::FullValidationTag> {
public:
- static constexpr Decoder::ValidateFlag validate = Decoder::kFullValidation;
+ using ValidationTag = Decoder::FullValidationTag;
enum FunctionHeader : bool { kSkipHeader = false, kPrintHeader = true };
FunctionBodyDisassembler(Zone* zone, const WasmModule* module,
@@ -84,8 +84,8 @@ class V8_EXPORT_PRIVATE FunctionBodyDisassembler
const FunctionSig* sig, const byte* start,
const byte* end, uint32_t offset,
NamesProvider* names)
- : WasmDecoder<validate>(zone, module, WasmFeatures::All(), detected, sig,
- start, end, offset),
+ : WasmDecoder<ValidationTag>(zone, module, WasmFeatures::All(), detected,
+ sig, start, end, offset),
func_index_(func_index),
names_(names) {}
@@ -108,7 +108,7 @@ class V8_EXPORT_PRIVATE FunctionBodyDisassembler
return label_stack_[label_stack_.size() - 1 - depth];
}
- friend class ImmediatesPrinter<validate>;
+ friend class ImmediatesPrinter<ValidationTag>;
uint32_t func_index_;
WasmOpcode current_opcode_ = kExprUnreachable;
NamesProvider* names_;
@@ -141,7 +141,7 @@ class ModuleDisassembler {
V8_EXPORT_PRIVATE void PrintTypeDefinition(uint32_t type_index,
Indentation indendation,
IndexAsComment index_as_comment);
- V8_EXPORT_PRIVATE void PrintModule(Indentation indentation);
+ V8_EXPORT_PRIVATE void PrintModule(Indentation indentation, size_t max_mb);
private:
void PrintImportName(const WasmImport& import);
diff --git a/deps/v8/src/wasm/wasm-disassembler.cc b/deps/v8/src/wasm/wasm-disassembler.cc
index 3504b6d998..3ab0805178 100644
--- a/deps/v8/src/wasm/wasm-disassembler.cc
+++ b/deps/v8/src/wasm/wasm-disassembler.cc
@@ -26,7 +26,7 @@ void Disassemble(const WasmModule* module, ModuleWireBytes wire_bytes,
AccountingAllocator allocator;
ModuleDisassembler md(out, module, names, wire_bytes, &allocator,
function_body_offsets);
- md.PrintModule({0, 2});
+ md.PrintModule({0, 2}, v8_flags.wasm_disassembly_max_mb);
out.ToDisassemblyCollector(collector);
}
@@ -255,7 +255,7 @@ WasmOpcode FunctionBodyDisassembler::GetOpcode() {
WasmOpcode opcode = static_cast<WasmOpcode>(*pc_);
if (!WasmOpcodes::IsPrefixOpcode(opcode)) return opcode;
uint32_t opcode_length;
- return read_prefixed_opcode<validate>(pc_, &opcode_length);
+ return read_prefixed_opcode<ValidationTag>(pc_, &opcode_length);
}
void FunctionBodyDisassembler::PrintHexNumber(StringBuilder& out,
@@ -278,7 +278,7 @@ void FunctionBodyDisassembler::PrintHexNumber(StringBuilder& out,
////////////////////////////////////////////////////////////////////////////////
// ImmediatesPrinter.
-template <Decoder::ValidateFlag validate>
+template <typename ValidationTag>
class ImmediatesPrinter {
public:
ImmediatesPrinter(StringBuilder& out, FunctionBodyDisassembler* owner)
@@ -309,7 +309,7 @@ class ImmediatesPrinter {
owner_->out_->PatchLabel(label_info, out_.start() + label_start_position);
}
- void BlockType(BlockTypeImmediate<validate>& imm) {
+ void BlockType(BlockTypeImmediate& imm) {
if (imm.type == kWasmBottom) {
const FunctionSig* sig = owner_->module_->signature(imm.sig_index);
PrintSignatureOneLine(out_, sig, 0 /* ignored */, names(), false);
@@ -322,95 +322,91 @@ class ImmediatesPrinter {
}
}
- void HeapType(HeapTypeImmediate<validate>& imm) {
+ void HeapType(HeapTypeImmediate& imm) {
out_ << " ";
names()->PrintHeapType(out_, imm.type);
if (imm.type.is_index()) use_type(imm.type.ref_index());
}
- void BranchDepth(BranchDepthImmediate<validate>& imm) {
- PrintDepthAsLabel(imm.depth);
- }
+ void BranchDepth(BranchDepthImmediate& imm) { PrintDepthAsLabel(imm.depth); }
- void BranchTable(BranchTableImmediate<validate>& imm) {
+ void BranchTable(BranchTableImmediate& imm) {
const byte* pc = imm.table;
for (uint32_t i = 0; i <= imm.table_count; i++) {
uint32_t length;
- uint32_t target = owner_->read_u32v<validate>(pc, &length);
+ uint32_t target = owner_->read_u32v<ValidationTag>(pc, &length);
PrintDepthAsLabel(target);
pc += length;
}
}
- void CallIndirect(CallIndirectImmediate<validate>& imm) {
+ void CallIndirect(CallIndirectImmediate& imm) {
const FunctionSig* sig = owner_->module_->signature(imm.sig_imm.index);
PrintSignatureOneLine(out_, sig, 0 /* ignored */, names(), false);
if (imm.table_imm.index != 0) TableIndex(imm.table_imm);
}
- void SelectType(SelectTypeImmediate<validate>& imm) {
+ void SelectType(SelectTypeImmediate& imm) {
out_ << " ";
names()->PrintValueType(out_, imm.type);
}
- void MemoryAccess(MemoryAccessImmediate<validate>& imm) {
+ void MemoryAccess(MemoryAccessImmediate& imm) {
if (imm.offset != 0) out_ << " offset=" << imm.offset;
if (imm.alignment != GetDefaultAlignment(owner_->current_opcode_)) {
out_ << " align=" << (1u << imm.alignment);
}
}
- void SimdLane(SimdLaneImmediate<validate>& imm) {
- out_ << " " << uint32_t{imm.lane};
- }
+ void SimdLane(SimdLaneImmediate& imm) { out_ << " " << uint32_t{imm.lane}; }
- void Field(FieldImmediate<validate>& imm) {
+ void Field(FieldImmediate& imm) {
TypeIndex(imm.struct_imm);
out_ << " ";
names()->PrintFieldName(out_, imm.struct_imm.index, imm.field_imm.index);
}
- void Length(IndexImmediate<validate>& imm) {
+ void Length(IndexImmediate& imm) {
out_ << " " << imm.index; // --
}
- void TagIndex(TagIndexImmediate<validate>& imm) {
+ void TagIndex(TagIndexImmediate& imm) {
out_ << " ";
names()->PrintTagName(out_, imm.index);
}
- void FunctionIndex(IndexImmediate<validate>& imm) {
+ void FunctionIndex(IndexImmediate& imm) {
out_ << " ";
names()->PrintFunctionName(out_, imm.index, NamesProvider::kDevTools);
}
- void TypeIndex(IndexImmediate<validate>& imm) {
+ void TypeIndex(IndexImmediate& imm) {
out_ << " ";
names()->PrintTypeName(out_, imm.index);
use_type(imm.index);
}
- void LocalIndex(IndexImmediate<validate>& imm) {
+ void LocalIndex(IndexImmediate& imm) {
out_ << " ";
names()->PrintLocalName(out_, func_index(), imm.index);
}
- void GlobalIndex(IndexImmediate<validate>& imm) {
+ void GlobalIndex(IndexImmediate& imm) {
out_ << " ";
names()->PrintGlobalName(out_, imm.index);
}
- void TableIndex(IndexImmediate<validate>& imm) {
+ void TableIndex(IndexImmediate& imm) {
out_ << " ";
names()->PrintTableName(out_, imm.index);
}
- void MemoryIndex(MemoryIndexImmediate<validate>& imm) {
+ void MemoryIndex(MemoryIndexImmediate& imm) {
if (imm.index == 0) return;
out_ << " " << imm.index;
}
- void DataSegmentIndex(IndexImmediate<validate>& imm) {
+ void DataSegmentIndex(IndexImmediate& imm) {
if (kSkipDataSegmentNames) {
out_ << " " << imm.index;
} else {
@@ -419,16 +415,16 @@ class ImmediatesPrinter {
}
}
- void ElemSegmentIndex(IndexImmediate<validate>& imm) {
+ void ElemSegmentIndex(IndexImmediate& imm) {
out_ << " ";
names()->PrintElementSegmentName(out_, imm.index);
}
- void I32Const(ImmI32Immediate<validate>& imm) {
+ void I32Const(ImmI32Immediate& imm) {
out_ << " " << imm.value; // --
}
- void I64Const(ImmI64Immediate<validate>& imm) {
+ void I64Const(ImmI64Immediate& imm) {
if (imm.value >= 0) {
out_ << " " << static_cast<uint64_t>(imm.value);
} else {
@@ -436,10 +432,12 @@ class ImmediatesPrinter {
}
}
- void F32Const(ImmF32Immediate<validate>& imm) {
+ void F32Const(ImmF32Immediate& imm) {
float f = imm.value;
if (f == 0) {
out_ << (1 / f < 0 ? " -0.0" : " 0.0");
+ } else if (std::isinf(f)) {
+ out_ << (f > 0 ? " inf" : " -inf");
} else if (std::isnan(f)) {
uint32_t bits = base::bit_cast<uint32_t>(f);
uint32_t payload = bits & 0x7F'FFFFu;
@@ -452,12 +450,15 @@ class ImmediatesPrinter {
}
} else {
std::ostringstream o;
- o << std::setprecision(std::numeric_limits<float>::digits10 + 1) << f;
+ // TODO(dlehmann): Change to `std::format` (C++20) or to `std::to_chars`
+ // (C++17) once available, so that `0.1` isn't printed as `0.100000001`
+ // any more.
+ o << std::setprecision(std::numeric_limits<float>::max_digits10) << f;
out_ << " " << o.str();
}
}
- void F64Const(ImmF64Immediate<validate>& imm) {
+ void F64Const(ImmF64Immediate& imm) {
double d = imm.value;
if (d == 0) {
out_ << (1 / d < 0 ? " -0.0" : " 0.0");
@@ -480,7 +481,7 @@ class ImmediatesPrinter {
}
}
- void S128Const(Simd128Immediate<validate>& imm) {
+ void S128Const(Simd128Immediate& imm) {
if (owner_->current_opcode_ == kExprI8x16Shuffle) {
for (int i = 0; i < 16; i++) {
out_ << " " << uint32_t{imm.value[i]};
@@ -499,28 +500,28 @@ class ImmediatesPrinter {
}
}
- void StringConst(StringConstImmediate<validate>& imm) {
+ void StringConst(StringConstImmediate& imm) {
// TODO(jkummerow): Print (a prefix of) the string?
out_ << " " << imm.index;
}
- void MemoryInit(MemoryInitImmediate<validate>& imm) {
+ void MemoryInit(MemoryInitImmediate& imm) {
DataSegmentIndex(imm.data_segment);
if (imm.memory.index != 0) out_ << " " << uint32_t{imm.memory.index};
}
- void MemoryCopy(MemoryCopyImmediate<validate>& imm) {
+ void MemoryCopy(MemoryCopyImmediate& imm) {
if (imm.memory_dst.index == 0 && imm.memory_src.index == 0) return;
out_ << " " << uint32_t{imm.memory_dst.index};
out_ << " " << uint32_t{imm.memory_src.index};
}
- void TableInit(TableInitImmediate<validate>& imm) {
+ void TableInit(TableInitImmediate& imm) {
if (imm.table.index != 0) TableIndex(imm.table);
ElemSegmentIndex(imm.element_segment);
}
- void TableCopy(TableCopyImmediate<validate>& imm) {
+ void TableCopy(TableCopyImmediate& imm) {
if (imm.table_dst.index == 0 && imm.table_src.index == 0) return;
out_ << " ";
names()->PrintTableName(out_, imm.table_dst.index);
@@ -528,7 +529,7 @@ class ImmediatesPrinter {
names()->PrintTableName(out_, imm.table_src.index);
}
- void ArrayCopy(IndexImmediate<validate>& dst, IndexImmediate<validate>& src) {
+ void ArrayCopy(IndexImmediate& dst, IndexImmediate& src) {
out_ << " ";
names()->PrintTypeName(out_, dst.index);
out_ << " ";
@@ -550,9 +551,9 @@ class ImmediatesPrinter {
uint32_t FunctionBodyDisassembler::PrintImmediatesAndGetLength(
StringBuilder& out) {
- using Printer = ImmediatesPrinter<validate>;
+ using Printer = ImmediatesPrinter<ValidationTag>;
Printer imm_printer(out, this);
- return WasmDecoder::OpcodeLength<Printer>(this, this->pc_, &imm_printer);
+ return WasmDecoder::OpcodeLength<Printer>(this, this->pc_, imm_printer);
}
////////////////////////////////////////////////////////////////////////////////
@@ -564,11 +565,14 @@ class OffsetsProvider {
void CollectOffsets(const WasmModule* module, const byte* start,
const byte* end, AccountingAllocator* allocator) {
+ num_imported_tables_ = module->num_imported_tables;
+ num_imported_globals_ = module->num_imported_globals;
+ num_imported_tags_ = module->num_imported_tags;
type_offsets_.reserve(module->types.size());
import_offsets_.reserve(module->import_table.size());
- table_offsets_.reserve(module->tables.size());
- tag_offsets_.reserve(module->tags.size());
- global_offsets_.reserve(module->globals.size());
+ table_offsets_.reserve(module->tables.size() - num_imported_tables_);
+ tag_offsets_.reserve(module->tags.size() - num_imported_tags_);
+ global_offsets_.reserve(module->globals.size() - num_imported_globals_);
element_offsets_.reserve(module->elem_segments.size());
data_offsets_.reserve(module->data_segments.size());
@@ -624,19 +628,31 @@ class OffsetsProvider {
}
GETTER(type)
GETTER(import)
- GETTER(table)
- GETTER(tag)
- GETTER(global)
GETTER(element)
GETTER(data)
#undef GETTER
+#define IMPORT_ADJUSTED_GETTER(name) \
+ uint32_t name##_offset(uint32_t index) { \
+ if (!enabled_) return 0; \
+ DCHECK(index >= num_imported_##name##s_ && \
+ index - num_imported_##name##s_ < name##_offsets_.size()); \
+ return name##_offsets_[index - num_imported_##name##s_]; \
+ }
+ IMPORT_ADJUSTED_GETTER(table)
+ IMPORT_ADJUSTED_GETTER(tag)
+ IMPORT_ADJUSTED_GETTER(global)
+#undef IMPORT_ADJUSTED_GETTER
+
uint32_t memory_offset() { return memory_offset_; }
uint32_t start_offset() { return start_offset_; }
private:
bool enabled_{false};
+ uint32_t num_imported_tables_{0};
+ uint32_t num_imported_globals_{0};
+ uint32_t num_imported_tags_{0};
std::vector<uint32_t> type_offsets_;
std::vector<uint32_t> import_offsets_;
std::vector<uint32_t> table_offsets_;
@@ -736,12 +752,11 @@ void ModuleDisassembler::PrintTypeDefinition(uint32_t type_index,
}
}
-void ModuleDisassembler::PrintModule(Indentation indentation) {
+void ModuleDisassembler::PrintModule(Indentation indentation, size_t max_mb) {
// 0. General infrastructure.
// We don't store import/export information on {WasmTag} currently.
size_t num_tags = module_->tags.size();
std::vector<bool> exported_tags(num_tags, false);
- std::vector<bool> imported_tags(num_tags, false);
for (const WasmExport& ex : module_->export_table) {
if (ex.kind == kExternalTag) exported_tags[ex.index] = true;
}
@@ -816,16 +831,16 @@ void ModuleDisassembler::PrintModule(Indentation indentation) {
PrintExportName(kExternalTag, import.index);
}
PrintTagSignature(module_->tags[import.index].sig);
- imported_tags[import.index] = true;
break;
}
out_ << ")";
}
// IV. Tables
- for (uint32_t i = 0; i < module_->tables.size(); i++) {
+ for (uint32_t i = module_->num_imported_tables; i < module_->tables.size();
+ i++) {
const WasmTable& table = module_->tables[i];
- if (table.imported) continue;
+ DCHECK(!table.imported);
out_.NextLine(offsets_->table_offset(i));
out_ << indentation << "(table ";
names_->PrintTableName(out_, i, kIndicesAsComments);
@@ -849,8 +864,7 @@ void ModuleDisassembler::PrintModule(Indentation indentation) {
}
// VI.Tags
- for (uint32_t i = 0; i < module_->tags.size(); i++) {
- if (imported_tags[i]) continue;
+ for (uint32_t i = module_->num_imported_tags; i < module_->tags.size(); i++) {
const WasmTag& tag = module_->tags[i];
out_.NextLine(offsets_->tag_offset(i));
out_ << indentation << "(tag ";
@@ -864,9 +878,10 @@ void ModuleDisassembler::PrintModule(Indentation indentation) {
// TODO(jkummerow/12868): Implement.
// VIII. Globals
- for (uint32_t i = 0; i < module_->globals.size(); i++) {
+ for (uint32_t i = module_->num_imported_globals; i < module_->globals.size();
+ i++) {
const WasmGlobal& global = module_->globals[i];
- if (global.imported) continue;
+ DCHECK(!global.imported);
out_.NextLine(offsets_->global_offset(i));
out_ << indentation << "(global ";
names_->PrintGlobalName(out_, i, kIndicesAsComments);
@@ -890,7 +905,7 @@ void ModuleDisassembler::PrintModule(Indentation indentation) {
const WasmElemSegment& elem = module_->elem_segments[i];
out_.NextLine(offsets_->element_offset(i));
out_ << indentation << "(elem ";
- names_->PrintElementSegmentName(out_, i);
+ names_->PrintElementSegmentName(out_, i, kIndicesAsComments);
if (elem.status == WasmElemSegment::kStatusDeclarative) {
out_ << " declare";
} else if (elem.status == WasmElemSegment::kStatusActive) {
@@ -941,6 +956,10 @@ void ModuleDisassembler::PrintModule(Indentation indentation) {
function_body_offsets_->push_back(first_instruction_offset);
function_body_offsets_->push_back(d.pc_offset());
}
+ if (out_.ApproximateSizeMB() > max_mb) {
+ out_ << "<truncated...>";
+ return;
+ }
}
// XII. Data
@@ -950,7 +969,7 @@ void ModuleDisassembler::PrintModule(Indentation indentation) {
out_ << indentation << "(data";
if (!kSkipDataSegmentNames) {
out_ << " ";
- names_->PrintDataSegmentName(out_, i);
+ names_->PrintDataSegmentName(out_, i, kIndicesAsComments);
}
if (data.active) {
ValueType type = module_->is_memory64 ? kWasmI64 : kWasmI32;
@@ -960,6 +979,11 @@ void ModuleDisassembler::PrintModule(Indentation indentation) {
PrintString(data.source);
out_ << "\")";
out_.NextLine(0);
+
+ if (out_.ApproximateSizeMB() > max_mb) {
+ out_ << "<truncated...>";
+ return;
+ }
}
indentation.decrease();
diff --git a/deps/v8/src/wasm/wasm-engine.cc b/deps/v8/src/wasm/wasm-engine.cc
index 7fc49ed058..065794d381 100644
--- a/deps/v8/src/wasm/wasm-engine.cc
+++ b/deps/v8/src/wasm/wasm-engine.cc
@@ -511,9 +511,10 @@ MaybeHandle<AsmWasmData> WasmEngine::SyncCompileTranslatedAsmJs(
// Transfer ownership of the WasmModule to the {Managed<WasmModule>} generated
// in {CompileToNativeModule}.
+ constexpr ProfileInformation* kNoProfileInformation = nullptr;
std::shared_ptr<NativeModule> native_module = CompileToNativeModule(
isolate, WasmFeatures::ForAsmjs(), thrower, std::move(result).value(),
- bytes, compilation_id, context_id);
+ bytes, compilation_id, context_id, kNoProfileInformation);
if (!native_module) return {};
return AsmWasmData::New(isolate, std::move(native_module), uses_bitset);
@@ -550,15 +551,16 @@ MaybeHandle<WasmModuleObject> WasmEngine::SyncCompile(
}
// If experimental PGO via files is enabled, load profile information now.
- if (V8_UNLIKELY(FLAG_experimental_wasm_pgo_from_file)) {
- LoadProfileFromFile(module.get(), bytes.module_bytes());
+ std::unique_ptr<ProfileInformation> pgo_info;
+ if (V8_UNLIKELY(v8_flags.experimental_wasm_pgo_from_file)) {
+ pgo_info = LoadProfileFromFile(module.get(), bytes.module_bytes());
}
// Transfer ownership of the WasmModule to the {Managed<WasmModule>} generated
// in {CompileToNativeModule}.
std::shared_ptr<NativeModule> native_module =
CompileToNativeModule(isolate, enabled, thrower, std::move(module), bytes,
- compilation_id, context_id);
+ compilation_id, context_id, pgo_info.get());
if (!native_module) return {};
#ifdef DEBUG
@@ -1018,6 +1020,14 @@ void WasmEngine::AddIsolate(Isolate* isolate) {
DCHECK_EQ(0, isolates_.count(isolate));
isolates_.emplace(isolate, std::make_unique<IsolateInfo>(isolate));
+#if defined(V8_COMPRESS_POINTERS)
+ // The null value is not accessible on mksnapshot runs.
+ if (isolate->snapshot_available()) {
+ null_tagged_compressed_ = V8HeapCompressionScheme::CompressTagged(
+ isolate->factory()->null_value()->ptr());
+ }
+#endif
+
// Install sampling GC callback.
// TODO(v8:7424): For now we sample module sizes in a GC callback. This will
// bias samples towards apps with high memory pressure. We should switch to
@@ -1083,37 +1093,55 @@ void WasmEngine::RemoveIsolate(Isolate* isolate) {
void WasmEngine::LogCode(base::Vector<WasmCode*> code_vec) {
if (code_vec.empty()) return;
- base::MutexGuard guard(&mutex_);
- NativeModule* native_module = code_vec[0]->native_module();
- DCHECK_EQ(1, native_modules_.count(native_module));
- for (Isolate* isolate : native_modules_[native_module]->isolates) {
- DCHECK_EQ(1, isolates_.count(isolate));
- IsolateInfo* info = isolates_[isolate].get();
- if (info->log_codes == false) continue;
- if (info->log_codes_task == nullptr) {
- auto new_task = std::make_unique<LogCodesTask>(
- &mutex_, &info->log_codes_task, isolate, this);
- info->log_codes_task = new_task.get();
- info->foreground_task_runner->PostTask(std::move(new_task));
- }
- if (info->code_to_log.empty()) {
- isolate->stack_guard()->RequestLogWasmCode();
- }
- for (WasmCode* code : code_vec) {
- DCHECK_EQ(native_module, code->native_module());
- code->IncRef();
- }
+ using TaskToSchedule =
+ std::pair<std::shared_ptr<v8::TaskRunner>, std::unique_ptr<LogCodesTask>>;
+ std::vector<TaskToSchedule> to_schedule;
+ {
+ base::MutexGuard guard(&mutex_);
+ NativeModule* native_module = code_vec[0]->native_module();
+ DCHECK_EQ(1, native_modules_.count(native_module));
+ for (Isolate* isolate : native_modules_[native_module]->isolates) {
+ DCHECK_EQ(1, isolates_.count(isolate));
+ IsolateInfo* info = isolates_[isolate].get();
+ if (info->log_codes == false) continue;
+ if (info->log_codes_task == nullptr) {
+ auto new_task = std::make_unique<LogCodesTask>(
+ &mutex_, &info->log_codes_task, isolate, this);
+ info->log_codes_task = new_task.get();
+ // Store the LogCodeTasks to post them outside the WasmEngine::mutex_.
+ // Posting the task in the mutex can cause the following deadlock (only
+ // in d8): When d8 shuts down, it sets a terminate to the task runner.
+ // When the terminate flag in the taskrunner is set, all newly posted
+ // tasks get destroyed immediately. When the LogCodesTask gets
+ // destroyed, it takes the WasmEngine::mutex_ lock to deregister itself
+ // from the IsolateInfo. Therefore, as the LogCodesTask may get
+ // destroyed immediately when it gets posted, it cannot get posted when
+ // the WasmEngine::mutex_ lock is held.
+ to_schedule.emplace_back(info->foreground_task_runner,
+ std::move(new_task));
+ }
+ if (info->code_to_log.empty()) {
+ isolate->stack_guard()->RequestLogWasmCode();
+ }
+ for (WasmCode* code : code_vec) {
+ DCHECK_EQ(native_module, code->native_module());
+ code->IncRef();
+ }
- auto script_it = info->scripts.find(native_module);
- // If the script does not yet exist, logging will happen later. If the weak
- // handle is cleared already, we also don't need to log any more.
- if (script_it == info->scripts.end()) continue;
- auto& log_entry = info->code_to_log[script_it->second.script_id()];
- if (!log_entry.source_url) {
- log_entry.source_url = script_it->second.source_url();
+ auto script_it = info->scripts.find(native_module);
+ // If the script does not yet exist, logging will happen later. If the
+ // weak handle is cleared already, we also don't need to log any more.
+ if (script_it == info->scripts.end()) continue;
+ auto& log_entry = info->code_to_log[script_it->second.script_id()];
+ if (!log_entry.source_url) {
+ log_entry.source_url = script_it->second.source_url();
+ }
+ log_entry.code.insert(log_entry.code.end(), code_vec.begin(),
+ code_vec.end());
}
- log_entry.code.insert(log_entry.code.end(), code_vec.begin(),
- code_vec.end());
+ }
+ for (auto& [runner, task] : to_schedule) {
+ runner->PostTask(std::move(task));
}
}
diff --git a/deps/v8/src/wasm/wasm-engine.h b/deps/v8/src/wasm/wasm-engine.h
index 27907c3961..5f721c3ad5 100644
--- a/deps/v8/src/wasm/wasm-engine.h
+++ b/deps/v8/src/wasm/wasm-engine.h
@@ -360,6 +360,12 @@ class V8_EXPORT_PRIVATE WasmEngine {
TypeCanonicalizer* type_canonicalizer() { return &type_canonicalizer_; }
+ // Returns either the compressed tagged pointer representing a null value or
+ // 0 if pointer compression is not available.
+ Tagged_t compressed_null_value_or_zero() const {
+ return null_tagged_compressed_;
+ }
+
// Call on process start and exit.
static void InitializeOncePerProcess();
static void GlobalTearDown();
@@ -395,6 +401,9 @@ class V8_EXPORT_PRIVATE WasmEngine {
std::atomic<int> next_compilation_id_{0};
+ // Compressed tagged pointer to null value.
+ std::atomic<Tagged_t> null_tagged_compressed_{0};
+
TypeCanonicalizer type_canonicalizer_;
// This mutex protects all information which is mutated concurrently or
diff --git a/deps/v8/src/wasm/wasm-external-refs.cc b/deps/v8/src/wasm/wasm-external-refs.cc
index 4a81d1bdb7..aad2f6aab8 100644
--- a/deps/v8/src/wasm/wasm-external-refs.cc
+++ b/deps/v8/src/wasm/wasm-external-refs.cc
@@ -588,12 +588,81 @@ void array_copy_wrapper(Address raw_instance, Address raw_dst_array,
}
}
-void array_fill_with_zeroes_wrapper(Address raw_array, uint32_t length,
- uint32_t element_size_bytes) {
+void array_fill_with_number_or_null_wrapper(Address raw_array, uint32_t length,
+ uint32_t raw_type,
+ Address initial_value_addr) {
ThreadNotInWasmScope thread_not_in_wasm_scope;
DisallowGarbageCollection no_gc;
- std::memset(ArrayElementAddress(raw_array, 0, element_size_bytes), 0,
- length * element_size_bytes);
+ ValueType type = ValueType::FromRawBitField(raw_type);
+ int8_t* initial_element_address = reinterpret_cast<int8_t*>(
+ ArrayElementAddress(raw_array, 0, type.value_kind_size()));
+ int64_t initial_value = *reinterpret_cast<int64_t*>(initial_value_addr);
+ int bytes_to_set = length * type.value_kind_size();
+
+ // If the initial value is zero, we memset the array.
+ if (type.is_numeric() && initial_value == 0) {
+ std::memset(initial_element_address, 0, bytes_to_set);
+ return;
+ }
+
+ // We implement the general case by setting the first 8 bytes manually, then
+ // filling the rest by exponentially growing {memmove}s.
+
+ DCHECK_GE(static_cast<size_t>(bytes_to_set), sizeof(int64_t));
+
+ switch (type.kind()) {
+ case kI64:
+ case kF64: {
+ *reinterpret_cast<int64_t*>(initial_element_address) = initial_value;
+ break;
+ }
+ case kI32:
+ case kF32: {
+ int32_t* base = reinterpret_cast<int32_t*>(initial_element_address);
+ base[0] = base[1] = static_cast<int32_t>(initial_value);
+ break;
+ }
+ case kI16: {
+ int16_t* base = reinterpret_cast<int16_t*>(initial_element_address);
+ base[0] = base[1] = base[2] = base[3] =
+ static_cast<int16_t>(initial_value);
+ break;
+ }
+ case kI8: {
+ int8_t* base = reinterpret_cast<int8_t*>(initial_element_address);
+ for (size_t i = 0; i < sizeof(int64_t); i++) {
+ base[i] = static_cast<int8_t>(initial_value);
+ }
+ break;
+ }
+ case kRefNull:
+ if constexpr (kTaggedSize == 4) {
+ int32_t* base = reinterpret_cast<int32_t*>(initial_element_address);
+ base[0] = base[1] = static_cast<int32_t>(initial_value);
+ } else {
+ *reinterpret_cast<int64_t*>(initial_element_address) = initial_value;
+ }
+ break;
+ case kS128:
+ case kRtt:
+ case kRef:
+ case kVoid:
+ case kBottom:
+ UNREACHABLE();
+ }
+
+ int bytes_already_set = sizeof(int64_t);
+
+ while (bytes_already_set * 2 <= bytes_to_set) {
+ std::memcpy(initial_element_address + bytes_already_set,
+ initial_element_address, bytes_already_set);
+ bytes_already_set *= 2;
+ }
+
+ if (bytes_already_set < bytes_to_set) {
+ std::memcpy(initial_element_address + bytes_already_set,
+ initial_element_address, bytes_to_set - bytes_already_set);
+ }
}
static WasmTrapCallbackForTesting wasm_trap_callback_for_testing = nullptr;
diff --git a/deps/v8/src/wasm/wasm-external-refs.h b/deps/v8/src/wasm/wasm-external-refs.h
index b3bfac7156..4ae78ce537 100644
--- a/deps/v8/src/wasm/wasm-external-refs.h
+++ b/deps/v8/src/wasm/wasm-external-refs.h
@@ -118,8 +118,10 @@ void array_copy_wrapper(Address raw_instance, Address raw_dst_array,
uint32_t dst_index, Address raw_src_array,
uint32_t src_index, uint32_t length);
-void array_fill_with_zeroes_wrapper(Address raw_array, uint32_t length,
- uint32_t element_size_bytes);
+// The initial value is passed as an int64_t on the stack.
+void array_fill_with_number_or_null_wrapper(Address raw_array, uint32_t length,
+ uint32_t raw_type,
+ Address initial_value_addr);
using WasmTrapCallbackForTesting = void (*)();
diff --git a/deps/v8/src/wasm/wasm-feature-flags.h b/deps/v8/src/wasm/wasm-feature-flags.h
index 80df28c2b9..5b5b6003dc 100644
--- a/deps/v8/src/wasm/wasm-feature-flags.h
+++ b/deps/v8/src/wasm/wasm-feature-flags.h
@@ -100,32 +100,8 @@
// #############################################################################
// Shipped features (enabled by default). Remove the feature flag once they hit
// stable and are expected to stay enabled.
-#define FOREACH_WASM_SHIPPED_FEATURE_FLAG(V) /* (force 80 columns) */ \
- /* Fixed-width SIMD operations. */ \
- /* https://github.com/webassembly/simd */ \
- /* V8 side owner: gdeepti, zhin */ \
- /* Staged in v8.7 * */ \
- /* Shipped in v9.1 * */ \
- V(simd, "SIMD opcodes", true) \
- \
- /* Threads proposal. */ \
- /* https://github.com/webassembly/threads */ \
- /* NOTE: This is enabled via chromium flag on desktop systems since v7.4, */ \
- /* and on android from 9.1. Threads are only available when */ \
- /* SharedArrayBuffers are enabled as well, and are gated by COOP/COEP */ \
- /* headers, more fine grained control is in the chromium codebase */ \
- /* ITS: https://groups.google.com/a/chromium.org/d/msg/blink-dev/ */ \
- /* tD6np-OG2PU/rcNGROOMFQAJ */ \
- /* V8 side owner: gdeepti */ \
- V(threads, "thread opcodes", true) \
- \
- /* Exception handling proposal. */ \
- /* https://github.com/WebAssembly/exception-handling */ \
- /* V8 side owner: thibaudm */ \
- /* Staged in v8.9 */ \
- /* Shipped in v9.5 */ \
- V(eh, "exception handling opcodes", true) \
- \
+#define FOREACH_WASM_SHIPPED_FEATURE_FLAG(V) /* (force 80 columns) */
+
// Combination of all available wasm feature flags.
#define FOREACH_WASM_FEATURE_FLAG(V) \
FOREACH_WASM_EXPERIMENTAL_FEATURE_FLAG(V) \
diff --git a/deps/v8/src/wasm/wasm-features.cc b/deps/v8/src/wasm/wasm-features.cc
index e24c6b910d..ab26412d89 100644
--- a/deps/v8/src/wasm/wasm-features.cc
+++ b/deps/v8/src/wasm/wasm-features.cc
@@ -33,12 +33,7 @@ WasmFeatures WasmFeatures::FromIsolate(Isolate* isolate) {
WasmFeatures WasmFeatures::FromContext(Isolate* isolate,
Handle<Context> context) {
WasmFeatures features = WasmFeatures::FromFlags();
- if (isolate->IsWasmSimdEnabled(context)) {
- features.Add(kFeature_simd);
- }
- if (isolate->AreWasmExceptionsEnabled(context)) {
- features.Add(kFeature_eh);
- }
+ // This space intentionally left blank for future Wasm origin trials.
return features;
}
diff --git a/deps/v8/src/wasm/wasm-features.h b/deps/v8/src/wasm/wasm-features.h
index 12162db1fe..9d87947bad 100644
--- a/deps/v8/src/wasm/wasm-features.h
+++ b/deps/v8/src/wasm/wasm-features.h
@@ -15,8 +15,11 @@
#include "src/wasm/wasm-feature-flags.h"
// Features that are always enabled and do not have a flag.
-#define FOREACH_WASM_NON_FLAG_FEATURE(V) \
- V(reftypes, "reference type opcodes", true)
+#define FOREACH_WASM_NON_FLAG_FEATURE(V) \
+ V(eh, "exception handling opcodes", true) \
+ V(reftypes, "reference type opcodes", true) \
+ V(simd, "SIMD opcodes", true) \
+ V(threads, "thread opcodes", true)
// All features, including features that do not have flags.
#define FOREACH_WASM_FEATURE(V) \
diff --git a/deps/v8/src/wasm/wasm-import-wrapper-cache.h b/deps/v8/src/wasm/wasm-import-wrapper-cache.h
index e07431dd94..c334ce009c 100644
--- a/deps/v8/src/wasm/wasm-import-wrapper-cache.h
+++ b/deps/v8/src/wasm/wasm-import-wrapper-cache.h
@@ -32,9 +32,7 @@ class WasmImportWrapperCache {
uint32_t canonical_type_index, int expected_arity, Suspend suspend)
: kind(kind),
canonical_type_index(canonical_type_index),
- expected_arity(expected_arity == kDontAdaptArgumentsSentinel
- ? 0
- : expected_arity),
+ expected_arity(expected_arity),
suspend(suspend) {}
bool operator==(const CacheKey& rhs) const {
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index 7806d88567..6cbfdbc837 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -1174,8 +1174,8 @@ void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
string->StringEquals(v8_str(isolate, "eqref"))) {
type = i::wasm::kWasmEqRef;
} else if (enabled_features.has_gc() &&
- string->StringEquals(v8_str(isolate, "dataref"))) {
- type = i::wasm::kWasmDataRef;
+ string->StringEquals(v8_str(isolate, "structref"))) {
+ type = i::wasm::kWasmStructRef;
} else if (enabled_features.has_gc() &&
string->StringEquals(v8_str(isolate, "arrayref"))) {
type = i::wasm::kWasmArrayRef;
@@ -1293,27 +1293,22 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
auto shared = i::SharedFlag::kNotShared;
- auto enabled_features = i::wasm::WasmFeatures::FromIsolate(i_isolate);
- if (enabled_features.has_threads()) {
- // Shared property of descriptor
- Local<String> shared_key = v8_str(isolate, "shared");
- v8::MaybeLocal<v8::Value> maybe_value =
- descriptor->Get(context, shared_key);
- v8::Local<v8::Value> value;
- if (maybe_value.ToLocal(&value)) {
- shared = value->BooleanValue(isolate) ? i::SharedFlag::kShared
- : i::SharedFlag::kNotShared;
- } else {
- DCHECK(i_isolate->has_scheduled_exception());
- return;
- }
+ // Shared property of descriptor
+ Local<String> shared_key = v8_str(isolate, "shared");
+ v8::MaybeLocal<v8::Value> maybe_value = descriptor->Get(context, shared_key);
+ v8::Local<v8::Value> value;
+ if (maybe_value.ToLocal(&value)) {
+ shared = value->BooleanValue(isolate) ? i::SharedFlag::kShared
+ : i::SharedFlag::kNotShared;
+ } else {
+ DCHECK(i_isolate->has_scheduled_exception());
+ return;
+ }
- // Throw TypeError if shared is true, and the descriptor has no "maximum"
- if (shared == i::SharedFlag::kShared && maximum == -1) {
- thrower.TypeError(
- "If shared is true, maximum property should be defined.");
- return;
- }
+ // Throw TypeError if shared is true, and the descriptor has no "maximum"
+ if (shared == i::SharedFlag::kShared && maximum == -1) {
+ thrower.TypeError("If shared is true, maximum property should be defined.");
+ return;
}
i::Handle<i::JSObject> memory_obj;
@@ -1390,8 +1385,8 @@ bool GetValueType(Isolate* isolate, MaybeLocal<Value> maybe,
string->StringEquals(v8_str(isolate, "anyref"))) {
*type = i::wasm::kWasmAnyRef;
} else if (enabled_features.has_gc() &&
- string->StringEquals(v8_str(isolate, "dataref"))) {
- *type = i::wasm::kWasmDataRef;
+ string->StringEquals(v8_str(isolate, "structref"))) {
+ *type = i::wasm::kWasmStructRef;
} else if (enabled_features.has_gc() &&
string->StringEquals(v8_str(isolate, "arrayref"))) {
*type = i::wasm::kWasmArrayRef;
@@ -1747,7 +1742,7 @@ void EncodeExceptionValues(v8::Isolate* isolate,
case i::wasm::HeapType::kAny:
case i::wasm::HeapType::kEq:
case i::wasm::HeapType::kI31:
- case i::wasm::HeapType::kData:
+ case i::wasm::HeapType::kStruct:
case i::wasm::HeapType::kArray:
case i::wasm::HeapType::kString:
case i::wasm::HeapType::kStringViewWtf8:
@@ -2263,19 +2258,10 @@ void WasmObjectToJSReturnValue(v8::ReturnValue<v8::Value>& return_value,
return;
case i::wasm::HeapType::kBottom:
UNREACHABLE();
- case i::wasm::HeapType::kData:
+ case i::wasm::HeapType::kStruct:
case i::wasm::HeapType::kArray:
case i::wasm::HeapType::kEq:
case i::wasm::HeapType::kAny: {
- if (!i::v8_flags.wasm_gc_js_interop && value->IsWasmObject()) {
- // Transform wasm object into JS-compliant representation.
- i::Handle<i::JSObject> wrapper =
- isolate->factory()->NewJSObject(isolate->object_function());
- i::JSObject::AddProperty(
- isolate, wrapper, isolate->factory()->wasm_wrapped_object_symbol(),
- value, i::NONE);
- value = wrapper;
- }
return_value.Set(Utils::ToLocal(value));
return;
}
@@ -2287,17 +2273,6 @@ void WasmObjectToJSReturnValue(v8::ReturnValue<v8::Value>& return_value,
i::Handle<i::WasmInternalFunction>::cast(value)->external(),
isolate);
}
- return_value.Set(Utils::ToLocal(value));
- return;
- }
- if (!i::v8_flags.wasm_gc_js_interop && value->IsWasmObject()) {
- // Transform wasm object into JS-compliant representation.
- i::Handle<i::JSObject> wrapper =
- isolate->factory()->NewJSObject(isolate->object_function());
- i::JSObject::AddProperty(
- isolate, wrapper, isolate->factory()->wasm_wrapped_object_symbol(),
- value, i::NONE);
- value = wrapper;
}
return_value.Set(Utils::ToLocal(value));
return;
@@ -2572,7 +2547,7 @@ void WebAssemblyExceptionGetArg(
case i::wasm::HeapType::kAny:
case i::wasm::HeapType::kEq:
case i::wasm::HeapType::kI31:
- case i::wasm::HeapType::kData:
+ case i::wasm::HeapType::kStruct:
case i::wasm::HeapType::kArray:
case i::wasm::HeapType::kString:
case i::wasm::HeapType::kStringViewWtf8:
@@ -2636,7 +2611,7 @@ void WebAssemblyExceptionGetArg(
case i::wasm::HeapType::kEq:
case i::wasm::HeapType::kI31:
case i::wasm::HeapType::kArray:
- case i::wasm::HeapType::kData:
+ case i::wasm::HeapType::kStruct:
case i::wasm::HeapType::kString:
case i::wasm::HeapType::kStringViewWtf8:
case i::wasm::HeapType::kStringViewWtf16:
@@ -3094,29 +3069,27 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
}
// Setup Exception
- if (enabled_features.has_eh()) {
- Handle<JSFunction> tag_constructor =
- InstallConstructorFunc(isolate, webassembly, "Tag", WebAssemblyTag);
- Handle<JSObject> tag_proto =
- SetupConstructor(isolate, tag_constructor, i::WASM_TAG_OBJECT_TYPE,
- WasmTagObject::kHeaderSize, "WebAssembly.Tag");
- context->set_wasm_tag_constructor(*tag_constructor);
-
- if (enabled_features.has_type_reflection()) {
- InstallFunc(isolate, tag_proto, "type", WebAssemblyTagType, 0);
- }
- // Set up runtime exception constructor.
- Handle<JSFunction> exception_constructor = InstallConstructorFunc(
- isolate, webassembly, "Exception", WebAssemblyException);
- SetDummyInstanceTemplate(isolate, exception_constructor);
- Handle<JSObject> exception_proto = SetupConstructor(
- isolate, exception_constructor, i::WASM_EXCEPTION_PACKAGE_TYPE,
- WasmExceptionPackage::kHeaderSize, "WebAssembly.Exception");
- InstallFunc(isolate, exception_proto, "getArg", WebAssemblyExceptionGetArg,
- 2);
- InstallFunc(isolate, exception_proto, "is", WebAssemblyExceptionIs, 1);
- context->set_wasm_exception_constructor(*exception_constructor);
- }
+ Handle<JSFunction> tag_constructor =
+ InstallConstructorFunc(isolate, webassembly, "Tag", WebAssemblyTag);
+ Handle<JSObject> tag_proto =
+ SetupConstructor(isolate, tag_constructor, i::WASM_TAG_OBJECT_TYPE,
+ WasmTagObject::kHeaderSize, "WebAssembly.Tag");
+ context->set_wasm_tag_constructor(*tag_constructor);
+
+ if (enabled_features.has_type_reflection()) {
+ InstallFunc(isolate, tag_proto, "type", WebAssemblyTagType, 0);
+ }
+ // Set up runtime exception constructor.
+ Handle<JSFunction> exception_constructor = InstallConstructorFunc(
+ isolate, webassembly, "Exception", WebAssemblyException);
+ SetDummyInstanceTemplate(isolate, exception_constructor);
+ Handle<JSObject> exception_proto = SetupConstructor(
+ isolate, exception_constructor, i::WASM_EXCEPTION_PACKAGE_TYPE,
+ WasmExceptionPackage::kHeaderSize, "WebAssembly.Exception");
+ InstallFunc(isolate, exception_proto, "getArg", WebAssemblyExceptionGetArg,
+ 2);
+ InstallFunc(isolate, exception_proto, "is", WebAssemblyExceptionIs, 1);
+ context->set_wasm_exception_constructor(*exception_constructor);
// Setup Suspender.
if (enabled_features.has_stack_switching()) {
@@ -3176,55 +3149,7 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
// static
void WasmJs::InstallConditionalFeatures(Isolate* isolate,
Handle<Context> context) {
- // Exception handling may have been enabled by an origin trial. If so, make
- // sure that the {WebAssembly.Tag} constructor is set up.
- auto enabled_features = i::wasm::WasmFeatures::FromContext(isolate, context);
- if (enabled_features.has_eh()) {
- Handle<JSGlobalObject> global = handle(context->global_object(), isolate);
- MaybeHandle<Object> maybe_webassembly =
- JSObject::GetProperty(isolate, global, "WebAssembly");
- Handle<Object> webassembly_obj;
- if (!maybe_webassembly.ToHandle(&webassembly_obj) ||
- !webassembly_obj->IsJSObject()) {
- // There is no {WebAssembly} object, or it's not what we expect.
- // Just return without adding the {Tag} constructor.
- return;
- }
- Handle<JSObject> webassembly = Handle<JSObject>::cast(webassembly_obj);
- // Setup Tag.
- Handle<String> tag_name = v8_str(isolate, "Tag");
- // The {WebAssembly} object may already have been modified. The following
- // code is designed to:
- // - check for existing {Tag} properties on the object itself, and avoid
- // overwriting them or adding duplicate properties
- // - disregard any setters or read-only properties on the prototype chain
- // - only make objects accessible to user code after all internal setup
- // has been completed.
- if (JSObject::HasOwnProperty(isolate, webassembly, tag_name)
- .FromMaybe(true)) {
- // Existing property, or exception.
- return;
- }
-
- bool has_prototype = true;
- Handle<JSFunction> tag_constructor =
- CreateFunc(isolate, tag_name, WebAssemblyTag, has_prototype,
- SideEffectType::kHasNoSideEffect);
- tag_constructor->shared().set_length(1);
- context->set_wasm_tag_constructor(*tag_constructor);
- Handle<JSObject> tag_proto =
- SetupConstructor(isolate, tag_constructor, i::WASM_TAG_OBJECT_TYPE,
- WasmTagObject::kHeaderSize, "WebAssembly.Tag");
- if (enabled_features.has_type_reflection()) {
- InstallFunc(isolate, tag_proto, "type", WebAssemblyTagType, 0);
- }
- LookupIterator it(isolate, webassembly, tag_name, LookupIterator::OWN);
- Maybe<bool> result = JSObject::DefineOwnPropertyIgnoreAttributes(
- &it, tag_constructor, DONT_ENUM, Just(kDontThrow));
- // This could still fail if the object was non-extensible, but now we
- // return anyway so there's no need to even check.
- USE(result);
- }
+ // This space left blank for future origin trials.
}
#undef ASSIGN
#undef EXTRACT_THIS
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index ac90527ff9..235d9de4d1 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -59,13 +59,13 @@ class WireBytesRef {
// Static representation of a wasm function.
struct WasmFunction {
- const FunctionSig* sig; // signature of the function.
- uint32_t func_index; // index into the function table.
- uint32_t sig_index; // index into the signature table.
- WireBytesRef code; // code of this function.
- bool imported;
- bool exported;
- bool declared;
+ const FunctionSig* sig = nullptr; // signature of the function.
+ uint32_t func_index = 0; // index into the function table.
+ uint32_t sig_index = 0; // index into the signature table.
+ WireBytesRef code = {}; // code of this function.
+ bool imported = false;
+ bool exported = false;
+ bool declared = false;
};
// Static representation of a wasm global variable.
@@ -494,9 +494,11 @@ struct V8_EXPORT_PRIVATE WasmModule {
// mutable.
uint32_t untagged_globals_buffer_size = 0;
uint32_t tagged_globals_buffer_size = 0;
+ uint32_t num_imported_globals = 0;
uint32_t num_imported_mutable_globals = 0;
uint32_t num_imported_functions = 0;
uint32_t num_imported_tables = 0;
+ uint32_t num_imported_tags = 0;
uint32_t num_declared_functions = 0; // excluding imported
uint32_t num_exported_functions = 0;
uint32_t num_declared_data_segments = 0; // From the DataCount section.
@@ -508,6 +510,8 @@ struct V8_EXPORT_PRIVATE WasmModule {
// ID and length).
WireBytesRef name_section = {0, 0};
+ AccountingAllocator* allocator() const { return signature_zone->allocator(); }
+
void add_type(TypeDefinition type) {
types.push_back(type);
// Isorecursive canonical type will be computed later.
@@ -568,6 +572,36 @@ struct V8_EXPORT_PRIVATE WasmModule {
isorecursive_canonical_type_ids.end());
}
+ bool function_was_validated(int func_index) const {
+ DCHECK_NOT_NULL(validated_functions);
+ static_assert(sizeof(validated_functions[0]) == 1);
+ DCHECK_LE(num_imported_functions, func_index);
+ int pos = func_index - num_imported_functions;
+ DCHECK_LE(pos, num_declared_functions);
+ uint8_t byte =
+ validated_functions[pos >> 3].load(std::memory_order_relaxed);
+ return byte & (1 << (pos & 7));
+ }
+
+ void set_function_validated(int func_index) const {
+ DCHECK_NOT_NULL(validated_functions);
+ DCHECK_LE(num_imported_functions, func_index);
+ int pos = func_index - num_imported_functions;
+ DCHECK_LE(pos, num_declared_functions);
+ std::atomic<uint8_t>* atomic_byte = &validated_functions[pos >> 3];
+ uint8_t old_byte = atomic_byte->load(std::memory_order_relaxed);
+ uint8_t new_bit = 1 << (pos & 7);
+ while ((old_byte & new_bit) == 0 &&
+ !atomic_byte->compare_exchange_weak(old_byte, old_byte | new_bit,
+ std::memory_order_relaxed)) {
+ // Retry with updated {old_byte}.
+ }
+ }
+
+ base::Vector<const WasmFunction> declared_functions() const {
+ return base::VectorOf(functions) + num_imported_functions;
+ }
+
std::vector<TypeDefinition> types; // by type index
// Maps each type index to its global (cross-module) canonical index as per
// isorecursive type canonicalization.
@@ -595,6 +629,12 @@ struct V8_EXPORT_PRIVATE WasmModule {
// from asm.js.
std::unique_ptr<AsmJsOffsetInformation> asm_js_offset_information;
+ // {validated_functions} is atomically updated when functions get validated
+ // (during compilation, streaming decoding, or via explicit validation).
+ static_assert(sizeof(std::atomic<uint8_t>) == 1);
+ static_assert(alignof(std::atomic<uint8_t>) == 1);
+ mutable std::unique_ptr<std::atomic<uint8_t>[]> validated_functions;
+
explicit WasmModule(std::unique_ptr<Zone> signature_zone = nullptr);
WasmModule(const WasmModule&) = delete;
WasmModule& operator=(const WasmModule&) = delete;
diff --git a/deps/v8/src/wasm/wasm-objects-inl.h b/deps/v8/src/wasm/wasm-objects-inl.h
index b83ead736a..a73bb9a76a 100644
--- a/deps/v8/src/wasm/wasm-objects-inl.h
+++ b/deps/v8/src/wasm/wasm-objects-inl.h
@@ -238,8 +238,8 @@ ACCESSORS(WasmInstanceObject, imported_function_refs, FixedArray,
OPTIONAL_ACCESSORS(WasmInstanceObject, indirect_function_table_refs, FixedArray,
kIndirectFunctionTableRefsOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, tags_table, FixedArray, kTagsTableOffset)
-OPTIONAL_ACCESSORS(WasmInstanceObject, wasm_internal_functions, FixedArray,
- kWasmInternalFunctionsOffset)
+ACCESSORS(WasmInstanceObject, wasm_internal_functions, FixedArray,
+ kWasmInternalFunctionsOffset)
ACCESSORS(WasmInstanceObject, managed_object_maps, FixedArray,
kManagedObjectMapsOffset)
ACCESSORS(WasmInstanceObject, feedback_vectors, FixedArray,
diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc
index caa2440104..4c2bfe0344 100644
--- a/deps/v8/src/wasm/wasm-objects.cc
+++ b/deps/v8/src/wasm/wasm-objects.cc
@@ -267,8 +267,6 @@ MaybeHandle<Object> WasmTableObject::JSToWasmElement(
const char** error_message) {
// Any `entry` has to be in its JS representation.
DCHECK(!entry->IsWasmInternalFunction());
- DCHECK_IMPLIES(!v8_flags.wasm_gc_js_interop,
- !entry->IsWasmArray() && !entry->IsWasmStruct());
const WasmModule* module =
!table->instance().IsUndefined()
? WasmInstanceObject::cast(table->instance()).module()
@@ -325,7 +323,7 @@ void WasmTableObject::Set(Isolate* isolate, Handle<WasmTableObject> table,
case wasm::HeapType::kStringViewWtf16:
case wasm::HeapType::kStringViewIter:
case wasm::HeapType::kEq:
- case wasm::HeapType::kData:
+ case wasm::HeapType::kStruct:
case wasm::HeapType::kArray:
case wasm::HeapType::kAny:
case wasm::HeapType::kI31:
@@ -373,7 +371,7 @@ Handle<Object> WasmTableObject::Get(Isolate* isolate,
case wasm::HeapType::kString:
case wasm::HeapType::kEq:
case wasm::HeapType::kI31:
- case wasm::HeapType::kData:
+ case wasm::HeapType::kStruct:
case wasm::HeapType::kArray:
case wasm::HeapType::kAny:
return entry;
@@ -917,7 +915,7 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
// Check if the non-shared memory could grow in-place.
if (result_inplace.has_value()) {
// Detach old and create a new one with the grown backing store.
- old_buffer->Detach(true);
+ JSArrayBuffer::Detach(old_buffer, true).Check();
Handle<JSArrayBuffer> new_buffer =
isolate->factory()->NewJSArrayBuffer(std::move(backing_store));
memory_object->update_instances(isolate, new_buffer);
@@ -957,7 +955,7 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
}
// Detach old and create a new one with the new backing store.
- old_buffer->Detach(true);
+ JSArrayBuffer::Detach(old_buffer, true).Check();
Handle<JSArrayBuffer> new_buffer =
isolate->factory()->NewJSArrayBuffer(std::move(new_backing_store));
memory_object->update_instances(isolate, new_buffer);
@@ -1182,6 +1180,11 @@ Handle<WasmInstanceObject> WasmInstanceObject::New(
instance->set_hook_on_function_call_address(
isolate->debug()->hook_on_function_call_address());
instance->set_managed_object_maps(*isolate->factory()->empty_fixed_array());
+ // TODO(manoskouk): Initialize this array with zeroes, and check for zero in
+ // wasm-compiler.
+ Handle<FixedArray> functions = isolate->factory()->NewFixedArray(
+ static_cast<int>(module->functions.size()));
+ instance->set_wasm_internal_functions(*functions);
instance->set_feedback_vectors(*isolate->factory()->empty_fixed_array());
instance->set_tiering_budget_array(
module_object->native_module()->tiering_budget_array());
@@ -1325,15 +1328,10 @@ base::Optional<MessageTemplate> WasmInstanceObject::InitTableEntries(
MaybeHandle<WasmInternalFunction> WasmInstanceObject::GetWasmInternalFunction(
Isolate* isolate, Handle<WasmInstanceObject> instance, int index) {
- MaybeHandle<WasmInternalFunction> result;
- if (instance->has_wasm_internal_functions()) {
- Object val = instance->wasm_internal_functions().get(index);
- if (!val.IsUndefined(isolate)) {
- result = Handle<WasmInternalFunction>(WasmInternalFunction::cast(val),
- isolate);
- }
- }
- return result;
+ Object val = instance->wasm_internal_functions().get(index);
+ return val.IsWasmInternalFunction()
+ ? handle(WasmInternalFunction::cast(val), isolate)
+ : MaybeHandle<WasmInternalFunction>();
}
Handle<WasmInternalFunction>
@@ -1342,10 +1340,8 @@ WasmInstanceObject::GetOrCreateWasmInternalFunction(
MaybeHandle<WasmInternalFunction> maybe_result =
WasmInstanceObject::GetWasmInternalFunction(isolate, instance,
function_index);
-
- Handle<WasmInternalFunction> result;
- if (maybe_result.ToHandle(&result)) {
- return result;
+ if (!maybe_result.is_null()) {
+ return maybe_result.ToHandleChecked();
}
Handle<WasmModuleObject> module_object(instance->module_object(), isolate);
@@ -1378,28 +1374,17 @@ WasmInstanceObject::GetOrCreateWasmInternalFunction(
auto external = Handle<WasmExternalFunction>::cast(WasmExportedFunction::New(
isolate, instance, function_index,
static_cast<int>(function.sig->parameter_count()), wrapper));
- result =
+ Handle<WasmInternalFunction> result =
WasmInternalFunction::FromExternal(external, isolate).ToHandleChecked();
- WasmInstanceObject::SetWasmInternalFunction(isolate, instance, function_index,
- result);
+ WasmInstanceObject::SetWasmInternalFunction(instance, function_index, result);
return result;
}
void WasmInstanceObject::SetWasmInternalFunction(
- Isolate* isolate, Handle<WasmInstanceObject> instance, int index,
+ Handle<WasmInstanceObject> instance, int index,
Handle<WasmInternalFunction> val) {
- Handle<FixedArray> functions;
- if (!instance->has_wasm_internal_functions()) {
- // Lazily allocate the wasm external functions array.
- functions = isolate->factory()->NewFixedArray(
- static_cast<int>(instance->module()->functions.size()));
- instance->set_wasm_internal_functions(*functions);
- } else {
- functions =
- Handle<FixedArray>(instance->wasm_internal_functions(), isolate);
- }
- functions->set(index, *val);
+ instance->wasm_internal_functions().set(index, *val);
}
// static
@@ -2214,24 +2199,6 @@ Handle<AsmWasmData> AsmWasmData::New(
return result;
}
-namespace {
-// If {in_out_value} is a wrapped wasm struct/array, it gets unwrapped in-place
-// and this returns {true}. Otherwise, the value remains unchanged and this
-// returns {false}.
-bool TryUnpackObjectWrapper(Isolate* isolate, Handle<Object>& in_out_value) {
- if (in_out_value->IsUndefined(isolate) || in_out_value->IsNull(isolate) ||
- !in_out_value->IsJSObject()) {
- return false;
- }
- Handle<Name> key = isolate->factory()->wasm_wrapped_object_symbol();
- LookupIterator it(isolate, in_out_value, key,
- LookupIterator::OWN_SKIP_INTERCEPTOR);
- if (it.state() != LookupIterator::DATA) return false;
- in_out_value = it.GetDataValue();
- return true;
-}
-} // namespace
-
namespace wasm {
MaybeHandle<Object> JSToWasmObject(Isolate* isolate, const WasmModule* module,
Handle<Object> value, ValueType expected,
@@ -2257,8 +2224,6 @@ MaybeHandle<Object> JSToWasmObject(Isolate* isolate, const WasmModule* module,
}
V8_FALLTHROUGH;
case kRef: {
- // TODO(7748): Follow any changes in proposed JS API. In particular,
- // finalize the v8_flags.wasm_gc_js_interop situation.
// TODO(7748): Allow all in-range numbers for i31. Make sure to convert
// Smis to i31refs if needed.
// TODO(7748): Streamline interaction of undefined and (ref any).
@@ -2284,28 +2249,21 @@ MaybeHandle<Object> JSToWasmObject(Isolate* isolate, const WasmModule* module,
return {};
}
case HeapType::kAny: {
- if (!v8_flags.wasm_gc_js_interop) {
- TryUnpackObjectWrapper(isolate, value);
- }
if (!value->IsNull(isolate)) return value;
*error_message = "null is not allowed for (ref any)";
return {};
}
- case HeapType::kData: {
- if (v8_flags.wasm_gc_js_interop
- ? value->IsWasmStruct() || value->IsWasmArray()
- : TryUnpackObjectWrapper(isolate, value)) {
+ case HeapType::kStruct: {
+ if (value->IsWasmStruct() ||
+ (value->IsWasmArray() && v8_flags.wasm_gc_structref_as_dataref)) {
return value;
}
*error_message =
- "dataref object must be null (if nullable) or a wasm "
- "struct/array";
+ "structref object must be null (if nullable) or a wasm struct";
return {};
}
case HeapType::kArray: {
- if ((v8_flags.wasm_gc_js_interop ||
- TryUnpackObjectWrapper(isolate, value)) &&
- value->IsWasmArray()) {
+ if (value->IsWasmArray()) {
return value;
}
*error_message =
@@ -2313,10 +2271,7 @@ MaybeHandle<Object> JSToWasmObject(Isolate* isolate, const WasmModule* module,
return {};
}
case HeapType::kEq: {
- if (value->IsSmi() ||
- (v8_flags.wasm_gc_js_interop
- ? value->IsWasmStruct() || value->IsWasmArray()
- : TryUnpackObjectWrapper(isolate, value))) {
+ if (value->IsSmi() || value->IsWasmStruct() || value->IsWasmArray()) {
return value;
}
*error_message =
@@ -2403,9 +2358,7 @@ MaybeHandle<Object> JSToWasmObject(Isolate* isolate, const WasmModule* module,
// A struct or array type with index is expected.
DCHECK(module->has_struct(expected.ref_index()) ||
module->has_array(expected.ref_index()));
- if (v8_flags.wasm_gc_js_interop
- ? !value->IsWasmStruct() && !value->IsWasmArray()
- : !TryUnpackObjectWrapper(isolate, value)) {
+ if (!value->IsWasmStruct() && !value->IsWasmArray()) {
*error_message = "object incompatible with wasm type";
return {};
}
diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h
index a81a23fe2f..b3b99175a1 100644
--- a/deps/v8/src/wasm/wasm-objects.h
+++ b/deps/v8/src/wasm/wasm-objects.h
@@ -342,7 +342,7 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
DECL_ACCESSORS(imported_function_targets, FixedAddressArray)
DECL_OPTIONAL_ACCESSORS(indirect_function_table_refs, FixedArray)
DECL_OPTIONAL_ACCESSORS(tags_table, FixedArray)
- DECL_OPTIONAL_ACCESSORS(wasm_internal_functions, FixedArray)
+ DECL_ACCESSORS(wasm_internal_functions, FixedArray)
DECL_ACCESSORS(managed_object_maps, FixedArray)
DECL_ACCESSORS(feedback_vectors, FixedArray)
DECL_SANDBOXED_POINTER_ACCESSORS(memory_start, byte*)
@@ -510,8 +510,7 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
Isolate* isolate, Handle<WasmInstanceObject> instance,
int function_index);
- static void SetWasmInternalFunction(Isolate* isolate,
- Handle<WasmInstanceObject> instance,
+ static void SetWasmInternalFunction(Handle<WasmInstanceObject> instance,
int index,
Handle<WasmInternalFunction> val);
diff --git a/deps/v8/src/wasm/wasm-opcodes.h b/deps/v8/src/wasm/wasm-opcodes.h
index 7d4c80d0b8..685645a4f2 100644
--- a/deps/v8/src/wasm/wasm-opcodes.h
+++ b/deps/v8/src/wasm/wasm-opcodes.h
@@ -60,9 +60,9 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(CallIndirect, 0x11, _, "call_indirect") \
V(ReturnCall, 0x12, _, "return_call") \
V(ReturnCallIndirect, 0x13, _, "return_call_indirect") \
- V(CallRefDeprecated, 0x14, _, "call_ref") /* typed_funcref prototype */ \
+ V(CallRef, 0x14, _, "call_ref") /* typed_funcref prototype */ \
V(ReturnCallRef, 0x15, _, "return_call_ref") /* typed_funcref prototype */ \
- V(CallRef, 0x17, _, "call_ref") /* temporary, for compat.*/ \
+ V(CallRefDeprecated, 0x17, _, "call_ref") /* temporary, for compat.*/ \
V(Drop, 0x1a, _, "drop") \
V(Select, 0x1b, _, "select") \
V(SelectWithType, 0x1c, _, "select") \
@@ -710,20 +710,22 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(RefTest, 0xfb40, _, "ref.test") \
V(RefTestNull, 0xfb48, _, "ref.test null") \
V(RefTestDeprecated, 0xfb44, _, "ref.test") \
- V(RefCast, 0xfb45, _, "ref.cast") \
+ V(RefCast, 0xfb41, _, "ref.cast") \
+ V(RefCastNull, 0xfb49, _, "ref.cast null") \
+ V(RefCastDeprecated, 0xfb45, _, "ref.cast") \
V(BrOnCast, 0xfb46, _, "br_on_cast") \
V(BrOnCastFail, 0xfb47, _, "br_on_cast_fail") \
V(RefCastNop, 0xfb4c, _, "ref.cast_nop") \
- V(RefIsData, 0xfb51, _, "ref.is_data") \
+ V(RefIsStruct, 0xfb51, _, "ref.is_struct") \
V(RefIsI31, 0xfb52, _, "ref.is_i31") \
V(RefIsArray, 0xfb53, _, "ref.is_array") \
- V(RefAsData, 0xfb59, _, "ref.as_data") \
+ V(RefAsStruct, 0xfb59, _, "ref.as_struct") \
V(RefAsI31, 0xfb5a, _, "ref.as_i31") \
V(RefAsArray, 0xfb5b, _, "ref.as_array") \
- V(BrOnData, 0xfb61, _, "br_on_data") \
+ V(BrOnStruct, 0xfb61, _, "br_on_struct") \
V(BrOnI31, 0xfb62, _, "br_on_i31") \
V(BrOnArray, 0xfb66, _, "br_on_array") \
- V(BrOnNonData, 0xfb64, _, "br_on_non_data") \
+ V(BrOnNonStruct, 0xfb64, _, "br_on_non_struct") \
V(BrOnNonI31, 0xfb65, _, "br_on_non_i31") \
V(BrOnNonArray, 0xfb67, _, "br_on_non_array") \
V(ExternInternalize, 0xfb70, _, "extern.internalize") \
diff --git a/deps/v8/src/wasm/wasm-subtyping.cc b/deps/v8/src/wasm/wasm-subtyping.cc
index 8cc6ba872f..0b74af4c70 100644
--- a/deps/v8/src/wasm/wasm-subtyping.cc
+++ b/deps/v8/src/wasm/wasm-subtyping.cc
@@ -4,6 +4,7 @@
#include "src/wasm/wasm-subtyping.h"
+#include "src/base/v8-fallthrough.h"
#include "src/wasm/canonical-types.h"
#include "src/wasm/wasm-module.h"
@@ -104,7 +105,7 @@ HeapType::Representation NullSentinelImpl(HeapType type,
case HeapType::kI31:
case HeapType::kNone:
case HeapType::kEq:
- case HeapType::kData:
+ case HeapType::kStruct:
case HeapType::kArray:
case HeapType::kAny:
case HeapType::kString:
@@ -211,12 +212,17 @@ V8_NOINLINE V8_EXPORT_PRIVATE bool IsHeapSubtypeOfImpl(
case HeapType::kExtern:
return super_heap == HeapType::kExtern;
case HeapType::kI31:
- case HeapType::kData:
+ case HeapType::kStruct:
+ case HeapType::kArray:
+ if (v8_flags.wasm_gc_structref_as_dataref &&
+ sub_heap.representation() == HeapType::kArray) {
+ // TODO(7748): Remove temporary workaround for backwards compatibility.
+ return super_heap == HeapType::kArray ||
+ super_heap == HeapType::kStruct || super_heap == HeapType::kEq ||
+ super_heap == HeapType::kAny;
+ }
return super_heap == sub_heap || super_heap == HeapType::kEq ||
super_heap == HeapType::kAny;
- case HeapType::kArray:
- return super_heap == HeapType::kArray || super_heap == HeapType::kData ||
- super_heap == HeapType::kEq || super_heap == HeapType::kAny;
case HeapType::kString:
// stringref is a subtype of anyref under wasm-gc.
return sub_heap == super_heap ||
@@ -256,8 +262,12 @@ V8_NOINLINE V8_EXPORT_PRIVATE bool IsHeapSubtypeOfImpl(
switch (super_heap.representation()) {
case HeapType::kFunc:
return sub_module->has_signature(sub_index);
+ case HeapType::kStruct:
+ if (!v8_flags.wasm_gc_structref_as_dataref) {
+ return sub_module->has_struct(sub_index);
+ }
+ V8_FALLTHROUGH;
case HeapType::kEq:
- case HeapType::kData:
case HeapType::kAny:
return !sub_module->has_signature(sub_index);
case HeapType::kArray:
@@ -345,14 +355,25 @@ HeapType::Representation CommonAncestor(uint32_t type_index1,
DCHECK_EQ(kind2, kind1);
return HeapType::kFunc;
case TypeDefinition::kStruct:
- DCHECK_NE(kind2, TypeDefinition::kFunction);
- return HeapType::kData;
+ if (v8_flags.wasm_gc_structref_as_dataref) {
+ DCHECK_NE(kind2, TypeDefinition::kFunction);
+ return HeapType::kStruct;
+ }
+ switch (kind2) {
+ case TypeDefinition::kFunction:
+ UNREACHABLE();
+ case TypeDefinition::kStruct:
+ return HeapType::kStruct;
+ case TypeDefinition::kArray:
+ return HeapType::kEq;
+ }
case TypeDefinition::kArray:
switch (kind2) {
case TypeDefinition::kFunction:
UNREACHABLE();
case TypeDefinition::kStruct:
- return HeapType::kData;
+ return v8_flags.wasm_gc_structref_as_dataref ? HeapType::kStruct
+ : HeapType::kEq;
case TypeDefinition::kArray:
return HeapType::kArray;
}
@@ -361,6 +382,9 @@ HeapType::Representation CommonAncestor(uint32_t type_index1,
// Returns the least common ancestor of a generic HeapType {heap1}, and
// another HeapType {heap2}.
+// TODO(7748): This function sometimes assumes that incompatible types cannot be
+// compared, in some cases explicitly and in others implicitly. Make it
+// consistent.
HeapType::Representation CommonAncestorWithGeneric(HeapType heap1,
HeapType heap2,
const WasmModule* module2) {
@@ -380,7 +404,7 @@ HeapType::Representation CommonAncestorWithGeneric(HeapType heap1,
case HeapType::kNone:
return HeapType::kI31;
case HeapType::kEq:
- case HeapType::kData:
+ case HeapType::kStruct:
case HeapType::kArray:
return HeapType::kEq;
case HeapType::kAny:
@@ -394,12 +418,14 @@ HeapType::Representation CommonAncestorWithGeneric(HeapType heap1,
return module2->has_signature(heap2.ref_index()) ? HeapType::kBottom
: HeapType::kEq;
}
- case HeapType::kData:
+ case HeapType::kStruct:
switch (heap2.representation()) {
- case HeapType::kData:
- case HeapType::kArray:
+ case HeapType::kStruct:
case HeapType::kNone:
- return HeapType::kData;
+ return HeapType::kStruct;
+ case HeapType::kArray:
+ return v8_flags.wasm_gc_structref_as_dataref ? HeapType::kStruct
+ : HeapType::kEq;
case HeapType::kI31:
case HeapType::kEq:
return HeapType::kEq;
@@ -412,15 +438,18 @@ HeapType::Representation CommonAncestorWithGeneric(HeapType heap1,
UNREACHABLE();
default:
return module2->has_signature(heap2.ref_index()) ? HeapType::kBottom
- : HeapType::kData;
+ : module2->has_struct(heap2.ref_index()) ? HeapType::kStruct
+ : v8_flags.wasm_gc_structref_as_dataref ? HeapType::kStruct
+ : HeapType::kEq;
}
case HeapType::kArray:
switch (heap2.representation()) {
case HeapType::kArray:
case HeapType::kNone:
return HeapType::kArray;
- case HeapType::kData:
- return HeapType::kData;
+ case HeapType::kStruct:
+ return v8_flags.wasm_gc_structref_as_dataref ? HeapType::kStruct
+ : HeapType::kEq;
case HeapType::kI31:
case HeapType::kEq:
return HeapType::kEq;
@@ -432,9 +461,12 @@ HeapType::Representation CommonAncestorWithGeneric(HeapType heap1,
case HeapType::kNoFunc:
UNREACHABLE();
default:
- return module2->has_array(heap2.ref_index()) ? HeapType::kArray
- : module2->has_struct(heap2.ref_index()) ? HeapType::kData
- : HeapType::kBottom;
+ return module2->has_array(heap2.ref_index()) ? HeapType::kArray
+ : module2->has_struct(heap2.ref_index())
+ ? (v8_flags.wasm_gc_structref_as_dataref
+ ? HeapType::kStruct
+ : HeapType::kEq)
+ : HeapType::kBottom;
}
case HeapType::kAny:
return HeapType::kAny;
@@ -446,7 +478,7 @@ HeapType::Representation CommonAncestorWithGeneric(HeapType heap1,
switch (heap2.representation()) {
case HeapType::kArray:
case HeapType::kNone:
- case HeapType::kData:
+ case HeapType::kStruct:
case HeapType::kI31:
case HeapType::kEq:
case HeapType::kAny: