path: root/deps/v8/src/wasm
Diffstat (limited to 'deps/v8/src/wasm')
-rw-r--r--  deps/v8/src/wasm/OWNERS | 1
-rw-r--r--  deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h | 11
-rw-r--r--  deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h | 44
-rw-r--r--  deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h | 161
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-assembler.cc | 32
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-assembler.h | 11
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-compiler.cc | 490
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-register.h | 21
-rw-r--r--  deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h | 15
-rw-r--r--  deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h | 15
-rw-r--r--  deps/v8/src/wasm/baseline/ppc/OWNERS | 5
-rw-r--r--  deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h | 168
-rw-r--r--  deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h | 306
-rw-r--r--  deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h | 358
-rw-r--r--  deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h | 153
-rw-r--r--  deps/v8/src/wasm/branch-hint-map.h | 46
-rw-r--r--  deps/v8/src/wasm/c-api.cc | 46
-rw-r--r--  deps/v8/src/wasm/compilation-environment.h | 12
-rw-r--r--  deps/v8/src/wasm/function-body-decoder-impl.h | 748
-rw-r--r--  deps/v8/src/wasm/function-compiler.cc | 17
-rw-r--r--  deps/v8/src/wasm/function-compiler.h | 9
-rw-r--r--  deps/v8/src/wasm/graph-builder-interface.cc | 177
-rw-r--r--  deps/v8/src/wasm/graph-builder-interface.h | 3
-rw-r--r--  deps/v8/src/wasm/memory-protection-key.cc | 189
-rw-r--r--  deps/v8/src/wasm/memory-protection-key.h | 90
-rw-r--r--  deps/v8/src/wasm/memory-tracing.cc | 2
-rw-r--r--  deps/v8/src/wasm/module-compiler.cc | 65
-rw-r--r--  deps/v8/src/wasm/module-decoder.cc | 360
-rw-r--r--  deps/v8/src/wasm/module-decoder.h | 10
-rw-r--r--  deps/v8/src/wasm/module-instantiate.cc | 48
-rw-r--r--  deps/v8/src/wasm/value-type.h | 39
-rw-r--r--  deps/v8/src/wasm/wasm-code-manager.cc | 439
-rw-r--r--  deps/v8/src/wasm/wasm-code-manager.h | 119
-rw-r--r--  deps/v8/src/wasm/wasm-constants.h | 7
-rw-r--r--  deps/v8/src/wasm/wasm-debug.cc | 10
-rw-r--r--  deps/v8/src/wasm/wasm-engine.cc | 78
-rw-r--r--  deps/v8/src/wasm/wasm-engine.h | 12
-rw-r--r--  deps/v8/src/wasm/wasm-feature-flags.h | 14
-rw-r--r--  deps/v8/src/wasm/wasm-init-expr.cc | 57
-rw-r--r--  deps/v8/src/wasm/wasm-init-expr.h | 150
-rw-r--r--  deps/v8/src/wasm/wasm-js.cc | 35
-rw-r--r--  deps/v8/src/wasm/wasm-limits.h | 5
-rw-r--r--  deps/v8/src/wasm/wasm-module-builder.cc | 12
-rw-r--r--  deps/v8/src/wasm/wasm-module-builder.h | 2
-rw-r--r--  deps/v8/src/wasm/wasm-module.cc | 14
-rw-r--r--  deps/v8/src/wasm/wasm-module.h | 32
-rw-r--r--  deps/v8/src/wasm/wasm-objects-inl.h | 38
-rw-r--r--  deps/v8/src/wasm/wasm-objects.cc | 130
-rw-r--r--  deps/v8/src/wasm/wasm-objects.h | 43
-rw-r--r--  deps/v8/src/wasm/wasm-objects.tq | 22
-rw-r--r--  deps/v8/src/wasm/wasm-opcodes-inl.h | 8
-rw-r--r--  deps/v8/src/wasm/wasm-opcodes.cc | 3
-rw-r--r--  deps/v8/src/wasm/wasm-opcodes.h | 185
-rw-r--r--  deps/v8/src/wasm/wasm-serialization.cc | 6
-rw-r--r--  deps/v8/src/wasm/wasm-subtyping.cc | 71
-rw-r--r--  deps/v8/src/wasm/wasm-subtyping.h | 14
-rw-r--r--  deps/v8/src/wasm/wasm-value.h | 2
57 files changed, 3112 insertions, 2048 deletions
diff --git a/deps/v8/src/wasm/OWNERS b/deps/v8/src/wasm/OWNERS
index d0de7de935..516dd84d6e 100644
--- a/deps/v8/src/wasm/OWNERS
+++ b/deps/v8/src/wasm/OWNERS
@@ -3,5 +3,6 @@ bbudge@chromium.org
clemensb@chromium.org
gdeepti@chromium.org
jkummerow@chromium.org
+manoskouk@chromium.org
thibaudm@chromium.org
zhin@chromium.org
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index 7acdf635c9..acc7f08fa0 100644
--- a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -605,6 +605,8 @@ void LiftoffAssembler::SpillInstance(Register instance) {
str(instance, liftoff::GetInstanceOperand());
}
+void LiftoffAssembler::ResetOSRTarget() {}
+
void LiftoffAssembler::FillInstanceInto(Register dst) {
ldr(dst, liftoff::GetInstanceOperand());
}
@@ -743,7 +745,7 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
: MemOperand(dst_addr, actual_offset_reg);
str(src.gp(), dst_op);
- if (skip_write_barrier) return;
+ if (skip_write_barrier || FLAG_disable_write_barriers) return;
// The write barrier.
Label write_barrier;
@@ -758,7 +760,7 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
CallRecordWriteStub(dst_addr,
actual_offset_reg == no_reg ? Operand(offset_imm)
: Operand(actual_offset_reg),
- EMIT_REMEMBERED_SET, kSaveFPRegs,
+ RememberedSetAction::kEmit, SaveFPRegsMode::kSave,
wasm::WasmCode::kRecordWrite);
bind(&exit);
}
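
Note: the hunk above (and the matching hunks in the other Liftoff backends further down) replaces the legacy unscoped constants EMIT_REMEMBERED_SET / kSaveFPRegs in the CallRecordWriteStub call with scoped enum values. A minimal sketch of the pattern; the full enumerator lists are an assumption here, only kEmit and kSave actually appear in this diff:

// Assumed shape of the scoped enums used above; only kEmit/kSave are
// confirmed by the hunks in this diff.
enum class RememberedSetAction { kOmit, kEmit };
enum class SaveFPRegsMode { kIgnore, kSave };

// Old-style call (unscoped constants):
//   CallRecordWriteStub(addr, offset, EMIT_REMEMBERED_SET, kSaveFPRegs, stub);
// New-style call, as emitted above:
//   CallRecordWriteStub(addr, offset, RememberedSetAction::kEmit,
//                       SaveFPRegsMode::kSave, stub);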
@@ -766,8 +768,7 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
- uint32_t* protected_load_pc, bool is_load_mem,
- bool i64_offset) {
+ uint32_t* protected_load_pc, bool is_load_mem) {
// Offsets >=2GB are statically OOB on 32-bit systems.
DCHECK_LE(offset_imm, std::numeric_limits<int32_t>::max());
liftoff::LoadInternal(this, dst, src_addr, offset_reg,
@@ -4228,6 +4229,8 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
add(sp, sp, Operand(size));
}
+void LiftoffAssembler::MaybeOSR() {}
+
void LiftoffStackSlots::Construct(int param_slots) {
DCHECK_LT(0, slots_.size());
SortInPushOrder();
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index bea5100ef3..38d424d8e0 100644
--- a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -126,23 +126,13 @@ inline CPURegister AcquireByType(UseScratchRegisterScope* temps,
template <typename T>
inline MemOperand GetMemOp(LiftoffAssembler* assm,
UseScratchRegisterScope* temps, Register addr,
- Register offset, T offset_imm,
- bool i64_offset = false) {
+ Register offset, T offset_imm) {
if (offset.is_valid()) {
- if (offset_imm == 0) {
- return i64_offset ? MemOperand(addr.X(), offset.X())
- : MemOperand(addr.X(), offset.W(), UXTW);
- }
+ if (offset_imm == 0) return MemOperand(addr.X(), offset.X());
+ Register tmp = temps->AcquireX();
DCHECK_GE(kMaxUInt32, offset_imm);
- if (i64_offset) {
- Register tmp = temps->AcquireX();
- assm->Add(tmp, offset.X(), offset_imm);
- return MemOperand(addr.X(), tmp);
- } else {
- Register tmp = temps->AcquireW();
- assm->Add(tmp, offset.W(), offset_imm);
- return MemOperand(addr.X(), tmp, UXTW);
- }
+ assm->Add(tmp, offset.X(), offset_imm);
+ return MemOperand(addr.X(), tmp);
}
return MemOperand(addr.X(), offset_imm);
}
@@ -440,6 +430,8 @@ void LiftoffAssembler::SpillInstance(Register instance) {
Str(instance, liftoff::GetInstanceOperand());
}
+void LiftoffAssembler::ResetOSRTarget() {}
+
void LiftoffAssembler::FillInstanceInto(Register dst) {
Ldr(dst, liftoff::GetInstanceOperand());
}
@@ -474,7 +466,7 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
liftoff::GetMemOp(this, &temps, dst_addr, offset_reg, offset_imm);
StoreTaggedField(src.gp(), dst_op);
- if (skip_write_barrier) return;
+ if (skip_write_barrier || FLAG_disable_write_barriers) return;
// The write barrier.
Label write_barrier;
@@ -489,22 +481,22 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
}
CheckPageFlag(src.gp(), MemoryChunk::kPointersToHereAreInterestingMask, ne,
&exit);
- CallRecordWriteStub(
- dst_addr,
- dst_op.IsRegisterOffset() ? Operand(dst_op.regoffset().X())
- : Operand(dst_op.offset()),
- EMIT_REMEMBERED_SET, kSaveFPRegs, wasm::WasmCode::kRecordWrite);
+ CallRecordWriteStub(dst_addr,
+ dst_op.IsRegisterOffset()
+ ? Operand(dst_op.regoffset().X())
+ : Operand(dst_op.offset()),
+ RememberedSetAction::kEmit, SaveFPRegsMode::kSave,
+ wasm::WasmCode::kRecordWrite);
bind(&exit);
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uintptr_t offset_imm,
LoadType type, LiftoffRegList pinned,
- uint32_t* protected_load_pc, bool is_load_mem,
- bool i64_offset) {
+ uint32_t* protected_load_pc, bool is_load_mem) {
UseScratchRegisterScope temps(this);
- MemOperand src_op = liftoff::GetMemOp(this, &temps, src_addr, offset_reg,
- offset_imm, i64_offset);
+ MemOperand src_op =
+ liftoff::GetMemOp(this, &temps, src_addr, offset_reg, offset_imm);
if (protected_load_pc) *protected_load_pc = pc_offset();
switch (type.value()) {
case LoadType::kI32Load8U:
@@ -3232,6 +3224,8 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
Drop(size, 1);
}
+void LiftoffAssembler::MaybeOSR() {}
+
void LiftoffStackSlots::Construct(int param_slots) {
DCHECK_LT(0, slots_.size());
// The stack pointer is required to be quadword aligned.
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index e597467c73..9f35b5efc3 100644
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -332,6 +332,8 @@ void LiftoffAssembler::SpillInstance(Register instance) {
mov(liftoff::GetInstanceOperand(), instance);
}
+void LiftoffAssembler::ResetOSRTarget() {}
+
void LiftoffAssembler::FillInstanceInto(Register dst) {
mov(dst, liftoff::GetInstanceOperand());
}
@@ -365,7 +367,7 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
: Operand(dst_addr, offset_reg, times_1, offset_imm);
mov(dst_op, src.gp());
- if (skip_write_barrier) return;
+ if (skip_write_barrier || FLAG_disable_write_barriers) return;
Register scratch = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Label write_barrier;
@@ -380,16 +382,15 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
MemoryChunk::kPointersToHereAreInterestingMask, zero, &exit,
Label::kNear);
lea(scratch, dst_op);
- CallRecordWriteStub(dst_addr, scratch, EMIT_REMEMBERED_SET, kSaveFPRegs,
- wasm::WasmCode::kRecordWrite);
+ CallRecordWriteStub(dst_addr, scratch, RememberedSetAction::kEmit,
+ SaveFPRegsMode::kSave, wasm::WasmCode::kRecordWrite);
bind(&exit);
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
- uint32_t* protected_load_pc, bool is_load_mem,
- bool i64_offset) {
+ uint32_t* protected_load_pc, bool is_load_mem) {
// Offsets >=2GB are statically OOB on 32-bit systems.
DCHECK_LE(offset_imm, std::numeric_limits<int32_t>::max());
DCHECK_EQ(type.value_type() == kWasmI64, dst.is_gp_pair());
@@ -2933,15 +2934,7 @@ void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
LiftoffRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vshufps(dst.fp(), src.fp(), src.fp(), 0);
- } else {
- if (dst.fp() != src.fp()) {
- movss(dst.fp(), src.fp());
- }
- shufps(dst.fp(), src.fp(), 0);
- }
+ F32x4Splat(dst.fp(), src.fp());
}
void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
@@ -3263,13 +3256,7 @@ void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
}
void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
- if (dst.fp() != src.fp()) {
- Pcmpeqd(dst.fp(), dst.fp());
- Pxor(dst.fp(), src.fp());
- } else {
- Pcmpeqd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
- Pxor(dst.fp(), liftoff::kScratchDoubleReg);
- }
+ S128Not(dst.fp(), src.fp(), liftoff::kScratchDoubleReg);
}
void LiftoffAssembler::emit_s128_and(LiftoffRegister dst, LiftoffRegister lhs,
@@ -3850,16 +3837,7 @@ void LiftoffAssembler::emit_i32x4_extmul_high_i16x8_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
LiftoffRegister src) {
- DoubleRegister reg =
- dst.fp() == src.fp() ? liftoff::kScratchDoubleReg : dst.fp();
- Pxor(reg, reg);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpsubq(dst.fp(), reg, src.fp());
- } else {
- psubq(reg, src.fp());
- if (dst.fp() != reg) movaps(dst.fp(), reg);
- }
+ I64x2Neg(dst.fp(), src.fp(), liftoff::kScratchDoubleReg);
}
void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst,
@@ -3893,7 +3871,7 @@ void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
// Set up a mask [0x80000000,0,0x80000000,0].
Pcmpeqb(tmp, tmp);
- Psllq(tmp, tmp, 63);
+ Psllq(tmp, tmp, byte{63});
Psrlq(tmp, tmp, shift);
if (CpuFeatures::IsSupported(AVX)) {
@@ -3912,11 +3890,11 @@ void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
void LiftoffAssembler::emit_i64x2_shri_s(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
XMMRegister tmp = liftoff::kScratchDoubleReg;
- int32_t shift = rhs & 63;
+ byte shift = rhs & 63;
// Set up a mask [0x80000000,0,0x80000000,0].
Pcmpeqb(tmp, tmp);
- Psllq(tmp, tmp, 63);
+ Psllq(tmp, tmp, byte{63});
Psrlq(tmp, tmp, shift);
liftoff::EmitSimdShiftOpImm<&Assembler::vpsrlq, &Assembler::psrlq, 6>(
@@ -3960,13 +3938,13 @@ void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
Movaps(tmp1.fp(), lhs.fp());
Movaps(tmp2.fp(), rhs.fp());
// Multiply high dword of each qword of left with right.
- Psrlq(tmp1.fp(), 32);
+ Psrlq(tmp1.fp(), byte{32});
Pmuludq(tmp1.fp(), tmp1.fp(), rhs.fp());
// Multiply high dword of each qword of right with left.
- Psrlq(tmp2.fp(), 32);
+ Psrlq(tmp2.fp(), byte{32});
Pmuludq(tmp2.fp(), tmp2.fp(), lhs.fp());
Paddq(tmp2.fp(), tmp2.fp(), tmp1.fp());
- Psllq(tmp2.fp(), tmp2.fp(), 32);
+ Psllq(tmp2.fp(), tmp2.fp(), byte{32});
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmuludq, &Assembler::pmuludq>(
this, dst, lhs, rhs);
Paddq(dst.fp(), dst.fp(), tmp2.fp());
@@ -4029,11 +4007,11 @@ void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
LiftoffRegister src) {
if (dst.fp() == src.fp()) {
Pcmpeqd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
- Psrld(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, 1);
+ Psrld(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, byte{1});
Andps(dst.fp(), liftoff::kScratchDoubleReg);
} else {
Pcmpeqd(dst.fp(), dst.fp());
- Psrld(dst.fp(), dst.fp(), 1);
+ Psrld(dst.fp(), dst.fp(), byte{1});
Andps(dst.fp(), src.fp());
}
}
@@ -4042,11 +4020,11 @@ void LiftoffAssembler::emit_f32x4_neg(LiftoffRegister dst,
LiftoffRegister src) {
if (dst.fp() == src.fp()) {
Pcmpeqd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
- Pslld(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, 31);
+ Pslld(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, byte{31});
Xorps(dst.fp(), liftoff::kScratchDoubleReg);
} else {
Pcmpeqd(dst.fp(), dst.fp());
- Pslld(dst.fp(), dst.fp(), 31);
+ Pslld(dst.fp(), dst.fp(), byte{31});
Xorps(dst.fp(), src.fp());
}
}
@@ -4185,11 +4163,11 @@ void LiftoffAssembler::emit_f64x2_abs(LiftoffRegister dst,
LiftoffRegister src) {
if (dst.fp() == src.fp()) {
Pcmpeqd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
- Psrlq(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, 1);
+ Psrlq(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, byte{1});
Andpd(dst.fp(), liftoff::kScratchDoubleReg);
} else {
Pcmpeqd(dst.fp(), dst.fp());
- Psrlq(dst.fp(), dst.fp(), 1);
+ Psrlq(dst.fp(), dst.fp(), byte{1});
Andpd(dst.fp(), src.fp());
}
}
@@ -4198,11 +4176,11 @@ void LiftoffAssembler::emit_f64x2_neg(LiftoffRegister dst,
LiftoffRegister src) {
if (dst.fp() == src.fp()) {
Pcmpeqd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
- Psllq(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, 63);
+ Psllq(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, byte{63});
Xorpd(dst.fp(), liftoff::kScratchDoubleReg);
} else {
Pcmpeqd(dst.fp(), dst.fp());
- Psllq(dst.fp(), dst.fp(), 63);
+ Psllq(dst.fp(), dst.fp(), byte{63});
Xorpd(dst.fp(), src.fp());
}
}
@@ -4266,61 +4244,12 @@ void LiftoffAssembler::emit_f64x2_div(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_f64x2_min(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // The minpd instruction doesn't propagate NaNs and +0's in its first
- // operand. Perform minpd in both orders, merge the results, and adjust.
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vminpd(liftoff::kScratchDoubleReg, lhs.fp(), rhs.fp());
- vminpd(dst.fp(), rhs.fp(), lhs.fp());
- } else if (dst.fp() == lhs.fp() || dst.fp() == rhs.fp()) {
- XMMRegister src = dst.fp() == lhs.fp() ? rhs.fp() : lhs.fp();
- movaps(liftoff::kScratchDoubleReg, src);
- minpd(liftoff::kScratchDoubleReg, dst.fp());
- minpd(dst.fp(), src);
- } else {
- movaps(liftoff::kScratchDoubleReg, lhs.fp());
- minpd(liftoff::kScratchDoubleReg, rhs.fp());
- movaps(dst.fp(), rhs.fp());
- minpd(dst.fp(), lhs.fp());
- }
- // propagate -0's and NaNs, which may be non-canonical.
- Orpd(liftoff::kScratchDoubleReg, dst.fp());
- // Canonicalize NaNs by quieting and clearing the payload.
- Cmpunordpd(dst.fp(), dst.fp(), liftoff::kScratchDoubleReg);
- Orpd(liftoff::kScratchDoubleReg, dst.fp());
- Psrlq(dst.fp(), 13);
- Andnpd(dst.fp(), liftoff::kScratchDoubleReg);
+ F64x2Min(dst.fp(), lhs.fp(), rhs.fp(), liftoff::kScratchDoubleReg);
}
void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // The maxpd instruction doesn't propagate NaNs and +0's in its first
- // operand. Perform maxpd in both orders, merge the results, and adjust.
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmaxpd(liftoff::kScratchDoubleReg, lhs.fp(), rhs.fp());
- vmaxpd(dst.fp(), rhs.fp(), lhs.fp());
- } else if (dst.fp() == lhs.fp() || dst.fp() == rhs.fp()) {
- XMMRegister src = dst.fp() == lhs.fp() ? rhs.fp() : lhs.fp();
- movaps(liftoff::kScratchDoubleReg, src);
- maxpd(liftoff::kScratchDoubleReg, dst.fp());
- maxpd(dst.fp(), src);
- } else {
- movaps(liftoff::kScratchDoubleReg, lhs.fp());
- maxpd(liftoff::kScratchDoubleReg, rhs.fp());
- movaps(dst.fp(), rhs.fp());
- maxpd(dst.fp(), lhs.fp());
- }
- // Find discrepancies.
- Xorpd(dst.fp(), liftoff::kScratchDoubleReg);
- // Propagate NaNs, which may be non-canonical.
- Orpd(liftoff::kScratchDoubleReg, dst.fp());
- // Propagate sign discrepancy and (subtle) quiet NaNs.
- Subpd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, dst.fp());
- // Canonicalize NaNs by clearing the payload. Sign is non-deterministic.
- Cmpunordpd(dst.fp(), dst.fp(), liftoff::kScratchDoubleReg);
- Psrlq(dst.fp(), 13);
- Andnpd(dst.fp(), liftoff::kScratchDoubleReg);
+ F64x2Max(dst.fp(), lhs.fp(), rhs.fp(), liftoff::kScratchDoubleReg);
}
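
Note: the removed comments explain why raw minpd/maxpd are not enough: the SSE instructions neither propagate NaNs nor order +/-0 the way the Wasm spec requires, so the sequence runs the operation in both operand orders and then canonicalizes NaNs; that sequence now lives in the shared F64x2Min/F64x2Max macro-assembler helpers. A scalar model of the per-lane semantics the SIMD code has to reproduce (an illustration of the spec behaviour, not V8 code):

#include <cmath>
#include <limits>

// One f64x2.min lane: NaNs propagate as a canonical quiet NaN, and -0 is
// treated as smaller than +0 -- exactly the cases minpd gets wrong.
double WasmF64Min(double a, double b) {
  if (std::isnan(a) || std::isnan(b)) {
    return std::numeric_limits<double>::quiet_NaN();  // canonical NaN
  }
  if (a == b) {  // covers +0 vs -0, which compare equal
    return std::signbit(a) ? a : b;
  }
  return a < b ? a : b;
}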
void LiftoffAssembler::emit_f64x2_pmin(LiftoffRegister dst, LiftoffRegister lhs,
@@ -4617,25 +4546,13 @@ void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst,
void LiftoffAssembler::emit_f32x4_extract_lane(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vshufps(dst.fp(), lhs.fp(), lhs.fp(), imm_lane_idx);
- } else {
- if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
- if (imm_lane_idx != 0) shufps(dst.fp(), dst.fp(), imm_lane_idx);
- }
+ F32x4ExtractLane(dst.fp(), lhs.fp(), imm_lane_idx);
}
void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vshufpd(dst.fp(), lhs.fp(), lhs.fp(), imm_lane_idx);
- } else {
- if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
- if (imm_lane_idx != 0) shufpd(dst.fp(), dst.fp(), imm_lane_idx);
- }
+ F64x2ExtractLane(dst.fp(), lhs.fp(), imm_lane_idx);
}
void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst,
@@ -4713,27 +4630,7 @@ void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
uint8_t imm_lane_idx) {
- // TODO(fanchenk): Use movlhps and blendpd
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- if (imm_lane_idx == 0) {
- vinsertps(dst.fp(), src1.fp(), src2.fp(), 0b00000000);
- vinsertps(dst.fp(), dst.fp(), src2.fp(), 0b01010000);
- } else {
- vinsertps(dst.fp(), src1.fp(), src2.fp(), 0b00100000);
- vinsertps(dst.fp(), dst.fp(), src2.fp(), 0b01110000);
- }
- } else {
- CpuFeatureScope scope(this, SSE4_1);
- if (dst.fp() != src1.fp()) movaps(dst.fp(), src1.fp());
- if (imm_lane_idx == 0) {
- insertps(dst.fp(), src2.fp(), 0b00000000);
- insertps(dst.fp(), src2.fp(), 0b01010000);
- } else {
- insertps(dst.fp(), src2.fp(), 0b00100000);
- insertps(dst.fp(), src2.fp(), 0b01110000);
- }
- }
+ F64x2ReplaceLane(dst.fp(), src1.fp(), src2.fp(), imm_lane_idx);
}
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
@@ -4907,6 +4804,8 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
add(esp, Immediate(size));
}
+void LiftoffAssembler::MaybeOSR() {}
+
void LiftoffStackSlots::Construct(int param_slots) {
DCHECK_LT(0, slots_.size());
SortInPushOrder();
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.cc b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
index a544460ab9..f8b01ac960 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
@@ -738,22 +738,36 @@ void LiftoffAssembler::MergeStackWith(CacheState& target, uint32_t arity,
cache_state_.stack_state[stack_base + i]);
}
+ // Check whether the cached instance needs to be moved to another register.
+ // Register moves are executed as part of the {StackTransferRecipe}. Remember
+ // whether the register content has to be reloaded after executing the stack
+ // transfers.
+ bool reload_instance = false;
+ // If the registers match, or the destination has no cache register, nothing
+ // needs to be done.
if (cache_state_.cached_instance != target.cached_instance &&
target.cached_instance != no_reg) {
+ // On forward jumps, just reset the cached register in the target state.
if (jump_direction == kForwardJump) {
- // On forward jumps, just reset the cached instance in the target state.
target.ClearCachedInstanceRegister();
+ } else if (cache_state_.cached_instance != no_reg) {
+ // If the source has the content but in the wrong register, execute a
+ // register move as part of the stack transfer.
+ transfers.MoveRegister(LiftoffRegister{target.cached_instance},
+ LiftoffRegister{cache_state_.cached_instance},
+ kPointerKind);
} else {
- // On backward jumps, we already generated code assuming that the instance
- // is available in that register. Thus move it there.
- if (cache_state_.cached_instance == no_reg) {
- LoadInstanceFromFrame(target.cached_instance);
- } else {
- Move(target.cached_instance, cache_state_.cached_instance,
- kPointerKind);
- }
+ // Otherwise (the source state has no cached content), we reload later.
+ reload_instance = true;
}
}
+
+ // Now execute stack transfers and register moves/loads.
+ transfers.Execute();
+
+ if (reload_instance) {
+ LoadInstanceFromFrame(target.cached_instance);
+ }
}
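
Note: the rewritten merge logic above defers the cached-instance fix-up: when the source state holds the instance in the wrong register, a register move is recorded on the {StackTransferRecipe}; only when the source has no cached instance at all is it reloaded from the frame, after the transfers have executed. A compact sketch of that decision, using simplified placeholder types (Reg/kNoReg stand in for V8's Register/no_reg):

using Reg = int;
constexpr Reg kNoReg = -1;

enum class InstanceAction { kNone, kMoveRegister, kReloadFromFrame };

InstanceAction PlanInstanceMerge(Reg source_cached, Reg target_cached,
                                 bool forward_jump) {
  // Registers already match, or the target keeps no cached instance:
  // nothing to do.
  if (target_cached == kNoReg || source_cached == target_cached) {
    return InstanceAction::kNone;
  }
  // Forward jump: the target state simply drops its cached register.
  if (forward_jump) return InstanceAction::kNone;
  // Backward jump: move the cached value if the source has it, otherwise
  // reload it from the frame after the stack transfers have run.
  return source_cached != kNoReg ? InstanceAction::kMoveRegister
                                 : InstanceAction::kReloadFromFrame;
}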
void LiftoffAssembler::Spill(VarState* slot) {
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h
index dbff396f82..b0439dc4e1 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h
@@ -341,6 +341,11 @@ class LiftoffAssembler : public TurboAssembler {
}
void clear_used(LiftoffRegister reg) {
+ if (reg.is_pair()) {
+ clear_used(reg.low());
+ clear_used(reg.high());
+ return;
+ }
register_use_count[reg.liftoff_code()] = 0;
used_registers.clear(reg);
}
@@ -633,6 +638,7 @@ class LiftoffAssembler : public TurboAssembler {
inline void LoadTaggedPointerFromInstance(Register dst, Register instance,
int offset);
inline void SpillInstance(Register instance);
+ inline void ResetOSRTarget();
inline void FillInstanceInto(Register dst);
inline void LoadTaggedPointer(Register dst, Register src_addr,
Register offset_reg, int32_t offset_imm,
@@ -669,7 +675,7 @@ class LiftoffAssembler : public TurboAssembler {
inline void Load(LiftoffRegister dst, Register src_addr, Register offset_reg,
uintptr_t offset_imm, LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc = nullptr,
- bool is_load_mem = false, bool i64_offset = false);
+ bool is_load_mem = false);
inline void Store(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister src, StoreType type,
LiftoffRegList pinned,
@@ -1416,6 +1422,9 @@ class LiftoffAssembler : public TurboAssembler {
inline void AllocateStackSlot(Register addr, uint32_t size);
inline void DeallocateStackSlot(uint32_t size);
+ // Instrumentation for shadow-stack-compatible OSR on x64.
+ inline void MaybeOSR();
+
////////////////////////////////////
// End of platform-specific part. //
////////////////////////////////////
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index 84d217b2e4..926a4ae11e 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -10,7 +10,7 @@
#include "src/codegen/assembler-inl.h"
// TODO(clemensb): Remove dependences on compiler stuff.
#include "src/codegen/external-reference.h"
-#include "src/codegen/interface-descriptors.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/machine-type.h"
#include "src/codegen/macro-assembler-inl.h"
#include "src/compiler/linkage.h"
@@ -75,7 +75,7 @@ struct assert_field_size {
__ LoadTaggedPointerFromInstance(dst, LoadInstanceIntoRegister(pinned, dst), \
WASM_INSTANCE_OBJECT_FIELD_OFFSET(name));
-#ifdef DEBUG
+#ifdef V8_CODE_COMMENTS
#define DEBUG_CODE_COMMENT(str) \
do { \
__ RecordComment(str); \
@@ -151,12 +151,7 @@ constexpr LiftoffCondition GetCompareCondition(WasmOpcode opcode) {
case kExprI32GeU:
return kUnsignedGreaterEqual;
default:
-#if V8_HAS_CXX14_CONSTEXPR
UNREACHABLE();
-#else
- // We need to return something for old compilers here.
- return kEqual;
-#endif
}
}
@@ -362,7 +357,6 @@ class LiftoffCompiler {
Label catch_label;
bool catch_reached = false;
bool in_handler = false;
- int32_t previous_catch = -1;
};
struct Control : public ControlBase<Value, validate> {
@@ -594,8 +588,7 @@ class LiftoffCompiler {
}
}
- // TODO(ahaas): Make this function constexpr once GCC allows it.
- LiftoffRegList RegsUnusedByParams() {
+ constexpr static LiftoffRegList RegsUnusedByParams() {
LiftoffRegList regs = kGpCacheRegList;
for (auto reg : kGpParamRegisters) {
regs.clear(reg);
@@ -620,8 +613,9 @@ class LiftoffCompiler {
// For reference type parameters we have to use registers that were not
// used for parameters because some reference type stack parameters may
// get processed before some value type register parameters.
+ static constexpr auto kRegsUnusedByParams = RegsUnusedByParams();
LiftoffRegister reg = is_reference(reg_kind)
- ? __ GetUnusedRegister(RegsUnusedByParams())
+ ? __ GetUnusedRegister(kRegsUnusedByParams)
: __ GetUnusedRegister(rc, pinned);
__ LoadCallerFrameSlot(reg, -location.AsCallerFrameSlot(), reg_kind);
return reg;
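
Note: making RegsUnusedByParams() constexpr lets the caller above keep the result in a `static constexpr` local instead of recomputing the register set for every reference-type parameter. A standalone sketch of the same compile-time set computation using a plain bitmask; the concrete masks are invented for illustration and are not V8's real cache/parameter register lists:

#include <cstdint>

constexpr uint32_t kCacheRegs = 0b1111'1111;  // hypothetical GP cache set
constexpr uint32_t kParamRegs = 0b0000'0111;  // hypothetical parameter regs

constexpr uint32_t RegsUnusedByParams() {
  // Remove every parameter register from the cache register set.
  return kCacheRegs & ~kParamRegs;
}

// Usable as a compile-time constant, mirroring
// `static constexpr auto kRegsUnusedByParams = RegsUnusedByParams();` above.
static constexpr uint32_t kRegsUnusedByParams = RegsUnusedByParams();
static_assert(kRegsUnusedByParams == 0b1111'1000,
              "parameter registers removed from the set");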
@@ -742,6 +736,7 @@ class LiftoffCompiler {
// Store the instance parameter to a special stack slot.
__ SpillInstance(kWasmInstanceRegister);
__ cache_state()->SetInstanceCacheRegister(kWasmInstanceRegister);
+ if (for_debugging_) __ ResetOSRTarget();
// Process parameters.
if (num_params) DEBUG_CODE_COMMENT("process parameters");
@@ -909,6 +904,9 @@ class LiftoffCompiler {
ool->debug_sidetable_entry_builder->set_pc_offset(__ pc_offset());
}
DCHECK_EQ(ool->continuation.get()->is_bound(), is_stack_check);
+ if (is_stack_check) {
+ MaybeOSR();
+ }
if (!ool->regs_to_save.is_empty()) __ PopRegisters(ool->regs_to_save);
if (is_stack_check) {
if (V8_UNLIKELY(ool->spilled_registers != nullptr)) {
@@ -1046,6 +1044,7 @@ class LiftoffCompiler {
DefineSafepointWithCalleeSavedRegisters();
RegisterDebugSideTableEntry(decoder,
DebugSideTableBuilder::kAllowRegisters);
+ MaybeOSR();
}
void PushControl(Control* block) {
@@ -1072,16 +1071,14 @@ class LiftoffCompiler {
// Save the current cache state for the merge when jumping to this loop.
loop->label_state.Split(*__ cache_state());
+ PushControl(loop);
+
// Execute a stack check in the loop header.
StackCheck(decoder, decoder->position());
-
- PushControl(loop);
}
void Try(FullDecoder* decoder, Control* block) {
block->try_info = std::make_unique<TryInfo>();
- block->try_info->previous_catch = current_catch_;
- current_catch_ = static_cast<int32_t>(decoder->control_depth() - 1);
PushControl(block);
}
@@ -1114,7 +1111,6 @@ class LiftoffCompiler {
const ExceptionIndexImmediate<validate>& imm,
Control* block, Vector<Value> values) {
DCHECK(block->is_try_catch());
- current_catch_ = block->try_info->previous_catch; // Pop try scope.
__ emit_jump(block->label.get());
// The catch block is unreachable if no possible throws in the try block
@@ -1181,6 +1177,7 @@ class LiftoffCompiler {
if (depth == decoder->control_depth() - 1) {
// Delegate to the caller, do not emit a landing pad.
Rethrow(decoder, __ cache_state()->stack_state.back());
+ MaybeOSR();
} else {
DCHECK(target->is_incomplete_try());
if (!target->try_info->catch_reached) {
@@ -1194,14 +1191,15 @@ class LiftoffCompiler {
__ emit_jump(&target->try_info->catch_label);
}
}
- current_catch_ = block->try_info->previous_catch;
}
void Rethrow(FullDecoder* decoder, Control* try_block) {
int index = try_block->try_info->catch_state.stack_height() - 1;
auto& exception = __ cache_state()->stack_state[index];
Rethrow(decoder, exception);
- EmitLandingPad(decoder);
+ int pc_offset = __ pc_offset();
+ MaybeOSR();
+ EmitLandingPad(decoder, pc_offset);
}
void CatchAll(FullDecoder* decoder, Control* block) {
@@ -1209,8 +1207,6 @@ class LiftoffCompiler {
block->is_try_unwind());
DCHECK_EQ(decoder->control_at(0), block);
- current_catch_ = block->try_info->previous_catch; // Pop try scope.
-
// The catch block is unreachable if no possible throws in the try block
// exist. We only build a landing pad if some node in the try block can
// (possibly) throw. Otherwise the catch environments remain empty.
@@ -1340,8 +1336,6 @@ class LiftoffCompiler {
if (!c->label.get()->is_bound()) __ bind(c->label.get());
}
- void EndControl(FullDecoder* decoder, Control* c) {}
-
void GenerateCCall(const LiftoffRegister* result_regs,
const ValueKindSig* sig, ValueKind out_argument_kind,
const LiftoffRegister* arg_regs,
@@ -2767,75 +2761,33 @@ class LiftoffCompiler {
return index;
}
- bool IndexStaticallyInBounds(const LiftoffAssembler::VarState& index_slot,
- int access_size, uintptr_t* offset) {
- if (!index_slot.is_const()) return false;
-
- // Potentially zero extend index (which is a 32-bit constant).
- const uintptr_t index = static_cast<uint32_t>(index_slot.i32_const());
- const uintptr_t effective_offset = index + *offset;
-
- if (effective_offset < index // overflow
- || !base::IsInBounds<uintptr_t>(effective_offset, access_size,
- env_->min_memory_size)) {
- return false;
- }
-
- *offset = effective_offset;
- return true;
- }
-
void LoadMem(FullDecoder* decoder, LoadType type,
const MemoryAccessImmediate<validate>& imm,
const Value& index_val, Value* result) {
ValueKind kind = type.value_type().kind();
- RegClass rc = reg_class_for(kind);
if (!CheckSupportedType(decoder, kind, "load")) return;
+ LiftoffRegister full_index = __ PopToRegister();
+ Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
+ full_index, {}, kDontForceCheck);
+ if (index == no_reg) return;
uintptr_t offset = imm.offset;
- Register index = no_reg;
-
- // Only look at the slot, do not pop it yet (will happen in PopToRegister
- // below, if this is not a statically-in-bounds index).
- auto& index_slot = __ cache_state()->stack_state.back();
- bool i64_offset = index_val.type == kWasmI64;
- if (IndexStaticallyInBounds(index_slot, type.size(), &offset)) {
- __ cache_state()->stack_state.pop_back();
- DEBUG_CODE_COMMENT("load from memory (constant offset)");
- LiftoffRegList pinned;
- Register mem = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- LOAD_INSTANCE_FIELD(mem, MemoryStart, kSystemPointerSize, pinned);
- LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
- __ Load(value, mem, no_reg, offset, type, pinned, nullptr, true,
- i64_offset);
- __ PushRegister(kind, value);
- } else {
- LiftoffRegister full_index = __ PopToRegister();
- index = BoundsCheckMem(decoder, type.size(), offset, full_index, {},
- kDontForceCheck);
- if (index == no_reg) return;
-
- DEBUG_CODE_COMMENT("load from memory");
- LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
- index = AddMemoryMasking(index, &offset, &pinned);
-
- // Load the memory start address only now to reduce register pressure
- // (important on ia32).
- Register mem = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- LOAD_INSTANCE_FIELD(mem, MemoryStart, kSystemPointerSize, pinned);
- LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
-
- uint32_t protected_load_pc = 0;
- __ Load(value, mem, index, offset, type, pinned, &protected_load_pc, true,
- i64_offset);
- if (env_->use_trap_handler) {
- AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds,
- protected_load_pc);
- }
- __ PushRegister(kind, value);
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
+ index = AddMemoryMasking(index, &offset, &pinned);
+ DEBUG_CODE_COMMENT("load from memory");
+ Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+ LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
+ RegClass rc = reg_class_for(kind);
+ LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
+ uint32_t protected_load_pc = 0;
+ __ Load(value, addr, index, offset, type, pinned, &protected_load_pc, true);
+ if (env_->use_trap_handler) {
+ AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds,
+ protected_load_pc);
}
+ __ PushRegister(kind, value);
- if (V8_UNLIKELY(FLAG_trace_wasm_memory)) {
+ if (FLAG_trace_wasm_memory) {
TraceMemoryOperation(false, type.mem_type().representation(), index,
offset, decoder->position());
}
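
Note: the IndexStaticallyInBounds() helper removed above folded a constant index into the static offset and proved the access in bounds against the minimum memory size, with an explicit overflow check so the fold itself cannot wrap. A standalone sketch of that check (a hypothetical free function modelled on the removed code, not the V8 helper):

#include <cstdint>

bool StaticallyInBounds(uint32_t const_index, uintptr_t* offset,
                        uintptr_t access_size, uintptr_t min_memory_size) {
  const uintptr_t index = const_index;        // zero-extended 32-bit index
  const uintptr_t effective_offset = index + *offset;
  if (effective_offset < index) return false;  // addition overflowed
  if (effective_offset > min_memory_size ||
      access_size > min_memory_size - effective_offset) {
    return false;                              // access would be out of bounds
  }
  *offset = effective_offset;                  // fold index into the offset
  return true;
}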
@@ -2878,7 +2830,7 @@ class LiftoffCompiler {
}
__ PushRegister(kS128, value);
- if (V8_UNLIKELY(FLAG_trace_wasm_memory)) {
+ if (FLAG_trace_wasm_memory) {
// Again load extend is different.
MachineRepresentation mem_rep =
transform == LoadTransformationKind::kExtend
@@ -2920,7 +2872,7 @@ class LiftoffCompiler {
__ PushRegister(kS128, result);
- if (V8_UNLIKELY(FLAG_trace_wasm_memory)) {
+ if (FLAG_trace_wasm_memory) {
TraceMemoryOperation(false, type.mem_type().representation(), index,
offset, decoder->position());
}
@@ -2931,45 +2883,29 @@ class LiftoffCompiler {
const Value& index_val, const Value& value_val) {
ValueKind kind = type.value_type().kind();
if (!CheckSupportedType(decoder, kind, "store")) return;
-
LiftoffRegList pinned;
LiftoffRegister value = pinned.set(__ PopToRegister());
+ LiftoffRegister full_index = __ PopToRegister(pinned);
+ Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
+ full_index, pinned, kDontForceCheck);
+ if (index == no_reg) return;
uintptr_t offset = imm.offset;
- Register index = no_reg;
-
- auto& index_slot = __ cache_state()->stack_state.back();
- if (IndexStaticallyInBounds(index_slot, type.size(), &offset)) {
- __ cache_state()->stack_state.pop_back();
- DEBUG_CODE_COMMENT("store to memory (constant offset)");
- Register mem = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- LOAD_INSTANCE_FIELD(mem, MemoryStart, kSystemPointerSize, pinned);
- __ Store(mem, no_reg, offset, value, type, pinned, nullptr, true);
- } else {
- LiftoffRegister full_index = __ PopToRegister(pinned);
- index = BoundsCheckMem(decoder, type.size(), imm.offset, full_index,
- pinned, kDontForceCheck);
- if (index == no_reg) return;
-
- pinned.set(index);
- index = AddMemoryMasking(index, &offset, &pinned);
- DEBUG_CODE_COMMENT("store to memory");
- uint32_t protected_store_pc = 0;
- // Load the memory start address only now to reduce register pressure
- // (important on ia32).
- Register mem = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- LOAD_INSTANCE_FIELD(mem, MemoryStart, kSystemPointerSize, pinned);
- LiftoffRegList outer_pinned;
- if (V8_UNLIKELY(FLAG_trace_wasm_memory)) outer_pinned.set(index);
- __ Store(mem, index, offset, value, type, outer_pinned,
- &protected_store_pc, true);
- if (env_->use_trap_handler) {
- AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds,
- protected_store_pc);
- }
+ pinned.set(index);
+ index = AddMemoryMasking(index, &offset, &pinned);
+ DEBUG_CODE_COMMENT("store to memory");
+ Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+ LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
+ uint32_t protected_store_pc = 0;
+ LiftoffRegList outer_pinned;
+ if (FLAG_trace_wasm_memory) outer_pinned.set(index);
+ __ Store(addr, index, offset, value, type, outer_pinned,
+ &protected_store_pc, true);
+ if (env_->use_trap_handler) {
+ AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds,
+ protected_store_pc);
}
-
- if (V8_UNLIKELY(FLAG_trace_wasm_memory)) {
+ if (FLAG_trace_wasm_memory) {
TraceMemoryOperation(true, type.mem_rep(), index, offset,
decoder->position());
}
@@ -2998,7 +2934,7 @@ class LiftoffCompiler {
AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds,
protected_store_pc);
}
- if (V8_UNLIKELY(FLAG_trace_wasm_memory)) {
+ if (FLAG_trace_wasm_memory) {
TraceMemoryOperation(true, type.mem_rep(), index, offset,
decoder->position());
}
@@ -3210,6 +3146,32 @@ class LiftoffCompiler {
__ PushRegister(kRef, ref);
}
+ void BrOnNonNull(FullDecoder* decoder, const Value& ref_object,
+ uint32_t depth) {
+ // Before branching, materialize all constants. This avoids repeatedly
+ // materializing them for each conditional branch.
+ if (depth != decoder->control_depth() - 1) {
+ __ MaterializeMergedConstants(
+ decoder->control_at(depth)->br_merge()->arity);
+ }
+
+ Label cont_false;
+ LiftoffRegList pinned;
+ LiftoffRegister ref = pinned.set(__ PopToRegister(pinned));
+ // Put the reference back onto the stack for the branch.
+ __ PushRegister(kRef, ref);
+
+ Register null = __ GetUnusedRegister(kGpReg, pinned).gp();
+ LoadNullValue(null, pinned);
+ __ emit_cond_jump(kEqual, &cont_false, ref_object.type.kind(), ref.gp(),
+ null);
+
+ BrOrRet(decoder, depth, 0);
+ // Drop the reference if we are not branching.
+ __ DropValues(1);
+ __ bind(&cont_false);
+ }
+
template <ValueKind src_kind, ValueKind result_kind, typename EmitFn>
void EmitTerOp(EmitFn fn) {
static constexpr RegClass src_rc = reg_class_for(src_kind);
@@ -4112,22 +4074,22 @@ class LiftoffCompiler {
DCHECK_EQ(index, WasmExceptionPackage::GetEncodedSize(exception));
}
- void EmitLandingPad(FullDecoder* decoder) {
- if (current_catch_ == -1) return;
+ void EmitLandingPad(FullDecoder* decoder, int handler_offset) {
+ if (decoder->current_catch() == -1) return;
MovableLabel handler;
- int handler_offset = __ pc_offset();
// If we return from the throwing code normally, just skip over the handler.
Label skip_handler;
__ emit_jump(&skip_handler);
// Handler: merge into the catch state, and jump to the catch body.
+ DEBUG_CODE_COMMENT("-- landing pad --");
__ bind(handler.get());
__ ExceptionHandler();
__ PushException();
handlers_.push_back({std::move(handler), handler_offset});
Control* current_try =
- decoder->control_at(decoder->control_depth() - 1 - current_catch_);
+ decoder->control_at(decoder->control_depth_of_current_catch());
DCHECK_NOT_NULL(current_try->try_info);
if (!current_try->try_info->catch_reached) {
current_try->try_info->catch_state.InitMerge(
@@ -4160,6 +4122,7 @@ class LiftoffCompiler {
{LiftoffAssembler::VarState{
kSmiKind, LiftoffRegister{encoded_size_reg}, 0}},
decoder->position());
+ MaybeOSR();
// The FixedArray for the exception values is now in the first gp return
// register.
@@ -4194,7 +4157,9 @@ class LiftoffCompiler {
LiftoffAssembler::VarState{kPointerKind, values_array, 0}},
decoder->position());
- EmitLandingPad(decoder);
+ int pc_offset = __ pc_offset();
+ MaybeOSR();
+ EmitLandingPad(decoder, pc_offset);
}
void AtomicStoreMem(FullDecoder* decoder, StoreType type,
@@ -4214,9 +4179,9 @@ class LiftoffCompiler {
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
LiftoffRegList outer_pinned;
- if (V8_UNLIKELY(FLAG_trace_wasm_memory)) outer_pinned.set(index);
+ if (FLAG_trace_wasm_memory) outer_pinned.set(index);
__ AtomicStore(addr, index, offset, value, type, outer_pinned);
- if (V8_UNLIKELY(FLAG_trace_wasm_memory)) {
+ if (FLAG_trace_wasm_memory) {
TraceMemoryOperation(true, type.mem_rep(), index, offset,
decoder->position());
}
@@ -4242,7 +4207,7 @@ class LiftoffCompiler {
__ AtomicLoad(value, addr, index, offset, type, pinned);
__ PushRegister(kind, value);
- if (V8_UNLIKELY(FLAG_trace_wasm_memory)) {
+ if (FLAG_trace_wasm_memory) {
TraceMemoryOperation(false, type.mem_type().representation(), index,
offset, decoder->position());
}
@@ -4318,6 +4283,7 @@ class LiftoffCompiler {
__ DropValues(1);
LiftoffRegister result = expected;
+ if (__ cache_state()->is_used(result)) __ SpillRegister(result);
// We already added the index to addr, so we can just pass no_reg to the
// assembler now.
@@ -4354,7 +4320,6 @@ class LiftoffCompiler {
std::initializer_list<LiftoffAssembler::VarState> params,
int position) {
DEBUG_CODE_COMMENT(
- // NOLINTNEXTLINE(whitespace/braces)
(std::string{"call builtin: "} + GetRuntimeStubName(stub_id)).c_str());
auto interface_descriptor = Builtins::CallInterfaceDescriptorFor(
RuntimeStubIdToBuiltinName(stub_id));
@@ -4868,6 +4833,18 @@ class LiftoffCompiler {
StoreObjectField(obj.gp(), no_reg, offset, value, pinned, field_kind);
pinned.clear(value);
}
+ if (imm.struct_type->field_count() == 0) {
+ static_assert(Heap::kMinObjectSizeInTaggedWords == 2 &&
+ WasmStruct::kHeaderSize == kTaggedSize,
+ "empty structs need exactly one padding field");
+ ValueKind field_kind = ValueKind::kRef;
+ LiftoffRegister value = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ LoadNullValue(value.gp(), pinned);
+ StoreObjectField(obj.gp(), no_reg,
+ wasm::ObjectAccess::ToTagged(WasmStruct::kHeaderSize),
+ value, pinned, field_kind);
+ pinned.clear(value);
+ }
__ PushRegister(kRef, obj);
}
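
Note: the added block relies on the invariants stated in its static_assert: a heap object must span at least two tagged words, and the WasmStruct header is exactly one tagged word, so an empty struct needs exactly one padding slot, which the new code fills with the null value so the GC sees a valid tagged field. The arithmetic, spelled out with the asserted values as assumptions:

// Values mirror the static_assert above rather than being re-derived here.
constexpr int kTaggedWordsInHeader = 1;          // WasmStruct::kHeaderSize
constexpr int kMinObjectSizeInTaggedWords = 2;   // Heap minimum object size
constexpr int kPaddingFieldsForEmptyStruct =
    kMinObjectSizeInTaggedWords - kTaggedWordsInHeader;
static_assert(kPaddingFieldsForEmptyStruct == 1,
              "empty structs need exactly one padding field");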
@@ -5232,7 +5209,7 @@ class LiftoffCompiler {
}
void BrOnCast(FullDecoder* decoder, const Value& obj, const Value& rtt,
- Value* result_on_branch, uint32_t depth) {
+ Value* /* result_on_branch */, uint32_t depth) {
// Before branching, materialize all constants. This avoids repeatedly
// materializing them for each conditional branch.
if (depth != decoder->control_depth() - 1) {
@@ -5253,6 +5230,27 @@ class LiftoffCompiler {
__ PushRegister(obj.type.kind(), obj_reg);
}
+ void BrOnCastFail(FullDecoder* decoder, const Value& obj, const Value& rtt,
+ Value* /* result_on_fallthrough */, uint32_t depth) {
+ // Before branching, materialize all constants. This avoids repeatedly
+ // materializing them for each conditional branch.
+ if (depth != decoder->control_depth() - 1) {
+ __ MaterializeMergedConstants(
+ decoder->control_at(depth)->br_merge()->arity);
+ }
+
+ Label cont_branch, fallthrough;
+ LiftoffRegister obj_reg =
+ SubtypeCheck(decoder, obj, rtt, &cont_branch, kNullFails);
+ __ PushRegister(obj.type.kind(), obj_reg);
+ __ emit_jump(&fallthrough);
+
+ __ bind(&cont_branch);
+ BrOrRet(decoder, depth, 0);
+
+ __ bind(&fallthrough);
+ }
+
// Abstract type checkers. They all return the object register and fall
// through to match.
LiftoffRegister DataCheck(const Value& obj, Label* no_match,
@@ -5484,6 +5482,7 @@ class LiftoffCompiler {
source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), true);
__ CallIndirect(sig, call_descriptor, target);
+ FinishCall(decoder, sig, call_descriptor);
}
} else {
// A direct call within this module just gets the current instance.
@@ -5501,15 +5500,9 @@ class LiftoffCompiler {
source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), true);
__ CallNativeWasmCode(addr);
+ FinishCall(decoder, sig, call_descriptor);
}
}
-
- if (!tail_call) {
- DefineSafepoint();
- RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
- EmitLandingPad(decoder);
- __ FinishCall(sig, call_descriptor);
- }
}
void CallIndirect(FullDecoder* decoder, const Value& index_val,
@@ -5604,7 +5597,6 @@ class LiftoffCompiler {
__ Load(LiftoffRegister(scratch), table, index, 0, LoadType::kI32Load,
pinned);
- // TODO(9495): Do not always compare signatures, same as wasm-compiler.cc.
// Compare against expected signature.
__ LoadConstant(LiftoffRegister(tmp_const), WasmValue(canonical_sig_num));
@@ -5675,10 +5667,7 @@ class LiftoffCompiler {
__ pc_offset(), SourcePosition(decoder->position()), true);
__ CallIndirect(sig, call_descriptor, target);
- DefineSafepoint();
- RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
- EmitLandingPad(decoder);
- __ FinishCall(sig, call_descriptor);
+ FinishCall(decoder, sig, call_descriptor);
}
}
@@ -5693,9 +5682,9 @@ class LiftoffCompiler {
call_descriptor =
GetLoweredCallDescriptor(compilation_zone_, call_descriptor);
- // Since this is a call instruction, we'll have to spill everything later
- // anyway; do it right away so that the register state tracking doesn't
- // get confused by the conditional builtin call below.
+ // Executing a write barrier needs temp registers; doing this on a
+ // conditional branch confuses the LiftoffAssembler's register management.
+ // Spill everything up front to work around that.
__ SpillAllRegisters();
// We limit ourselves to four registers:
@@ -5710,6 +5699,7 @@ class LiftoffCompiler {
LiftoffRegister target = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
LiftoffRegister temp = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ // Load the WasmFunctionData.
LiftoffRegister func_data = func_ref;
__ LoadTaggedPointer(
func_data.gp(), func_ref.gp(), no_reg,
@@ -5720,144 +5710,65 @@ class LiftoffCompiler {
wasm::ObjectAccess::ToTagged(SharedFunctionInfo::kFunctionDataOffset),
pinned);
- LiftoffRegister data_type = instance;
- __ LoadMap(data_type.gp(), func_data.gp());
- __ Load(data_type, data_type.gp(), no_reg,
- wasm::ObjectAccess::ToTagged(Map::kInstanceTypeOffset),
- LoadType::kI32Load16U, pinned);
+ // Load "ref" (instance or <instance, callable> pair) and target.
+ __ LoadTaggedPointer(
+ instance.gp(), func_data.gp(), no_reg,
+ wasm::ObjectAccess::ToTagged(WasmFunctionData::kRefOffset), pinned);
- Label is_js_function, perform_call;
- __ emit_i32_cond_jumpi(kEqual, &is_js_function, data_type.gp(),
- WASM_JS_FUNCTION_DATA_TYPE);
- // End of {data_type}'s live range.
+ Label load_target, perform_call;
+ // Check if "ref" is a Tuple2.
{
- // Call to a WasmExportedFunction.
-
- LiftoffRegister callee_instance = instance;
- __ LoadTaggedPointer(callee_instance.gp(), func_data.gp(), no_reg,
- wasm::ObjectAccess::ToTagged(
- WasmExportedFunctionData::kInstanceOffset),
- pinned);
- LiftoffRegister func_index = target;
- __ LoadSmiAsInt32(func_index, func_data.gp(),
- wasm::ObjectAccess::ToTagged(
- WasmExportedFunctionData::kFunctionIndexOffset),
- pinned);
- LiftoffRegister imported_function_refs = temp;
- __ LoadTaggedPointer(imported_function_refs.gp(), callee_instance.gp(),
- no_reg,
- wasm::ObjectAccess::ToTagged(
- WasmInstanceObject::kImportedFunctionRefsOffset),
- pinned);
- // We overwrite {imported_function_refs} here, at the cost of having
- // to reload it later, because we don't have more registers on ia32.
- LiftoffRegister imported_functions_num = imported_function_refs;
- __ LoadFixedArrayLengthAsInt32(imported_functions_num,
- imported_function_refs.gp(), pinned);
-
- Label imported;
- __ emit_cond_jump(kSignedLessThan, &imported, kI32, func_index.gp(),
- imported_functions_num.gp());
-
- {
- // Function locally defined in module.
-
- // {func_index} is invalid from here on.
- LiftoffRegister jump_table_start = target;
- __ Load(jump_table_start, callee_instance.gp(), no_reg,
- wasm::ObjectAccess::ToTagged(
- WasmInstanceObject::kJumpTableStartOffset),
- kPointerLoadType, pinned);
- LiftoffRegister jump_table_offset = temp;
- __ LoadSmiAsInt32(jump_table_offset, func_data.gp(),
- wasm::ObjectAccess::ToTagged(
- WasmExportedFunctionData::kJumpTableOffsetOffset),
+ LiftoffRegister pair_map = temp;
+ LiftoffRegister ref_map = target;
+ __ LoadMap(ref_map.gp(), instance.gp());
+ LOAD_INSTANCE_FIELD(pair_map.gp(), IsolateRoot, kSystemPointerSize,
pinned);
- __ emit_ptrsize_add(target.gp(), jump_table_start.gp(),
- jump_table_offset.gp());
- __ emit_jump(&perform_call);
- }
-
- {
- // Function imported to module.
- __ bind(&imported);
-
- LiftoffRegister imported_function_targets = temp;
- __ Load(imported_function_targets, callee_instance.gp(), no_reg,
- wasm::ObjectAccess::ToTagged(
- WasmInstanceObject::kImportedFunctionTargetsOffset),
- kPointerLoadType, pinned);
- // {callee_instance} is invalid from here on.
- LiftoffRegister imported_instance = instance;
- // Scale {func_index} to kTaggedSize.
- __ emit_i32_shli(func_index.gp(), func_index.gp(), kTaggedSizeLog2);
- // {func_data} is invalid from here on.
- imported_function_refs = func_data;
- __ LoadTaggedPointer(
- imported_function_refs.gp(), callee_instance.gp(), no_reg,
- wasm::ObjectAccess::ToTagged(
- WasmInstanceObject::kImportedFunctionRefsOffset),
- pinned);
- __ LoadTaggedPointer(
- imported_instance.gp(), imported_function_refs.gp(),
- func_index.gp(),
- wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(0), pinned);
- // Scale {func_index} to kSystemPointerSize.
- if (kSystemPointerSize == kTaggedSize * 2) {
- __ emit_i32_add(func_index.gp(), func_index.gp(), func_index.gp());
- } else {
- DCHECK_EQ(kSystemPointerSize, kTaggedSize);
- }
- // This overwrites the contents of {func_index}, which we don't need
- // any more.
- __ Load(target, imported_function_targets.gp(), func_index.gp(), 0,
- kPointerLoadType, pinned);
- __ emit_jump(&perform_call);
- }
- }
-
- {
- // Call to a WasmJSFunction. The call target is
- // function_data->wasm_to_js_wrapper_code()->instruction_start().
- // The instance_node is the pair
- // (current WasmInstanceObject, function_data->callable()).
- __ bind(&is_js_function);
-
- LiftoffRegister callable = temp;
- __ LoadTaggedPointer(
- callable.gp(), func_data.gp(), no_reg,
- wasm::ObjectAccess::ToTagged(WasmJSFunctionData::kCallableOffset),
- pinned);
-
- // Preserve {func_data} across the call.
- LiftoffRegList saved_regs = LiftoffRegList::ForRegs(func_data);
- __ PushRegisters(saved_regs);
+ __ LoadTaggedPointer(pair_map.gp(), pair_map.gp(), no_reg,
+ IsolateData::root_slot_offset(RootIndex::kTuple2Map),
+ pinned);
+ __ emit_cond_jump(kUnequal, &load_target, kRef, ref_map.gp(),
+ pair_map.gp());
- LiftoffRegister current_instance = instance;
+ // Overwrite the tuple's "instance" entry with the current instance.
+ // TODO(jkummerow): Can we figure out a way to guarantee that the
+ // instance field is always precomputed?
+ LiftoffRegister current_instance = temp;
__ FillInstanceInto(current_instance.gp());
- LiftoffAssembler::VarState instance_var(kOptRef, current_instance, 0);
- LiftoffAssembler::VarState callable_var(kOptRef, callable, 0);
-
- CallRuntimeStub(WasmCode::kWasmAllocatePair,
- MakeSig::Returns(kOptRef).Params(kOptRef, kOptRef),
- {instance_var, callable_var}, decoder->position());
- if (instance.gp() != kReturnRegister0) {
- __ Move(instance.gp(), kReturnRegister0, kPointerKind);
- }
-
- // Restore {func_data}, which we saved across the call.
- __ PopRegisters(saved_regs);
+ __ StoreTaggedPointer(instance.gp(), no_reg,
+ wasm::ObjectAccess::ToTagged(Tuple2::kValue1Offset),
+ current_instance, pinned);
+ // Fall through to {load_target}.
+ }
+ // Load the call target.
+ __ bind(&load_target);
+
+#ifdef V8_HEAP_SANDBOX
+ LOAD_INSTANCE_FIELD(temp.gp(), IsolateRoot, kSystemPointerSize, pinned);
+ __ LoadExternalPointerField(
+ target.gp(),
+ FieldOperand(func_data.gp(), WasmFunctionData::kForeignAddressOffset),
+ kForeignForeignAddressTag, temp.gp(),
+ TurboAssembler::IsolateRootLocation::kInScratchRegister);
+#else
+ __ Load(
+ target, func_data.gp(), no_reg,
+ wasm::ObjectAccess::ToTagged(WasmFunctionData::kForeignAddressOffset),
+ kPointerLoadType, pinned);
+#endif
- LiftoffRegister wrapper_code = target;
- __ LoadTaggedPointer(wrapper_code.gp(), func_data.gp(), no_reg,
- wasm::ObjectAccess::ToTagged(
- WasmJSFunctionData::kWasmToJsWrapperCodeOffset),
- pinned);
- __ emit_ptrsize_addi(target.gp(), wrapper_code.gp(),
- wasm::ObjectAccess::ToTagged(Code::kHeaderSize));
- // Fall through to {perform_call}.
- }
+ LiftoffRegister null_address = temp;
+ __ LoadConstant(null_address, WasmValue::ForUintPtr(0));
+ __ emit_cond_jump(kUnequal, &perform_call, kRef, target.gp(),
+ null_address.gp());
+ // The cached target can only be null for WasmJSFunctions.
+ __ LoadTaggedPointer(target.gp(), func_data.gp(), no_reg,
+ wasm::ObjectAccess::ToTagged(
+ WasmJSFunctionData::kWasmToJsWrapperCodeOffset),
+ pinned);
+ __ emit_ptrsize_addi(target.gp(), target.gp(),
+ wasm::ObjectAccess::ToTagged(Code::kHeaderSize));
+ // Fall through to {perform_call}.
__ bind(&perform_call);
// Now the call target is in {target}, and the right instance object
@@ -5876,18 +5787,14 @@ class LiftoffCompiler {
__ pc_offset(), SourcePosition(decoder->position()), true);
__ CallIndirect(sig, call_descriptor, target_reg);
- DefineSafepoint();
- RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
- EmitLandingPad(decoder);
- __ FinishCall(sig, call_descriptor);
+ FinishCall(decoder, sig, call_descriptor);
}
}
void LoadNullValue(Register null, LiftoffRegList pinned) {
LOAD_INSTANCE_FIELD(null, IsolateRoot, kSystemPointerSize, pinned);
- __ LoadTaggedPointer(null, null, no_reg,
- IsolateData::root_slot_offset(RootIndex::kNullValue),
- pinned);
+ __ LoadFullPointer(null, null,
+ IsolateData::root_slot_offset(RootIndex::kNullValue));
}
void LoadExceptionSymbol(Register dst, LiftoffRegList pinned,
@@ -6004,6 +5911,22 @@ class LiftoffCompiler {
WASM_STRUCT_TYPE - WASM_ARRAY_TYPE);
}
+ void MaybeOSR() {
+ if (V8_UNLIKELY(for_debugging_)) {
+ __ MaybeOSR();
+ }
+ }
+
+ void FinishCall(FullDecoder* decoder, ValueKindSig* sig,
+ compiler::CallDescriptor* call_descriptor) {
+ DefineSafepoint();
+ RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
+ int pc_offset = __ pc_offset();
+ MaybeOSR();
+ EmitLandingPad(decoder, pc_offset);
+ __ FinishCall(sig, call_descriptor);
+ }
+
static constexpr WasmOpcode kNoOutstandingOp = kExprUnreachable;
static constexpr base::EnumSet<ValueKind> kUnconditionallySupported{
kI32, kI64, kF32, kF64};
@@ -6051,9 +5974,6 @@ class LiftoffCompiler {
// at the first breakable opcode in the function (if compiling for debugging).
bool did_function_entry_break_checks_ = false;
- // Depth of the current try block.
- int32_t current_catch_ = -1;
-
struct HandlerInfo {
MovableLabel handler;
int pc_offset;
diff --git a/deps/v8/src/wasm/baseline/liftoff-register.h b/deps/v8/src/wasm/baseline/liftoff-register.h
index bb27b99dc2..63ac2acf8b 100644
--- a/deps/v8/src/wasm/baseline/liftoff-register.h
+++ b/deps/v8/src/wasm/baseline/liftoff-register.h
@@ -152,11 +152,12 @@ class LiftoffRegister {
"chosen type is small enough");
public:
- explicit LiftoffRegister(Register reg) : LiftoffRegister(reg.code()) {
+ constexpr explicit LiftoffRegister(Register reg)
+ : LiftoffRegister(reg.code()) {
DCHECK_NE(0, kLiftoffAssemblerGpCacheRegs & reg.bit());
DCHECK_EQ(reg, gp());
}
- explicit LiftoffRegister(DoubleRegister reg)
+ constexpr explicit LiftoffRegister(DoubleRegister reg)
: LiftoffRegister(kAfterMaxLiftoffGpRegCode + reg.code()) {
DCHECK_NE(0, kLiftoffAssemblerFpCacheRegs & reg.bit());
DCHECK_EQ(reg, fp());
@@ -275,22 +276,22 @@ class LiftoffRegister {
return DoubleRegister::from_code((code_ & kCodeMask) + 1);
}
- Register gp() const {
+ constexpr Register gp() const {
DCHECK(is_gp());
return Register::from_code(code_);
}
- DoubleRegister fp() const {
+ constexpr DoubleRegister fp() const {
DCHECK(is_fp());
return DoubleRegister::from_code(code_ - kAfterMaxLiftoffGpRegCode);
}
- int liftoff_code() const {
+ constexpr int liftoff_code() const {
STATIC_ASSERT(sizeof(int) >= sizeof(storage_t));
return static_cast<int>(code_);
}
- RegClass reg_class() const {
+ constexpr RegClass reg_class() const {
return is_fp_pair() ? kFpRegPair
: is_gp_pair() ? kGpRegPair : is_gp() ? kGpReg : kFpReg;
}
@@ -364,7 +365,7 @@ class LiftoffRegList {
return reg;
}
- LiftoffRegister clear(LiftoffRegister reg) {
+ constexpr LiftoffRegister clear(LiftoffRegister reg) {
if (reg.is_pair()) {
regs_ &= ~(storage_t{1} << reg.low().liftoff_code());
regs_ &= ~(storage_t{1} << reg.high().liftoff_code());
@@ -373,8 +374,10 @@ class LiftoffRegList {
}
return reg;
}
- Register clear(Register reg) { return clear(LiftoffRegister{reg}).gp(); }
- DoubleRegister clear(DoubleRegister reg) {
+ constexpr Register clear(Register reg) {
+ return clear(LiftoffRegister{reg}).gp();
+ }
+ constexpr DoubleRegister clear(DoubleRegister reg) {
return clear(LiftoffRegister{reg}).fp();
}
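
Reviewer note: the liftoff-register.h hunk only adds constexpr qualifiers, which lets register constants and register lists be formed and checked at compile time. A standalone sketch of the idea with simplified stand-in types (Reg and RegList below are invented, not the V8 classes):

#include <cstdint>

class Reg {
 public:
  constexpr explicit Reg(int code) : code_(code) {}
  constexpr int code() const { return code_; }
 private:
  int code_;
};

class RegList {
 public:
  constexpr RegList() = default;
  constexpr RegList set(Reg r) { bits_ |= uint64_t{1} << r.code(); return *this; }
  constexpr RegList clear(Reg r) { bits_ &= ~(uint64_t{1} << r.code()); return *this; }
  constexpr bool has(Reg r) const { return (bits_ >> r.code()) & 1; }
 private:
  uint64_t bits_ = 0;
};

constexpr Reg kGpCacheReg{3};
constexpr RegList kPinned = RegList{}.set(kGpCacheReg);
static_assert(kPinned.has(kGpCacheReg), "computed entirely at compile time");
static_assert(!RegList{}.set(kGpCacheReg).clear(kGpCacheReg).has(kGpCacheReg),
              "clear() participates in constant evaluation as well");

int main() { return 0; }
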
diff --git a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index d078fd5e42..58d2d8545c 100644
--- a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -433,6 +433,8 @@ void LiftoffAssembler::SpillInstance(Register instance) {
sw(instance, liftoff::GetInstanceOperand());
}
+void LiftoffAssembler::ResetOSRTarget() {}
+
void LiftoffAssembler::FillInstanceInto(Register dst) {
lw(dst, liftoff::GetInstanceOperand());
}
@@ -468,7 +470,7 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
: MemOperand(dst_addr, offset_imm);
Sw(src.gp(), dst_op);
- if (skip_write_barrier) return;
+ if (skip_write_barrier || FLAG_disable_write_barriers) return;
// The write barrier.
Label write_barrier;
@@ -483,16 +485,15 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
CheckPageFlag(src.gp(), scratch,
MemoryChunk::kPointersToHereAreInterestingMask, eq, &exit);
Addu(scratch, dst_op.rm(), dst_op.offset());
- CallRecordWriteStub(dst_addr, scratch, EMIT_REMEMBERED_SET, kSaveFPRegs,
- wasm::WasmCode::kRecordWrite);
+ CallRecordWriteStub(dst_addr, scratch, RememberedSetAction::kEmit,
+ SaveFPRegsMode::kSave, wasm::WasmCode::kRecordWrite);
bind(&exit);
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
- uint32_t* protected_load_pc, bool is_load_mem,
- bool i64_offset) {
+ uint32_t* protected_load_pc, bool is_load_mem) {
Register src = no_reg;
if (offset_reg != no_reg) {
src = GetUnusedRegister(kGpReg, pinned).gp();
@@ -2836,7 +2837,7 @@ void LiftoffAssembler::CallTrapCallbackForTesting() {
}
void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
- if (emit_debug_code()) Abort(reason);
+ if (FLAG_debug_code) Abort(reason);
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
@@ -2999,6 +3000,8 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
addiu(sp, sp, size);
}
+void LiftoffAssembler::MaybeOSR() {}
+
void LiftoffStackSlots::Construct(int param_slots) {
DCHECK_LT(0, slots_.size());
SortInPushOrder();
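
Reviewer note: the MIPS hunks also switch CallRecordWriteStub over to scoped enum arguments (RememberedSetAction::kEmit, SaveFPRegsMode::kSave) in place of the old bare constants. A tiny standalone sketch of why scoped enums make such call sites harder to get wrong (all names below are invented for illustration):

#include <cstdio>

enum class RememberedSet { kEmit, kOmit };
enum class SaveFPRegs { kSave, kIgnore };

// With scoped enums the two parameters cannot be swapped silently: passing a
// SaveFPRegs value where RememberedSet is expected fails to compile.
void CallStub(RememberedSet rs, SaveFPRegs fp) {
  std::printf("emit=%d save_fp=%d\n", rs == RememberedSet::kEmit,
              fp == SaveFPRegs::kSave);
}

int main() {
  CallStub(RememberedSet::kEmit, SaveFPRegs::kSave);
  // CallStub(SaveFPRegs::kSave, RememberedSet::kEmit);  // would not compile
}
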
diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index dfbd8d6a75..15b3b4f7c4 100644
--- a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -418,6 +418,8 @@ void LiftoffAssembler::SpillInstance(Register instance) {
Sd(instance, liftoff::GetInstanceOperand());
}
+void LiftoffAssembler::ResetOSRTarget() {}
+
void LiftoffAssembler::FillInstanceInto(Register dst) {
Ld(dst, liftoff::GetInstanceOperand());
}
@@ -448,7 +450,7 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
Sd(src.gp(), dst_op);
- if (skip_write_barrier) return;
+ if (skip_write_barrier || FLAG_disable_write_barriers) return;
Label write_barrier;
Label exit;
@@ -462,16 +464,15 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
MemoryChunk::kPointersToHereAreInterestingMask, eq,
&exit);
Daddu(scratch, dst_op.rm(), dst_op.offset());
- CallRecordWriteStub(dst_addr, scratch, EMIT_REMEMBERED_SET, kSaveFPRegs,
- wasm::WasmCode::kRecordWrite);
+ CallRecordWriteStub(dst_addr, scratch, RememberedSetAction::kEmit,
+ SaveFPRegsMode::kSave, wasm::WasmCode::kRecordWrite);
bind(&exit);
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uintptr_t offset_imm,
LoadType type, LiftoffRegList pinned,
- uint32_t* protected_load_pc, bool is_load_mem,
- bool i64_offset) {
+ uint32_t* protected_load_pc, bool is_load_mem) {
MemOperand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
if (protected_load_pc) *protected_load_pc = pc_offset();
@@ -2995,7 +2996,7 @@ void LiftoffAssembler::CallTrapCallbackForTesting() {
}
void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
- if (emit_debug_code()) Abort(reason);
+ if (FLAG_debug_code) Abort(reason);
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
@@ -3167,6 +3168,8 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
Daddu(sp, sp, size);
}
+void LiftoffAssembler::MaybeOSR() {}
+
void LiftoffStackSlots::Construct(int param_slots) {
DCHECK_LT(0, slots_.size());
SortInPushOrder();
diff --git a/deps/v8/src/wasm/baseline/ppc/OWNERS b/deps/v8/src/wasm/baseline/ppc/OWNERS
deleted file mode 100644
index 02c2cd757c..0000000000
--- a/deps/v8/src/wasm/baseline/ppc/OWNERS
+++ /dev/null
@@ -1,5 +0,0 @@
-junyan@redhat.com
-joransiu@ca.ibm.com
-midawson@redhat.com
-mfarazma@redhat.com
-vasili.skurydzin@ibm.com
diff --git a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index bedee1a939..10d574301e 100644
--- a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -45,6 +45,47 @@ inline MemOperand GetHalfStackSlot(int offset, RegPairHalf half) {
return MemOperand(fp, -kInstanceOffset - offset + half_offset);
}
+inline constexpr Condition ToCondition(LiftoffCondition liftoff_cond) {
+ switch (liftoff_cond) {
+ case kEqual:
+ return eq;
+ case kUnequal:
+ return ne;
+ case kSignedLessThan:
+ case kUnsignedLessThan:
+ return lt;
+ case kSignedLessEqual:
+ case kUnsignedLessEqual:
+ return le;
+ case kSignedGreaterEqual:
+ case kUnsignedGreaterEqual:
+ return ge;
+ case kSignedGreaterThan:
+ case kUnsignedGreaterThan:
+ return gt;
+ }
+}
+
+inline constexpr bool UseSignedOp(LiftoffCondition liftoff_cond) {
+ switch (liftoff_cond) {
+ case kEqual:
+ case kUnequal:
+ case kSignedLessThan:
+ case kSignedLessEqual:
+ case kSignedGreaterThan:
+ case kSignedGreaterEqual:
+ return true;
+ case kUnsignedLessThan:
+ case kUnsignedLessEqual:
+ case kUnsignedGreaterThan:
+ case kUnsignedGreaterEqual:
+ return false;
+ default:
+ UNREACHABLE();
+ }
+ return false;
+}
+
} // namespace liftoff
int LiftoffAssembler::PrepareStackFrame() {
@@ -87,7 +128,30 @@ bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
- bailout(kUnsupportedArchitecture, "LoadConstant");
+ switch (value.type().kind()) {
+ case kI32:
+ mov(reg.gp(), Operand(value.to_i32(), rmode));
+ break;
+ case kI64:
+ mov(reg.gp(), Operand(value.to_i64(), rmode));
+ break;
+ case kF32: {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+      mov(scratch, Operand(value.to_f32_boxed().get_bits()));
+ MovIntToFloat(reg.fp(), scratch);
+ break;
+ }
+ case kF64: {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+      mov(scratch, Operand(value.to_f64_boxed().get_bits()));
+ MovInt64ToDouble(reg.fp(), scratch);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::LoadInstanceFromFrame(Register dst) {
@@ -109,6 +173,8 @@ void LiftoffAssembler::SpillInstance(Register instance) {
bailout(kUnsupportedArchitecture, "SpillInstance");
}
+void LiftoffAssembler::ResetOSRTarget() {}
+
void LiftoffAssembler::FillInstanceInto(Register dst) {
bailout(kUnsupportedArchitecture, "FillInstanceInto");
}
@@ -137,8 +203,7 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uintptr_t offset_imm,
LoadType type, LiftoffRegList pinned,
- uint32_t* protected_load_pc, bool is_load_mem,
- bool i64_offset) {
+ uint32_t* protected_load_pc, bool is_load_mem) {
bailout(kUnsupportedArchitecture, "Load");
}
@@ -525,56 +590,123 @@ void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i64_signextend_i32");
}
-void LiftoffAssembler::emit_jump(Label* label) {
- bailout(kUnsupportedArchitecture, "emit_jump");
-}
+void LiftoffAssembler::emit_jump(Label* label) { b(al, label); }
-void LiftoffAssembler::emit_jump(Register target) {
- bailout(kUnsupportedArchitecture, "emit_jump");
-}
+void LiftoffAssembler::emit_jump(Register target) { Jump(target); }
void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
Label* label, ValueKind kind,
Register lhs, Register rhs) {
- bailout(kUnsupportedArchitecture, "emit_cond_jump");
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ bool use_signed = liftoff::UseSignedOp(liftoff_cond);
+
+ if (rhs != no_reg) {
+ switch (kind) {
+ case kI32:
+ if (use_signed) {
+ cmpw(lhs, rhs);
+ } else {
+ cmplw(lhs, rhs);
+ }
+ break;
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
+ DCHECK(liftoff_cond == kEqual || liftoff_cond == kUnequal);
+ V8_FALLTHROUGH;
+ case kI64:
+ if (use_signed) {
+ cmp(lhs, rhs);
+ } else {
+ cmpl(lhs, rhs);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ DCHECK_EQ(kind, kI32);
+ CHECK(use_signed);
+ cmpwi(lhs, Operand::Zero());
+ }
+
+ b(cond, label);
}
void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond,
Label* label, Register lhs,
int32_t imm) {
- bailout(kUnsupportedArchitecture, "emit_i32_cond_jumpi");
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ Cmpwi(lhs, Operand(imm), r0);
+ b(cond, label);
}
void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
- bailout(kUnsupportedArchitecture, "emit_i32_eqz");
+ Label done;
+ cmpwi(src, Operand(0));
+ mov(dst, Operand(1));
+ beq(&done);
+ mov(dst, Operand::Zero());
+ bind(&done);
}
void LiftoffAssembler::emit_i32_set_cond(LiftoffCondition liftoff_cond,
Register dst, Register lhs,
Register rhs) {
- bailout(kUnsupportedArchitecture, "emit_i32_set_cond");
+ bool use_signed = liftoff::UseSignedOp(liftoff_cond);
+ if (use_signed) {
+ cmpw(lhs, rhs);
+ } else {
+ cmplw(lhs, rhs);
+ }
+ Label done;
+ mov(dst, Operand(1));
+ b(liftoff::ToCondition(liftoff_cond), &done);
+ mov(dst, Operand::Zero());
+ bind(&done);
}
void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_i64_eqz");
+ Label done;
+ cmpi(src.gp(), Operand(0));
+ mov(dst, Operand(1));
+ beq(&done);
+ mov(dst, Operand::Zero());
+ bind(&done);
}
void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
Register dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i64_set_cond");
+ bool use_signed = liftoff::UseSignedOp(liftoff_cond);
+ if (use_signed) {
+ cmp(lhs.gp(), rhs.gp());
+ } else {
+ cmpl(lhs.gp(), rhs.gp());
+ }
+ Label done;
+ mov(dst, Operand(1));
+ b(liftoff::ToCondition(liftoff_cond), &done);
+ mov(dst, Operand::Zero());
+ bind(&done);
}
void LiftoffAssembler::emit_f32_set_cond(LiftoffCondition liftoff_cond,
Register dst, DoubleRegister lhs,
DoubleRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_f32_set_cond");
+ fcmpu(lhs, rhs);
+ Label done;
+ mov(dst, Operand(1));
+ b(liftoff::ToCondition(liftoff_cond), &done);
+ mov(dst, Operand::Zero());
+ bind(&done);
}
void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
Register dst, DoubleRegister lhs,
DoubleRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_f64_set_cond");
+ emit_f32_set_cond(liftoff_cond, dst, lhs, rhs);
}
bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
@@ -1802,6 +1934,8 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
bailout(kUnsupportedArchitecture, "DeallocateStackSlot");
}
+void LiftoffAssembler::MaybeOSR() {}
+
void LiftoffStackSlots::Construct(int param_slots) {
asm_->bailout(kUnsupportedArchitecture, "LiftoffStackSlots::Construct");
}
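
Reviewer note: the new PPC helpers split condition handling in two: ToCondition picks the branch condition while UseSignedOp picks the compare instruction (cmpw vs. cmplw), since signedness lives in the compare rather than in the branch. A standalone sketch of that split with invented enum names:

#include <cassert>

enum class Cond { kEq, kNe, kLtS, kLtU };     // platform-independent (invented)
enum class PPCCond { eq, ne, lt };            // what the branch encodes (invented)

constexpr PPCCond ToCondition(Cond c) {
  switch (c) {
    case Cond::kEq:  return PPCCond::eq;
    case Cond::kNe:  return PPCCond::ne;
    case Cond::kLtS:
    case Cond::kLtU: return PPCCond::lt;      // signedness is not encoded here ...
  }
  return PPCCond::eq;
}

constexpr bool UseSignedCompare(Cond c) {
  // ... it selects the compare instruction instead.
  return c == Cond::kEq || c == Cond::kNe || c == Cond::kLtS;
}

int main() {
  assert(ToCondition(Cond::kLtU) == ToCondition(Cond::kLtS));
  assert(!UseSignedCompare(Cond::kLtU) && UseSignedCompare(Cond::kLtS));
}
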
diff --git a/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h b/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
index bb6c3bcad8..3f549a3df6 100644
--- a/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
+++ b/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
@@ -325,9 +325,9 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset) {
patching_assembler.Add64(sp, sp, Operand(-frame_size));
}
-void LiftoffAssembler::FinishCode() {}
+void LiftoffAssembler::FinishCode() { ForceConstantPoolEmissionWithoutJump(); }
-void LiftoffAssembler::AbortCompilation() {}
+void LiftoffAssembler::AbortCompilation() { AbortedCodeGeneration(); }
// static
constexpr int LiftoffAssembler::StaticStackFrameSize() {
@@ -382,12 +382,19 @@ void LiftoffAssembler::LoadInstanceFromFrame(Register dst) {
void LiftoffAssembler::LoadFromInstance(Register dst, Register instance,
int offset, int size) {
DCHECK_LE(0, offset);
- DCHECK(size == 4 || size == 8);
MemOperand src{instance, offset};
- if (size == 4) {
- Lw(dst, src);
- } else {
- Ld(dst, src);
+ switch (size) {
+ case 1:
+ Lb(dst, MemOperand(src));
+ break;
+ case 4:
+ Lw(dst, MemOperand(src));
+ break;
+ case 8:
+ Ld(dst, MemOperand(src));
+ break;
+ default:
+ UNIMPLEMENTED();
}
}
@@ -401,6 +408,8 @@ void LiftoffAssembler::SpillInstance(Register instance) {
Sd(instance, liftoff::GetInstanceOperand());
}
+void LiftoffAssembler::ResetOSRTarget() {}
+
void LiftoffAssembler::FillInstanceInto(Register dst) {
Ld(dst, liftoff::GetInstanceOperand());
}
@@ -414,6 +423,12 @@ void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
Ld(dst, src_op);
}
+void LiftoffAssembler::LoadFullPointer(Register dst, Register src_addr,
+ int32_t offset_imm) {
+ MemOperand src_op = liftoff::GetMemOp(this, src_addr, no_reg, offset_imm);
+ Ld(dst, src_op);
+}
+
void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
Register offset_reg,
int32_t offset_imm,
@@ -425,7 +440,7 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
Sd(src.gp(), dst_op);
- if (skip_write_barrier) return;
+ if (skip_write_barrier || FLAG_disable_write_barriers) return;
Label write_barrier;
Label exit;
@@ -437,17 +452,16 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
JumpIfSmi(src.gp(), &exit);
CheckPageFlag(src.gp(), scratch,
MemoryChunk::kPointersToHereAreInterestingMask, eq, &exit);
- Add64(scratch, dst_addr, offset_imm);
- CallRecordWriteStub(dst_addr, scratch, EMIT_REMEMBERED_SET, kSaveFPRegs,
- wasm::WasmCode::kRecordWrite);
+ Add64(scratch, dst_op.rm(), dst_op.offset());
+ CallRecordWriteStub(dst_addr, scratch, RememberedSetAction::kEmit,
+ SaveFPRegsMode::kSave, wasm::WasmCode::kRecordWrite);
bind(&exit);
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uintptr_t offset_imm,
LoadType type, LiftoffRegList pinned,
- uint32_t* protected_load_pc, bool is_load_mem,
- bool i64_offset) {
+ uint32_t* protected_load_pc, bool is_load_mem) {
MemOperand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
if (protected_load_pc) *protected_load_pc = pc_offset();
@@ -544,60 +558,297 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
}
}
+namespace liftoff {
+#define __ lasm->
+
+inline Register CalculateActualAddress(LiftoffAssembler* lasm,
+ Register addr_reg, Register offset_reg,
+ uintptr_t offset_imm,
+ Register result_reg) {
+ DCHECK_NE(offset_reg, no_reg);
+ DCHECK_NE(addr_reg, no_reg);
+ __ Add64(result_reg, addr_reg, Operand(offset_reg));
+ if (offset_imm != 0) {
+ __ Add64(result_reg, result_reg, Operand(offset_imm));
+ }
+ return result_reg;
+}
+
+enum class Binop { kAdd, kSub, kAnd, kOr, kXor, kExchange };
+
+inline void AtomicBinop(LiftoffAssembler* lasm, Register dst_addr,
+ Register offset_reg, uintptr_t offset_imm,
+ LiftoffRegister value, LiftoffRegister result,
+ StoreType type, Binop op) {
+ LiftoffRegList pinned =
+ LiftoffRegList::ForRegs(dst_addr, offset_reg, value, result);
+ Register store_result = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+
+ // Make sure that {result} is unique.
+ Register result_reg = result.gp();
+ if (result_reg == value.gp() || result_reg == dst_addr ||
+ result_reg == offset_reg) {
+ result_reg = __ GetUnusedRegister(kGpReg, pinned).gp();
+ }
+
+ UseScratchRegisterScope temps(lasm);
+ Register actual_addr = liftoff::CalculateActualAddress(
+ lasm, dst_addr, offset_reg, offset_imm, temps.Acquire());
+
+ // Allocate an additional {temp} register to hold the result that should be
+ // stored to memory. Note that {temp} and {store_result} are not allowed to be
+ // the same register.
+ Register temp = temps.Acquire();
+
+ Label retry;
+ __ bind(&retry);
+ switch (type.value()) {
+ case StoreType::kI64Store8:
+ case StoreType::kI32Store8:
+ __ lbu(result_reg, actual_addr, 0);
+ __ sync();
+ break;
+ case StoreType::kI64Store16:
+ case StoreType::kI32Store16:
+ __ lhu(result_reg, actual_addr, 0);
+ __ sync();
+ break;
+ case StoreType::kI64Store32:
+ case StoreType::kI32Store:
+ __ lr_w(true, false, result_reg, actual_addr);
+ break;
+ case StoreType::kI64Store:
+ __ lr_d(true, false, result_reg, actual_addr);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ switch (op) {
+ case Binop::kAdd:
+ __ add(temp, result_reg, value.gp());
+ break;
+ case Binop::kSub:
+ __ sub(temp, result_reg, value.gp());
+ break;
+ case Binop::kAnd:
+ __ and_(temp, result_reg, value.gp());
+ break;
+ case Binop::kOr:
+ __ or_(temp, result_reg, value.gp());
+ break;
+ case Binop::kXor:
+ __ xor_(temp, result_reg, value.gp());
+ break;
+ case Binop::kExchange:
+ __ mv(temp, value.gp());
+ break;
+ }
+ switch (type.value()) {
+ case StoreType::kI64Store8:
+ case StoreType::kI32Store8:
+ __ sync();
+ __ sb(temp, actual_addr, 0);
+ __ sync();
+ __ mv(store_result, zero_reg);
+ break;
+ case StoreType::kI64Store16:
+ case StoreType::kI32Store16:
+ __ sync();
+ __ sh(temp, actual_addr, 0);
+ __ sync();
+ __ mv(store_result, zero_reg);
+ break;
+ case StoreType::kI64Store32:
+ case StoreType::kI32Store:
+ __ sc_w(false, true, store_result, actual_addr, temp);
+ break;
+ case StoreType::kI64Store:
+      __ sc_d(false, true, store_result, actual_addr, temp);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ __ bnez(store_result, &retry);
+ if (result_reg != result.gp()) {
+ __ mv(result.gp(), result_reg);
+ }
+}
+
+#undef __
+} // namespace liftoff
+
void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
Register offset_reg, uintptr_t offset_imm,
LoadType type, LiftoffRegList pinned) {
- bailout(kAtomics, "AtomicLoad");
+ UseScratchRegisterScope temps(this);
+ Register src_reg = liftoff::CalculateActualAddress(
+ this, src_addr, offset_reg, offset_imm, temps.Acquire());
+ switch (type.value()) {
+ case LoadType::kI32Load8U:
+ case LoadType::kI64Load8U:
+ lbu(dst.gp(), src_reg, 0);
+ sync();
+ return;
+ case LoadType::kI32Load16U:
+ case LoadType::kI64Load16U:
+ lhu(dst.gp(), src_reg, 0);
+ sync();
+ return;
+ case LoadType::kI32Load:
+ lr_w(true, true, dst.gp(), src_reg);
+ return;
+ case LoadType::kI64Load32U:
+ lr_w(true, true, dst.gp(), src_reg);
+ slli(dst.gp(), dst.gp(), 32);
+ srli(dst.gp(), dst.gp(), 32);
+ return;
+ case LoadType::kI64Load:
+ lr_d(true, true, dst.gp(), src_reg);
+ return;
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned) {
- bailout(kAtomics, "AtomicStore");
+ UseScratchRegisterScope temps(this);
+ Register dst_reg = liftoff::CalculateActualAddress(
+ this, dst_addr, offset_reg, offset_imm, temps.Acquire());
+ switch (type.value()) {
+ case StoreType::kI64Store8:
+ case StoreType::kI32Store8:
+ sync();
+ sb(src.gp(), dst_reg, 0);
+ sync();
+ return;
+ case StoreType::kI64Store16:
+ case StoreType::kI32Store16:
+ sync();
+ sh(src.gp(), dst_reg, 0);
+ sync();
+ return;
+ case StoreType::kI64Store32:
+ case StoreType::kI32Store:
+ sc_w(true, true, zero_reg, dst_reg, src.gp());
+ return;
+ case StoreType::kI64Store:
+ sc_d(true, true, zero_reg, dst_reg, src.gp());
+ return;
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicAdd");
+ liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
+ type, liftoff::Binop::kAdd);
}
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicSub");
+ liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
+ type, liftoff::Binop::kSub);
}
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicAnd");
+ liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
+ type, liftoff::Binop::kAnd);
}
void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicOr");
+ liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
+ type, liftoff::Binop::kOr);
}
void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicXor");
+ liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
+ type, liftoff::Binop::kXor);
}
void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
uintptr_t offset_imm,
LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicExchange");
+ liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
+ type, liftoff::Binop::kExchange);
}
void LiftoffAssembler::AtomicCompareExchange(
Register dst_addr, Register offset_reg, uintptr_t offset_imm,
LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
StoreType type) {
- bailout(kAtomics, "AtomicCompareExchange");
+ LiftoffRegList pinned =
+ LiftoffRegList::ForRegs(dst_addr, offset_reg, expected, new_value);
+
+ Register result_reg = result.gp();
+ if (pinned.has(result)) {
+ result_reg = GetUnusedRegister(kGpReg, pinned).gp();
+ }
+
+ UseScratchRegisterScope temps(this);
+
+ Register actual_addr = liftoff::CalculateActualAddress(
+ this, dst_addr, offset_reg, offset_imm, temps.Acquire());
+
+ Register store_result = temps.Acquire();
+
+ Label retry;
+ Label done;
+ bind(&retry);
+ switch (type.value()) {
+ case StoreType::kI64Store8:
+ case StoreType::kI32Store8:
+ lbu(result_reg, actual_addr, 0);
+ sync();
+      Branch(&done, ne, result_reg, Operand(expected.gp()));
+ sync();
+ sb(new_value.gp(), actual_addr, 0);
+ sync();
+ mv(store_result, zero_reg);
+ break;
+ case StoreType::kI64Store16:
+ case StoreType::kI32Store16:
+ lhu(result_reg, actual_addr, 0);
+ sync();
+      Branch(&done, ne, result_reg, Operand(expected.gp()));
+ sync();
+ sh(new_value.gp(), actual_addr, 0);
+ sync();
+ mv(store_result, zero_reg);
+ break;
+ case StoreType::kI64Store32:
+ case StoreType::kI32Store:
+ lr_w(true, true, result_reg, actual_addr);
+      Branch(&done, ne, result_reg, Operand(expected.gp()));
+      sc_w(true, true, store_result, actual_addr, new_value.gp());
+ break;
+ case StoreType::kI64Store:
+ lr_d(true, true, result_reg, actual_addr);
+      Branch(&done, ne, result_reg, Operand(expected.gp()));
+      sc_d(true, true, store_result, actual_addr, new_value.gp());
+ break;
+ default:
+ UNREACHABLE();
+ }
+ bnez(store_result, &retry);
+ bind(&done);
+
+ if (result_reg != result.gp()) {
+ mv(result.gp(), result_reg);
+ }
}
void LiftoffAssembler::AtomicFence() { sync(); }
@@ -2413,7 +2664,7 @@ void LiftoffAssembler::CallTrapCallbackForTesting() {
}
void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
- if (emit_debug_code()) Abort(reason);
+ if (FLAG_debug_code) Abort(reason);
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
@@ -2543,8 +2794,8 @@ void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
if (target == no_reg) {
- pop(kScratchReg);
- Call(kScratchReg);
+ pop(t6);
+ Call(t6);
} else {
Call(target);
}
@@ -2552,8 +2803,8 @@ void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
void LiftoffAssembler::TailCallIndirect(Register target) {
if (target == no_reg) {
- Pop(kScratchReg);
- Jump(kScratchReg);
+ Pop(t6);
+ Jump(t6);
} else {
Jump(target);
}
@@ -2574,6 +2825,7 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
Add64(sp, sp, Operand(size));
}
+void LiftoffAssembler::MaybeOSR() {}
void LiftoffStackSlots::Construct(int param_slots) {
DCHECK_LT(0, slots_.size());
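
Reviewer note: the RISC-V AtomicBinop helper above is an LR/SC retry loop that yields the value observed before the update. A rough standalone analogue of the same contract using std::atomic's compare-exchange loop (illustrative only; this shows the semantics, not the emitted code):

#include <atomic>
#include <cstdint>
#include <cstdio>

// Returns the value observed before the update, like AtomicBinop's {result}.
uint64_t AtomicAndOldValue(std::atomic<uint64_t>* cell, uint64_t mask) {
  uint64_t old_value = cell->load(std::memory_order_relaxed);
  // Retry until the store succeeds without interference, mirroring the
  // lr_d / sc_d / bnez(store_result, &retry) loop in the hunk above.
  while (!cell->compare_exchange_weak(old_value, old_value & mask,
                                      std::memory_order_seq_cst,
                                      std::memory_order_relaxed)) {
    // old_value is refreshed by compare_exchange_weak on failure.
  }
  return old_value;
}

int main() {
  std::atomic<uint64_t> cell{0xFF};
  std::printf("old=%llu new=%llu\n",
              static_cast<unsigned long long>(AtomicAndOldValue(&cell, 0x0F)),
              static_cast<unsigned long long>(cell.load()));
}
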
diff --git a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index 04f30939fd..4c230ed305 100644
--- a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -224,6 +224,8 @@ void LiftoffAssembler::SpillInstance(Register instance) {
StoreU64(instance, liftoff::GetInstanceOperand());
}
+void LiftoffAssembler::ResetOSRTarget() {}
+
void LiftoffAssembler::FillInstanceInto(Register dst) {
LoadU64(dst, liftoff::GetInstanceOperand());
}
@@ -254,7 +256,7 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm);
StoreTaggedField(src.gp(), dst_op);
- if (skip_write_barrier) return;
+ if (skip_write_barrier || FLAG_disable_write_barriers) return;
Label write_barrier;
Label exit;
@@ -269,25 +271,19 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
CheckPageFlag(src.gp(), r1, MemoryChunk::kPointersToHereAreInterestingMask,
eq, &exit);
lay(r1, dst_op);
- CallRecordWriteStub(dst_addr, r1, EMIT_REMEMBERED_SET, kSaveFPRegs,
- wasm::WasmCode::kRecordWrite);
+ CallRecordWriteStub(dst_addr, r1, RememberedSetAction::kEmit,
+ SaveFPRegsMode::kSave, wasm::WasmCode::kRecordWrite);
bind(&exit);
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uintptr_t offset_imm,
LoadType type, LiftoffRegList pinned,
- uint32_t* protected_load_pc, bool is_load_mem,
- bool i64_offset) {
+ uint32_t* protected_load_pc, bool is_load_mem) {
UseScratchRegisterScope temps(this);
if (!is_int20(offset_imm)) {
mov(ip, Operand(offset_imm));
if (offset_reg != no_reg) {
- if (!i64_offset) {
- // Clear the upper 32 bits of the 64 bit offset register.
- llgfr(r0, offset_reg);
- offset_reg = r0;
- }
AddS64(ip, offset_reg);
}
offset_reg = ip;
@@ -671,19 +667,253 @@ void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicAnd");
+ Register tmp1 =
+ GetUnusedRegister(
+ kGpReg, LiftoffRegList::ForRegs(dst_addr, offset_reg, value, result))
+ .gp();
+ Register tmp2 =
+ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst_addr, offset_reg,
+ value, result, tmp1))
+ .gp();
+
+ lay(ip,
+ MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
+
+ switch (type.value()) {
+ case StoreType::kI32Store8:
+ case StoreType::kI64Store8: {
+ Label do_again;
+ bind(&do_again);
+ LoadU8(tmp1, MemOperand(ip));
+ AndP(tmp2, tmp1, value.gp());
+ AtomicCmpExchangeU8(ip, result.gp(), tmp1, tmp2, r0, r1);
+ b(Condition(4), &do_again);
+ LoadU8(result.gp(), result.gp());
+ break;
+ }
+ case StoreType::kI32Store16:
+ case StoreType::kI64Store16: {
+ Label do_again;
+ bind(&do_again);
+ LoadU16(tmp1, MemOperand(ip));
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvr(tmp2, tmp1);
+ ShiftRightU32(tmp2, tmp2, Operand(16));
+ AndP(tmp2, tmp2, value.gp());
+ lrvr(tmp2, tmp2);
+ ShiftRightU32(tmp2, tmp2, Operand(16));
+#else
+ AndP(tmp2, tmp1, value.gp());
+#endif
+ AtomicCmpExchangeU16(ip, result.gp(), tmp1, tmp2, r0, r1);
+ b(Condition(4), &do_again);
+ LoadU16(result.gp(), result.gp());
+ break;
+ }
+ case StoreType::kI32Store:
+ case StoreType::kI64Store32: {
+ Label do_again;
+ bind(&do_again);
+ LoadU32(tmp1, MemOperand(ip));
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvr(tmp2, tmp1);
+ AndP(tmp2, tmp2, value.gp());
+ lrvr(tmp2, tmp2);
+#else
+ AndP(tmp2, tmp1, value.gp());
+#endif
+ CmpAndSwap(tmp1, tmp2, MemOperand(ip));
+ b(Condition(4), &do_again);
+ LoadU32(result.gp(), tmp1);
+ break;
+ }
+ case StoreType::kI64Store: {
+ Label do_again;
+ bind(&do_again);
+ LoadU64(tmp1, MemOperand(ip));
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvgr(tmp2, tmp1);
+ AndP(tmp2, tmp2, value.gp());
+ lrvgr(tmp2, tmp2);
+#else
+ AndP(tmp2, tmp1, value.gp());
+#endif
+ CmpAndSwap64(tmp1, tmp2, MemOperand(ip));
+ b(Condition(4), &do_again);
+ mov(result.gp(), tmp1);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicOr");
+ Register tmp1 =
+ GetUnusedRegister(
+ kGpReg, LiftoffRegList::ForRegs(dst_addr, offset_reg, value, result))
+ .gp();
+ Register tmp2 =
+ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst_addr, offset_reg,
+ value, result, tmp1))
+ .gp();
+
+ lay(ip,
+ MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
+
+ switch (type.value()) {
+ case StoreType::kI32Store8:
+ case StoreType::kI64Store8: {
+ Label do_again;
+ bind(&do_again);
+ LoadU8(tmp1, MemOperand(ip));
+ OrP(tmp2, tmp1, value.gp());
+ AtomicCmpExchangeU8(ip, result.gp(), tmp1, tmp2, r0, r1);
+ b(Condition(4), &do_again);
+ LoadU8(result.gp(), result.gp());
+ break;
+ }
+ case StoreType::kI32Store16:
+ case StoreType::kI64Store16: {
+ Label do_again;
+ bind(&do_again);
+ LoadU16(tmp1, MemOperand(ip));
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvr(tmp2, tmp1);
+ ShiftRightU32(tmp2, tmp2, Operand(16));
+ OrP(tmp2, tmp2, value.gp());
+ lrvr(tmp2, tmp2);
+ ShiftRightU32(tmp2, tmp2, Operand(16));
+#else
+ OrP(tmp2, tmp1, value.gp());
+#endif
+ AtomicCmpExchangeU16(ip, result.gp(), tmp1, tmp2, r0, r1);
+ b(Condition(4), &do_again);
+ LoadU16(result.gp(), result.gp());
+ break;
+ }
+ case StoreType::kI32Store:
+ case StoreType::kI64Store32: {
+ Label do_again;
+ bind(&do_again);
+ LoadU32(tmp1, MemOperand(ip));
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvr(tmp2, tmp1);
+ OrP(tmp2, tmp2, value.gp());
+ lrvr(tmp2, tmp2);
+#else
+ OrP(tmp2, tmp1, value.gp());
+#endif
+ CmpAndSwap(tmp1, tmp2, MemOperand(ip));
+ b(Condition(4), &do_again);
+ LoadU32(result.gp(), tmp1);
+ break;
+ }
+ case StoreType::kI64Store: {
+ Label do_again;
+ bind(&do_again);
+ LoadU64(tmp1, MemOperand(ip));
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvgr(tmp2, tmp1);
+ OrP(tmp2, tmp2, value.gp());
+ lrvgr(tmp2, tmp2);
+#else
+ OrP(tmp2, tmp1, value.gp());
+#endif
+ CmpAndSwap64(tmp1, tmp2, MemOperand(ip));
+ b(Condition(4), &do_again);
+ mov(result.gp(), tmp1);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicXor");
+ Register tmp1 =
+ GetUnusedRegister(
+ kGpReg, LiftoffRegList::ForRegs(dst_addr, offset_reg, value, result))
+ .gp();
+ Register tmp2 =
+ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst_addr, offset_reg,
+ value, result, tmp1))
+ .gp();
+
+ lay(ip,
+ MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
+
+ switch (type.value()) {
+ case StoreType::kI32Store8:
+ case StoreType::kI64Store8: {
+ Label do_again;
+ bind(&do_again);
+ LoadU8(tmp1, MemOperand(ip));
+ XorP(tmp2, tmp1, value.gp());
+ AtomicCmpExchangeU8(ip, result.gp(), tmp1, tmp2, r0, r1);
+ b(Condition(4), &do_again);
+ LoadU8(result.gp(), result.gp());
+ break;
+ }
+ case StoreType::kI32Store16:
+ case StoreType::kI64Store16: {
+ Label do_again;
+ bind(&do_again);
+ LoadU16(tmp1, MemOperand(ip));
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvr(tmp2, tmp1);
+ ShiftRightU32(tmp2, tmp2, Operand(16));
+ XorP(tmp2, tmp2, value.gp());
+ lrvr(tmp2, tmp2);
+ ShiftRightU32(tmp2, tmp2, Operand(16));
+#else
+ XorP(tmp2, tmp1, value.gp());
+#endif
+ AtomicCmpExchangeU16(ip, result.gp(), tmp1, tmp2, r0, r1);
+ b(Condition(4), &do_again);
+ LoadU16(result.gp(), result.gp());
+ break;
+ }
+ case StoreType::kI32Store:
+ case StoreType::kI64Store32: {
+ Label do_again;
+ bind(&do_again);
+ LoadU32(tmp1, MemOperand(ip));
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvr(tmp2, tmp1);
+ XorP(tmp2, tmp2, value.gp());
+ lrvr(tmp2, tmp2);
+#else
+ XorP(tmp2, tmp1, value.gp());
+#endif
+ CmpAndSwap(tmp1, tmp2, MemOperand(ip));
+ b(Condition(4), &do_again);
+ LoadU32(result.gp(), tmp1);
+ break;
+ }
+ case StoreType::kI64Store: {
+ Label do_again;
+ bind(&do_again);
+ LoadU64(tmp1, MemOperand(ip));
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvgr(tmp2, tmp1);
+ XorP(tmp2, tmp2, value.gp());
+ lrvgr(tmp2, tmp2);
+#else
+ XorP(tmp2, tmp1, value.gp());
+#endif
+ CmpAndSwap64(tmp1, tmp2, MemOperand(ip));
+ b(Condition(4), &do_again);
+ mov(result.gp(), tmp1);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
@@ -1168,9 +1398,19 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
LFR_TO_REG, LFR_TO_REG, USE, , void) \
V(i64_ctz, CountTrailingZerosU64, LiftoffRegister, LiftoffRegister, \
LFR_TO_REG, LFR_TO_REG, USE, , void) \
+ V(f32_ceil, CeilF32, DoubleRegister, DoubleRegister, , , USE, true, bool) \
+ V(f32_floor, FloorF32, DoubleRegister, DoubleRegister, , , USE, true, bool) \
+ V(f32_trunc, TruncF32, DoubleRegister, DoubleRegister, , , USE, true, bool) \
+ V(f32_nearest_int, NearestIntF32, DoubleRegister, DoubleRegister, , , USE, \
+ true, bool) \
V(f32_abs, lpebr, DoubleRegister, DoubleRegister, , , USE, , void) \
V(f32_neg, lcebr, DoubleRegister, DoubleRegister, , , USE, , void) \
V(f32_sqrt, sqebr, DoubleRegister, DoubleRegister, , , USE, , void) \
+ V(f64_ceil, CeilF64, DoubleRegister, DoubleRegister, , , USE, true, bool) \
+ V(f64_floor, FloorF64, DoubleRegister, DoubleRegister, , , USE, true, bool) \
+ V(f64_trunc, TruncF64, DoubleRegister, DoubleRegister, , , USE, true, bool) \
+ V(f64_nearest_int, NearestIntF64, DoubleRegister, DoubleRegister, , , USE, \
+ true, bool) \
V(f64_abs, lpdbr, DoubleRegister, DoubleRegister, , , USE, , void) \
V(f64_neg, lcdbr, DoubleRegister, DoubleRegister, , , USE, , void) \
V(f64_sqrt, sqdbr, DoubleRegister, DoubleRegister, , , USE, , void)
@@ -1191,6 +1431,14 @@ UNOP_LIST(EMIT_UNOP_FUNCTION)
// V(name, instr, dtype, stype1, stype2, dcast, scast1, scast2, rcast,
// return_val, return_type)
#define BINOP_LIST(V) \
+ V(f32_min, FloatMin, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f32_max, FloatMax, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f64_min, DoubleMin, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f64_max, DoubleMax, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
V(f64_add, AddF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
USE, , void) \
V(f64_sub, SubF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
@@ -1285,84 +1533,6 @@ BINOP_LIST(EMIT_BINOP_FUNCTION)
#undef REGISTER_AND_WITH_1F
#undef LFR_TO_REG
-bool LiftoffAssembler::emit_f32_ceil(DoubleRegister dst, DoubleRegister src) {
- fiebra(ROUND_TOWARD_POS_INF, dst, src);
- return true;
-}
-
-bool LiftoffAssembler::emit_f32_floor(DoubleRegister dst, DoubleRegister src) {
- fiebra(ROUND_TOWARD_NEG_INF, dst, src);
- return true;
-}
-
-bool LiftoffAssembler::emit_f32_trunc(DoubleRegister dst, DoubleRegister src) {
- fiebra(ROUND_TOWARD_0, dst, src);
- return true;
-}
-
-bool LiftoffAssembler::emit_f32_nearest_int(DoubleRegister dst,
- DoubleRegister src) {
- fiebra(ROUND_TO_NEAREST_TO_EVEN, dst, src);
- return true;
-}
-
-void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
- DoubleRegister rhs) {
- if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1)) {
- vfmin(dst, lhs, rhs, Condition(1), Condition(8), Condition(3));
- return;
- }
- DoubleMin(dst, lhs, rhs);
-}
-
-void LiftoffAssembler::emit_f32_min(DoubleRegister dst, DoubleRegister lhs,
- DoubleRegister rhs) {
- if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1)) {
- vfmin(dst, lhs, rhs, Condition(1), Condition(8), Condition(2));
- return;
- }
- FloatMin(dst, lhs, rhs);
-}
-
-bool LiftoffAssembler::emit_f64_ceil(DoubleRegister dst, DoubleRegister src) {
- fidbra(ROUND_TOWARD_POS_INF, dst, src);
- return true;
-}
-
-bool LiftoffAssembler::emit_f64_floor(DoubleRegister dst, DoubleRegister src) {
- fidbra(ROUND_TOWARD_NEG_INF, dst, src);
- return true;
-}
-
-bool LiftoffAssembler::emit_f64_trunc(DoubleRegister dst, DoubleRegister src) {
- fidbra(ROUND_TOWARD_0, dst, src);
- return true;
-}
-
-bool LiftoffAssembler::emit_f64_nearest_int(DoubleRegister dst,
- DoubleRegister src) {
- fidbra(ROUND_TO_NEAREST_TO_EVEN, dst, src);
- return true;
-}
-
-void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
- DoubleRegister rhs) {
- if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1)) {
- vfmax(dst, lhs, rhs, Condition(1), Condition(8), Condition(3));
- return;
- }
- DoubleMax(dst, lhs, rhs);
-}
-
-void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
- DoubleRegister rhs) {
- if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1)) {
- vfmax(dst, lhs, rhs, Condition(1), Condition(8), Condition(2));
- return;
- }
- FloatMax(dst, lhs, rhs);
-}
-
void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
@@ -1896,7 +2066,9 @@ bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
SmiCheckMode mode) {
- bailout(kUnsupportedArchitecture, "emit_smi_check");
+ TestIfSmi(obj);
+ Condition condition = mode == kJumpOnSmi ? eq : ne;
+  b(condition, target);  // Branch on the Smi check according to {mode}.
}
void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
@@ -3213,6 +3385,8 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
lay(sp, MemOperand(sp, size));
}
+void LiftoffAssembler::MaybeOSR() {}
+
void LiftoffStackSlots::Construct(int param_slots) {
DCHECK_LT(0, slots_.size());
SortInPushOrder();
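
Reviewer note: instead of hand-written wrappers, the s390 port now feeds f32/f64 ceil/floor/trunc/nearest_int and min/max through the UNOP_LIST/BINOP_LIST X-macros, which expand into the emit_* functions. A minimal standalone sketch of the X-macro technique itself (names invented, unrelated to the real lists):

#include <cstdio>

// The list macro enumerates (name, operation) pairs once ...
#define OP_LIST(V) \
  V(add, +)        \
  V(sub, -)        \
  V(mul, *)

// ... and each expansion site turns every entry into a definition.
#define DEFINE_OP(name, op) \
  int emit_##name(int lhs, int rhs) { return lhs op rhs; }
OP_LIST(DEFINE_OP)
#undef DEFINE_OP

int main() {
  std::printf("%d %d %d\n", emit_add(6, 7), emit_sub(6, 7), emit_mul(6, 7));
}
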
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index 68619a9f1b..3da9656b42 100644
--- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -70,6 +70,8 @@ inline Operand GetStackSlot(int offset) { return Operand(rbp, -offset); }
// TODO(clemensb): Make this a constexpr variable once Operand is constexpr.
inline Operand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); }
+inline Operand GetOSRTargetSlot() { return GetStackSlot(kOSRTargetOffset); }
+
inline Operand GetMemOp(LiftoffAssembler* assm, Register addr, Register offset,
uintptr_t offset_imm) {
if (is_uint31(offset_imm)) {
@@ -79,7 +81,7 @@ inline Operand GetMemOp(LiftoffAssembler* assm, Register addr, Register offset,
}
// Offset immediate does not fit in 31 bits.
Register scratch = kScratchRegister;
- assm->Set(scratch, offset_imm);
+ assm->TurboAssembler::Move(scratch, offset_imm);
if (offset != no_reg) assm->addq(scratch, offset);
return Operand(addr, scratch, times_1, 0);
}
@@ -249,7 +251,7 @@ void LiftoffAssembler::AbortCompilation() {}
// static
constexpr int LiftoffAssembler::StaticStackFrameSize() {
- return liftoff::kInstanceOffset;
+ return kOSRTargetOffset;
}
int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
@@ -272,7 +274,7 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
break;
case kI64:
if (RelocInfo::IsNone(rmode)) {
- TurboAssembler::Set(reg.gp(), value.to_i64());
+ TurboAssembler::Move(reg.gp(), value.to_i64());
} else {
movq(reg.gp(), Immediate64(value.to_i64(), rmode));
}
@@ -322,6 +324,10 @@ void LiftoffAssembler::SpillInstance(Register instance) {
movq(liftoff::GetInstanceOperand(), instance);
}
+void LiftoffAssembler::ResetOSRTarget() {
+ movq(liftoff::GetOSRTargetSlot(), Immediate(0));
+}
+
void LiftoffAssembler::FillInstanceInto(Register dst) {
movq(dst, liftoff::GetInstanceOperand());
}
@@ -331,7 +337,7 @@ void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
int32_t offset_imm,
LiftoffRegList pinned) {
DCHECK_GE(offset_imm, 0);
- if (emit_debug_code() && offset_reg != no_reg) {
+ if (FLAG_debug_code && offset_reg != no_reg) {
AssertZeroExtended(offset_reg);
}
Operand src_op = liftoff::GetMemOp(this, src_addr, offset_reg,
@@ -357,7 +363,7 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
static_cast<uint32_t>(offset_imm));
StoreTaggedField(dst_op, src.gp());
- if (skip_write_barrier) return;
+ if (skip_write_barrier || FLAG_disable_write_barriers) return;
Register scratch = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Label write_barrier;
@@ -375,8 +381,8 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
MemoryChunk::kPointersToHereAreInterestingMask, zero, &exit,
Label::kNear);
leaq(scratch, dst_op);
- CallRecordWriteStub(dst_addr, scratch, EMIT_REMEMBERED_SET, kSaveFPRegs,
- wasm::WasmCode::kRecordWrite);
+ CallRecordWriteStub(dst_addr, scratch, RememberedSetAction::kEmit,
+ SaveFPRegsMode::kSave, wasm::WasmCode::kRecordWrite);
bind(&exit);
}
@@ -389,11 +395,7 @@ void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uintptr_t offset_imm,
LoadType type, LiftoffRegList pinned,
- uint32_t* protected_load_pc, bool is_load_mem,
- bool i64_offset) {
- if (offset_reg != no_reg && !i64_offset) {
- AssertZeroExtended(offset_reg);
- }
+ uint32_t* protected_load_pc, bool is_load_mem) {
Operand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
if (protected_load_pc) *protected_load_pc = pc_offset();
switch (type.value()) {
@@ -1257,7 +1259,7 @@ void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
int64_t imm) {
if (!is_int32(imm)) {
- TurboAssembler::Set(kScratchRegister, imm);
+ TurboAssembler::Move(kScratchRegister, imm);
if (lhs.gp() == dst.gp()) {
addq(dst.gp(), kScratchRegister);
} else {
@@ -2538,7 +2540,7 @@ void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
LiftoffRegister src) {
- Shufps(dst.fp(), src.fp(), src.fp(), 0);
+ F32x4Splat(dst.fp(), src.fp());
}
void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
@@ -2850,13 +2852,7 @@ void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
}
void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
- if (dst.fp() != src.fp()) {
- Pcmpeqd(dst.fp(), dst.fp());
- Pxor(dst.fp(), src.fp());
- } else {
- Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- Pxor(dst.fp(), kScratchDoubleReg);
- }
+ S128Not(dst.fp(), src.fp(), kScratchDoubleReg);
}
void LiftoffAssembler::emit_s128_and(LiftoffRegister dst, LiftoffRegister lhs,
@@ -3433,15 +3429,7 @@ void LiftoffAssembler::emit_i32x4_extmul_high_i16x8_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
LiftoffRegister src) {
- DoubleRegister reg = dst.fp() == src.fp() ? kScratchDoubleReg : dst.fp();
- Pxor(reg, reg);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpsubq(dst.fp(), reg, src.fp());
- } else {
- psubq(reg, src.fp());
- if (dst.fp() != reg) movaps(dst.fp(), reg);
- }
+ I64x2Neg(dst.fp(), src.fp(), kScratchDoubleReg);
}
void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst,
@@ -3508,13 +3496,13 @@ void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
Movaps(tmp1.fp(), lhs.fp());
Movaps(tmp2.fp(), rhs.fp());
// Multiply high dword of each qword of left with right.
- Psrlq(tmp1.fp(), 32);
+ Psrlq(tmp1.fp(), byte{32});
Pmuludq(tmp1.fp(), rhs.fp());
// Multiply high dword of each qword of right with left.
- Psrlq(tmp2.fp(), 32);
+ Psrlq(tmp2.fp(), byte{32});
Pmuludq(tmp2.fp(), lhs.fp());
Paddq(tmp2.fp(), tmp1.fp());
- Psllq(tmp2.fp(), 32);
+ Psllq(tmp2.fp(), byte{32});
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmuludq, &Assembler::pmuludq>(
this, dst, lhs, rhs);
Paddq(dst.fp(), tmp2.fp());
@@ -3590,11 +3578,11 @@ void LiftoffAssembler::emit_f32x4_neg(LiftoffRegister dst,
LiftoffRegister src) {
if (dst.fp() == src.fp()) {
Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- Pslld(kScratchDoubleReg, static_cast<byte>(31));
+ Pslld(kScratchDoubleReg, byte{31});
Xorps(dst.fp(), kScratchDoubleReg);
} else {
Pcmpeqd(dst.fp(), dst.fp());
- Pslld(dst.fp(), static_cast<byte>(31));
+ Pslld(dst.fp(), byte{31});
Xorps(dst.fp(), src.fp());
}
}
@@ -3678,7 +3666,7 @@ void LiftoffAssembler::emit_f32x4_min(LiftoffRegister dst, LiftoffRegister lhs,
// propagate -0's and NaNs, which may be non-canonical.
Orps(kScratchDoubleReg, dst.fp());
// Canonicalize NaNs by quieting and clearing the payload.
- Cmpps(dst.fp(), kScratchDoubleReg, int8_t{3});
+ Cmpunordps(dst.fp(), kScratchDoubleReg);
Orps(kScratchDoubleReg, dst.fp());
Psrld(dst.fp(), byte{10});
Andnps(dst.fp(), kScratchDoubleReg);
@@ -3710,7 +3698,7 @@ void LiftoffAssembler::emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs,
// Propagate sign discrepancy and (subtle) quiet NaNs.
Subps(kScratchDoubleReg, dst.fp());
// Canonicalize NaNs by clearing the payload. Sign is non-deterministic.
- Cmpps(dst.fp(), kScratchDoubleReg, int8_t{3});
+ Cmpunordps(dst.fp(), kScratchDoubleReg);
Psrld(dst.fp(), byte{10});
Andnps(dst.fp(), kScratchDoubleReg);
}
@@ -3733,11 +3721,11 @@ void LiftoffAssembler::emit_f64x2_abs(LiftoffRegister dst,
LiftoffRegister src) {
if (dst.fp() == src.fp()) {
Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- Psrlq(kScratchDoubleReg, static_cast<byte>(1));
+ Psrlq(kScratchDoubleReg, byte{1});
Andpd(dst.fp(), kScratchDoubleReg);
} else {
Pcmpeqd(dst.fp(), dst.fp());
- Psrlq(dst.fp(), static_cast<byte>(1));
+ Psrlq(dst.fp(), byte{1});
Andpd(dst.fp(), src.fp());
}
}
@@ -3814,61 +3802,12 @@ void LiftoffAssembler::emit_f64x2_div(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_f64x2_min(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // The minpd instruction doesn't propagate NaNs and +0's in its first
- // operand. Perform minpd in both orders, merge the results, and adjust.
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vminpd(kScratchDoubleReg, lhs.fp(), rhs.fp());
- vminpd(dst.fp(), rhs.fp(), lhs.fp());
- } else if (dst.fp() == lhs.fp() || dst.fp() == rhs.fp()) {
- XMMRegister src = dst.fp() == lhs.fp() ? rhs.fp() : lhs.fp();
- movaps(kScratchDoubleReg, src);
- minpd(kScratchDoubleReg, dst.fp());
- minpd(dst.fp(), src);
- } else {
- movaps(kScratchDoubleReg, lhs.fp());
- minpd(kScratchDoubleReg, rhs.fp());
- movaps(dst.fp(), rhs.fp());
- minpd(dst.fp(), lhs.fp());
- }
- // propagate -0's and NaNs, which may be non-canonical.
- Orpd(kScratchDoubleReg, dst.fp());
- // Canonicalize NaNs by quieting and clearing the payload.
- Cmppd(dst.fp(), kScratchDoubleReg, int8_t{3});
- Orpd(kScratchDoubleReg, dst.fp());
- Psrlq(dst.fp(), 13);
- Andnpd(dst.fp(), kScratchDoubleReg);
+ F64x2Min(dst.fp(), lhs.fp(), rhs.fp(), kScratchDoubleReg);
}
void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // The maxpd instruction doesn't propagate NaNs and +0's in its first
- // operand. Perform maxpd in both orders, merge the results, and adjust.
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmaxpd(kScratchDoubleReg, lhs.fp(), rhs.fp());
- vmaxpd(dst.fp(), rhs.fp(), lhs.fp());
- } else if (dst.fp() == lhs.fp() || dst.fp() == rhs.fp()) {
- XMMRegister src = dst.fp() == lhs.fp() ? rhs.fp() : lhs.fp();
- movaps(kScratchDoubleReg, src);
- maxpd(kScratchDoubleReg, dst.fp());
- maxpd(dst.fp(), src);
- } else {
- movaps(kScratchDoubleReg, lhs.fp());
- maxpd(kScratchDoubleReg, rhs.fp());
- movaps(dst.fp(), rhs.fp());
- maxpd(dst.fp(), lhs.fp());
- }
- // Find discrepancies.
- Xorpd(dst.fp(), kScratchDoubleReg);
- // Propagate NaNs, which may be non-canonical.
- Orpd(kScratchDoubleReg, dst.fp());
- // Propagate sign discrepancy and (subtle) quiet NaNs.
- Subpd(kScratchDoubleReg, dst.fp());
- // Canonicalize NaNs by clearing the payload. Sign is non-deterministic.
- Cmppd(dst.fp(), kScratchDoubleReg, int8_t{3});
- Psrlq(dst.fp(), 13);
- Andnpd(dst.fp(), kScratchDoubleReg);
+ F64x2Max(dst.fp(), lhs.fp(), rhs.fp(), kScratchDoubleReg);
}
void LiftoffAssembler::emit_f64x2_pmin(LiftoffRegister dst, LiftoffRegister lhs,
@@ -4152,20 +4091,13 @@ void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst,
void LiftoffAssembler::emit_f32x4_extract_lane(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vshufps(dst.fp(), lhs.fp(), lhs.fp(), imm_lane_idx);
- } else {
- if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
- if (imm_lane_idx != 0) shufps(dst.fp(), dst.fp(), imm_lane_idx);
- }
+ F32x4ExtractLane(dst.fp(), lhs.fp(), imm_lane_idx);
}
void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
- Pextrq(kScratchRegister, lhs.fp(), static_cast<int8_t>(imm_lane_idx));
- Movq(dst.fp(), kScratchRegister);
+ F64x2ExtractLane(dst.fp(), lhs.fp(), imm_lane_idx);
}
void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst,
@@ -4241,22 +4173,7 @@ void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
uint8_t imm_lane_idx) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- if (imm_lane_idx == 0) {
- vpblendw(dst.fp(), src1.fp(), src2.fp(), 0b00001111);
- } else {
- vmovlhps(dst.fp(), src1.fp(), src2.fp());
- }
- } else {
- CpuFeatureScope scope(this, SSE4_1);
- if (dst.fp() != src1.fp()) movaps(dst.fp(), src1.fp());
- if (imm_lane_idx == 0) {
- pblendw(dst.fp(), src2.fp(), 0b00001111);
- } else {
- movlhps(dst.fp(), src2.fp());
- }
- }
+ F64x2ReplaceLane(dst.fp(), src1.fp(), src2.fp(), imm_lane_idx);
}
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
@@ -4428,6 +4345,12 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
addq(rsp, Immediate(size));
}
+void LiftoffAssembler::MaybeOSR() {
+ cmpq(liftoff::GetOSRTargetSlot(), Immediate(0));
+ j(not_equal, static_cast<Address>(WasmCode::kWasmOnStackReplace),
+ RelocInfo::WASM_STUB_CALL);
+}
+
void LiftoffStackSlots::Construct(int param_slots) {
DCHECK_LT(0, slots_.size());
SortInPushOrder();
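
Reviewer note: on x64 the frame now reserves a dedicated OSR-target slot: StaticStackFrameSize grows to kOSRTargetOffset, ResetOSRTarget zeroes the slot, and MaybeOSR jumps to the on-stack-replacement stub when the slot is nonzero. A rough standalone model of that protocol using plain data instead of a stack frame (names invented):

#include <cstdint>
#include <cstdio>

struct Frame {
  uintptr_t instance = 0;
  uintptr_t osr_target = 0;  // the extra slot below the instance slot
};

void ResetOSRTarget(Frame* f) { f->osr_target = 0; }

// Checked after returning from calls while running debug code: a nonzero
// slot means "switch to the replacement code at this address".
void MaybeOSR(const Frame* f) {
  if (f->osr_target != 0) {
    std::printf("would jump to replacement code at %p\n",
                reinterpret_cast<void*>(f->osr_target));
  }
}

int main() {
  Frame f;
  ResetOSRTarget(&f);
  MaybeOSR(&f);              // nothing to do
  f.osr_target = 0x1234;     // set by the runtime in the real protocol
  MaybeOSR(&f);
}
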
diff --git a/deps/v8/src/wasm/branch-hint-map.h b/deps/v8/src/wasm/branch-hint-map.h
new file mode 100644
index 0000000000..242bbecbce
--- /dev/null
+++ b/deps/v8/src/wasm/branch-hint-map.h
@@ -0,0 +1,46 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_BRANCH_HINT_MAP_H_
+#define V8_WASM_BRANCH_HINT_MAP_H_
+
+#include <unordered_map>
+
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace internal {
+
+namespace wasm {
+
+enum class WasmBranchHint : uint8_t {
+ kNoHint = 0,
+ kUnlikely = 1,
+ kLikely = 2,
+};
+
+class V8_EXPORT_PRIVATE BranchHintMap {
+ public:
+ void insert(uint32_t offset, WasmBranchHint hint) {
+ map_.emplace(offset, hint);
+ }
+ WasmBranchHint GetHintFor(uint32_t offset) const {
+ auto it = map_.find(offset);
+ if (it == map_.end()) {
+ return WasmBranchHint::kNoHint;
+ }
+ return it->second;
+ }
+
+ private:
+ std::unordered_map<uint32_t, WasmBranchHint> map_;
+};
+
+using BranchHintInfo = std::unordered_map<uint32_t, BranchHintMap>;
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_BRANCH_HINT_MAP_H_
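
Reviewer note: a short usage sketch for the new BranchHintMap, assuming the header above is available on the include path; the offsets and function index below are made up:

#include <cstdio>
#include <utility>

#include "src/wasm/branch-hint-map.h"

int main() {
  using v8::internal::wasm::BranchHintInfo;
  using v8::internal::wasm::BranchHintMap;
  using v8::internal::wasm::WasmBranchHint;

  BranchHintMap hints_for_func;
  hints_for_func.insert(/*offset=*/12, WasmBranchHint::kLikely);
  hints_for_func.insert(/*offset=*/34, WasmBranchHint::kUnlikely);

  BranchHintInfo info;  // per-module: function index -> hints
  info.emplace(/*func_index=*/0, std::move(hints_for_func));

  // Offsets without an entry fall back to kNoHint.
  bool likely = info[0].GetHintFor(12) == WasmBranchHint::kLikely;
  bool no_hint = info[0].GetHintFor(99) == WasmBranchHint::kNoHint;
  std::printf("likely=%d no_hint=%d\n", likely, no_hint);
}
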
diff --git a/deps/v8/src/wasm/c-api.cc b/deps/v8/src/wasm/c-api.cc
index 3af5afaee3..72d2e07305 100644
--- a/deps/v8/src/wasm/c-api.cc
+++ b/deps/v8/src/wasm/c-api.cc
@@ -257,7 +257,6 @@ void Engine::operator delete(void* p) { ::operator delete(p); }
auto Engine::make(own<Config>&& config) -> own<Engine> {
i::FLAG_expose_gc = true;
i::FLAG_experimental_wasm_reftypes = true;
- i::FLAG_experimental_wasm_mv = true;
auto engine = new (std::nothrow) EngineImpl;
if (!engine) return own<Engine>();
engine->platform = v8::platform::NewDefaultPlatform();
@@ -456,8 +455,7 @@ struct FuncTypeImpl : ExternTypeImpl {
ownvec<ValType> params;
ownvec<ValType> results;
- FuncTypeImpl(ownvec<ValType>& params, // NOLINT(runtime/references)
- ownvec<ValType>& results) // NOLINT(runtime/references)
+ FuncTypeImpl(ownvec<ValType>& params, ownvec<ValType>& results)
: ExternTypeImpl(EXTERN_FUNC),
params(std::move(params)),
results(std::move(results)) {}
@@ -508,8 +506,7 @@ struct GlobalTypeImpl : ExternTypeImpl {
own<ValType> content;
Mutability mutability;
- GlobalTypeImpl(own<ValType>& content, // NOLINT(runtime/references)
- Mutability mutability)
+ GlobalTypeImpl(own<ValType>& content, Mutability mutability)
: ExternTypeImpl(EXTERN_GLOBAL),
content(std::move(content)),
mutability(mutability) {}
@@ -561,8 +558,7 @@ struct TableTypeImpl : ExternTypeImpl {
own<ValType> element;
Limits limits;
- TableTypeImpl(own<ValType>& element, // NOLINT(runtime/references)
- Limits limits)
+ TableTypeImpl(own<ValType>& element, Limits limits)
: ExternTypeImpl(EXTERN_TABLE),
element(std::move(element)),
limits(limits) {}
@@ -653,9 +649,7 @@ struct ImportTypeImpl {
Name name;
own<ExternType> type;
- ImportTypeImpl(Name& module, // NOLINT(runtime/references)
- Name& name, // NOLINT(runtime/references)
- own<ExternType>& type) // NOLINT(runtime/references)
+ ImportTypeImpl(Name& module, Name& name, own<ExternType>& type)
: module(std::move(module)),
name(std::move(name)),
type(std::move(type)) {}
@@ -696,8 +690,7 @@ struct ExportTypeImpl {
Name name;
own<ExternType> type;
- ExportTypeImpl(Name& name, // NOLINT(runtime/references)
- own<ExternType>& type) // NOLINT(runtime/references)
+ ExportTypeImpl(Name& name, own<ExternType>& type)
: name(std::move(name)), type(std::move(type)) {}
};
@@ -1357,26 +1350,6 @@ i::Handle<i::Object> WasmRefToV8(i::Isolate* isolate, const Ref* ref) {
return impl(ref)->v8_object();
}
-i::Handle<i::Object> CallTargetForCaching(i::Isolate* isolate,
- i::Address real_call_target) {
- if (i::kTaggedSize == i::kInt32Size) {
- return isolate->factory()->NewForeign(real_call_target);
- } else {
- // 64-bit uncompressed platform.
- return i::handle(i::Smi((real_call_target << i::kSmiTagSize) | i::kSmiTag),
- isolate);
- }
-}
-
-i::Address CallTargetFromCache(i::Object cached_call_target) {
- if (i::kTaggedSize == i::kInt32Size) {
- return i::Foreign::cast(cached_call_target).foreign_address();
- } else {
- // 64-bit uncompressed platform.
- return cached_call_target.ptr() >> i::kSmiTagSize;
- }
-}
-
void PrepareFunctionData(i::Isolate* isolate,
i::Handle<i::WasmExportedFunctionData> function_data,
const i::wasm::FunctionSig* sig,
@@ -1390,12 +1363,6 @@ void PrepareFunctionData(i::Isolate* isolate,
// Compute packed args size.
function_data->set_packed_args_size(
i::wasm::CWasmArgumentsPacker::TotalSize(sig));
- // Get call target (function table offset), and wrap it as a cacheable object
- // (pseudo-Smi or Foreign, depending on platform).
- i::Handle<i::Object> call_target = CallTargetForCaching(
- isolate,
- function_data->instance().GetCallTarget(function_data->function_index()));
- function_data->set_wasm_call_target(*call_target);
}
void PushArgs(const i::wasm::FunctionSig* sig, const Val args[],
@@ -1532,8 +1499,7 @@ auto Func::call(const Val args[], Val results[]) const -> own<Trap> {
PrepareFunctionData(isolate, function_data, sig, instance->module());
i::Handle<i::Code> wrapper_code = i::Handle<i::Code>(
i::Code::cast(function_data->c_wrapper_code()), isolate);
- i::Address call_target =
- CallTargetFromCache(function_data->wasm_call_target());
+ i::Address call_target = function_data->foreign_address();
i::wasm::CWasmArgumentsPacker packer(function_data->packed_args_size());
PushArgs(sig, args, &packer, store);
diff --git a/deps/v8/src/wasm/compilation-environment.h b/deps/v8/src/wasm/compilation-environment.h
index 987180c83f..a10190f70b 100644
--- a/deps/v8/src/wasm/compilation-environment.h
+++ b/deps/v8/src/wasm/compilation-environment.h
@@ -38,8 +38,6 @@ enum RuntimeExceptionSupport : bool {
enum UseTrapHandler : bool { kUseTrapHandler = true, kNoTrapHandler = false };
-enum LowerSimd : bool { kLowerSimd = true, kNoLowerSimd = false };
-
// The {CompilationEnv} encapsulates the module data that is used during
// compilation. CompilationEnvs are shareable across multiple compilations.
struct CompilationEnv {
@@ -66,8 +64,6 @@ struct CompilationEnv {
// Features enabled for this compilation.
const WasmFeatures enabled_features;
- const LowerSimd lower_simd;
-
// We assume that memories of size >= half of the virtual address space
// cannot be allocated (see https://crbug.com/1201340).
static constexpr uint32_t kMaxMemoryPagesAtRuntime = std::min(
@@ -77,8 +73,7 @@ struct CompilationEnv {
constexpr CompilationEnv(const WasmModule* module,
UseTrapHandler use_trap_handler,
RuntimeExceptionSupport runtime_exception_support,
- const WasmFeatures& enabled_features,
- LowerSimd lower_simd = kNoLowerSimd)
+ const WasmFeatures& enabled_features)
: module(module),
use_trap_handler(use_trap_handler),
runtime_exception_support(runtime_exception_support),
@@ -92,8 +87,7 @@ struct CompilationEnv {
module && module->has_maximum_pages ? module->maximum_pages
: max_mem_pages()) *
uint64_t{kWasmPageSize})),
- enabled_features(enabled_features),
- lower_simd(lower_simd) {}
+ enabled_features(enabled_features) {}
};
// The wire bytes are either owned by the StreamingDecoder, or (after streaming)
@@ -127,6 +121,8 @@ class V8_EXPORT_PRIVATE CompilationState {
void CancelCompilation();
+ void CancelInitialCompilation();
+
void SetError();
void SetWireBytesStorage(std::shared_ptr<WireBytesStorage>);
diff --git a/deps/v8/src/wasm/function-body-decoder-impl.h b/deps/v8/src/wasm/function-body-decoder-impl.h
index d37f718681..7927e58d84 100644
--- a/deps/v8/src/wasm/function-body-decoder-impl.h
+++ b/deps/v8/src/wasm/function-body-decoder-impl.h
@@ -539,13 +539,6 @@ struct BlockTypeImmediate {
type = value_type_reader::read_value_type<validate>(decoder, pc, &length,
module, enabled);
} else {
- if (!VALIDATE(enabled.has_mv())) {
- DecodeError<validate>(decoder, pc,
- "invalid block type %" PRId64
- ", enable with --experimental-wasm-mv",
- block_type);
- return;
- }
type = kWasmBottom;
sig_index = static_cast<uint32_t>(block_type);
}
@@ -582,18 +575,6 @@ struct BranchDepthImmediate {
};
template <Decoder::ValidateFlag validate>
-struct BranchOnExceptionImmediate {
- BranchDepthImmediate<validate> depth;
- ExceptionIndexImmediate<validate> index;
- uint32_t length = 0;
- inline BranchOnExceptionImmediate(Decoder* decoder, const byte* pc)
- : depth(BranchDepthImmediate<validate>(decoder, pc)),
- index(ExceptionIndexImmediate<validate>(decoder, pc + depth.length)) {
- length = depth.length + index.length;
- }
-};
-
-template <Decoder::ValidateFlag validate>
struct FunctionIndexImmediate {
uint32_t index = 0;
uint32_t length = 1;
@@ -965,8 +946,10 @@ enum Reachability : uint8_t {
template <typename Value, Decoder::ValidateFlag validate>
struct ControlBase : public PcForErrors<validate> {
ControlKind kind = kControlBlock;
- uint32_t locals_count = 0;
- uint32_t stack_depth = 0; // stack height at the beginning of the construct.
+ uint32_t locals_count = 0; // Additional locals introduced in this 'let'.
+ uint32_t stack_depth = 0; // Stack height at the beginning of the construct.
+ int32_t previous_catch = -1; // Depth of the innermost catch containing this
+ // 'try'.
Reachability reachability = kReachable;
// Values merged into the start or end of this control construct.
@@ -1037,7 +1020,6 @@ struct ControlBase : public PcForErrors<validate> {
F(If, const Value& cond, Control* if_block) \
F(FallThruTo, Control* c) \
F(PopControl, Control* block) \
- F(EndControl, Control* block) \
/* Instructions: */ \
F(UnOp, WasmOpcode opcode, const Value& value, Value* result) \
F(BinOp, WasmOpcode opcode, const Value& lhs, const Value& rhs, \
@@ -1099,6 +1081,7 @@ struct ControlBase : public PcForErrors<validate> {
F(ReturnCallIndirect, const Value& index, \
const CallIndirectImmediate<validate>& imm, const Value args[]) \
F(BrOnNull, const Value& ref_object, uint32_t depth) \
+ F(BrOnNonNull, const Value& ref_object, uint32_t depth) \
F(SimdOp, WasmOpcode opcode, Vector<Value> args, Value* result) \
F(SimdLaneOp, WasmOpcode opcode, const SimdLaneImmediate<validate>& imm, \
const Vector<Value> inputs, Value* result) \
@@ -1160,6 +1143,8 @@ struct ControlBase : public PcForErrors<validate> {
F(AssertNull, const Value& obj, Value* result) \
F(BrOnCast, const Value& obj, const Value& rtt, Value* result_on_branch, \
uint32_t depth) \
+ F(BrOnCastFail, const Value& obj, const Value& rtt, \
+ Value* result_on_fallthrough, uint32_t depth) \
F(RefIsData, const Value& object, Value* result) \
F(RefAsData, const Value& object, Value* result) \
F(BrOnData, const Value& object, Value* value_on_branch, uint32_t br_depth) \
@@ -1184,9 +1169,7 @@ class WasmDecoder : public Decoder {
module_(module),
enabled_(enabled),
detected_(detected),
- sig_(sig) {
- if (sig_ && sig_->return_count() > 1) detected_->Add(kFeature_mv);
- }
+ sig_(sig) {}
Zone* zone() const { return local_types_.get_allocator().zone(); }
@@ -1433,9 +1416,6 @@ class WasmDecoder : public Decoder {
inline bool Complete(CallFunctionImmediate<validate>& imm) {
if (!VALIDATE(imm.index < module_->functions.size())) return false;
imm.sig = module_->functions[imm.index].sig;
- if (imm.sig->return_count() > 1) {
- this->detected_->Add(kFeature_mv);
- }
return true;
}
@@ -1450,13 +1430,11 @@ class WasmDecoder : public Decoder {
inline bool Complete(CallIndirectImmediate<validate>& imm) {
if (!VALIDATE(module_->has_signature(imm.sig_index))) return false;
imm.sig = module_->signature(imm.sig_index);
- if (imm.sig->return_count() > 1) {
- this->detected_->Add(kFeature_mv);
- }
return true;
}
inline bool Validate(const byte* pc, CallIndirectImmediate<validate>& imm) {
+ // Validate immediate table index.
if (!VALIDATE(imm.table_index < module_->tables.size())) {
DecodeError(pc, "call_indirect: table index immediate out of bounds");
return false;
@@ -1468,10 +1446,13 @@ class WasmDecoder : public Decoder {
imm.table_index);
return false;
}
+
+ // Validate immediate signature index.
if (!Complete(imm)) {
DecodeError(pc, "invalid signature index: #%u", imm.sig_index);
return false;
}
+
// Check that the dynamic signature for this call is a subtype of the static
// type of the table the function is defined in.
ValueType immediate_type = ValueType::Ref(imm.sig_index, kNonNullable);
@@ -1480,6 +1461,7 @@ class WasmDecoder : public Decoder {
"call_indirect: Immediate signature #%u is not a subtype of "
"immediate table #%u",
imm.sig_index, imm.table_index);
+ return false;
}
return true;
}
@@ -1503,13 +1485,6 @@ class WasmDecoder : public Decoder {
return checkAvailable(imm.table_count);
}
- inline bool Validate(const byte* pc,
- BranchOnExceptionImmediate<validate>& imm,
- size_t control_size) {
- return Validate(pc, imm.depth, control_size) &&
- Validate(pc + imm.depth.length, imm.index);
- }
-
inline bool Validate(const byte* pc, WasmOpcode opcode,
SimdLaneImmediate<validate>& imm) {
uint8_t num_lanes = 0;
@@ -1573,9 +1548,6 @@ class WasmDecoder : public Decoder {
if (imm.type != kWasmBottom) return true;
if (!VALIDATE(module_->has_signature(imm.sig_index))) return false;
imm.sig = module_->signature(imm.sig_index);
- if (imm.sig->return_count() > 1) {
- this->detected_->Add(kFeature_mv);
- }
return true;
}
@@ -1709,6 +1681,7 @@ class WasmDecoder : public Decoder {
case kExprBr:
case kExprBrIf:
case kExprBrOnNull:
+ case kExprBrOnNonNull:
case kExprDelegate: {
BranchDepthImmediate<validate> imm(decoder, pc + 1);
return 1 + imm.length;
@@ -1957,6 +1930,7 @@ class WasmDecoder : public Decoder {
return length + imm.length;
}
case kExprBrOnCast:
+ case kExprBrOnCastFail:
case kExprBrOnData:
case kExprBrOnFunc:
case kExprBrOnI31: {
@@ -2043,6 +2017,7 @@ class WasmDecoder : public Decoder {
case kExprBrIf:
case kExprBrTable:
case kExprIf:
+ case kExprBrOnNonNull:
return {1, 0};
case kExprLocalGet:
case kExprGlobalGet:
@@ -2137,6 +2112,7 @@ class WasmDecoder : public Decoder {
case kExprRefTest:
case kExprRefCast:
case kExprBrOnCast:
+ case kExprBrOnCastFail:
return {2, 1};
case kExprArraySet:
return {3, 0};
@@ -2183,12 +2159,22 @@ MemoryAccessImmediate<validate>::MemoryAccessImmediate(
: MemoryAccessImmediate(decoder, pc, max_alignment,
decoder->module_->is_memory64) {}
+// Only call this in contexts where {current_code_reachable_and_ok_} is known to
+// hold.
+#define CALL_INTERFACE(name, ...) \
+ do { \
+ DCHECK(!control_.empty()); \
+ DCHECK(current_code_reachable_and_ok_); \
+ DCHECK_EQ(current_code_reachable_and_ok_, \
+ this->ok() && control_.back().reachable()); \
+ interface_.name(this, ##__VA_ARGS__); \
+ } while (false)
#define CALL_INTERFACE_IF_OK_AND_REACHABLE(name, ...) \
do { \
DCHECK(!control_.empty()); \
DCHECK_EQ(current_code_reachable_and_ok_, \
this->ok() && control_.back().reachable()); \
- if (current_code_reachable_and_ok_) { \
+ if (V8_LIKELY(current_code_reachable_and_ok_)) { \
interface_.name(this, ##__VA_ARGS__); \
} \
} while (false)
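Editor's note: the new CALL_INTERFACE macro is the unchecked fast path for call sites that have already tested {current_code_reachable_and_ok_}, while CALL_INTERFACE_IF_OK_AND_REACHABLE keeps the guard (now annotated V8_LIKELY). A minimal standalone sketch of that split, with illustrative names rather than V8's real types:

```cpp
#include <cassert>
#include <cstdio>

// Stand-in for the graph-building interface.
struct Interface {
  void BrOrRet(int depth) { std::printf("emit branch, depth=%d\n", depth); }
};

struct DecoderSketch {
  Interface interface_;
  bool current_code_reachable_and_ok_ = true;

  // Analogue of CALL_INTERFACE: the caller must already have checked
  // reachability, so this only asserts it.
  template <typename F>
  void CallInterface(F&& f) {
    assert(current_code_reachable_and_ok_);
    f(interface_);
  }

  // Analogue of CALL_INTERFACE_IF_OK_AND_REACHABLE: the check stays here; the
  // real macro marks it V8_LIKELY, since most decoded code is reachable.
  template <typename F>
  void CallInterfaceIfReachable(F&& f) {
    if (current_code_reachable_and_ok_) f(interface_);
  }
};

int main() {
  DecoderSketch decoder;
  decoder.CallInterfaceIfReachable([](Interface& i) { i.BrOrRet(0); });  // emits
  decoder.current_code_reachable_and_ok_ = false;
  decoder.CallInterfaceIfReachable([](Interface& i) { i.BrOrRet(1); });  // skipped
  return 0;
}
```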
@@ -2289,33 +2275,39 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return WasmOpcodes::OpcodeName(opcode);
}
- inline WasmCodePosition position() {
+ WasmCodePosition position() const {
int offset = static_cast<int>(this->pc_ - this->start_);
DCHECK_EQ(this->pc_ - this->start_, offset); // overflows cannot happen
return offset;
}
- inline uint32_t control_depth() const {
+ uint32_t control_depth() const {
return static_cast<uint32_t>(control_.size());
}
- inline Control* control_at(uint32_t depth) {
+ Control* control_at(uint32_t depth) {
DCHECK_GT(control_.size(), depth);
return &control_.back() - depth;
}
- inline uint32_t stack_size() const {
+ uint32_t stack_size() const {
DCHECK_GE(stack_end_, stack_);
DCHECK_GE(kMaxUInt32, stack_end_ - stack_);
return static_cast<uint32_t>(stack_end_ - stack_);
}
- inline Value* stack_value(uint32_t depth) {
+ Value* stack_value(uint32_t depth) const {
DCHECK_LT(0, depth);
DCHECK_GE(stack_size(), depth);
return stack_end_ - depth;
}
+ int32_t current_catch() const { return current_catch_; }
+
+ uint32_t control_depth_of_current_catch() const {
+ return control_depth() - 1 - current_catch();
+ }
+
void SetSucceedingCodeDynamicallyUnreachable() {
Control* current = &control_.back();
if (current->reachable()) {
@@ -2324,7 +2316,12 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
}
+ inline uint32_t pc_relative_offset() const {
+ return this->pc_offset() - first_instruction_offset;
+ }
+
private:
+ uint32_t first_instruction_offset = 0;
Interface interface_;
// The value stack, stored as individual pointers for maximum performance.
@@ -2340,6 +2337,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// a cache for {ok() && control_.back().reachable()}).
bool current_code_reachable_and_ok_ = true;
+  // Index in {control_} of the innermost 'try' block, or -1 if there is none.
+ int32_t current_catch_ = -1;
+
static Value UnreachableValue(const uint8_t* pc) {
return Value{pc, kWasmBottom};
}
@@ -2519,6 +2519,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
ArgVector args = PeekArgs(imm.sig);
Control* try_block = PushControl(kControlTry, 0, args.length());
SetBlockType(try_block, imm, args.begin());
+ try_block->previous_catch = current_catch_;
+ current_catch_ = static_cast<int>(control_depth() - 1);
CALL_INTERFACE_IF_OK_AND_REACHABLE(Try, try_block);
DropArgs(imm.sig);
PushMergeValues(try_block, &try_block->start_merge);
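Editor's note: the decoder now tracks the innermost enclosing try via {current_catch_}, saving the previous value in the control's {previous_catch} when a try is opened and restoring it in catch/catch-all/unwind/delegate. A small standalone model of that bookkeeping (types are stand-ins, not V8's):

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

struct Control {
  bool is_try = false;
  int32_t previous_catch = -1;  // innermost try enclosing this 'try'
};

struct DecoderSketch {
  std::vector<Control> control_;
  int32_t current_catch_ = -1;  // -1 means: not inside any try

  void PushTry() {
    control_.push_back({/*is_try=*/true, /*previous_catch=*/current_catch_});
    current_catch_ = static_cast<int32_t>(control_.size() - 1);
  }

  // What 'catch', 'catch_all', 'unwind' and 'delegate' do: pop the try scope
  // without popping the control entry itself.
  void PopTryScope() {
    assert(!control_.empty() && control_.back().is_try);
    current_catch_ = control_.back().previous_catch;
  }
};

int main() {
  DecoderSketch d;
  d.PushTry();      // outer try  -> current_catch_ == 0
  d.PushTry();      // nested try -> current_catch_ == 1
  d.PopTryScope();  // back to the outer try
  assert(d.current_catch_ == 0);
  return 0;
}
```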
@@ -2543,7 +2545,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
this->DecodeError("catch after unwind for try");
return 0;
}
- FallThruTo(c);
+ FallThrough();
c->kind = kControlTryCatch;
// TODO(jkummerow): Consider moving the stack manipulation after the
// INTERFACE call for consistency.
@@ -2556,6 +2558,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Push(CreateValue(sig->GetParam(i)));
}
Vector<Value> values(stack_ + c->stack_depth, sig->parameter_count());
+ current_catch_ = c->previous_catch; // Pop try scope.
CALL_INTERFACE_IF_OK_AND_PARENT_REACHABLE(CatchException, imm, c, values);
current_code_reachable_and_ok_ = this->ok() && c->reachable();
return 1 + imm.length;
@@ -2584,11 +2587,11 @@ class WasmFullDecoder : public WasmDecoder<validate> {
"cannot delegate inside the catch handler of the target");
return 0;
}
- FallThruTo(c);
+ FallThrough();
CALL_INTERFACE_IF_OK_AND_PARENT_REACHABLE(Delegate, imm.depth + 1, c);
- current_code_reachable_and_ok_ = this->ok() && control_.back().reachable();
+ current_catch_ = c->previous_catch;
EndControl();
- PopControl(c);
+ PopControl();
return 1 + imm.length;
}
@@ -2608,9 +2611,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
this->error("cannot have catch-all after unwind");
return 0;
}
- FallThruTo(c);
+ FallThrough();
c->kind = kControlTryCatchAll;
c->reachability = control_at(1)->innerReachability();
+ current_catch_ = c->previous_catch; // Pop try scope.
CALL_INTERFACE_IF_OK_AND_PARENT_REACHABLE(CatchAll, c);
stack_end_ = stack_ + c->stack_depth;
current_code_reachable_and_ok_ = this->ok() && c->reachable();
@@ -2630,9 +2634,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
this->error("catch, catch-all or unwind already present for try");
return 0;
}
- FallThruTo(c);
+ FallThrough();
c->kind = kControlTryUnwind;
c->reachability = control_at(1)->innerReachability();
+ current_catch_ = c->previous_catch; // Pop try scope.
CALL_INTERFACE_IF_OK_AND_PARENT_REACHABLE(CatchAll, c);
stack_end_ = stack_ + c->stack_depth;
current_code_reachable_and_ok_ = this->ok() && c->reachable();
@@ -2645,33 +2650,69 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (!this->Validate(this->pc_ + 1, imm, control_.size())) return 0;
Value ref_object = Peek(0, 0);
Control* c = control_at(imm.depth);
- TypeCheckBranchResult check_result = TypeCheckBranch(c, true, 1);
+ if (!VALIDATE(TypeCheckBranch<true>(c, 1))) return 0;
switch (ref_object.type.kind()) {
case kBottom:
// We are in a polymorphic stack. Leave the stack as it is.
- DCHECK(check_result != kReachableBranch);
+ DCHECK(!current_code_reachable_and_ok_);
break;
case kRef:
// For a non-nullable value, we won't take the branch, and can leave
// the stack as it is.
break;
case kOptRef: {
- if (V8_LIKELY(check_result == kReachableBranch)) {
- CALL_INTERFACE_IF_OK_AND_REACHABLE(BrOnNull, ref_object, imm.depth);
Value result = CreateValue(
ValueType::Ref(ref_object.type.heap_type(), kNonNullable));
// The result of br_on_null has the same value as the argument (but a
// non-nullable type).
- CALL_INTERFACE_IF_OK_AND_REACHABLE(Forward, ref_object, &result);
- c->br_merge()->reached = true;
+ if (V8_LIKELY(current_code_reachable_and_ok_)) {
+ CALL_INTERFACE(BrOnNull, ref_object, imm.depth);
+ CALL_INTERFACE(Forward, ref_object, &result);
+ c->br_merge()->reached = true;
+ }
+ // In unreachable code, we still have to push a value of the correct
+ // type onto the stack.
Drop(ref_object);
Push(result);
- } else {
- // Even in non-reachable code, we need to push a value of the correct
- // type to the stack.
- Drop(ref_object);
- Push(CreateValue(
- ValueType::Ref(ref_object.type.heap_type(), kNonNullable)));
+ break;
+ }
+ default:
+ PopTypeError(0, ref_object, "object reference");
+ return 0;
+ }
+ return 1 + imm.length;
+ }
+
+ DECODE(BrOnNonNull) {
+ CHECK_PROTOTYPE_OPCODE(gc);
+ BranchDepthImmediate<validate> imm(this, this->pc_ + 1);
+ if (!this->Validate(this->pc_ + 1, imm, control_.size())) return 0;
+ Value ref_object = Peek(0, 0, kWasmAnyRef);
+ Drop(ref_object);
+ // Typechecking the branch and creating the branch merges requires the
+ // non-null value on the stack, so we push it temporarily.
+ Value result = CreateValue(ref_object.type.AsNonNull());
+ Push(result);
+ Control* c = control_at(imm.depth);
+ if (!VALIDATE(TypeCheckBranch<true>(c, 0))) return 0;
+ switch (ref_object.type.kind()) {
+ case kBottom:
+ // We are in unreachable code. Do nothing.
+ DCHECK(!current_code_reachable_and_ok_);
+ break;
+ case kRef:
+ // For a non-nullable value, we always take the branch.
+ if (V8_LIKELY(current_code_reachable_and_ok_)) {
+ CALL_INTERFACE(Forward, ref_object, stack_value(1));
+ CALL_INTERFACE(BrOrRet, imm.depth, 0);
+ c->br_merge()->reached = true;
+ }
+ break;
+ case kOptRef: {
+ if (V8_LIKELY(current_code_reachable_and_ok_)) {
+ CALL_INTERFACE(Forward, ref_object, stack_value(1));
+ CALL_INTERFACE(BrOnNonNull, ref_object, imm.depth);
+ c->br_merge()->reached = true;
}
break;
}
@@ -2679,6 +2720,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
PopTypeError(0, ref_object, "object reference");
return 0;
}
+    // If the branch is not taken, {ref_object} is null. Drop it from the stack.
+ Drop(result);
return 1 + imm.length;
}
@@ -2751,7 +2794,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
this->DecodeError("else already present for if");
return 0;
}
- if (!TypeCheckFallThru()) return 0;
+ if (!VALIDATE(TypeCheckFallThru())) return 0;
c->kind = kControlIfElse;
CALL_INTERFACE_IF_OK_AND_PARENT_REACHABLE(Else, c);
if (c->reachable()) c->end_merge.reached = true;
@@ -2764,27 +2807,24 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DECODE(End) {
DCHECK(!control_.empty());
Control* c = &control_.back();
- if (!VALIDATE(!c->is_incomplete_try())) {
- this->DecodeError("missing catch or catch-all in try");
- return 0;
- }
- if (c->is_onearmed_if()) {
- if (!VALIDATE(c->end_merge.arity == c->start_merge.arity)) {
- this->DecodeError(
- c->pc(), "start-arity and end-arity of one-armed if must match");
- return 0;
- }
- if (!TypeCheckOneArmedIf(c)) return 0;
- }
if (c->is_try_catch()) {
// Emulate catch-all + re-throw.
- FallThruTo(c);
+ FallThrough();
c->reachability = control_at(1)->innerReachability();
CALL_INTERFACE_IF_OK_AND_PARENT_REACHABLE(CatchAll, c);
current_code_reachable_and_ok_ =
this->ok() && control_.back().reachable();
CALL_INTERFACE_IF_OK_AND_REACHABLE(Rethrow, c);
EndControl();
+ PopControl();
+ return 1;
+ }
+ if (!VALIDATE(!c->is_incomplete_try())) {
+ this->DecodeError("missing catch or catch-all in try");
+ return 0;
+ }
+ if (c->is_onearmed_if()) {
+ if (!VALIDATE(TypeCheckOneArmedIf(c))) return 0;
}
if (c->is_try_unwind()) {
// Unwind implicitly rethrows at the end.
@@ -2798,7 +2838,6 @@ class WasmFullDecoder : public WasmDecoder<validate> {
this->local_types_.begin() + c->locals_count);
this->num_locals_ -= c->locals_count;
}
- if (!TypeCheckFallThru()) return 0;
if (control_.size() == 1) {
// If at the last (implicit) control, check we are at end.
@@ -2809,11 +2848,13 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// The result of the block is the return value.
trace_msg->Append("\n" TRACE_INST_FORMAT, startrel(this->pc_),
"(implicit) return");
- DoReturn();
+ DoReturn<kStrictCounting, kFallthroughMerge>();
control_.clear();
return 1;
}
- PopControl(c);
+
+ if (!VALIDATE(TypeCheckFallThru())) return 0;
+ PopControl();
return 1;
}
@@ -2853,9 +2894,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
BranchDepthImmediate<validate> imm(this, this->pc_ + 1);
if (!this->Validate(this->pc_ + 1, imm, control_.size())) return 0;
Control* c = control_at(imm.depth);
- TypeCheckBranchResult check_result = TypeCheckBranch(c, false, 0);
- if (V8_LIKELY(check_result == kReachableBranch)) {
- CALL_INTERFACE_IF_OK_AND_REACHABLE(BrOrRet, imm.depth, 0);
+ if (!VALIDATE(TypeCheckBranch<false>(c, 0))) return 0;
+ if (V8_LIKELY(current_code_reachable_and_ok_)) {
+ CALL_INTERFACE(BrOrRet, imm.depth, 0);
c->br_merge()->reached = true;
}
EndControl();
@@ -2867,9 +2908,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (!this->Validate(this->pc_ + 1, imm, control_.size())) return 0;
Value cond = Peek(0, 0, kWasmI32);
Control* c = control_at(imm.depth);
- TypeCheckBranchResult check_result = TypeCheckBranch(c, true, 1);
- if (V8_LIKELY(check_result == kReachableBranch)) {
- CALL_INTERFACE_IF_OK_AND_REACHABLE(BrIf, cond, imm.depth);
+ if (!VALIDATE(TypeCheckBranch<true>(c, 1))) return 0;
+ if (V8_LIKELY(current_code_reachable_and_ok_)) {
+ CALL_INTERFACE(BrIf, cond, imm.depth);
c->br_merge()->reached = true;
}
Drop(cond);
@@ -2887,40 +2928,38 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// all branch targets as reachable after the {CALL_INTERFACE} call.
std::vector<bool> br_targets(control_.size());
- // The result types of the br_table instruction. We have to check the
- // stack against these types. Only needed during validation.
- std::vector<ValueType> result_types;
+ uint32_t arity = 0;
while (iterator.has_next()) {
const uint32_t index = iterator.cur_index();
const byte* pos = iterator.pc();
- uint32_t target = iterator.next();
- if (!VALIDATE(ValidateBrTableTarget(target, pos, index))) return 0;
+ const uint32_t target = iterator.next();
+ if (!VALIDATE(target < control_depth())) {
+ this->DecodeError(pos, "invalid branch depth: %u", target);
+ return 0;
+ }
// Avoid redundant branch target checks.
if (br_targets[target]) continue;
br_targets[target] = true;
if (validate) {
if (index == 0) {
- // With the first branch target, initialize the result types.
- result_types = InitializeBrTableResultTypes(target);
- } else if (!UpdateBrTableResultTypes(&result_types, target, pos,
- index)) {
+ arity = control_at(target)->br_merge()->arity;
+ } else if (!VALIDATE(control_at(target)->br_merge()->arity == arity)) {
+ this->DecodeError(
+ pos, "br_table: label arity inconsistent with previous arity %d",
+ arity);
return 0;
}
+ if (!VALIDATE(TypeCheckBranch<false>(control_at(target), 1))) return 0;
}
}
- if (!VALIDATE(TypeCheckBrTable(result_types, 1))) return 0;
-
- DCHECK(this->ok());
-
- if (current_code_reachable_and_ok_) {
- CALL_INTERFACE_IF_OK_AND_REACHABLE(BrTable, imm, key);
+ if (V8_LIKELY(current_code_reachable_and_ok_)) {
+ CALL_INTERFACE(BrTable, imm, key);
- for (int i = 0, e = control_depth(); i < e; ++i) {
- if (!br_targets[i]) continue;
- control_at(i)->br_merge()->reached = true;
+ for (uint32_t i = 0; i < control_depth(); ++i) {
+ control_at(i)->br_merge()->reached |= br_targets[i];
}
}
Drop(key);
@@ -2929,22 +2968,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
DECODE(Return) {
- if (V8_LIKELY(current_code_reachable_and_ok_)) {
- if (!VALIDATE(TypeCheckReturn())) return 0;
- DoReturn();
- } else {
- // We inspect all return values from the stack to check their type.
- // Since we deal with unreachable code, we do not have to keep the
- // values.
- int num_returns = static_cast<int>(this->sig_->return_count());
- for (int i = num_returns - 1, depth = 0; i >= 0; --i, ++depth) {
- Peek(depth, i, this->sig_->GetReturn(i));
- }
- Drop(num_returns);
- }
-
- EndControl();
- return 1;
+ return DoReturn<kNonStrictCounting, kReturnMerge>() ? 1 : 0;
}
DECODE(Unreachable) {
@@ -3409,6 +3433,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DECODE_IMPL(CatchAll);
DECODE_IMPL(Unwind);
DECODE_IMPL(BrOnNull);
+ DECODE_IMPL(BrOnNonNull);
DECODE_IMPL(Let);
DECODE_IMPL(Loop);
DECODE_IMPL(If);
@@ -3490,6 +3515,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
CALL_INTERFACE_IF_OK_AND_REACHABLE(StartFunctionBody, c);
}
+ first_instruction_offset = this->pc_offset();
// Decode the function body.
while (this->pc_ < this->end_) {
// Most operations only grow the stack by at least one element (unary and
@@ -3526,7 +3552,6 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Control* current = &control_.back();
DCHECK_LE(stack_ + current->stack_depth, stack_end_);
stack_end_ = stack_ + current->stack_depth;
- CALL_INTERFACE_IF_OK_AND_REACHABLE(EndControl, current);
current->reachability = kUnreachable;
current_code_reachable_and_ok_ = false;
}
@@ -3642,11 +3667,12 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return &control_.back();
}
- void PopControl(Control* c) {
+ void PopControl() {
// This cannot be the outermost control block.
DCHECK_LT(1, control_.size());
+ Control* c = &control_.back();
+ DCHECK_LE(stack_ + c->stack_depth, stack_end_);
- DCHECK_EQ(c, &control_.back());
CALL_INTERFACE_IF_OK_AND_PARENT_REACHABLE(PopControl, c);
// A loop just leaves the values on the stack.
@@ -3658,7 +3684,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// If the parent block was reachable before, but the popped control does not
// return to here, this block becomes "spec only reachable".
if (!parent_reached) SetSucceedingCodeDynamicallyUnreachable();
- current_code_reachable_and_ok_ = control_.back().reachable();
+ current_code_reachable_and_ok_ = this->ok() && control_.back().reachable();
}
int DecodeLoadMem(LoadType type, int prefix_len = 1) {
@@ -3739,92 +3765,6 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return prefix_len + imm.length;
}
- bool ValidateBrTableTarget(uint32_t target, const byte* pos, int index) {
- if (!VALIDATE(target < this->control_.size())) {
- this->DecodeError(pos, "improper branch in br_table target %u (depth %u)",
- index, target);
- return false;
- }
- return true;
- }
-
- std::vector<ValueType> InitializeBrTableResultTypes(uint32_t target) {
- Merge<Value>* merge = control_at(target)->br_merge();
- int br_arity = merge->arity;
- std::vector<ValueType> result(br_arity);
- for (int i = 0; i < br_arity; ++i) {
- result[i] = (*merge)[i].type;
- }
- return result;
- }
-
- bool UpdateBrTableResultTypes(std::vector<ValueType>* result_types,
- uint32_t target, const byte* pos, int index) {
- Merge<Value>* merge = control_at(target)->br_merge();
- int br_arity = merge->arity;
- // First we check if the arities match.
- if (!VALIDATE(br_arity == static_cast<int>(result_types->size()))) {
- this->DecodeError(pos,
- "inconsistent arity in br_table target %u (previous "
- "was %zu, this one is %u)",
- index, result_types->size(), br_arity);
- return false;
- }
-
- for (int i = 0; i < br_arity; ++i) {
- if (this->enabled_.has_reftypes()) {
- // The expected type is the biggest common sub type of all targets.
- (*result_types)[i] =
- CommonSubtype((*result_types)[i], (*merge)[i].type, this->module_);
- } else {
- // All target must have the same signature.
- if (!VALIDATE((*result_types)[i] == (*merge)[i].type)) {
- this->DecodeError(pos,
- "inconsistent type in br_table target %u (previous "
- "was %s, this one is %s)",
- index, (*result_types)[i].name().c_str(),
- (*merge)[i].type.name().c_str());
- return false;
- }
- }
- }
- return true;
- }
-
- bool TypeCheckBrTable(const std::vector<ValueType>& result_types,
- uint32_t drop_values) {
- int br_arity = static_cast<int>(result_types.size());
- if (V8_LIKELY(!control_.back().unreachable())) {
- int available =
- static_cast<int>(stack_size()) - control_.back().stack_depth;
- available -= std::min(available, static_cast<int>(drop_values));
- // There have to be enough values on the stack.
- if (!VALIDATE(available >= br_arity)) {
- this->DecodeError(
- "expected %u elements on the stack for branch to @%d, found %u",
- br_arity, startrel(control_.back().pc()), available);
- return false;
- }
- Value* stack_values = stack_end_ - br_arity - drop_values;
- // Type-check the topmost br_arity values on the stack.
- for (int i = 0; i < br_arity; ++i) {
- Value& val = stack_values[i];
- if (!VALIDATE(IsSubtypeOf(val.type, result_types[i], this->module_))) {
- this->DecodeError("type error in merge[%u] (expected %s, got %s)", i,
- result_types[i].name().c_str(),
- val.type.name().c_str());
- return false;
- }
- }
- } else { // !control_.back().reachable()
- // Type-check the values on the stack.
- for (int i = 0; i < br_arity; ++i) {
- Peek(i + drop_values, i + 1, result_types[i]);
- }
- }
- return this->ok();
- }
-
uint32_t SimdConstOp(uint32_t opcode_length) {
Simd128Immediate<validate> imm(this, this->pc_ + opcode_length);
Value result = CreateValue(kWasmS128);
@@ -4377,7 +4317,6 @@ class WasmFullDecoder : public WasmDecoder<validate> {
CALL_INTERFACE_IF_OK_AND_REACHABLE(Drop);
CALL_INTERFACE_IF_OK_AND_REACHABLE(AssertNull, obj, &value);
} else {
- // TODO(manoskouk): Change the trap label.
CALL_INTERFACE_IF_OK_AND_REACHABLE(Trap,
TrapReason::kTrapIllegalCast);
EndControl();
@@ -4420,33 +4359,92 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// significantly more convenient to pass around the values that
// will be on the stack when the branch is taken.
// TODO(jkummerow): Reconsider this choice.
- Drop(2); // {obj} and {ret}.
+ Drop(2); // {obj} and {rtt}.
Value result_on_branch = CreateValue(
rtt.type.is_bottom()
? kWasmBottom
: ValueType::Ref(rtt.type.ref_index(), kNonNullable));
Push(result_on_branch);
- TypeCheckBranchResult check_result = TypeCheckBranch(c, true, 0);
- if (V8_LIKELY(check_result == kReachableBranch)) {
- // This logic ensures that code generation can assume that functions
- // can only be cast to function types, and data objects to data types.
- if (V8_LIKELY(ObjectRelatedWithRtt(obj, rtt))) {
- // The {value_on_branch} parameter we pass to the interface must
- // be pointer-identical to the object on the stack, so we can't
- // reuse {result_on_branch} which was passed-by-value to {Push}.
- Value* value_on_branch = stack_value(1);
- CALL_INTERFACE_IF_OK_AND_REACHABLE(
- BrOnCast, obj, rtt, value_on_branch, branch_depth.depth);
+ if (!VALIDATE(TypeCheckBranch<true>(c, 0))) return 0;
+ // This logic ensures that code generation can assume that functions
+ // can only be cast to function types, and data objects to data types.
+ if (V8_LIKELY(ObjectRelatedWithRtt(obj, rtt))) {
+ // The {value_on_branch} parameter we pass to the interface must
+ // be pointer-identical to the object on the stack, so we can't
+ // reuse {result_on_branch} which was passed-by-value to {Push}.
+ Value* value_on_branch = stack_value(1);
+ if (V8_LIKELY(current_code_reachable_and_ok_)) {
+ CALL_INTERFACE(BrOnCast, obj, rtt, value_on_branch,
+ branch_depth.depth);
c->br_merge()->reached = true;
}
- // Otherwise the types are unrelated. Do not branch.
- } else if (check_result == kInvalidStack) {
- return 0;
}
+ // Otherwise the types are unrelated. Do not branch.
Drop(result_on_branch);
Push(obj); // Restore stack state on fallthrough.
return opcode_length + branch_depth.length;
}
+ case kExprBrOnCastFail: {
+ BranchDepthImmediate<validate> branch_depth(this,
+ this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, branch_depth,
+ control_.size())) {
+ return 0;
+ }
+ Value rtt = Peek(0, 1);
+ if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
+ PopTypeError(1, rtt, "rtt");
+ return 0;
+ }
+ Value obj = Peek(1, 0);
+ if (!VALIDATE(IsSubtypeOf(obj.type, kWasmFuncRef, this->module_) ||
+ IsSubtypeOf(obj.type,
+ ValueType::Ref(HeapType::kData, kNullable),
+ this->module_) ||
+ obj.type.is_bottom())) {
+ PopTypeError(0, obj, "subtype of (ref null func) or (ref null data)");
+ return 0;
+ }
+ Control* c = control_at(branch_depth.depth);
+ if (c->br_merge()->arity == 0) {
+ this->DecodeError(
+ "br_on_cast_fail must target a branch of arity at least 1");
+ return 0;
+ }
+ // Attention: contrary to most other instructions, we modify the stack
+ // before calling the interface function. This makes it significantly
+ // more convenient to pass around the values that will be on the stack
+ // when the branch is taken. In this case, we leave {obj} on the stack
+ // to type check the branch.
+ // TODO(jkummerow): Reconsider this choice.
+ Drop(rtt);
+ if (!VALIDATE(TypeCheckBranch<true>(c, 0))) return 0;
+ Value result_on_fallthrough = CreateValue(
+ rtt.type.is_bottom()
+ ? kWasmBottom
+ : ValueType::Ref(rtt.type.ref_index(), kNonNullable));
+ // This logic ensures that code generation can assume that functions
+ // can only be cast to function types, and data objects to data types.
+ if (V8_LIKELY(current_code_reachable_and_ok_)) {
+ if (V8_LIKELY(ObjectRelatedWithRtt(obj, rtt))) {
+ CALL_INTERFACE(BrOnCastFail, obj, rtt, &result_on_fallthrough,
+ branch_depth.depth);
+ } else {
+ // Drop {rtt} in the interface.
+ CALL_INTERFACE(Drop);
+ // Otherwise the types are unrelated. Always branch.
+ CALL_INTERFACE(BrOrRet, branch_depth.depth, 0);
+ // We know that the following code is not reachable, but according
+ // to the spec it technically is. Set it to spec-only reachable.
+ SetSucceedingCodeDynamicallyUnreachable();
+ }
+ c->br_merge()->reached = true;
+ }
+        // Make sure the correct value is on the stack on fallthrough.
+ Drop(obj);
+ Push(result_on_fallthrough);
+ return opcode_length + branch_depth.length;
+ }
#define ABSTRACT_TYPE_CHECK(heap_type) \
case kExprRefIs##heap_type: { \
Value arg = Peek(0, 0, kWasmAnyRef); \
@@ -4510,25 +4508,20 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Value result_on_branch =
CreateValue(ValueType::Ref(heap_type, kNonNullable));
Push(result_on_branch);
- TypeCheckBranchResult check_result = TypeCheckBranch(c, true, 0);
- if (V8_LIKELY(check_result == kReachableBranch)) {
- // The {value_on_branch} parameter we pass to the interface must be
- // pointer-identical to the object on the stack, so we can't reuse
- // {result_on_branch} which was passed-by-value to {Push}.
- Value* value_on_branch = stack_value(1);
+ if (!VALIDATE(TypeCheckBranch<true>(c, 0))) return 0;
+ // The {value_on_branch} parameter we pass to the interface must be
+ // pointer-identical to the object on the stack, so we can't reuse
+ // {result_on_branch} which was passed-by-value to {Push}.
+ Value* value_on_branch = stack_value(1);
+ if (V8_LIKELY(current_code_reachable_and_ok_)) {
if (opcode == kExprBrOnFunc) {
- CALL_INTERFACE_IF_OK_AND_REACHABLE(BrOnFunc, obj, value_on_branch,
- branch_depth.depth);
+ CALL_INTERFACE(BrOnFunc, obj, value_on_branch, branch_depth.depth);
} else if (opcode == kExprBrOnData) {
- CALL_INTERFACE_IF_OK_AND_REACHABLE(BrOnData, obj, value_on_branch,
- branch_depth.depth);
+ CALL_INTERFACE(BrOnData, obj, value_on_branch, branch_depth.depth);
} else {
- CALL_INTERFACE_IF_OK_AND_REACHABLE(BrOnI31, obj, value_on_branch,
- branch_depth.depth);
+ CALL_INTERFACE(BrOnI31, obj, value_on_branch, branch_depth.depth);
}
c->br_merge()->reached = true;
- } else if (check_result == kInvalidStack) {
- return 0;
}
Drop(result_on_branch);
Push(obj); // Restore stack state on fallthrough.
@@ -4714,11 +4707,6 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
}
- void DoReturn() {
- DCHECK_GE(stack_size(), this->sig_->return_count());
- CALL_INTERFACE_IF_OK_AND_REACHABLE(DoReturn, 0);
- }
-
V8_INLINE void EnsureStackSpace(int slots_needed) {
if (V8_LIKELY(stack_capacity_end_ - stack_end_ >= slots_needed)) return;
GrowStackSpace(slots_needed);
@@ -4842,7 +4830,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// TODO(wasm): This check is often redundant.
if (V8_UNLIKELY(stack_size() < limit + count)) {
// Popping past the current control start in reachable code.
- if (!VALIDATE(!control_.back().reachable())) {
+ if (!VALIDATE(!current_code_reachable_and_ok_)) {
NotEnoughArgumentsError(0);
}
// Pop what we can.
@@ -4854,188 +4842,152 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// For more descriptive call sites:
V8_INLINE void Drop(const Value& /* unused */) { Drop(1); }
- // Pops values from the stack, as defined by {merge}. Thereby we type-check
- // unreachable merges. Afterwards the values are pushed again on the stack
- // according to the signature in {merge}. This is done so follow-up validation
- // is possible.
- bool TypeCheckUnreachableMerge(Merge<Value>& merge, bool conditional_branch,
- uint32_t drop_values = 0) {
- int arity = merge.arity;
- // For conditional branches, stack value '0' is the condition of the branch,
- // and the result values start at index '1'.
- int index_offset = conditional_branch ? 1 : 0;
+ enum StackElementsCountMode : bool {
+ kNonStrictCounting = false,
+ kStrictCounting = true
+ };
+
+ enum MergeType { kBranchMerge, kReturnMerge, kFallthroughMerge };
+
+ // - If the current code is reachable, check if the current stack values are
+ // compatible with {merge} based on their number and types. Disregard the
+ // first {drop_values} on the stack. If {strict_count}, check that
+ // #(stack elements) == {merge->arity}, otherwise
+ // #(stack elements) >= {merge->arity}.
+ // - If the current code is unreachable, check if any values that may exist on
+ // top of the stack are compatible with {merge}. If {push_branch_values},
+ // push back to the stack values based on the type of {merge} (this is
+ // needed for conditional branches due to their typing rules, and
+ // fallthroughs so that the outer control finds the expected values on the
+ // stack). TODO(manoskouk): We expect the unreachable-code behavior to
+ // change, either due to relaxation of dead code verification, or the
+ // introduction of subtyping.
+ template <StackElementsCountMode strict_count, bool push_branch_values,
+ MergeType merge_type>
+ bool TypeCheckStackAgainstMerge(uint32_t drop_values, Merge<Value>* merge) {
+ static_assert(validate, "Call this function only within VALIDATE");
+ constexpr const char* merge_description =
+ merge_type == kBranchMerge
+ ? "branch"
+ : merge_type == kReturnMerge ? "return" : "fallthru";
+ uint32_t arity = merge->arity;
+ uint32_t actual = stack_size() - control_.back().stack_depth;
+ if (V8_LIKELY(current_code_reachable_and_ok_)) {
+ if (V8_UNLIKELY(strict_count ? actual != drop_values + arity
+ : actual < drop_values + arity)) {
+ this->DecodeError("expected %u elements on the stack for %s, found %u",
+ arity, merge_description,
+ actual >= drop_values ? actual - drop_values : 0);
+ return false;
+ }
+ // Typecheck the topmost {merge->arity} values on the stack.
+ Value* stack_values = stack_end_ - (arity + drop_values);
+ for (uint32_t i = 0; i < arity; ++i) {
+ Value& val = stack_values[i];
+ Value& old = (*merge)[i];
+ if (!IsSubtypeOf(val.type, old.type, this->module_)) {
+ this->DecodeError("type error in %s[%u] (expected %s, got %s)",
+ merge_description, i, old.type.name().c_str(),
+ val.type.name().c_str());
+ return false;
+ }
+ }
+ return true;
+ }
+ // Unreachable code validation starts here.
+ if (V8_UNLIKELY(strict_count && actual > drop_values + arity)) {
+ this->DecodeError("expected %u elements on the stack for %s, found %u",
+ arity, merge_description,
+ actual >= drop_values ? actual - drop_values : 0);
+ return false;
+ }
+ // TODO(manoskouk): Use similar code as above if we keep unreachable checks.
for (int i = arity - 1, depth = drop_values; i >= 0; --i, ++depth) {
- Peek(depth, index_offset + i, merge[i].type);
- }
- // Push values of the correct type onto the stack.
- Drop(drop_values);
- Drop(arity);
- // {Drop} is adaptive for polymorphic stacks: it might drop fewer values
- // than requested. So ensuring stack space here is not redundant.
- EnsureStackSpace(arity + drop_values);
- for (int i = 0; i < arity; i++) Push(CreateValue(merge[i].type));
- // {drop_values} are about to be dropped anyway, so we can forget their
- // previous types, but we do have to maintain the correct stack height.
- for (uint32_t i = 0; i < drop_values; i++) {
- Push(UnreachableValue(this->pc_));
+ Peek(depth, i, (*merge)[i].type);
+ }
+ if (push_branch_values) {
+ Drop(drop_values);
+ Drop(arity);
+ // {Drop} is adaptive for polymorphic stacks: it might drop fewer values
+ // than requested. So ensuring stack space here is not redundant.
+ EnsureStackSpace(drop_values + arity);
+ // Push values of the correct type onto the stack.
+ for (int i = 0; i < static_cast<int>(arity); i++) {
+ Push(CreateValue((*merge)[i].type));
+ }
+ // {drop_values} are about to be dropped anyway, so we can forget their
+ // previous types, but we do have to maintain the correct stack height.
+ for (uint32_t i = 0; i < drop_values; i++) {
+ Push(UnreachableValue(this->pc_));
+ }
}
return this->ok();
}
+ template <StackElementsCountMode strict_count, MergeType merge_type>
+ bool DoReturn() {
+ if (!VALIDATE((TypeCheckStackAgainstMerge<strict_count, false, merge_type>(
+ 0, &control_.front().end_merge)))) {
+ return false;
+ }
+ DCHECK_IMPLIES(current_code_reachable_and_ok_,
+ stack_size() >= this->sig_->return_count());
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(DoReturn, 0);
+ EndControl();
+ return true;
+ }
+
int startrel(const byte* ptr) { return static_cast<int>(ptr - this->start_); }
- void FallThruTo(Control* c) {
- DCHECK_EQ(c, &control_.back());
+ void FallThrough() {
+ Control* c = &control_.back();
DCHECK_NE(c->kind, kControlLoop);
- if (!TypeCheckFallThru()) return;
+ if (!VALIDATE(TypeCheckFallThru())) return;
CALL_INTERFACE_IF_OK_AND_REACHABLE(FallThruTo, c);
if (c->reachable()) c->end_merge.reached = true;
}
- bool TypeCheckMergeValues(Control* c, uint32_t drop_values,
- Merge<Value>* merge) {
- static_assert(validate, "Call this function only within VALIDATE");
- DCHECK(merge == &c->start_merge || merge == &c->end_merge);
- DCHECK_GE(stack_size() - drop_values, c->stack_depth + merge->arity);
- Value* stack_values = stack_value(merge->arity + drop_values);
- // Typecheck the topmost {merge->arity} values on the stack.
- for (uint32_t i = 0; i < merge->arity; ++i) {
- Value& val = stack_values[i];
- Value& old = (*merge)[i];
- if (!VALIDATE(IsSubtypeOf(val.type, old.type, this->module_))) {
- this->DecodeError("type error in merge[%u] (expected %s, got %s)", i,
- old.type.name().c_str(), val.type.name().c_str());
- return false;
- }
- }
-
- return true;
- }
-
bool TypeCheckOneArmedIf(Control* c) {
static_assert(validate, "Call this function only within VALIDATE");
DCHECK(c->is_onearmed_if());
- DCHECK_EQ(c->start_merge.arity, c->end_merge.arity);
+ if (c->end_merge.arity != c->start_merge.arity) {
+ this->DecodeError(c->pc(),
+ "start-arity and end-arity of one-armed if must match");
+ return false;
+ }
for (uint32_t i = 0; i < c->start_merge.arity; ++i) {
Value& start = c->start_merge[i];
Value& end = c->end_merge[i];
- if (!VALIDATE(IsSubtypeOf(start.type, end.type, this->module_))) {
+ if (!IsSubtypeOf(start.type, end.type, this->module_)) {
this->DecodeError("type error in merge[%u] (expected %s, got %s)", i,
end.type.name().c_str(), start.type.name().c_str());
return false;
}
}
-
return true;
}
bool TypeCheckFallThru() {
static_assert(validate, "Call this function only within VALIDATE");
- Control& c = control_.back();
- if (V8_LIKELY(c.reachable())) {
- uint32_t expected = c.end_merge.arity;
- DCHECK_GE(stack_size(), c.stack_depth);
- uint32_t actual = stack_size() - c.stack_depth;
- // Fallthrus must match the arity of the control exactly.
- if (!VALIDATE(actual == expected)) {
- this->DecodeError(
- "expected %u elements on the stack for fallthru to @%d, found %u",
- expected, startrel(c.pc()), actual);
- return false;
- }
- if (expected == 0) return true; // Fast path.
-
- return TypeCheckMergeValues(&c, 0, &c.end_merge);
- }
-
- // Type-check an unreachable fallthru. First we do an arity check, then a
- // type check. Note that type-checking may require an adjustment of the
- // stack, if some stack values are missing to match the block signature.
- Merge<Value>& merge = c.end_merge;
- int arity = static_cast<int>(merge.arity);
- int available = static_cast<int>(stack_size()) - c.stack_depth;
- // For fallthrus, not more than the needed values should be available.
- if (!VALIDATE(available <= arity)) {
- this->DecodeError(
- "expected %u elements on the stack for fallthru to @%d, found %u",
- arity, startrel(c.pc()), available);
- return false;
- }
- // Pop all values from the stack for type checking of existing stack
- // values.
- return TypeCheckUnreachableMerge(merge, false);
+ return TypeCheckStackAgainstMerge<kStrictCounting, true, kFallthroughMerge>(
+ 0, &control_.back().end_merge);
}
- enum TypeCheckBranchResult {
- kReachableBranch,
- kUnreachableBranch,
- kInvalidStack,
- };
-
- // If the type code is reachable, check if the current stack values are
+ // If the current code is reachable, check if the current stack values are
// compatible with a jump to {c}, based on their number and types.
// Otherwise, we have a polymorphic stack: check if any values that may exist
- // on top of the stack are compatible with {c}, and push back to the stack
- // values based on the type of {c}.
- TypeCheckBranchResult TypeCheckBranch(Control* c, bool conditional_branch,
- uint32_t drop_values) {
- if (V8_LIKELY(control_.back().reachable())) {
- // We only do type-checking here. This is only needed during validation.
- if (!validate) return kReachableBranch;
-
- // Branches must have at least the number of values expected; can have
- // more.
- uint32_t expected = c->br_merge()->arity;
- if (expected == 0) return kReachableBranch; // Fast path.
- uint32_t limit = control_.back().stack_depth;
- if (!VALIDATE(stack_size() >= limit + drop_values + expected)) {
- uint32_t actual = stack_size() - limit;
- actual -= std::min(actual, drop_values);
- this->DecodeError(
- "expected %u elements on the stack for br to @%d, found %u",
- expected, startrel(c->pc()), actual);
- return kInvalidStack;
- }
- return TypeCheckMergeValues(c, drop_values, c->br_merge())
- ? kReachableBranch
- : kInvalidStack;
- }
-
- return TypeCheckUnreachableMerge(*c->br_merge(), conditional_branch,
- drop_values)
- ? kUnreachableBranch
- : kInvalidStack;
- }
-
- bool TypeCheckReturn() {
- int num_returns = static_cast<int>(this->sig_->return_count());
- // No type checking is needed if there are no returns.
- if (num_returns == 0) return true;
-
- // Returns must have at least the number of values expected; can have more.
- int num_available =
- static_cast<int>(stack_size()) - control_.back().stack_depth;
- if (!VALIDATE(num_available >= num_returns)) {
- this->DecodeError(
- "expected %u elements on the stack for return, found %u", num_returns,
- num_available);
- return false;
- }
-
- // Typecheck the topmost {num_returns} values on the stack.
- // This line requires num_returns > 0.
- Value* stack_values = stack_end_ - num_returns;
- for (int i = 0; i < num_returns; ++i) {
- Value& val = stack_values[i];
- ValueType expected_type = this->sig_->GetReturn(i);
- if (!VALIDATE(IsSubtypeOf(val.type, expected_type, this->module_))) {
- this->DecodeError("type error in return[%u] (expected %s, got %s)", i,
- expected_type.name().c_str(),
- val.type.name().c_str());
- return false;
- }
- }
- return true;
+ // on top of the stack are compatible with {c}. If {push_branch_values},
+ // push back to the stack values based on the type of {c} (this is needed for
+ // conditional branches due to their typing rules, and fallthroughs so that
+ // the outer control finds enough values on the stack).
+ // {drop_values} is the number of stack values that will be dropped before the
+  // branch is taken. This is currently 1 for br_if (condition), br_table
+ // (index) and br_on_null (reference), and 0 for all other branches.
+ template <bool push_branch_values>
+ bool TypeCheckBranch(Control* c, uint32_t drop_values) {
+ static_assert(validate, "Call this function only within VALIDATE");
+ return TypeCheckStackAgainstMerge<kNonStrictCounting, push_branch_values,
+ kBranchMerge>(drop_values, c->br_merge());
}
void onFirstError() override {
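Editor's note: the separate TypeCheckBrTable/TypeCheckMergeValues/TypeCheckUnreachableMerge/TypeCheckReturn helpers are folded into a single TypeCheckStackAgainstMerge, parameterized by strict vs. non-strict counting and by merge type. The standalone model below (simplified types; it omits the value re-pushing done for polymorphic stacks) captures only the counting and subtype checks:

```cpp
#include <cstdio>
#include <string>
#include <vector>

using ValueType = std::string;  // stand-in for wasm::ValueType

// Stand-in subtype relation: exact match only.
bool IsSubtype(const ValueType& a, const ValueType& b) { return a == b; }

bool TypeCheckStackAgainstMerge(const std::vector<ValueType>& stack,
                                const std::vector<ValueType>& merge,
                                size_t drop_values, bool strict_count,
                                bool reachable) {
  size_t arity = merge.size();
  size_t actual = stack.size();
  if (reachable) {
    // Fallthrough/return require an exact count, branches only a minimum.
    if (strict_count ? actual != drop_values + arity
                     : actual < drop_values + arity) {
      std::printf("expected %zu elements, found %zu\n", arity,
                  actual >= drop_values ? actual - drop_values : 0);
      return false;
    }
    // Typecheck the topmost {arity} values below the dropped ones.
    for (size_t i = 0; i < arity; ++i) {
      const ValueType& val = stack[actual - arity - drop_values + i];
      if (!IsSubtype(val, merge[i])) return false;
    }
    return true;
  }
  // Polymorphic (unreachable) stack: only reject a strict-count overflow here.
  return !(strict_count && actual > drop_values + arity);
}

int main() {
  // br_if-style check: one condition value is dropped before the branch,
  // and the count is non-strict.
  std::printf("%d\n", TypeCheckStackAgainstMerge({"i32", "f64", "i32"}, {"f64"},
                                                 /*drop_values=*/1,
                                                 /*strict_count=*/false,
                                                 /*reachable=*/true));
  return 0;
}
```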
diff --git a/deps/v8/src/wasm/function-compiler.cc b/deps/v8/src/wasm/function-compiler.cc
index 4303344f13..4a2db3d496 100644
--- a/deps/v8/src/wasm/function-compiler.cc
+++ b/deps/v8/src/wasm/function-compiler.cc
@@ -306,7 +306,8 @@ JSToWasmWrapperCompilationUnit::JSToWasmWrapperCompilationUnit(
Isolate* isolate, WasmEngine* wasm_engine, const FunctionSig* sig,
const WasmModule* module, bool is_import,
const WasmFeatures& enabled_features, AllowGeneric allow_generic)
- : is_import_(is_import),
+ : isolate_(isolate),
+ is_import_(is_import),
sig_(sig),
use_generic_wrapper_(allow_generic && UseGenericWrapper(sig) &&
!is_import),
@@ -326,19 +327,19 @@ void JSToWasmWrapperCompilationUnit::Execute() {
}
}
-Handle<Code> JSToWasmWrapperCompilationUnit::Finalize(Isolate* isolate) {
+Handle<Code> JSToWasmWrapperCompilationUnit::Finalize() {
Handle<Code> code;
if (use_generic_wrapper_) {
code =
- isolate->builtins()->builtin_handle(Builtins::kGenericJSToWasmWrapper);
+ isolate_->builtins()->builtin_handle(Builtins::kGenericJSToWasmWrapper);
} else {
- CompilationJob::Status status = job_->FinalizeJob(isolate);
+ CompilationJob::Status status = job_->FinalizeJob(isolate_);
CHECK_EQ(status, CompilationJob::SUCCEEDED);
code = job_->compilation_info()->code();
}
- if (!use_generic_wrapper_ && must_record_function_compilation(isolate)) {
+ if (!use_generic_wrapper_ && must_record_function_compilation(isolate_)) {
RecordWasmHeapStubCompilation(
- isolate, code, "%s", job_->compilation_info()->GetDebugName().get());
+ isolate_, code, "%s", job_->compilation_info()->GetDebugName().get());
}
return code;
}
@@ -353,7 +354,7 @@ Handle<Code> JSToWasmWrapperCompilationUnit::CompileJSToWasmWrapper(
module, is_import, enabled_features,
kAllowGeneric);
unit.Execute();
- return unit.Finalize(isolate);
+ return unit.Finalize();
}
// static
@@ -366,7 +367,7 @@ Handle<Code> JSToWasmWrapperCompilationUnit::CompileSpecificJSToWasmWrapper(
module, is_import, enabled_features,
kDontAllowGeneric);
unit.Execute();
- return unit.Finalize(isolate);
+ return unit.Finalize();
}
} // namespace wasm
diff --git a/deps/v8/src/wasm/function-compiler.h b/deps/v8/src/wasm/function-compiler.h
index f8d1f00a4e..80cd1a7b67 100644
--- a/deps/v8/src/wasm/function-compiler.h
+++ b/deps/v8/src/wasm/function-compiler.h
@@ -127,8 +127,10 @@ class V8_EXPORT_PRIVATE JSToWasmWrapperCompilationUnit final {
AllowGeneric allow_generic);
~JSToWasmWrapperCompilationUnit();
+ Isolate* isolate() const { return isolate_; }
+
void Execute();
- Handle<Code> Finalize(Isolate* isolate);
+ Handle<Code> Finalize();
bool is_import() const { return is_import_; }
const FunctionSig* sig() const { return sig_; }
@@ -146,6 +148,11 @@ class V8_EXPORT_PRIVATE JSToWasmWrapperCompilationUnit final {
const WasmModule* module);
private:
+ // Wrapper compilation is bound to an isolate. Concurrent accesses to the
+ // isolate (during the "Execute" phase) must be audited carefully, i.e. we
+ // should only access immutable information (like the root table). The isolate
+ // is guaranteed to be alive when this unit executes.
+ Isolate* isolate_;
bool is_import_;
const FunctionSig* sig_;
bool use_generic_wrapper_;
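Editor's note: JSToWasmWrapperCompilationUnit now captures the Isolate at construction, so Finalize() takes no argument and the call sites above drop the parameter. A small standalone sketch of the shape of that API change (types are stand-ins, not V8's):

```cpp
#include <cstdio>

struct Isolate {};  // stand-in

class WrapperUnitSketch {
 public:
  explicit WrapperUnitSketch(Isolate* isolate) : isolate_(isolate) {}

  void Execute() {
    // Runs off the main thread in the real pipeline; per the comment added in
    // function-compiler.h, only immutable isolate state may be touched here.
  }

  void Finalize() {  // previously: Finalize(Isolate* isolate)
    std::printf("finalizing against captured isolate %p\n",
                static_cast<void*>(isolate_));
  }

 private:
  Isolate* isolate_;  // guaranteed to be alive while the unit executes
};

int main() {
  Isolate isolate;
  WrapperUnitSketch unit(&isolate);
  unit.Execute();
  unit.Finalize();
  return 0;
}
```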
diff --git a/deps/v8/src/wasm/graph-builder-interface.cc b/deps/v8/src/wasm/graph-builder-interface.cc
index c856f4d949..a81457faa7 100644
--- a/deps/v8/src/wasm/graph-builder-interface.cc
+++ b/deps/v8/src/wasm/graph-builder-interface.cc
@@ -9,6 +9,7 @@
#include "src/handles/handles.h"
#include "src/objects/objects-inl.h"
#include "src/utils/ostreams.h"
+#include "src/wasm/branch-hint-map.h"
#include "src/wasm/decoder.h"
#include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/function-body-decoder.h"
@@ -29,7 +30,7 @@ namespace {
// It maintains a control state that tracks whether the environment
// is reachable, has reached a control end, or has been merged.
struct SsaEnv : public ZoneObject {
- enum State { kControlEnd, kUnreachable, kReached, kMerged };
+ enum State { kUnreachable, kReached, kMerged };
State state;
TFNode* control;
@@ -50,11 +51,11 @@ struct SsaEnv : public ZoneObject {
effect(other.effect),
instance_cache(other.instance_cache),
locals(std::move(other.locals)) {
- other.Kill(kUnreachable);
+ other.Kill();
}
- void Kill(State new_state = kControlEnd) {
- state = new_state;
+ void Kill() {
+ state = kUnreachable;
for (TFNode*& local : locals) {
local = nullptr;
}
@@ -67,8 +68,6 @@ struct SsaEnv : public ZoneObject {
}
};
-constexpr uint32_t kNullCatch = static_cast<uint32_t>(-1);
-
class WasmGraphBuildingInterface {
public:
static constexpr Decoder::ValidateFlag validate = Decoder::kFullValidation;
@@ -97,7 +96,7 @@ class WasmGraphBuildingInterface {
};
struct Control : public ControlBase<Value, validate> {
- SsaEnv* end_env = nullptr; // end environment for the construct.
+ SsaEnv* merge_env = nullptr; // merge environment for the construct.
SsaEnv* false_env = nullptr; // false environment (only for if).
TryInfo* try_info = nullptr; // information about try statements.
int32_t previous_catch = -1; // previous Control with a catch.
@@ -110,10 +109,18 @@ class WasmGraphBuildingInterface {
: ControlBase(std::forward<Args>(args)...) {}
};
- explicit WasmGraphBuildingInterface(compiler::WasmGraphBuilder* builder)
- : builder_(builder) {}
+ explicit WasmGraphBuildingInterface(compiler::WasmGraphBuilder* builder,
+ int func_index)
+ : builder_(builder), func_index_(func_index) {}
void StartFunction(FullDecoder* decoder) {
+ // Get the branch hints map for this function (if available)
+ if (decoder->module_) {
+ auto branch_hints_it = decoder->module_->branch_hints.find(func_index_);
+ if (branch_hints_it != decoder->module_->branch_hints.end()) {
+ branch_hints_ = &branch_hints_it->second;
+ }
+ }
// The first '+ 1' is needed by TF Start node, the second '+ 1' is for the
// instance parameter.
builder_->Start(static_cast<int>(decoder->sig_->parameter_count() + 1 + 1));
@@ -156,15 +163,15 @@ class WasmGraphBuildingInterface {
void Block(FullDecoder* decoder, Control* block) {
// The branch environment is the outer environment.
- block->end_env = ssa_env_;
+ block->merge_env = ssa_env_;
SetEnv(Steal(decoder->zone(), ssa_env_));
}
void Loop(FullDecoder* decoder, Control* block) {
- SsaEnv* finish_try_env = Steal(decoder->zone(), ssa_env_);
- block->end_env = finish_try_env;
- SetEnv(finish_try_env);
- // The continue environment is the inner environment.
+ // This is the merge environment at the beginning of the loop.
+ SsaEnv* merge_env = Steal(decoder->zone(), ssa_env_);
+ block->merge_env = merge_env;
+ SetEnv(merge_env);
ssa_env_->state = SsaEnv::kMerged;
@@ -216,15 +223,15 @@ class WasmGraphBuildingInterface {
control());
}
+    // Now we set up a new environment for the inside of the loop.
SetEnv(Split(decoder->zone(), ssa_env_));
builder_->StackCheck(decoder->position());
-
ssa_env_->SetNotMerged();
- if (!decoder->ok()) return;
+
// Wrap input merge into phis.
for (uint32_t i = 0; i < block->start_merge.arity; ++i) {
Value& val = block->start_merge[i];
- TFNode* inputs[] = {val.node, block->end_env->control};
+ TFNode* inputs[] = {val.node, block->merge_env->control};
val.node = builder_->Phi(val.type, 1, inputs);
}
}
@@ -238,22 +245,34 @@ class WasmGraphBuildingInterface {
SsaEnv* try_env = Steal(decoder->zone(), outer_env);
SetEnv(try_env);
TryInfo* try_info = decoder->zone()->New<TryInfo>(catch_env);
- block->end_env = outer_env;
+ block->merge_env = outer_env;
block->try_info = try_info;
- block->previous_catch = current_catch_;
- current_catch_ = static_cast<int32_t>(decoder->control_depth() - 1);
}
void If(FullDecoder* decoder, const Value& cond, Control* if_block) {
TFNode* if_true = nullptr;
TFNode* if_false = nullptr;
- builder_->BranchNoHint(cond.node, &if_true, &if_false);
- SsaEnv* end_env = ssa_env_;
+ WasmBranchHint hint = WasmBranchHint::kNoHint;
+ if (branch_hints_) {
+ hint = branch_hints_->GetHintFor(decoder->pc_relative_offset());
+ }
+ switch (hint) {
+ case WasmBranchHint::kNoHint:
+ builder_->BranchNoHint(cond.node, &if_true, &if_false);
+ break;
+ case WasmBranchHint::kUnlikely:
+ builder_->BranchExpectFalse(cond.node, &if_true, &if_false);
+ break;
+ case WasmBranchHint::kLikely:
+ builder_->BranchExpectTrue(cond.node, &if_true, &if_false);
+ break;
+ }
+ SsaEnv* merge_env = ssa_env_;
SsaEnv* false_env = Split(decoder->zone(), ssa_env_);
false_env->control = if_false;
SsaEnv* true_env = Steal(decoder->zone(), ssa_env_);
true_env->control = if_true;
- if_block->end_env = end_env;
+ if_block->merge_env = merge_env;
if_block->false_env = false_env;
SetEnv(true_env);
}
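Editor's note: If and BrIf now consult the module's branch hint section (when present) via pc_relative_offset() and pick BranchNoHint/BranchExpectFalse/BranchExpectTrue accordingly. A standalone sketch of that lookup-and-dispatch, with modeled types rather than V8's BranchHintMap and graph builder:

```cpp
#include <cstdint>
#include <cstdio>
#include <unordered_map>

enum class WasmBranchHint : uint8_t { kNoHint, kUnlikely, kLikely };

// Minimal stand-in for the per-function branch hint map.
struct BranchHintMapSketch {
  std::unordered_map<uint32_t, WasmBranchHint> hints;
  WasmBranchHint GetHintFor(uint32_t offset) const {
    auto it = hints.find(offset);
    return it == hints.end() ? WasmBranchHint::kNoHint : it->second;
  }
};

// Stand-in for the three builder branch constructors.
void EmitBranch(WasmBranchHint hint) {
  switch (hint) {
    case WasmBranchHint::kNoHint:
      std::printf("BranchNoHint\n");
      break;
    case WasmBranchHint::kUnlikely:
      std::printf("BranchExpectFalse\n");
      break;
    case WasmBranchHint::kLikely:
      std::printf("BranchExpectTrue\n");
      break;
  }
}

int main() {
  BranchHintMapSketch map;
  map.hints[/*pc_relative_offset=*/7] = WasmBranchHint::kLikely;
  EmitBranch(map.GetHintFor(7));   // BranchExpectTrue
  EmitBranch(map.GetHintFor(42));  // BranchNoHint
  return 0;
}
```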
@@ -294,11 +313,9 @@ class WasmGraphBuildingInterface {
MergeValuesInto(decoder, block, &block->end_merge, values);
}
// Now continue with the merged environment.
- SetEnv(block->end_env);
+ SetEnv(block->merge_env);
}
- void EndControl(FullDecoder* decoder, Control* block) { ssa_env_->Kill(); }
-
void UnOp(FullDecoder* decoder, WasmOpcode opcode, const Value& value,
Value* result) {
result->node = builder_->Unop(opcode, value.node, decoder->position());
@@ -482,7 +499,21 @@ class WasmGraphBuildingInterface {
SsaEnv* fenv = ssa_env_;
SsaEnv* tenv = Split(decoder->zone(), fenv);
fenv->SetNotMerged();
- builder_->BranchNoHint(cond.node, &tenv->control, &fenv->control);
+ WasmBranchHint hint = WasmBranchHint::kNoHint;
+ if (branch_hints_) {
+ hint = branch_hints_->GetHintFor(decoder->pc_relative_offset());
+ }
+ switch (hint) {
+ case WasmBranchHint::kNoHint:
+ builder_->BranchNoHint(cond.node, &tenv->control, &fenv->control);
+ break;
+ case WasmBranchHint::kUnlikely:
+ builder_->BranchExpectFalse(cond.node, &tenv->control, &fenv->control);
+ break;
+ case WasmBranchHint::kLikely:
+ builder_->BranchExpectTrue(cond.node, &tenv->control, &fenv->control);
+ break;
+ }
builder_->SetControl(fenv->control);
SetEnv(tenv);
BrOrRet(decoder, depth, 1);
@@ -639,6 +670,19 @@ class WasmGraphBuildingInterface {
SetEnv(false_env);
}
+ void BrOnNonNull(FullDecoder* decoder, const Value& ref_object,
+ uint32_t depth) {
+ SsaEnv* false_env = ssa_env_;
+ SsaEnv* true_env = Split(decoder->zone(), false_env);
+ false_env->SetNotMerged();
+ builder_->BrOnNull(ref_object.node, &false_env->control,
+ &true_env->control);
+ builder_->SetControl(false_env->control);
+ SetEnv(true_env);
+ BrOrRet(decoder, depth, 0);
+ SetEnv(false_env);
+ }
+
void SimdOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
Value* result) {
NodeVector inputs(args.size());
@@ -689,9 +733,6 @@ class WasmGraphBuildingInterface {
const ExceptionIndexImmediate<validate>& imm,
Control* block, Vector<Value> values) {
DCHECK(block->is_try_catch());
-
- current_catch_ = block->previous_catch; // Pop try scope.
-
// The catch block is unreachable if no possible throws in the try block
// exist. We only build a landing pad if some node in the try block can
// (possibly) throw. Otherwise the catch environments remain empty.
@@ -743,7 +784,6 @@ class WasmGraphBuildingInterface {
// and IfFailure nodes.
builder_->Rethrow(block->try_info->exception);
TerminateThrow(decoder);
- current_catch_ = block->previous_catch;
return;
}
DCHECK(decoder->control_at(depth)->is_try());
@@ -765,7 +805,6 @@ class WasmGraphBuildingInterface {
target_try->exception, block->try_info->exception);
}
}
- current_catch_ = block->previous_catch;
}
void CatchAll(FullDecoder* decoder, Control* block) {
@@ -773,8 +812,6 @@ class WasmGraphBuildingInterface {
block->is_try_unwind());
DCHECK_EQ(decoder->control_at(0), block);
- current_catch_ = block->previous_catch; // Pop try scope.
-
// The catch block is unreachable if no possible throws in the try block
// exist. We only build a landing pad if some node in the try block can
// (possibly) throw. Otherwise the catch environments remain empty.
@@ -1000,28 +1037,37 @@ class WasmGraphBuildingInterface {
TFNode*, TFNode*, StaticKnowledge, TFNode**, TFNode**, TFNode**,
TFNode**)>
void BrOnCastAbs(FullDecoder* decoder, const Value& object, const Value& rtt,
- Value* value_on_branch, uint32_t br_depth) {
+ Value* forwarding_value, uint32_t br_depth,
+ bool branch_on_match) {
StaticKnowledge config =
ComputeStaticKnowledge(object.type, rtt.type, decoder->module_);
- SsaEnv* match_env = Split(decoder->zone(), ssa_env_);
- SsaEnv* no_match_env = Steal(decoder->zone(), ssa_env_);
- no_match_env->SetNotMerged();
+ SsaEnv* branch_env = Split(decoder->zone(), ssa_env_);
+ SsaEnv* no_branch_env = Steal(decoder->zone(), ssa_env_);
+ no_branch_env->SetNotMerged();
+ SsaEnv* match_env = branch_on_match ? branch_env : no_branch_env;
+ SsaEnv* no_match_env = branch_on_match ? no_branch_env : branch_env;
(builder_->*branch_function)(object.node, rtt.node, config,
&match_env->control, &match_env->effect,
&no_match_env->control, &no_match_env->effect);
- builder_->SetControl(no_match_env->control);
- SetEnv(match_env);
- value_on_branch->node = object.node;
+ builder_->SetControl(no_branch_env->control);
+ SetEnv(branch_env);
+ forwarding_value->node = object.node;
// Currently, br_on_* instructions modify the value stack before calling
// the interface function, so we don't need to drop any values here.
BrOrRet(decoder, br_depth, 0);
- SetEnv(no_match_env);
+ SetEnv(no_branch_env);
}
void BrOnCast(FullDecoder* decoder, const Value& object, const Value& rtt,
Value* value_on_branch, uint32_t br_depth) {
BrOnCastAbs<&compiler::WasmGraphBuilder::BrOnCast>(
- decoder, object, rtt, value_on_branch, br_depth);
+ decoder, object, rtt, value_on_branch, br_depth, true);
+ }
+
+ void BrOnCastFail(FullDecoder* decoder, const Value& object, const Value& rtt,
+ Value* value_on_fallthrough, uint32_t br_depth) {
+ BrOnCastAbs<&compiler::WasmGraphBuilder::BrOnCast>(
+ decoder, object, rtt, value_on_fallthrough, br_depth, false);
}
void RefIsData(FullDecoder* decoder, const Value& object, Value* result) {
@@ -1036,8 +1082,8 @@ class WasmGraphBuildingInterface {
void BrOnData(FullDecoder* decoder, const Value& object,
Value* value_on_branch, uint32_t br_depth) {
BrOnCastAbs<&compiler::WasmGraphBuilder::BrOnData>(
- decoder, object, Value{nullptr, kWasmBottom}, value_on_branch,
- br_depth);
+ decoder, object, Value{nullptr, kWasmBottom}, value_on_branch, br_depth,
+ true);
}
void RefIsFunc(FullDecoder* decoder, const Value& object, Value* result) {
@@ -1052,8 +1098,8 @@ class WasmGraphBuildingInterface {
void BrOnFunc(FullDecoder* decoder, const Value& object,
Value* value_on_branch, uint32_t br_depth) {
BrOnCastAbs<&compiler::WasmGraphBuilder::BrOnFunc>(
- decoder, object, Value{nullptr, kWasmBottom}, value_on_branch,
- br_depth);
+ decoder, object, Value{nullptr, kWasmBottom}, value_on_branch, br_depth,
+ true);
}
void RefIsI31(FullDecoder* decoder, const Value& object, Value* result) {
@@ -1067,8 +1113,8 @@ class WasmGraphBuildingInterface {
void BrOnI31(FullDecoder* decoder, const Value& object,
Value* value_on_branch, uint32_t br_depth) {
BrOnCastAbs<&compiler::WasmGraphBuilder::BrOnI31>(
- decoder, object, Value{nullptr, kWasmBottom}, value_on_branch,
- br_depth);
+ decoder, object, Value{nullptr, kWasmBottom}, value_on_branch, br_depth,
+ true);
}
void Forward(FullDecoder* decoder, const Value& from, Value* to) {
@@ -1080,7 +1126,8 @@ class WasmGraphBuildingInterface {
private:
SsaEnv* ssa_env_ = nullptr;
compiler::WasmGraphBuilder* builder_;
- uint32_t current_catch_ = kNullCatch;
+ int func_index_;
+ const BranchHintMap* branch_hints_ = nullptr;
// Tracks loop data for loop unrolling.
std::vector<compiler::WasmLoopInfo> loop_infos_;
@@ -1088,13 +1135,9 @@ class WasmGraphBuildingInterface {
TFNode* control() { return builder_->control(); }
- uint32_t control_depth_of_current_catch(FullDecoder* decoder) {
- return decoder->control_depth() - 1 - current_catch_;
- }
-
TryInfo* current_try_info(FullDecoder* decoder) {
- DCHECK_LT(current_catch_, decoder->control_depth());
- return decoder->control_at(control_depth_of_current_catch(decoder))
+ DCHECK_LT(decoder->current_catch(), decoder->control_depth());
+ return decoder->control_at(decoder->control_depth_of_current_catch())
->try_info;
}
@@ -1122,9 +1165,6 @@ class WasmGraphBuildingInterface {
case SsaEnv::kMerged:
state = 'M';
break;
- case SsaEnv::kControlEnd:
- state = 'E';
- break;
}
}
PrintF("{set_env = %p, state = %c", env, state);
@@ -1146,7 +1186,7 @@ class WasmGraphBuildingInterface {
V8_INLINE TFNode* CheckForException(FullDecoder* decoder, TFNode* node) {
if (node == nullptr) return nullptr;
- const bool inside_try_scope = current_catch_ != kNullCatch;
+ const bool inside_try_scope = decoder->current_catch() != -1;
if (!inside_try_scope) return node;
return CheckForExceptionImpl(decoder, node);
@@ -1170,7 +1210,7 @@ class WasmGraphBuildingInterface {
TryInfo* try_info = current_try_info(decoder);
if (FLAG_wasm_loop_unrolling) {
ValueVector values;
- BuildNestedLoopExits(decoder, control_depth_of_current_catch(decoder),
+ BuildNestedLoopExits(decoder, decoder->control_depth_of_current_catch(),
true, values, &if_exception);
}
Goto(decoder, try_info->catch_env);
@@ -1218,8 +1258,10 @@ class WasmGraphBuildingInterface {
Value* values) {
DCHECK(merge == &c->start_merge || merge == &c->end_merge);
- SsaEnv* target = c->end_env;
+ SsaEnv* target = c->merge_env;
+ // This has to be computed before calling Goto().
const bool first = target->state == SsaEnv::kUnreachable;
+
Goto(decoder, target);
if (merge->arity == 0) return;
@@ -1327,7 +1369,6 @@ class WasmGraphBuildingInterface {
default:
UNREACHABLE();
}
- return ssa_env_->Kill();
}
// Create a complete copy of {from}.
@@ -1357,11 +1398,6 @@ class WasmGraphBuildingInterface {
return result;
}
- // Create an unreachable environment.
- SsaEnv* UnreachableEnv(Zone* zone) {
- return zone->New<SsaEnv>(zone, SsaEnv::kUnreachable, nullptr, nullptr, 0);
- }
-
void DoCall(FullDecoder* decoder, CallMode call_mode, uint32_t table_index,
CheckForNull null_check, TFNode* caller_node,
const FunctionSig* sig, uint32_t sig_index, const Value args[],
@@ -1523,10 +1559,11 @@ DecodeResult BuildTFGraph(AccountingAllocator* allocator,
compiler::WasmGraphBuilder* builder,
WasmFeatures* detected, const FunctionBody& body,
std::vector<compiler::WasmLoopInfo>* loop_infos,
- compiler::NodeOriginTable* node_origins) {
+ compiler::NodeOriginTable* node_origins,
+ int func_index) {
Zone zone(allocator, ZONE_NAME);
WasmFullDecoder<Decoder::kFullValidation, WasmGraphBuildingInterface> decoder(
- &zone, module, enabled, detected, body, builder);
+ &zone, module, enabled, detected, body, builder, func_index);
if (node_origins) {
builder->AddBytecodePositionDecorator(node_origins, &decoder);
}
diff --git a/deps/v8/src/wasm/graph-builder-interface.h b/deps/v8/src/wasm/graph-builder-interface.h
index ce125313e4..6c668e2b0a 100644
--- a/deps/v8/src/wasm/graph-builder-interface.h
+++ b/deps/v8/src/wasm/graph-builder-interface.h
@@ -10,7 +10,6 @@
#define V8_WASM_GRAPH_BUILDER_INTERFACE_H_
#include "src/wasm/decoder.h"
-#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-result.h"
namespace v8 {
@@ -33,7 +32,7 @@ BuildTFGraph(AccountingAllocator* allocator, const WasmFeatures& enabled,
const WasmModule* module, compiler::WasmGraphBuilder* builder,
WasmFeatures* detected, const FunctionBody& body,
std::vector<compiler::WasmLoopInfo>* loop_infos,
- compiler::NodeOriginTable* node_origins);
+ compiler::NodeOriginTable* node_origins, int func_index);
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/memory-protection-key.cc b/deps/v8/src/wasm/memory-protection-key.cc
new file mode 100644
index 0000000000..e8252cd9ce
--- /dev/null
+++ b/deps/v8/src/wasm/memory-protection-key.cc
@@ -0,0 +1,189 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/memory-protection-key.h"
+
+#if defined(V8_OS_LINUX) && defined(V8_HOST_ARCH_X64)
+#include <sys/mman.h> // For {mprotect()} protection macros.
+#undef MAP_TYPE // Conflicts with MAP_TYPE in Torque-generated instance-types.h
+#endif
+
+#include "src/base/build_config.h"
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+#include "src/base/platform/platform.h"
+
+// Runtime-detection of PKU support with {dlsym()}.
+//
+// For now, we support memory protection keys/PKEYs/PKU only for Linux on x64
+// based on glibc functions {pkey_alloc()}, {pkey_free()}, etc.
+// Those functions are only available since glibc version 2.27:
+// https://man7.org/linux/man-pages/man2/pkey_alloc.2.html
+// However, if we check the glibc version with V8_GLIBC_PREREQ here at compile
+// time, this causes two problems due to dynamic linking of glibc:
+// 1) If the compiling system _has_ a new enough glibc, the binary will include
+// calls to {pkey_alloc()} etc., and then the runtime system must supply a
+// new enough glibc version as well. That is, this potentially breaks runtime
+// compatibility on older systems (e.g., Ubuntu 16.04 with glibc 2.23).
+// 2) If the compiling system _does not_ have a new enough glibc, PKU support
+// will not be compiled in, even though the runtime system potentially _does_
+// have support for it due to a new enough Linux kernel and glibc version.
+// That is, this results in non-optimal security (PKU available, but not used).
+// Hence, we do _not_ check the glibc version during compilation, and instead
+// only at runtime try to load {pkey_alloc()} etc. with {dlsym()}.
+// TODO(dlehmann): Move this import and freestanding functions below to
+// base/platform/platform.h {OS} (lower-level functions) and
+// {base::PageAllocator} (exported API).
+#if defined(V8_OS_LINUX) && defined(V8_HOST_ARCH_X64)
+#include <dlfcn.h>
+#endif
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// TODO(dlehmann) Security: Are there alternatives to disabling CFI altogether
+// for the functions below? Since they are essentially an arbitrary indirect
+// call gadget, disabling CFI should be only a last resort. In Chromium, there
+// was {base::ProtectedMemory} to protect the function pointer from being
+// overwritten, but it seems it was removed due to not being used, and AFAICT no such
+// thing exists in V8 to begin with. See
+// https://www.chromium.org/developers/testing/control-flow-integrity and
+// https://crrev.com/c/1884819.
+// What is the general solution for CFI + {dlsym()}?
+// An alternative would be to not rely on glibc and instead implement PKEY
+// directly on top of Linux syscalls + inline asm, but that is quite some low-
+// level code (probably in the order of 100 lines).
+DISABLE_CFI_ICALL
+int AllocateMemoryProtectionKey() {
+// See comment on the import on feature testing for PKEY support.
+#if defined(V8_OS_LINUX) && defined(V8_HOST_ARCH_X64)
+ // Try to find {pkey_alloc()} support in glibc.
+ typedef int (*pkey_alloc_t)(unsigned int, unsigned int);
+ // Cache the {dlsym()} lookup in a {static} variable.
+ static auto* pkey_alloc =
+ bit_cast<pkey_alloc_t>(dlsym(RTLD_DEFAULT, "pkey_alloc"));
+ if (pkey_alloc != nullptr) {
+ // If there is support in glibc, try to allocate a new key.
+ // This might still return -1, e.g., because the kernel does not support
+ // PKU or because no more keys are available.
+ // Different reasons for why {pkey_alloc()} failed could be checked with
+ // errno, e.g., EINVAL vs ENOSPC vs ENOSYS. See manpages and glibc manual
+ // (the latter is the authoritative source):
+ // https://www.gnu.org/software/libc/manual/html_mono/libc.html#Memory-Protection-Keys
+ return pkey_alloc(/* flags, unused */ 0, kDisableAccess);
+ }
+#endif
+ return kNoMemoryProtectionKey;
+}
+
+DISABLE_CFI_ICALL
+void FreeMemoryProtectionKey(int key) {
+ // Only free the key if one was allocated.
+ if (key == kNoMemoryProtectionKey) return;
+
+#if defined(V8_OS_LINUX) && defined(V8_HOST_ARCH_X64)
+ typedef int (*pkey_free_t)(int);
+ static auto* pkey_free =
+ bit_cast<pkey_free_t>(dlsym(RTLD_DEFAULT, "pkey_free"));
+ // If a valid key was allocated, {pkey_free()} must also be available.
+ DCHECK_NOT_NULL(pkey_free);
+
+ int ret = pkey_free(key);
+ CHECK_EQ(/* success */ 0, ret);
+#else
+ // On platforms without PKU support, we should have already returned because
+ // the key must be {kNoMemoryProtectionKey}.
+ UNREACHABLE();
+#endif
+}
+
+#if defined(V8_OS_LINUX) && defined(V8_HOST_ARCH_X64)
+// TODO(dlehmann): Copied from base/platform/platform-posix.cc. Should be
+// removed once this code is integrated in base/platform/platform-linux.cc.
+int GetProtectionFromMemoryPermission(base::OS::MemoryPermission access) {
+ switch (access) {
+ case base::OS::MemoryPermission::kNoAccess:
+ case base::OS::MemoryPermission::kNoAccessWillJitLater:
+ return PROT_NONE;
+ case base::OS::MemoryPermission::kRead:
+ return PROT_READ;
+ case base::OS::MemoryPermission::kReadWrite:
+ return PROT_READ | PROT_WRITE;
+ case base::OS::MemoryPermission::kReadWriteExecute:
+ return PROT_READ | PROT_WRITE | PROT_EXEC;
+ case base::OS::MemoryPermission::kReadExecute:
+ return PROT_READ | PROT_EXEC;
+ }
+ UNREACHABLE();
+}
+#endif
+
+DISABLE_CFI_ICALL
+bool SetPermissionsAndMemoryProtectionKey(
+ PageAllocator* page_allocator, base::AddressRegion region,
+ PageAllocator::Permission page_permissions, int key) {
+ DCHECK_NOT_NULL(page_allocator);
+
+ void* address = reinterpret_cast<void*>(region.begin());
+ size_t size = region.size();
+
+#if defined(V8_OS_LINUX) && defined(V8_HOST_ARCH_X64)
+ typedef int (*pkey_mprotect_t)(void*, size_t, int, int);
+ static auto* pkey_mprotect =
+ bit_cast<pkey_mprotect_t>(dlsym(RTLD_DEFAULT, "pkey_mprotect"));
+
+ if (pkey_mprotect == nullptr) {
+ // If there is no runtime support for {pkey_mprotect()}, no key should have
+ // been allocated in the first place.
+ DCHECK_EQ(kNoMemoryProtectionKey, key);
+
+ // Without PKU support, fall back to regular {mprotect()}.
+ return page_allocator->SetPermissions(address, size, page_permissions);
+ }
+
+ // Copied with slight modifications from base/platform/platform-posix.cc
+ // {OS::SetPermissions()}.
+ // TODO(dlehmann): Move this block into its own function at the right
+ // abstraction boundary (likely some static method in platform.h {OS})
+ // once the whole PKU code is moved into base/platform/.
+ DCHECK_EQ(0, region.begin() % page_allocator->CommitPageSize());
+ DCHECK_EQ(0, size % page_allocator->CommitPageSize());
+
+ int protection = GetProtectionFromMemoryPermission(
+ static_cast<base::OS::MemoryPermission>(page_permissions));
+
+ int ret = pkey_mprotect(address, size, protection, key);
+
+ return ret == /* success */ 0;
+#else
+ // Without PKU support, fall back to regular {mprotect()}.
+ return page_allocator->SetPermissions(address, size, page_permissions);
+#endif
+}
+
+DISABLE_CFI_ICALL
+bool SetPermissionsForMemoryProtectionKey(
+ int key, MemoryProtectionKeyPermission permissions) {
+ if (key == kNoMemoryProtectionKey) return false;
+
+#if defined(V8_OS_LINUX) && defined(V8_HOST_ARCH_X64)
+ typedef int (*pkey_set_t)(int, unsigned int);
+ static auto* pkey_set = bit_cast<pkey_set_t>(dlsym(RTLD_DEFAULT, "pkey_set"));
+ // If a valid key was allocated, {pkey_set()} must also be available.
+ DCHECK_NOT_NULL(pkey_set);
+
+ int ret = pkey_set(key, permissions);
+
+ return ret == /* success */ 0;
+#else
+ // On platforms without PKU support, we should have already returned because
+ // the key must be {kNoMemoryProtectionKey}.
+ UNREACHABLE();
+#endif
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/memory-protection-key.h b/deps/v8/src/wasm/memory-protection-key.h
new file mode 100644
index 0000000000..9f9a200cdf
--- /dev/null
+++ b/deps/v8/src/wasm/memory-protection-key.h
@@ -0,0 +1,90 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
+#ifndef V8_WASM_MEMORY_PROTECTION_KEY_H_
+#define V8_WASM_MEMORY_PROTECTION_KEY_H_
+
+#if defined(V8_OS_LINUX) && defined(V8_HOST_ARCH_X64)
+#include <sys/mman.h> // For STATIC_ASSERT of permission values.
+#undef MAP_TYPE // Conflicts with MAP_TYPE in Torque-generated instance-types.h
+#endif
+
+#include "include/v8-platform.h"
+#include "src/base/address-region.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// TODO(dlehmann): Move this to base/platform/platform.h {OS} (lower-level API)
+// and {base::PageAllocator} (higher-level, exported API) once the API is more
+// stable and we have converged on a better design (e.g., typed class wrapper
+// around int memory protection key).
+
+// Sentinel value if there is no PKU support or allocation of a key failed.
+// This is also the return value on an error of pkey_alloc() and has the
+// benefit that calling pkey_mprotect() with -1 behaves the same as regular
+// mprotect().
+constexpr int kNoMemoryProtectionKey = -1;
+
+// Permissions for memory protection keys on top of the page's permissions.
+// NOTE: Since there is no executable bit, the executable permission cannot be
+// withdrawn by memory protection keys.
+enum MemoryProtectionKeyPermission {
+ kNoRestrictions = 0,
+ kDisableAccess = 1,
+ kDisableWrite = 2,
+};
+
+// If sys/mman.h has PKEY support (on newer Linux distributions), ensure that
+// our definitions of the permissions are consistent with the ones in glibc.
+#if defined(PKEY_DISABLE_ACCESS)
+STATIC_ASSERT(kDisableAccess == PKEY_DISABLE_ACCESS);
+STATIC_ASSERT(kDisableWrite == PKEY_DISABLE_WRITE);
+#endif
+
+// Allocates a memory protection key on platforms with PKU support, returns
+// {kNoMemoryProtectionKey} on platforms without support or when allocation
+// failed at runtime.
+int AllocateMemoryProtectionKey();
+
+// Frees the given memory protection key, to make it available again for the
+// next call to {AllocateMemoryProtectionKey()}. Note that this does NOT
+// invalidate access rights to pages that are still tied to that key. That is,
+// if the key is reused and pages with that key are still accessible, this might
+// be a security issue. See
+// https://www.gnu.org/software/libc/manual/html_mono/libc.html#Memory-Protection-Keys
+void FreeMemoryProtectionKey(int key);
+
+// Associates a memory protection {key} with the given {region}.
+// If {key} is {kNoMemoryProtectionKey} this behaves like "plain"
+// {SetPermissions()} and associates the default key to the region. That is,
+// explicitly calling with {kNoMemoryProtectionKey} can be used to disassociate
+// any protection key from a region. This also means "plain" {SetPermissions()}
+// disassociates the key from a region, making the key's access restrictions
+// irrelevant/inactive for that region.
+// Returns true if changing permissions and key was successful. (Returns a bool
+// to be consistent with {SetPermissions()}).
+// The {page_permissions} are the permissions of the page, not the key. For
+// changing the permissions of the key, use
+// {SetPermissionsForMemoryProtectionKey()} instead.
+bool SetPermissionsAndMemoryProtectionKey(
+ PageAllocator* page_allocator, base::AddressRegion region,
+ PageAllocator::Permission page_permissions, int key);
+
+// Set the key's permissions and return whether this was successful.
+// Returns false on platforms without PKU support or when the operation failed,
+// e.g., because the key was invalid.
+bool SetPermissionsForMemoryProtectionKey(
+ int key, MemoryProtectionKeyPermission permissions);
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_MEMORY_PROTECTION_KEY_H_
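
A minimal usage sketch of the API declared above, assuming a PageAllocator* page_allocator and a base::AddressRegion code_region are already at hand (both names are illustrative); on platforms without PKU support, AllocateMemoryProtectionKey() returns kNoMemoryProtectionKey and the region call degrades to plain SetPermissions(), as documented above:

  int key = wasm::AllocateMemoryProtectionKey();
  // Associate the key with the region (or just set page permissions if no key).
  bool ok = wasm::SetPermissionsAndMemoryProtectionKey(
      page_allocator, code_region, PageAllocator::kReadWriteExecute, key);
  CHECK(ok);
  if (key != wasm::kNoMemoryProtectionKey) {
    // Temporarily allow writes for this thread, then drop write access again.
    wasm::SetPermissionsForMemoryProtectionKey(key, wasm::kNoRestrictions);
    // ... emit or patch code in the region ...
    wasm::SetPermissionsForMemoryProtectionKey(key, wasm::kDisableWrite);
  }
  wasm::FreeMemoryProtectionKey(key);
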
diff --git a/deps/v8/src/wasm/memory-tracing.cc b/deps/v8/src/wasm/memory-tracing.cc
index 0d88c4b461..68310a03f3 100644
--- a/deps/v8/src/wasm/memory-tracing.cc
+++ b/deps/v8/src/wasm/memory-tracing.cc
@@ -50,7 +50,7 @@ void TraceMemoryOperation(base::Optional<ExecutionTier> tier,
}
const char* eng =
tier.has_value() ? ExecutionTierToString(tier.value()) : "?";
- printf("%-11s func:%6d+0x%-6x%s %016" PRIuPTR " val: %s\n", eng, func_index,
+ printf("%-11s func:%6d:0x%-6x%s %016" PRIuPTR " val: %s\n", eng, func_index,
position, info->is_store ? " store to" : "load from", info->offset,
value.begin());
}
diff --git a/deps/v8/src/wasm/module-compiler.cc b/deps/v8/src/wasm/module-compiler.cc
index 3b1d8750ba..4742a85070 100644
--- a/deps/v8/src/wasm/module-compiler.cc
+++ b/deps/v8/src/wasm/module-compiler.cc
@@ -32,7 +32,6 @@
#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-objects-inl.h"
-#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-result.h"
#include "src/wasm/wasm-serialization.h"
@@ -531,17 +530,19 @@ class CompilationStateImpl {
CompilationStateImpl(const std::shared_ptr<NativeModule>& native_module,
std::shared_ptr<Counters> async_counters);
~CompilationStateImpl() {
- DCHECK(compile_job_->IsValid());
- compile_job_->CancelAndDetach();
+ if (compile_job_->IsValid()) compile_job_->CancelAndDetach();
}
// Call right after the constructor, after the {compilation_state_} field in
// the {NativeModule} has been initialized.
void InitCompileJob(WasmEngine*);
- // Cancel all background compilation, without waiting for compile tasks to
- // finish.
- void CancelCompilation();
+ // {kCancelUnconditionally}: Cancel all compilation.
+ // {kCancelInitialCompilation}: Cancel all compilation if initial (baseline)
+ // compilation is not finished yet.
+ enum CancellationPolicy { kCancelUnconditionally, kCancelInitialCompilation };
+ void CancelCompilation(CancellationPolicy);
+
bool cancelled() const;
// Initialize compilation progress. Set compilation tiers to expect for
@@ -767,7 +768,6 @@ void UpdateFeatureUseCounts(Isolate* isolate, const WasmFeatures& detected) {
using Feature = v8::Isolate::UseCounterFeature;
constexpr static std::pair<WasmFeature, Feature> kUseCounters[] = {
{kFeature_reftypes, Feature::kWasmRefTypes},
- {kFeature_mv, Feature::kWasmMultiValue},
{kFeature_simd, Feature::kWasmSimdOpcodes},
{kFeature_threads, Feature::kWasmThreadOpcodes},
{kFeature_eh, Feature::kWasmExceptionHandling}};
@@ -791,7 +791,14 @@ void CompilationState::InitCompileJob(WasmEngine* engine) {
Impl(this)->InitCompileJob(engine);
}
-void CompilationState::CancelCompilation() { Impl(this)->CancelCompilation(); }
+void CompilationState::CancelCompilation() {
+ Impl(this)->CancelCompilation(CompilationStateImpl::kCancelUnconditionally);
+}
+
+void CompilationState::CancelInitialCompilation() {
+ Impl(this)->CancelCompilation(
+ CompilationStateImpl::kCancelInitialCompilation);
+}
void CompilationState::SetError() { Impl(this)->SetError(); }
@@ -1202,16 +1209,25 @@ CompilationExecutionResult ExecuteJSToWasmWrapperCompilationUnits(
std::shared_ptr<JSToWasmWrapperCompilationUnit> wrapper_unit = nullptr;
int num_processed_wrappers = 0;
+ OperationsBarrier::Token wrapper_compilation_token;
+ Isolate* isolate;
+
{
BackgroundCompileScope compile_scope(native_module);
if (compile_scope.cancelled()) return kYield;
wrapper_unit = compile_scope.compilation_state()
->GetNextJSToWasmWrapperCompilationUnit();
if (!wrapper_unit) return kNoMoreUnits;
+ isolate = wrapper_unit->isolate();
+ wrapper_compilation_token =
+ compile_scope.native_module()->engine()->StartWrapperCompilation(
+ isolate);
+ if (!wrapper_compilation_token) return kNoMoreUnits;
}
TRACE_EVENT0("v8.wasm", "wasm.JSToWasmWrapperCompilation");
while (true) {
+ DCHECK_EQ(isolate, wrapper_unit->isolate());
wrapper_unit->Execute();
++num_processed_wrappers;
bool yield = delegate && delegate->ShouldYield();
@@ -1829,10 +1845,10 @@ std::shared_ptr<StreamingDecoder> AsyncCompileJob::CreateStreamingDecoder() {
AsyncCompileJob::~AsyncCompileJob() {
// Note: This destructor always runs on the foreground thread of the isolate.
background_task_manager_.CancelAndWait();
- // If the runtime objects were not created yet, then initial compilation did
- // not finish yet. In this case we can abort compilation.
- if (native_module_ && module_object_.is_null()) {
- Impl(native_module_->compilation_state())->CancelCompilation();
+ // If initial compilation has not finished yet, we can abort it.
+ if (native_module_) {
+ Impl(native_module_->compilation_state())
+ ->CancelCompilation(CompilationStateImpl::kCancelInitialCompilation);
}
// Tell the streaming decoder that the AsyncCompileJob is not available
// anymore.
@@ -2459,7 +2475,8 @@ void AsyncStreamingProcessor::FinishAsyncCompileJobWithError(
// Check if there is already a CompiledModule, in which case we have to clean
// up the CompilationStateImpl as well.
if (job_->native_module_) {
- Impl(job_->native_module_->compilation_state())->CancelCompilation();
+ Impl(job_->native_module_->compilation_state())
+ ->CancelCompilation(CompilationStateImpl::kCancelUnconditionally);
job_->DoSync<AsyncCompileJob::DecodeFail,
AsyncCompileJob::kUseExistingForegroundTask>(error);
@@ -2783,13 +2800,22 @@ void CompilationStateImpl::InitCompileJob(WasmEngine* engine) {
async_counters_));
}
-void CompilationStateImpl::CancelCompilation() {
+void CompilationStateImpl::CancelCompilation(
+ CompilationStateImpl::CancellationPolicy cancellation_policy) {
+ base::MutexGuard callbacks_guard(&callbacks_mutex_);
+
+ if (cancellation_policy == kCancelInitialCompilation &&
+ finished_events_.contains(
+ CompilationEvent::kFinishedBaselineCompilation)) {
+ // Initial compilation already finished; cannot be cancelled.
+ return;
+ }
+
// std::memory_order_relaxed is sufficient because no other state is
// synchronized with |compile_cancelled_|.
compile_cancelled_.store(true, std::memory_order_relaxed);
// No more callbacks after abort.
- base::MutexGuard callbacks_guard(&callbacks_mutex_);
callbacks_.clear();
}
@@ -3040,7 +3066,8 @@ void CompilationStateImpl::FinalizeJSToWasmWrappers(
js_to_wasm_wrapper_units_.size());
CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
for (auto& unit : js_to_wasm_wrapper_units_) {
- Handle<Code> code = unit->Finalize(isolate);
+ DCHECK_EQ(isolate, unit->isolate());
+ Handle<Code> code = unit->Finalize();
int wrapper_index =
GetExportWrapperIndex(module, unit->sig(), unit->is_import());
(*export_wrappers_out)->set(wrapper_index, *code);
@@ -3090,7 +3117,8 @@ void CompilationStateImpl::OnFinishedUnits(Vector<WasmCode*> code_vector) {
DCHECK_NOT_NULL(code);
DCHECK_LT(code->index(), native_module_->num_functions());
- if (code->index() < native_module_->num_imported_functions()) {
+ if (code->index() <
+ static_cast<int>(native_module_->num_imported_functions())) {
// Import wrapper.
DCHECK_EQ(code->tier(), ExecutionTier::kTurbofan);
outstanding_baseline_units_--;
@@ -3449,7 +3477,8 @@ void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module,
for (auto& pair : compilation_units) {
JSToWasmWrapperKey key = pair.first;
JSToWasmWrapperCompilationUnit* unit = pair.second.get();
- Handle<Code> code = unit->Finalize(isolate);
+ DCHECK_EQ(isolate, unit->isolate());
+ Handle<Code> code = unit->Finalize();
int wrapper_index = GetExportWrapperIndex(module, &key.second, key.first);
(*export_wrappers_out)->set(wrapper_index, *code);
RecordStats(*code, isolate->counters());
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index f2c77efb23..be4d8ef833 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -34,6 +34,7 @@ namespace {
constexpr char kNameString[] = "name";
constexpr char kSourceMappingURLString[] = "sourceMappingURL";
constexpr char kCompilationHintsString[] = "compilationHints";
+constexpr char kBranchHintsString[] = "branchHints";
constexpr char kDebugInfoString[] = ".debug_info";
constexpr char kExternalDebugInfoString[] = "external_debug_info";
@@ -95,6 +96,8 @@ const char* SectionName(SectionCode code) {
return kExternalDebugInfoString;
case kCompilationHintsSectionCode:
return kCompilationHintsString;
+ case kBranchHintsSectionCode:
+ return kBranchHintsString;
default:
return "<unknown>";
}
@@ -144,6 +147,7 @@ SectionCode IdentifyUnknownSectionInternal(Decoder* decoder) {
{StaticCharVector(kNameString), kNameSectionCode},
{StaticCharVector(kSourceMappingURLString), kSourceMappingURLSectionCode},
{StaticCharVector(kCompilationHintsString), kCompilationHintsSectionCode},
+ {StaticCharVector(kBranchHintsString), kBranchHintsSectionCode},
{StaticCharVector(kDebugInfoString), kDebugInfoSectionCode},
{StaticCharVector(kExternalDebugInfoString),
kExternalDebugInfoSectionCode}};
@@ -432,6 +436,13 @@ class ModuleDecoderImpl : public Decoder {
// first occurrence after function section and before code section are
// ignored.
break;
+ case kBranchHintsSectionCode:
+ // TODO(yuri): report out of place branch hints section as a
+ // warning.
+ // Be lenient with placement of the branch hints section. All except
+ // first occurrence after function section and before code section are
+ // ignored.
+ break;
default:
next_ordered_section_ = section_code + 1;
break;
@@ -498,6 +509,15 @@ class ModuleDecoderImpl : public Decoder {
consume_bytes(static_cast<uint32_t>(end_ - start_), nullptr);
}
break;
+ case kBranchHintsSectionCode:
+ if (enabled_features_.has_branch_hinting()) {
+ DecodeBranchHintsSection();
+ } else {
+ // Ignore this section when the feature is disabled. It is an optional
+ // custom section anyway.
+ consume_bytes(static_cast<uint32_t>(end_ - start_), nullptr);
+ }
+ break;
case kDataCountSectionCode:
DecodeDataCountSection();
break;
@@ -884,50 +904,25 @@ class ModuleDecoderImpl : public Decoder {
uint32_t element_count =
consume_count("element count", FLAG_wasm_max_table_size);
- for (uint32_t i = 0; ok() && i < element_count; ++i) {
- const byte* pos = pc();
-
- WasmElemSegment::Status status;
- bool functions_as_elements;
- uint32_t table_index;
- WasmInitExpr offset;
- ValueType type = kWasmBottom;
- consume_element_segment_header(&status, &functions_as_elements, &type,
- &table_index, &offset);
+ for (uint32_t i = 0; i < element_count; ++i) {
+ bool expressions_as_elements;
+ WasmElemSegment segment =
+ consume_element_segment_header(&expressions_as_elements);
if (failed()) return;
- DCHECK_NE(type, kWasmBottom);
-
- if (status == WasmElemSegment::kStatusActive) {
- if (table_index >= module_->tables.size()) {
- errorf(pos, "out of bounds table index %u", table_index);
- break;
- }
- if (!IsSubtypeOf(type, module_->tables[table_index].type,
- this->module_.get())) {
- errorf(pos,
- "Invalid element segment. Table %u is not a super-type of %s",
- table_index, type.name().c_str());
- break;
- }
- }
+ DCHECK_NE(segment.type, kWasmBottom);
uint32_t num_elem =
consume_count("number of elements", max_table_init_entries());
- if (status == WasmElemSegment::kStatusActive) {
- module_->elem_segments.emplace_back(table_index, std::move(offset));
- } else {
- module_->elem_segments.emplace_back(
- status == WasmElemSegment::kStatusDeclarative);
- }
- WasmElemSegment* init = &module_->elem_segments.back();
- init->type = type;
for (uint32_t j = 0; j < num_elem; j++) {
- uint32_t index = functions_as_elements ? consume_element_expr()
- : consume_element_func_index();
- if (failed()) break;
- init->entries.push_back(index);
+ WasmInitExpr init =
+ expressions_as_elements
+ ? consume_element_expr()
+ : WasmInitExpr::RefFuncConst(consume_element_func_index());
+ if (failed()) return;
+ segment.entries.push_back(std::move(init));
}
+ module_->elem_segments.push_back(std::move(segment));
}
}
@@ -1174,6 +1169,82 @@ class ModuleDecoderImpl : public Decoder {
// consume_bytes(static_cast<uint32_t>(end_ - start_), nullptr);
}
+ void DecodeBranchHintsSection() {
+ TRACE("DecodeBranchHints module+%d\n", static_cast<int>(pc_ - start_));
+ if (!has_seen_unordered_section(kBranchHintsSectionCode)) {
+ set_seen_unordered_section(kBranchHintsSectionCode);
+ // Use an inner decoder so that errors don't fail the outer decoder.
+ Decoder inner(start_, pc_, end_, buffer_offset_);
+ BranchHintInfo branch_hints;
+
+ uint32_t func_count = inner.consume_u32v("number of functions");
+ // Keep track of the previous function index to validate the ordering
+ int64_t last_func_idx = -1;
+ for (uint32_t i = 0; i < func_count; i++) {
+ uint32_t func_idx = inner.consume_u32v("function index");
+ if (int64_t(func_idx) <= last_func_idx) {
+ inner.errorf("Invalid function index: %d", func_idx);
+ break;
+ }
+ last_func_idx = func_idx;
+ uint8_t reserved = inner.consume_u8("reserved byte");
+ if (reserved != 0x0) {
+ inner.errorf("Invalid reserved byte: %#x", reserved);
+ break;
+ }
+ uint32_t num_hints = inner.consume_u32v("number of hints");
+ BranchHintMap func_branch_hints;
+ TRACE("DecodeBranchHints[%d] module+%d\n", func_idx,
+ static_cast<int>(inner.pc() - inner.start()));
+ // Keep track of the previous branch offset to validate the ordering
+ int64_t last_br_off = -1;
+ for (uint32_t j = 0; j < num_hints; ++j) {
+ uint32_t br_dir = inner.consume_u32v("branch direction");
+ uint32_t br_off = inner.consume_u32v("branch instruction offset");
+ if (int64_t(br_off) <= last_br_off) {
+ inner.errorf("Invalid branch offset: %d", br_off);
+ break;
+ }
+ last_br_off = br_off;
+ TRACE("DecodeBranchHints[%d][%d] module+%d\n", func_idx, br_off,
+ static_cast<int>(inner.pc() - inner.start()));
+ WasmBranchHint hint;
+ switch (br_dir) {
+ case 0:
+ hint = WasmBranchHint::kUnlikely;
+ break;
+ case 1:
+ hint = WasmBranchHint::kLikely;
+ break;
+ default:
+ hint = WasmBranchHint::kNoHint;
+ inner.errorf(inner.pc(), "Invalid branch hint %#x", br_dir);
+ break;
+ }
+ if (!inner.ok()) {
+ break;
+ }
+ func_branch_hints.insert(br_off, hint);
+ }
+ if (!inner.ok()) {
+ break;
+ }
+ branch_hints.emplace(func_idx, std::move(func_branch_hints));
+ }
+ // Extra unexpected bytes are an error.
+ if (inner.more()) {
+ inner.errorf("Unexpected extra bytes: %d\n",
+ static_cast<int>(inner.pc() - inner.start()));
+ }
+ // If everything went well, accept the hints for the module.
+ if (inner.ok()) {
+ module_->branch_hints = std::move(branch_hints);
+ }
+ }
+ // Skip the whole branch hints section in the outer decoder.
+ consume_bytes(static_cast<uint32_t>(end_ - start_), nullptr);
+ }
+
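
For reference, the payload consumed above (wrapped in a custom section named "branchHints") is: a function count, then per function a strictly increasing function index, a reserved zero byte, a hint count, and per hint a branch direction (0 = unlikely, 1 = likely) followed by a strictly increasing branch instruction offset, all as LEB128 u32 values. A hedged encoder sketch of that layout for a single function with a single hint; EmitU32V is an illustrative helper, not a V8 function:

  #include <cstdint>
  #include <vector>

  // Unsigned LEB128 encoding, matching consume_u32v() on the decode side.
  void EmitU32V(std::vector<uint8_t>* out, uint32_t value) {
    do {
      uint8_t b = value & 0x7f;
      value >>= 7;
      if (value != 0) b |= 0x80;
      out->push_back(b);
    } while (value != 0);
  }

  std::vector<uint8_t> EncodeBranchHintsPayload() {
    std::vector<uint8_t> payload;
    EmitU32V(&payload, 1);    // number of functions
    EmitU32V(&payload, 0);    // function index (strictly increasing)
    payload.push_back(0x00);  // reserved byte, must be 0
    EmitU32V(&payload, 1);    // number of hints for this function
    EmitU32V(&payload, 1);    // branch direction: 1 = likely
    EmitU32V(&payload, 7);    // branch instruction offset (strictly increasing)
    return payload;
  }
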
void DecodeDataCountSection() {
module_->num_declared_data_segments =
consume_count("data segments count", kV8MaxWasmDataSegments);
@@ -1911,10 +1982,8 @@ class ModuleDecoderImpl : public Decoder {
std::vector<ValueType> returns;
// Parse return types.
- const size_t max_return_count = enabled_features_.has_mv()
- ? kV8MaxWasmFunctionMultiReturns
- : kV8MaxWasmFunctionReturns;
- uint32_t return_count = consume_count("return count", max_return_count);
+ uint32_t return_count =
+ consume_count("return count", kV8MaxWasmFunctionReturns);
if (failed()) return nullptr;
for (uint32_t i = 0; ok() && i < return_count; ++i) {
returns.push_back(consume_value_type());
@@ -1967,86 +2036,114 @@ class ModuleDecoderImpl : public Decoder {
return attribute;
}
- void consume_element_segment_header(WasmElemSegment::Status* status,
- bool* functions_as_elements,
- ValueType* type, uint32_t* table_index,
- WasmInitExpr* offset) {
+ WasmElemSegment consume_element_segment_header(
+ bool* expressions_as_elements) {
const byte* pos = pc();
- uint32_t flag = consume_u32v("flag");
// The mask for the bit in the flag which indicates if the segment is
- // active or not.
- constexpr uint8_t kIsPassiveMask = 0x01;
- // The mask for the bit in the flag which indicates if the segment has an
- // explicit table index field.
- constexpr uint8_t kHasTableIndexMask = 0x02;
+ // active or not (0 is active).
+ constexpr uint8_t kNonActiveMask = 1 << 0;
+ // The mask for the bit in the flag which indicates:
+ // - for active tables, if the segment has an explicit table index field.
+ // - for non-active tables, whether the table is declarative (vs. passive).
+ constexpr uint8_t kHasTableIndexOrIsDeclarativeMask = 1 << 1;
// The mask for the bit in the flag which indicates if the functions of this
- // segment are defined as function indices (=0) or elements(=1).
- constexpr uint8_t kFunctionsAsElementsMask = 0x04;
- constexpr uint8_t kFullMask =
- kIsPassiveMask | kHasTableIndexMask | kFunctionsAsElementsMask;
-
- bool is_passive = flag & kIsPassiveMask;
- if (!is_passive) {
- *status = WasmElemSegment::kStatusActive;
- if (module_->tables.size() == 0) {
- error(pc_, "Active element sections require a table");
- }
- } else if ((flag & kHasTableIndexMask)) { // Special bit combination for
- // declarative segments.
- *status = WasmElemSegment::kStatusDeclarative;
- } else {
- *status = WasmElemSegment::kStatusPassive;
+ // segment are defined as function indices (0) or init. expressions (1).
+ constexpr uint8_t kExpressionsAsElementsMask = 1 << 2;
+ constexpr uint8_t kFullMask = kNonActiveMask |
+ kHasTableIndexOrIsDeclarativeMask |
+ kExpressionsAsElementsMask;
+
+ uint32_t flag = consume_u32v("flag");
+ if ((flag & kFullMask) != flag) {
+ errorf(pos, "illegal flag value %u. Must be between 0 and 7", flag);
+ return {};
}
- *functions_as_elements = flag & kFunctionsAsElementsMask;
- bool has_table_index = (flag & kHasTableIndexMask) &&
- *status == WasmElemSegment::kStatusActive;
- if (*status == WasmElemSegment::kStatusDeclarative &&
+ const WasmElemSegment::Status status =
+ (flag & kNonActiveMask) ? (flag & kHasTableIndexOrIsDeclarativeMask)
+ ? WasmElemSegment::kStatusDeclarative
+ : WasmElemSegment::kStatusPassive
+ : WasmElemSegment::kStatusActive;
+ if (status == WasmElemSegment::kStatusDeclarative &&
!enabled_features_.has_reftypes()) {
error(
"Declarative element segments require --experimental-wasm-reftypes");
- return;
- }
- if ((flag & kFullMask) != flag) {
- errorf(pos, "illegal flag value %u. Must be between 0 and 7", flag);
+ return {};
}
+ const bool is_active = status == WasmElemSegment::kStatusActive;
- if (has_table_index) {
- *table_index = consume_u32v("table index");
- } else {
- *table_index = 0;
- }
+ *expressions_as_elements = flag & kExpressionsAsElementsMask;
- if (*status == WasmElemSegment::kStatusActive) {
- *offset = consume_init_expr(module_.get(), kWasmI32,
- module_.get()->globals.size());
- if (offset->kind() == WasmInitExpr::kNone) {
- // Failed to parse offset initializer, return early.
- return;
- }
+ const bool has_table_index =
+ is_active && (flag & kHasTableIndexOrIsDeclarativeMask);
+ uint32_t table_index = has_table_index ? consume_u32v("table index") : 0;
+ if (is_active && table_index >= module_->tables.size()) {
+ errorf(pos, "out of bounds%s table index %u",
+ has_table_index ? "" : " implicit", table_index);
+ return {};
}
-
- if (*status == WasmElemSegment::kStatusActive && !has_table_index) {
- // Active segments without table indices are a special case for backwards
- // compatibility. These cases have an implicit element kind or element
- // type, so we are done already with the segment header.
- *type = kWasmFuncRef;
- return;
+ ValueType table_type =
+ is_active ? module_->tables[table_index].type : kWasmBottom;
+
+ WasmInitExpr offset;
+ if (is_active) {
+ offset = consume_init_expr(module_.get(), kWasmI32,
+ module_.get()->globals.size());
+ // Failed to parse offset initializer, return early.
+ if (failed()) return {};
+ }
+
+ // Denotes an active segment without table index, type, or element kind.
+ const bool backwards_compatible_mode =
+ is_active && !(flag & kHasTableIndexOrIsDeclarativeMask);
+ ValueType type;
+ if (*expressions_as_elements) {
+ type =
+ backwards_compatible_mode ? kWasmFuncRef : consume_reference_type();
+ if (is_active && !IsSubtypeOf(type, table_type, this->module_.get())) {
+ errorf(pos,
+ "Element segment of type %s is not a subtype of referenced "
+ "table %u (of type %s)",
+ type.name().c_str(), table_index, table_type.name().c_str());
+ return {};
+ }
+ } else {
+ if (!backwards_compatible_mode) {
+ // We have to check that there is an element kind of type Function. All
+ // other element kinds are not valid yet.
+ uint8_t val = consume_u8("element kind");
+ if (static_cast<ImportExportKindCode>(val) != kExternalFunction) {
+ errorf(pos, "illegal element kind 0x%x. Must be 0x%x", val,
+ kExternalFunction);
+ return {};
+ }
+ }
+ if (!is_active) {
+ // Declarative and passive segments without explicit type are funcref.
+ type = kWasmFuncRef;
+ } else {
+ type = table_type;
+ // Active segments with function indices must reference a function
+ // table. TODO(7748): Add support for anyref tables when we have them.
+ if (!IsSubtypeOf(table_type, kWasmFuncRef, this->module_.get())) {
+ errorf(pos,
+ "An active element segment with function indices as elements "
+ "must reference a table of %s. Instead, table %u of type %s "
+ "is referenced.",
+ enabled_features_.has_typed_funcref()
+ ? "a subtype of type funcref"
+ : "type funcref",
+ table_index, table_type.name().c_str());
+ return {};
+ }
+ }
}
- if (*functions_as_elements) {
- *type = consume_reference_type();
+ if (is_active) {
+ return {type, table_index, std::move(offset)};
} else {
- // We have to check that there is an element kind of type Function. All
- // other element kinds are not valid yet.
- uint8_t val = consume_u8("element kind");
- ImportExportKindCode kind = static_cast<ImportExportKindCode>(val);
- if (kind != kExternalFunction) {
- errorf(pos, "illegal element kind %x. Must be 0x00", val);
- return;
- }
- *type = kWasmFuncRef;
+ return {type, status == WasmElemSegment::kStatusDeclarative};
}
}
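
Spelled out, the three flag bits above yield eight legal encodings (flag values 0 through 7); anything else hits the "illegal flag value" error. A sketch restating the status derivation, with the spec-level meaning of each value in comments (the masks referenced are the function-local constants from the code above):

  // Flag -> segment layout, as decoded above:
  //   0: active, implicit table 0, function indices (no elemkind/type byte)
  //   1: passive, element kind byte, function indices
  //   2: active, explicit table index, element kind byte, function indices
  //   3: declarative, element kind byte, function indices
  //   4: active, implicit table 0, init. expressions (implicitly funcref)
  //   5: passive, reference type byte, init. expressions
  //   6: active, explicit table index, reference type byte, init. expressions
  //   7: declarative, reference type byte, init. expressions
  WasmElemSegment::Status StatusFromFlag(uint32_t flag) {
    if ((flag & kNonActiveMask) == 0) return WasmElemSegment::kStatusActive;
    return (flag & kHasTableIndexOrIsDeclarativeMask)
               ? WasmElemSegment::kStatusDeclarative
               : WasmElemSegment::kStatusPassive;
  }
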
@@ -2091,32 +2188,49 @@ class ModuleDecoderImpl : public Decoder {
func->declared = true;
DCHECK_NE(func, nullptr);
DCHECK_EQ(index, func->func_index);
- DCHECK_NE(index, WasmElemSegment::kNullIndex);
return index;
}
- uint32_t consume_element_expr() {
- uint32_t index = WasmElemSegment::kNullIndex;
+ // TODO(manoskouk): When reftypes lands, remove this and use
+ // consume_init_expr() instead.
+ WasmInitExpr consume_element_expr() {
uint8_t opcode = consume_u8("element opcode");
- if (failed()) return index;
+ if (failed()) return {};
switch (opcode) {
case kExprRefNull: {
HeapTypeImmediate<kFullValidation> imm(WasmFeatures::All(), this,
this->pc(), module_.get());
consume_bytes(imm.length, "ref.null immediate");
- index = WasmElemSegment::kNullIndex;
- break;
+ expect_u8("end opcode", kExprEnd);
+ return WasmInitExpr::RefNullConst(imm.type.representation());
+ }
+ case kExprRefFunc: {
+ uint32_t index = consume_element_func_index();
+ if (failed()) return {};
+ expect_u8("end opcode", kExprEnd);
+ return WasmInitExpr::RefFuncConst(index);
+ }
+ case kExprGlobalGet: {
+ if (!enabled_features_.has_reftypes()) {
+ errorf(
+ "Unexpected opcode 0x%x in element. Enable with "
+ "--experimental-wasm-reftypes",
+ kExprGlobalGet);
+ return {};
+ }
+ uint32_t index = this->consume_u32v("global index");
+ if (failed()) return {};
+ if (index >= module_->globals.size()) {
+ errorf("Out-of-bounds global index %d", index);
+ return {};
+ }
+ expect_u8("end opcode", kExprEnd);
+ return WasmInitExpr::GlobalGet(index);
}
- case kExprRefFunc:
- index = consume_element_func_index();
- if (failed()) return index;
- break;
default:
error("invalid opcode in element");
- break;
+ return {};
}
- expect_u8("end opcode", kExprEnd);
- return index;
}
};
@@ -2374,8 +2488,7 @@ bool FindNameSection(Decoder* decoder) {
} // namespace
void DecodeFunctionNames(const byte* module_start, const byte* module_end,
- std::unordered_map<uint32_t, WireBytesRef>* names,
- const Vector<const WasmExport> export_table) {
+ std::unordered_map<uint32_t, WireBytesRef>* names) {
DCHECK_NOT_NULL(names);
DCHECK(names->empty());
@@ -2407,13 +2520,6 @@ void DecodeFunctionNames(const byte* module_start, const byte* module_end,
}
}
}
-
- // Extract from export table.
- for (const WasmExport& exp : export_table) {
- if (exp.kind == kExternalFunction && names->count(exp.index) == 0) {
- names->insert(std::make_pair(exp.index, exp.name));
- }
- }
}
NameMap DecodeNameMap(Vector<const uint8_t> module_bytes,
diff --git a/deps/v8/src/wasm/module-decoder.h b/deps/v8/src/wasm/module-decoder.h
index 2d33f51f31..2af2760ab4 100644
--- a/deps/v8/src/wasm/module-decoder.h
+++ b/deps/v8/src/wasm/module-decoder.h
@@ -187,13 +187,11 @@ V8_EXPORT_PRIVATE std::vector<CustomSectionOffset> DecodeCustomSections(
// function.
AsmJsOffsetsResult DecodeAsmJsOffsets(Vector<const uint8_t> encoded_offsets);
-// Decode the function names from the name section and also look at export
-// table. Returns the result as an unordered map. Only names with valid utf8
-// encoding are stored and conflicts are resolved by choosing the last name
-// read.
+// Decode the function names from the name section. Returns the result as an
+// unordered map. Only names with valid utf8 encoding are stored and conflicts
+// are resolved by choosing the last name read.
void DecodeFunctionNames(const byte* module_start, const byte* module_end,
- std::unordered_map<uint32_t, WireBytesRef>* names,
- const Vector<const WasmExport> export_table);
+ std::unordered_map<uint32_t, WireBytesRef>* names);
// Decode the requested subsection of the name section.
// The result will be empty if no name section is present. On encountering an
diff --git a/deps/v8/src/wasm/module-instantiate.cc b/deps/v8/src/wasm/module-instantiate.cc
index f64a657eb8..7945e79849 100644
--- a/deps/v8/src/wasm/module-instantiate.cc
+++ b/deps/v8/src/wasm/module-instantiate.cc
@@ -20,6 +20,7 @@
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-subtyping.h"
+#include "src/wasm/wasm-value.h"
#define TRACE(...) \
do { \
@@ -122,16 +123,17 @@ Handle<Map> CreateStructMap(Isolate* isolate, const WasmModule* module,
int struct_index, Handle<Map> opt_rtt_parent) {
const wasm::StructType* type = module->struct_type(struct_index);
const int inobject_properties = 0;
- DCHECK_LE(type->total_fields_size(), kMaxInt - WasmStruct::kHeaderSize);
- const int instance_size =
- WasmStruct::kHeaderSize + static_cast<int>(type->total_fields_size());
+ // We have to use the variable size sentinel because the instance size
+ // stored directly in a Map is capped at 255 pointer sizes.
+ const int map_instance_size = kVariableSizeSentinel;
+ const int real_instance_size = WasmStruct::Size(type);
const InstanceType instance_type = WASM_STRUCT_TYPE;
// TODO(jkummerow): If NO_ELEMENTS were supported, we could use that here.
const ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND;
Handle<WasmTypeInfo> type_info = isolate->factory()->NewWasmTypeInfo(
- reinterpret_cast<Address>(type), opt_rtt_parent);
+ reinterpret_cast<Address>(type), opt_rtt_parent, real_instance_size);
Handle<Map> map = isolate->factory()->NewMap(
- instance_type, instance_size, elements_kind, inobject_properties);
+ instance_type, map_instance_size, elements_kind, inobject_properties);
map->set_wasm_type_info(*type_info);
return map;
}
@@ -141,10 +143,12 @@ Handle<Map> CreateArrayMap(Isolate* isolate, const WasmModule* module,
const wasm::ArrayType* type = module->array_type(array_index);
const int inobject_properties = 0;
const int instance_size = kVariableSizeSentinel;
+ // Wasm Arrays don't have a static instance size.
+ const int cached_instance_size = 0;
const InstanceType instance_type = WASM_ARRAY_TYPE;
const ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND;
Handle<WasmTypeInfo> type_info = isolate->factory()->NewWasmTypeInfo(
- reinterpret_cast<Address>(type), opt_rtt_parent);
+ reinterpret_cast<Address>(type), opt_rtt_parent, cached_instance_size);
Handle<Map> map = isolate->factory()->NewMap(
instance_type, instance_size, elements_kind, inobject_properties);
map->set_wasm_type_info(*type_info);
@@ -615,7 +619,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
// list.
//--------------------------------------------------------------------------
if (enabled_.has_gc()) {
- Handle<FixedArray> maps = isolate_->factory()->NewUninitializedFixedArray(
+ Handle<FixedArray> maps = isolate_->factory()->NewFixedArray(
static_cast<int>(module_->type_kinds.size()));
for (int map_index = 0;
map_index < static_cast<int>(module_->type_kinds.size());
@@ -1325,11 +1329,15 @@ bool InstanceBuilder::ProcessImportedGlobal(Handle<WasmInstanceObject> instance,
// TODO(wasm): Still observable if Function.prototype.valueOf or friends
// are patched, we might need to check for that as well.
if (value->IsJSFunction()) value = isolate_->factory()->nan_value();
- if (value->IsPrimitive() && !value->IsSymbol()) {
- if (global.type == kWasmI32) {
- value = Object::ToInt32(isolate_, value).ToHandleChecked();
- } else {
- value = Object::ToNumber(isolate_, value).ToHandleChecked();
+ if (value->IsPrimitive()) {
+ MaybeHandle<Object> converted = global.type == kWasmI32
+ ? Object::ToInt32(isolate_, value)
+ : Object::ToNumber(isolate_, value);
+ if (!converted.ToHandle(&value)) {
+ // Conversion is known to fail for Symbols and BigInts.
+ ReportLinkError("global import must be a number", import_index,
+ module_name, import_name);
+ return false;
}
}
}
@@ -1903,10 +1911,10 @@ bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
const WasmModule* module = instance->module();
for (size_t i = 0; i < count; ++i) {
- uint32_t func_index = elem_segment.entries[src + i];
+ const WasmInitExpr* init = &elem_segment.entries[src + i];
int entry_index = static_cast<int>(dst + i);
- if (func_index == WasmElemSegment::kNullIndex) {
+ if (init->kind() == WasmInitExpr::kRefNullConst) {
if (IsSubtypeOf(table_object->type(), kWasmFuncRef, module)) {
IndirectFunctionTableEntry(instance, table_index, entry_index).clear();
}
@@ -1915,6 +1923,18 @@ bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
continue;
}
+ if (init->kind() == WasmInitExpr::kGlobalGet) {
+ WasmTableObject::Set(
+ isolate, table_object, entry_index,
+ WasmInstanceObject::GetGlobalValue(
+ instance, module->globals[init->immediate().index])
+ .to_ref());
+ continue;
+ }
+
+ DCHECK_EQ(init->kind(), WasmInitExpr::kRefFuncConst);
+
+ const uint32_t func_index = init->immediate().index;
const WasmFunction* function = &module->functions[func_index];
// Update the local dispatch table first if necessary.
diff --git a/deps/v8/src/wasm/value-type.h b/deps/v8/src/wasm/value-type.h
index 983e2090b6..7895a731f6 100644
--- a/deps/v8/src/wasm/value-type.h
+++ b/deps/v8/src/wasm/value-type.h
@@ -94,7 +94,7 @@ class HeapType {
}
explicit constexpr HeapType(Representation repr) : representation_(repr) {
- CONSTEXPR_DCHECK(is_bottom() || is_valid());
+ DCHECK(is_bottom() || is_valid());
}
explicit constexpr HeapType(uint32_t repr)
: HeapType(static_cast<Representation>(repr)) {}
@@ -116,7 +116,7 @@ class HeapType {
constexpr Representation representation() const { return representation_; }
constexpr uint32_t ref_index() const {
- CONSTEXPR_DCHECK(is_index());
+ DCHECK(is_index());
return representation_;
}
@@ -201,7 +201,7 @@ constexpr int element_size_log2(ValueKind kind) {
};
int size_log_2 = kElementSizeLog2[kind];
- CONSTEXPR_DCHECK(size_log_2 >= 0);
+ DCHECK_LE(0, size_log_2);
return size_log_2;
}
@@ -214,7 +214,7 @@ constexpr int element_size_bytes(ValueKind kind) {
};
int size = kElementSize[kind];
- CONSTEXPR_DCHECK(size > 0);
+ DCHECK_LT(0, size);
return size;
}
@@ -240,7 +240,7 @@ constexpr const char* name(ValueKind kind) {
}
constexpr MachineType machine_type(ValueKind kind) {
- CONSTEXPR_DCHECK(kBottom != kind);
+ DCHECK_NE(kBottom, kind);
constexpr MachineType kMachineType[] = {
#define MACH_TYPE(kind, log2Size, code, machineType, ...) \
@@ -262,7 +262,7 @@ constexpr bool is_rtt(ValueKind kind) {
}
constexpr bool is_defaultable(ValueKind kind) {
- CONSTEXPR_DCHECK(kind != kBottom && kind != kVoid);
+ DCHECK(kind != kBottom && kind != kVoid);
return kind != kRef && !is_rtt(kind);
}
@@ -277,11 +277,11 @@ class ValueType {
/******************************* Constructors *******************************/
constexpr ValueType() : bit_field_(KindField::encode(kVoid)) {}
static constexpr ValueType Primitive(ValueKind kind) {
- CONSTEXPR_DCHECK(kind == kBottom || kind <= kI16);
+ DCHECK(kind == kBottom || kind <= kI16);
return ValueType(KindField::encode(kind));
}
static constexpr ValueType Ref(uint32_t heap_type, Nullability nullability) {
- CONSTEXPR_DCHECK(HeapType(heap_type).is_valid());
+ DCHECK(HeapType(heap_type).is_valid());
return ValueType(
KindField::encode(nullability == kNullable ? kOptRef : kRef) |
HeapTypeField::encode(heap_type));
@@ -291,14 +291,14 @@ class ValueType {
}
static constexpr ValueType Rtt(uint32_t type_index) {
- CONSTEXPR_DCHECK(HeapType(type_index).is_index());
+ DCHECK(HeapType(type_index).is_index());
return ValueType(KindField::encode(kRtt) |
HeapTypeField::encode(type_index));
}
static constexpr ValueType Rtt(uint32_t type_index,
uint8_t inheritance_depth) {
- CONSTEXPR_DCHECK(HeapType(type_index).is_index());
+ DCHECK(HeapType(type_index).is_index());
return ValueType(KindField::encode(kRttWithDepth) |
HeapTypeField::encode(type_index) |
DepthField::encode(inheritance_depth));
@@ -340,27 +340,34 @@ class ValueType {
return is_packed() ? Primitive(kI32) : *this;
}
+ // Returns the version of this type that does not allow null values. Handles
+ // bottom.
+ constexpr ValueType AsNonNull() const {
+ DCHECK(is_object_reference() || is_bottom());
+ return is_nullable() ? Ref(heap_type(), kNonNullable) : *this;
+ }
+
/***************************** Field Accessors ******************************/
constexpr ValueKind kind() const { return KindField::decode(bit_field_); }
constexpr HeapType::Representation heap_representation() const {
- CONSTEXPR_DCHECK(is_object_reference());
+ DCHECK(is_object_reference());
return static_cast<HeapType::Representation>(
HeapTypeField::decode(bit_field_));
}
constexpr HeapType heap_type() const {
- CONSTEXPR_DCHECK(is_object_reference());
+ DCHECK(is_object_reference());
return HeapType(heap_representation());
}
constexpr uint8_t depth() const {
- CONSTEXPR_DCHECK(has_depth());
+ DCHECK(has_depth());
return DepthField::decode(bit_field_);
}
constexpr uint32_t ref_index() const {
- CONSTEXPR_DCHECK(has_index());
+ DCHECK(has_index());
return HeapTypeField::decode(bit_field_);
}
constexpr Nullability nullability() const {
- CONSTEXPR_DCHECK(is_object_reference());
+ DCHECK(is_object_reference());
return kind() == kOptRef ? kNullable : kNonNullable;
}
@@ -426,7 +433,7 @@ class ValueType {
// (e.g., Ref(HeapType::kFunc, kNullable).value_type_code will return
// kFuncrefCode and not kOptRefCode).
constexpr ValueTypeCode value_type_code() const {
- CONSTEXPR_DCHECK(kind() != kBottom);
+ DCHECK_NE(kBottom, kind());
switch (kind()) {
case kOptRef:
switch (heap_representation()) {
diff --git a/deps/v8/src/wasm/wasm-code-manager.cc b/deps/v8/src/wasm/wasm-code-manager.cc
index d9225103bb..8907cbab31 100644
--- a/deps/v8/src/wasm/wasm-code-manager.cc
+++ b/deps/v8/src/wasm/wasm-code-manager.cc
@@ -26,6 +26,7 @@
#include "src/wasm/compilation-environment.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/jump-table-assembler.h"
+#include "src/wasm/memory-protection-key.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/wasm-debug.h"
#include "src/wasm/wasm-engine.h"
@@ -225,8 +226,8 @@ void WasmCode::LogCode(Isolate* isolate, const char* source_url,
ModuleWireBytes wire_bytes(native_module_->wire_bytes());
const WasmModule* module = native_module_->module();
- WireBytesRef name_ref = module->lazily_generated_names.LookupFunctionName(
- wire_bytes, index(), VectorOf(module->export_table));
+ WireBytesRef name_ref =
+ module->lazily_generated_names.LookupFunctionName(wire_bytes, index());
WasmName name = wire_bytes.GetNameOrNull(name_ref);
const WasmDebugSymbols& debug_symbols = module->debug_symbols;
@@ -502,28 +503,14 @@ int WasmCode::GetSourcePositionBefore(int offset) {
return position;
}
-WasmCodeAllocator::OptionalLock::~OptionalLock() {
- if (allocator_) allocator_->mutex_.Unlock();
-}
-
-void WasmCodeAllocator::OptionalLock::Lock(WasmCodeAllocator* allocator) {
- DCHECK(!is_locked());
- allocator_ = allocator;
- allocator->mutex_.Lock();
-}
-
// static
constexpr size_t WasmCodeAllocator::kMaxCodeSpaceSize;
WasmCodeAllocator::WasmCodeAllocator(WasmCodeManager* code_manager,
- VirtualMemory code_space,
std::shared_ptr<Counters> async_counters)
: code_manager_(code_manager),
- free_code_space_(code_space.region()),
async_counters_(std::move(async_counters)) {
owned_code_space_.reserve(4);
- owned_code_space_.emplace_back(std::move(code_space));
- async_counters_->wasm_module_num_code_spaces()->AddSample(1);
}
WasmCodeAllocator::~WasmCodeAllocator() {
@@ -531,9 +518,12 @@ WasmCodeAllocator::~WasmCodeAllocator() {
committed_code_space());
}
-void WasmCodeAllocator::Init(NativeModule* native_module) {
- DCHECK_EQ(1, owned_code_space_.size());
- native_module->AddCodeSpace(owned_code_space_[0].region(), {});
+void WasmCodeAllocator::Init(VirtualMemory code_space) {
+ DCHECK(owned_code_space_.empty());
+ DCHECK(free_code_space_.IsEmpty());
+ free_code_space_.Merge(code_space.region());
+ owned_code_space_.emplace_back(std::move(code_space));
+ async_counters_->wasm_module_num_code_spaces()->AddSample(1);
}
namespace {
@@ -625,18 +615,11 @@ size_t ReservationSize(size_t code_size_estimate, int num_declared_functions,
Vector<byte> WasmCodeAllocator::AllocateForCode(NativeModule* native_module,
size_t size) {
- return AllocateForCodeInRegion(native_module, size, kUnrestrictedRegion,
- WasmCodeAllocator::OptionalLock{});
+ return AllocateForCodeInRegion(native_module, size, kUnrestrictedRegion);
}
Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
- NativeModule* native_module, size_t size, base::AddressRegion region,
- const WasmCodeAllocator::OptionalLock& optional_lock) {
- OptionalLock new_lock;
- if (!optional_lock.is_locked()) new_lock.Lock(this);
- const auto& locked_lock =
- optional_lock.is_locked() ? optional_lock : new_lock;
- DCHECK(locked_lock.is_locked());
+ NativeModule* native_module, size_t size, base::AddressRegion region) {
DCHECK_EQ(code_manager_, native_module->engine()->code_manager());
DCHECK_LT(0, size);
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
@@ -667,7 +650,7 @@ Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
code_manager_->AssignRange(new_region, native_module);
free_code_space_.Merge(new_region);
owned_code_space_.emplace_back(std::move(new_mem));
- native_module->AddCodeSpace(new_region, locked_lock);
+ native_module->AddCodeSpaceLocked(new_region);
code_space = free_code_space_.Allocate(size);
DCHECK(!code_space.is_empty());
@@ -703,16 +686,40 @@ Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
return {reinterpret_cast<byte*>(code_space.begin()), code_space.size()};
}
-bool WasmCodeAllocator::SetExecutable(bool executable) {
- base::MutexGuard lock(&mutex_);
- if (is_executable_ == executable) return true;
- TRACE_HEAP("Setting module %p as executable: %d.\n", this, executable);
-
- v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
+// TODO(dlehmann): Do not return the success as a bool, but instead fail hard.
+// That is, pull the CHECK from {NativeModuleModificationScope} in here and
+// return void.
+// TODO(dlehmann): Ensure {SetWritable(true)} is always paired up with a
+// {SetWritable(false)}, such that eventually the code space is write protected.
+// One solution is to make the API foolproof by hiding {SetWritable()} and
+// allowing change of permissions only through {NativeModuleModificationScope}.
+// TODO(dlehmann): Add tests that ensure the code space is eventually write-
+// protected.
+bool WasmCodeAllocator::SetWritable(bool writable) {
+ // Invariant: `this.writers_count_ > 0` iff `code space has W permission`.
+ // TODO(dlehmann): This is currently not fulfilled before the first call
+ // to SetWritable(false), because initial permissions are RWX.
+ // Fix by setting initial permissions to RX and adding writable permission
+ // where appropriate. See also {WasmCodeManager::Commit()}.
+ if (writable) {
+ if (++writers_count_ > 1) return true;
+ } else {
+ DCHECK_GT(writers_count_, 0);
+ if (--writers_count_ > 0) return true;
+ }
+ writable = writers_count_ > 0;
+ TRACE_HEAP("Setting module %p as writable: %d.\n", this, writable);
if (FLAG_wasm_write_protect_code_memory) {
+ v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
+
+ // Due to concurrent compilation and execution, we always need the execute
+    // permission; during codegen, however, we additionally need write access.
+ // Hence this does not actually achieve write-xor-execute, but merely
+ // "always-execute" with "no-write-eventually".
PageAllocator::Permission permission =
- executable ? PageAllocator::kReadExecute : PageAllocator::kReadWrite;
+ writable ? PageAllocator::kReadWriteExecute
+ : PageAllocator::kReadExecute;
#if V8_OS_WIN
// On windows, we need to switch permissions per separate virtual memory
// reservation.
@@ -725,8 +732,8 @@ bool WasmCodeAllocator::SetExecutable(bool executable) {
permission)) {
return false;
}
- TRACE_HEAP("Set %p:%p to executable:%d\n", vmem.address(), vmem.end(),
- executable);
+ TRACE_HEAP("Set %p:%p to writable:%d\n", vmem.address(), vmem.end(),
+ writable);
}
#else // V8_OS_WIN
size_t commit_page_size = page_allocator->CommitPageSize();
@@ -738,21 +745,46 @@ bool WasmCodeAllocator::SetExecutable(bool executable) {
permission)) {
return false;
}
- TRACE_HEAP("Set 0x%" PRIxPTR ":0x%" PRIxPTR " to executable:%d\n",
- region.begin(), region.end(), executable);
+ TRACE_HEAP("Set 0x%" PRIxPTR ":0x%" PRIxPTR " to writable:%d\n",
+ region.begin(), region.end(), writable);
}
#endif // V8_OS_WIN
}
- is_executable_ = executable;
return true;
}
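The writer-count bookkeeping above only touches page permissions on the 0-to-1 and 1-to-0 transitions. A standalone sketch of the same gate, using POSIX mprotect as the permission primitive (an assumption for illustration; V8 goes through its PageAllocator instead):

#include <cstddef>
#include <sys/mman.h>

// Flip a code region between RX and RWX only on the 0<->1 writer transitions.
// The counter is assumed to be protected by the caller's lock, as in V8.
class WritableGate {
 public:
  WritableGate(void* region, size_t size) : region_(region), size_(size) {}

  bool SetWritable(bool writable) {
    if (writable) {
      if (++writers_ > 1) return true;  // already writable
    } else {
      if (--writers_ > 0) return true;  // other writers still active
    }
    int prot = writers_ > 0 ? (PROT_READ | PROT_WRITE | PROT_EXEC)
                            : (PROT_READ | PROT_EXEC);
    return mprotect(region_, size_, prot) == 0;
  }

 private:
  void* region_;
  size_t size_;
  int writers_ = 0;
};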
+bool WasmCodeAllocator::SetThreadWritable(bool writable) {
+ static thread_local int writable_nesting_level = 0;
+ if (writable) {
+ if (++writable_nesting_level > 1) return true;
+ } else {
+ DCHECK_GT(writable_nesting_level, 0);
+ if (--writable_nesting_level > 0) return true;
+ }
+ writable = writable_nesting_level > 0;
+
+ int key = code_manager_->memory_protection_key_;
+
+ MemoryProtectionKeyPermission permissions =
+ writable ? kNoRestrictions : kDisableWrite;
+
+ TRACE_HEAP("Setting memory protection key %d to writable: %d.\n", key,
+ writable);
+ return SetPermissionsForMemoryProtectionKey(key, permissions);
+}
+
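Per-thread write access is possible because memory-protection-key rights live in a per-thread CPU register (PKRU on x86), so flipping them does not affect concurrently executing threads. A hedged sketch of the same nesting pattern on top of the Linux pkey API (pkey_set/pkey_mprotect, glibc 2.27+); this models the idea only, not V8's SetPermissionsForMemoryProtectionKey wrapper:

// Compile with -D_GNU_SOURCE; requires Linux with pkey support.
#include <sys/mman.h>

// One-time setup (elsewhere, illustrative only):
//   int key = pkey_alloc(0, PKEY_DISABLE_WRITE);
//   pkey_mprotect(code_start, code_size,
//                 PROT_READ | PROT_WRITE | PROT_EXEC, key);

// Thread-local nesting of "writable" sections; only the outermost transition
// actually changes the calling thread's access rights for {key}.
bool SetThreadWritable(int key, bool writable) {
  static thread_local int nesting = 0;
  if (writable) {
    if (++nesting > 1) return true;  // an outer scope already enabled writes
  } else {
    if (--nesting > 0) return true;  // inner scopes still open
  }
  unsigned int rights = (nesting > 0) ? 0 : PKEY_DISABLE_WRITE;
  return pkey_set(key, rights) == 0;  // affects only the calling thread
}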
void WasmCodeAllocator::FreeCode(Vector<WasmCode* const> codes) {
// Zap code area and collect freed code regions.
DisjointAllocationPool freed_regions;
size_t code_size = 0;
CODE_SPACE_WRITE_SCOPE
for (WasmCode* code : codes) {
+ // TODO(dlehmann): Pull the {NativeModuleModificationScope} out of the loop.
+ // However, its constructor requires a {NativeModule}.
+ // Can be fixed if {NativeModuleModificationScope()} is changed to take
+ // only a {WasmCodeAllocator} in its constructor.
+ NativeModuleModificationScope native_module_modification_scope(
+ code->native_module());
ZapCode(code->instruction_start(), code->instructions().size());
FlushInstructionCache(code->instruction_start(),
code->instructions().size());
@@ -768,19 +800,16 @@ void WasmCodeAllocator::FreeCode(Vector<WasmCode* const> codes) {
DisjointAllocationPool regions_to_decommit;
PageAllocator* allocator = GetPlatformPageAllocator();
size_t commit_page_size = allocator->CommitPageSize();
- {
- base::MutexGuard guard(&mutex_);
- for (auto region : freed_regions.regions()) {
- auto merged_region = freed_code_space_.Merge(region);
- Address discard_start =
- std::max(RoundUp(merged_region.begin(), commit_page_size),
- RoundDown(region.begin(), commit_page_size));
- Address discard_end =
- std::min(RoundDown(merged_region.end(), commit_page_size),
- RoundUp(region.end(), commit_page_size));
- if (discard_start >= discard_end) continue;
- regions_to_decommit.Merge({discard_start, discard_end - discard_start});
- }
+ for (auto region : freed_regions.regions()) {
+ auto merged_region = freed_code_space_.Merge(region);
+ Address discard_start =
+ std::max(RoundUp(merged_region.begin(), commit_page_size),
+ RoundDown(region.begin(), commit_page_size));
+ Address discard_end =
+ std::min(RoundDown(merged_region.end(), commit_page_size),
+ RoundUp(region.end(), commit_page_size));
+ if (discard_start >= discard_end) continue;
+ regions_to_decommit.Merge({discard_start, discard_end - discard_start});
}
for (auto region : regions_to_decommit.regions()) {
@@ -795,7 +824,6 @@ void WasmCodeAllocator::FreeCode(Vector<WasmCode* const> codes) {
}
size_t WasmCodeAllocator::GetNumCodeSpaces() const {
- base::MutexGuard lock(&mutex_);
return owned_code_space_.size();
}
@@ -809,8 +837,7 @@ NativeModule::NativeModule(WasmEngine* engine, const WasmFeatures& enabled,
std::shared_ptr<NativeModule>* shared_this)
: engine_(engine),
engine_scope_(engine->GetBarrierForBackgroundCompile()->TryLock()),
- code_allocator_(engine->code_manager(), std::move(code_space),
- async_counters),
+ code_allocator_(engine->code_manager(), async_counters),
enabled_features_(enabled),
module_(std::move(module)),
import_wrapper_cache_(std::unique_ptr<WasmImportWrapperCache>(
@@ -838,7 +865,14 @@ NativeModule::NativeModule(WasmEngine* engine, const WasmFeatures& enabled,
std::fill_n(num_liftoff_function_calls_.get(),
module_->num_declared_functions, kCounterStart);
}
- code_allocator_.Init(this);
+ // Even though there cannot be another thread using this object (since we are
+  // just constructing it), we need to hold the mutex to fulfill the
+  // precondition of {NativeModule::AddCodeSpaceLocked}, which is called
+  // right after {WasmCodeAllocator::Init} below.
+ base::RecursiveMutexGuard guard{&allocation_mutex_};
+ auto initial_region = code_space.region();
+ code_allocator_.Init(std::move(code_space));
+ AddCodeSpaceLocked(initial_region);
}
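Taking the (recursive) mutex inside the constructor is purely about keeping the `...Locked` contract uniform: every callee may assert that the lock is held, no matter who calls it. The pattern in isolation, with hypothetical names:

#include <mutex>

class Module {
 public:
  Module() {
    // No other thread can see `this` yet, but taking the lock anyway lets
    // AddCodeSpaceLocked() assume "lock held" unconditionally.
    std::lock_guard<std::recursive_mutex> guard(mutex_);
    AddCodeSpaceLocked();
  }

  void AddCodeSpace() {  // regular entry point after construction
    std::lock_guard<std::recursive_mutex> guard(mutex_);
    AddCodeSpaceLocked();
  }

 private:
  void AddCodeSpaceLocked() {
    // Precondition: mutex_ is held by the current thread.
  }

  std::recursive_mutex mutex_;
};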
void NativeModule::ReserveCodeTableForTesting(uint32_t max_functions) {
@@ -852,16 +886,13 @@ void NativeModule::ReserveCodeTableForTesting(uint32_t max_functions) {
code_table_ = std::move(new_table);
base::AddressRegion single_code_space_region;
- {
- base::MutexGuard guard(&allocation_mutex_);
- CHECK_EQ(1, code_space_data_.size());
- single_code_space_region = code_space_data_[0].region;
- }
+ base::RecursiveMutexGuard guard(&allocation_mutex_);
+ CHECK_EQ(1, code_space_data_.size());
+ single_code_space_region = code_space_data_[0].region;
// Re-allocate jump table.
- main_jump_table_ = CreateEmptyJumpTableInRegion(
+ main_jump_table_ = CreateEmptyJumpTableInRegionLocked(
JumpTableAssembler::SizeForNumberOfSlots(max_functions),
- single_code_space_region, WasmCodeAllocator::OptionalLock{});
- base::MutexGuard guard(&allocation_mutex_);
+ single_code_space_region);
code_space_data_[0].jump_table = main_jump_table_;
}
@@ -879,7 +910,7 @@ void NativeModule::LogWasmCodes(Isolate* isolate, Script script) {
// Log all owned code, not just the current entries in the code table. This
// will also include import wrappers.
- base::MutexGuard lock(&allocation_mutex_);
+ base::RecursiveMutexGuard lock(&allocation_mutex_);
for (auto& owned_entry : owned_code_) {
owned_entry.second->LogCode(isolate, source_url.get(), script.id());
}
@@ -890,11 +921,12 @@ void NativeModule::LogWasmCodes(Isolate* isolate, Script script) {
CompilationEnv NativeModule::CreateCompilationEnv() const {
return {module(), use_trap_handler_, kRuntimeExceptionSupport,
- enabled_features_, kNoLowerSimd};
+ enabled_features_};
}
WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
CODE_SPACE_WRITE_SCOPE
+ NativeModuleModificationScope native_module_modification_scope(this);
const size_t relocation_size = code->relocation_size();
OwnedVector<byte> reloc_info;
if (relocation_size > 0) {
@@ -929,6 +961,7 @@ WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
const int constant_pool_offset = base_offset + code->constant_pool_offset();
const int code_comments_offset = base_offset + code->code_comments_offset();
+ base::RecursiveMutexGuard guard{&allocation_mutex_};
Vector<uint8_t> dst_code_bytes =
code_allocator_.AllocateForCode(this, instructions.size());
base::Memcpy(dst_code_bytes.begin(), instructions.begin(),
@@ -940,7 +973,7 @@ WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
int mode_mask =
RelocInfo::kApplyMask | RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL);
auto jump_tables_ref =
- FindJumpTablesForRegion(base::AddressRegionOf(dst_code_bytes));
+ FindJumpTablesForRegionLocked(base::AddressRegionOf(dst_code_bytes));
Address dst_code_addr = reinterpret_cast<Address>(dst_code_bytes.begin());
Address constant_pool_start = dst_code_addr + constant_pool_offset;
RelocIterator orig_it(*code, mode_mask);
@@ -982,7 +1015,7 @@ WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
new_code->MaybePrint();
new_code->Validate();
- return PublishCode(std::move(new_code));
+ return PublishCodeLocked(std::move(new_code));
}
void NativeModule::UseLazyStub(uint32_t func_index) {
@@ -990,25 +1023,24 @@ void NativeModule::UseLazyStub(uint32_t func_index) {
DCHECK_LT(func_index,
module_->num_imported_functions + module_->num_declared_functions);
+ base::RecursiveMutexGuard guard(&allocation_mutex_);
if (!lazy_compile_table_) {
uint32_t num_slots = module_->num_declared_functions;
WasmCodeRefScope code_ref_scope;
CODE_SPACE_WRITE_SCOPE
- base::AddressRegion single_code_space_region;
- {
- base::MutexGuard guard(&allocation_mutex_);
- DCHECK_EQ(1, code_space_data_.size());
- single_code_space_region = code_space_data_[0].region;
- }
- lazy_compile_table_ = CreateEmptyJumpTableInRegion(
+ NativeModuleModificationScope native_module_modification_scope(this);
+ DCHECK_EQ(1, code_space_data_.size());
+ base::AddressRegion single_code_space_region = code_space_data_[0].region;
+ lazy_compile_table_ = CreateEmptyJumpTableInRegionLocked(
JumpTableAssembler::SizeForNumberOfLazyFunctions(num_slots),
- single_code_space_region, WasmCodeAllocator::OptionalLock{});
+ single_code_space_region);
JumpTableAssembler::GenerateLazyCompileTable(
lazy_compile_table_->instruction_start(), num_slots,
module_->num_imported_functions,
- GetNearRuntimeStubEntry(WasmCode::kWasmCompileLazy,
- FindJumpTablesForRegion(base::AddressRegionOf(
- lazy_compile_table_->instructions()))));
+ GetNearRuntimeStubEntry(
+ WasmCode::kWasmCompileLazy,
+ FindJumpTablesForRegionLocked(
+ base::AddressRegionOf(lazy_compile_table_->instructions()))));
}
// Add jump table entry for jump to the lazy compile stub.
@@ -1017,7 +1049,6 @@ void NativeModule::UseLazyStub(uint32_t func_index) {
Address lazy_compile_target =
lazy_compile_table_->instruction_start() +
JumpTableAssembler::LazyCompileSlotIndexToOffset(slot_index);
- base::MutexGuard guard(&allocation_mutex_);
PatchJumpTablesLocked(slot_index, lazy_compile_target);
}
@@ -1026,10 +1057,14 @@ std::unique_ptr<WasmCode> NativeModule::AddCode(
int tagged_parameter_slots, Vector<const byte> protected_instructions_data,
Vector<const byte> source_position_table, WasmCode::Kind kind,
ExecutionTier tier, ForDebugging for_debugging) {
- Vector<byte> code_space =
- code_allocator_.AllocateForCode(this, desc.instr_size);
- auto jump_table_ref =
- FindJumpTablesForRegion(base::AddressRegionOf(code_space));
+ Vector<byte> code_space;
+ NativeModule::JumpTablesRef jump_table_ref;
+ {
+ base::RecursiveMutexGuard guard{&allocation_mutex_};
+ code_space = code_allocator_.AllocateForCode(this, desc.instr_size);
+ jump_table_ref =
+ FindJumpTablesForRegionLocked(base::AddressRegionOf(code_space));
+ }
return AddCodeWithCodeSpace(index, desc, stack_slots, tagged_parameter_slots,
protected_instructions_data,
source_position_table, kind, tier, for_debugging,
@@ -1057,6 +1092,7 @@ std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
const int instr_size = desc.instr_size;
CODE_SPACE_WRITE_SCOPE
+ NativeModuleModificationScope native_module_modification_scope(this);
base::Memcpy(dst_code_bytes.begin(), desc.buffer,
static_cast<size_t>(desc.instr_size));
@@ -1107,7 +1143,7 @@ std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
WasmCode* NativeModule::PublishCode(std::unique_ptr<WasmCode> code) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
"wasm.PublishCode");
- base::MutexGuard lock(&allocation_mutex_);
+ base::RecursiveMutexGuard lock(&allocation_mutex_);
return PublishCodeLocked(std::move(code));
}
@@ -1117,7 +1153,7 @@ std::vector<WasmCode*> NativeModule::PublishCode(
"wasm.PublishCode", "number", codes.size());
std::vector<WasmCode*> published_code;
published_code.reserve(codes.size());
- base::MutexGuard lock(&allocation_mutex_);
+ base::RecursiveMutexGuard lock(&allocation_mutex_);
// The published code is put into the top-most surrounding {WasmCodeRefScope}.
for (auto& code : codes) {
published_code.push_back(PublishCodeLocked(std::move(code)));
@@ -1138,8 +1174,7 @@ WasmCode::Kind GetCodeKind(const WasmCompilationResult& result) {
WasmCode* NativeModule::PublishCodeLocked(
std::unique_ptr<WasmCode> owned_code) {
- // The caller must hold the {allocation_mutex_}, thus we fail to lock it here.
- DCHECK(!allocation_mutex_.TryLock());
+ allocation_mutex_.AssertHeld();
WasmCode* code = owned_code.get();
new_owned_code_.emplace_back(std::move(owned_code));
@@ -1148,7 +1183,7 @@ WasmCode* NativeModule::PublishCodeLocked(
// guaranteed to be valid.
WasmCodeRefScope::AddRef(code);
- if (code->IsAnonymous() || code->index() < module_->num_imported_functions) {
+ if (code->index() < static_cast<int>(module_->num_imported_functions)) {
return code;
}
@@ -1208,7 +1243,7 @@ WasmCode* NativeModule::PublishCodeLocked(
}
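The assertion change above is subtle but necessary: with a recursive mutex, `TryLock()` succeeds for the thread that already owns the lock, so `DCHECK(!allocation_mutex_.TryLock())` would fire exactly in the legitimate case. `AssertHeld()` instead checks ownership directly. A sketch of how such an assertion can be modeled (debug-only owner tracking; hypothetical class, not v8::base's implementation):

#include <cassert>
#include <mutex>
#include <thread>

// Debug-only owner tracking; reading owner_ in AssertHeld is a simplification
// (a production implementation would use atomics or debug-only bookkeeping).
class CheckedRecursiveMutex {
 public:
  void Lock() {
    impl_.lock();
    owner_ = std::this_thread::get_id();
    ++depth_;
  }
  void Unlock() {
    if (--depth_ == 0) owner_ = std::thread::id{};
    impl_.unlock();
  }
  void AssertHeld() const { assert(owner_ == std::this_thread::get_id()); }

 private:
  std::recursive_mutex impl_;
  std::thread::id owner_{};
  int depth_ = 0;
};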
void NativeModule::ReinstallDebugCode(WasmCode* code) {
- base::MutexGuard lock(&allocation_mutex_);
+ base::RecursiveMutexGuard lock(&allocation_mutex_);
DCHECK_EQ(this, code->native_module());
DCHECK_EQ(kWithBreakpoints, code->for_debugging());
@@ -1230,9 +1265,14 @@ void NativeModule::ReinstallDebugCode(WasmCode* code) {
PatchJumpTablesLocked(slot_idx, code->instruction_start());
}
-Vector<uint8_t> NativeModule::AllocateForDeserializedCode(
- size_t total_code_size) {
- return code_allocator_.AllocateForCode(this, total_code_size);
+std::pair<Vector<uint8_t>, NativeModule::JumpTablesRef>
+NativeModule::AllocateForDeserializedCode(size_t total_code_size) {
+ base::RecursiveMutexGuard guard{&allocation_mutex_};
+ Vector<uint8_t> code_space =
+ code_allocator_.AllocateForCode(this, total_code_size);
+ auto jump_tables =
+ FindJumpTablesForRegionLocked(base::AddressRegionOf(code_space));
+ return {code_space, jump_tables};
}
std::unique_ptr<WasmCode> NativeModule::AddDeserializedCode(
@@ -1253,7 +1293,7 @@ std::unique_ptr<WasmCode> NativeModule::AddDeserializedCode(
}
std::vector<WasmCode*> NativeModule::SnapshotCodeTable() const {
- base::MutexGuard lock(&allocation_mutex_);
+ base::RecursiveMutexGuard lock(&allocation_mutex_);
WasmCode** start = code_table_.get();
WasmCode** end = start + module_->num_declared_functions;
for (WasmCode* code : VectorOf(start, end - start)) {
@@ -1263,19 +1303,19 @@ std::vector<WasmCode*> NativeModule::SnapshotCodeTable() const {
}
WasmCode* NativeModule::GetCode(uint32_t index) const {
- base::MutexGuard guard(&allocation_mutex_);
+ base::RecursiveMutexGuard guard(&allocation_mutex_);
WasmCode* code = code_table_[declared_function_index(module(), index)];
if (code) WasmCodeRefScope::AddRef(code);
return code;
}
bool NativeModule::HasCode(uint32_t index) const {
- base::MutexGuard guard(&allocation_mutex_);
+ base::RecursiveMutexGuard guard(&allocation_mutex_);
return code_table_[declared_function_index(module(), index)] != nullptr;
}
bool NativeModule::HasCodeWithTier(uint32_t index, ExecutionTier tier) const {
- base::MutexGuard guard(&allocation_mutex_);
+ base::RecursiveMutexGuard guard(&allocation_mutex_);
return code_table_[declared_function_index(module(), index)] != nullptr &&
code_table_[declared_function_index(module(), index)]->tier() == tier;
}
@@ -1289,16 +1329,17 @@ WasmModuleSourceMap* NativeModule::GetWasmSourceMap() const {
return source_map_.get();
}
-WasmCode* NativeModule::CreateEmptyJumpTableInRegion(
- int jump_table_size, base::AddressRegion region,
- const WasmCodeAllocator::OptionalLock& allocator_lock) {
+WasmCode* NativeModule::CreateEmptyJumpTableInRegionLocked(
+ int jump_table_size, base::AddressRegion region) {
+ allocation_mutex_.AssertHeld();
// Only call this if we really need a jump table.
DCHECK_LT(0, jump_table_size);
- Vector<uint8_t> code_space = code_allocator_.AllocateForCodeInRegion(
- this, jump_table_size, region, allocator_lock);
+ Vector<uint8_t> code_space =
+ code_allocator_.AllocateForCodeInRegion(this, jump_table_size, region);
DCHECK(!code_space.empty());
UpdateCodeSize(jump_table_size, ExecutionTier::kNone, kNoDebugging);
CODE_SPACE_WRITE_SCOPE
+ NativeModuleModificationScope native_module_modification_scope(this);
ZapCode(reinterpret_cast<Address>(code_space.begin()), code_space.size());
std::unique_ptr<WasmCode> code{
new WasmCode{this, // native_module
@@ -1317,7 +1358,7 @@ WasmCode* NativeModule::CreateEmptyJumpTableInRegion(
WasmCode::kJumpTable, // kind
ExecutionTier::kNone, // tier
kNoDebugging}}; // for_debugging
- return PublishCode(std::move(code));
+ return PublishCodeLocked(std::move(code));
}
void NativeModule::UpdateCodeSize(size_t size, ExecutionTier tier,
@@ -1330,10 +1371,10 @@ void NativeModule::UpdateCodeSize(size_t size, ExecutionTier tier,
}
void NativeModule::PatchJumpTablesLocked(uint32_t slot_index, Address target) {
- // The caller must hold the {allocation_mutex_}, thus we fail to lock it here.
- DCHECK(!allocation_mutex_.TryLock());
+ allocation_mutex_.AssertHeld();
CODE_SPACE_WRITE_SCOPE
+ NativeModuleModificationScope native_module_modification_scope(this);
for (auto& code_space_data : code_space_data_) {
DCHECK_IMPLIES(code_space_data.jump_table, code_space_data.far_jump_table);
if (!code_space_data.jump_table) continue;
@@ -1343,8 +1384,7 @@ void NativeModule::PatchJumpTablesLocked(uint32_t slot_index, Address target) {
void NativeModule::PatchJumpTableLocked(const CodeSpaceData& code_space_data,
uint32_t slot_index, Address target) {
- // The caller must hold the {allocation_mutex_}, thus we fail to lock it here.
- DCHECK(!allocation_mutex_.TryLock());
+ allocation_mutex_.AssertHeld();
DCHECK_NOT_NULL(code_space_data.jump_table);
DCHECK_NOT_NULL(code_space_data.far_jump_table);
@@ -1369,9 +1409,9 @@ void NativeModule::PatchJumpTableLocked(const CodeSpaceData& code_space_data,
target);
}
-void NativeModule::AddCodeSpace(
- base::AddressRegion region,
- const WasmCodeAllocator::OptionalLock& allocator_lock) {
+void NativeModule::AddCodeSpaceLocked(base::AddressRegion region) {
+ allocation_mutex_.AssertHeld();
+
// Each code space must be at least twice as large as the overhead per code
// space. Otherwise, we are wasting too much memory.
DCHECK_GE(region.size(),
@@ -1387,8 +1427,8 @@ void NativeModule::AddCodeSpace(
->CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
size_t size = Heap::GetCodeRangeReservedAreaSize();
DCHECK_LT(0, size);
- Vector<byte> padding = code_allocator_.AllocateForCodeInRegion(
- this, size, region, allocator_lock);
+ Vector<byte> padding =
+ code_allocator_.AllocateForCodeInRegion(this, size, region);
CHECK_EQ(reinterpret_cast<Address>(padding.begin()), region.begin());
win64_unwindinfo::RegisterNonABICompliantCodeRange(
reinterpret_cast<void*>(region.begin()), region.size());
@@ -1397,28 +1437,29 @@ void NativeModule::AddCodeSpace(
WasmCodeRefScope code_ref_scope;
CODE_SPACE_WRITE_SCOPE
+ NativeModuleModificationScope native_module_modification_scope(this);
WasmCode* jump_table = nullptr;
WasmCode* far_jump_table = nullptr;
const uint32_t num_wasm_functions = module_->num_declared_functions;
const bool is_first_code_space = code_space_data_.empty();
// We always need a far jump table, because it contains the runtime stubs.
- const bool needs_far_jump_table = !FindJumpTablesForRegion(region).is_valid();
+ const bool needs_far_jump_table =
+ !FindJumpTablesForRegionLocked(region).is_valid();
const bool needs_jump_table = num_wasm_functions > 0 && needs_far_jump_table;
if (needs_jump_table) {
- jump_table = CreateEmptyJumpTableInRegion(
- JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions), region,
- allocator_lock);
+ jump_table = CreateEmptyJumpTableInRegionLocked(
+ JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions), region);
CHECK(region.contains(jump_table->instruction_start()));
}
if (needs_far_jump_table) {
int num_function_slots = NumWasmFunctionsInFarJumpTable(num_wasm_functions);
- far_jump_table = CreateEmptyJumpTableInRegion(
+ far_jump_table = CreateEmptyJumpTableInRegionLocked(
JumpTableAssembler::SizeForNumberOfFarJumpSlots(
WasmCode::kRuntimeStubCount,
NumWasmFunctionsInFarJumpTable(num_function_slots)),
- region, allocator_lock);
+ region);
CHECK(region.contains(far_jump_table->instruction_start()));
EmbeddedData embedded_data = EmbeddedData::FromBlob();
#define RUNTIME_STUB(Name) Builtins::k##Name,
@@ -1446,7 +1487,6 @@ void NativeModule::AddCodeSpace(
main_far_jump_table_ = far_jump_table;
}
- base::MutexGuard guard(&allocation_mutex_);
code_space_data_.push_back(CodeSpaceData{region, jump_table, far_jump_table});
if (jump_table && !is_first_code_space) {
@@ -1499,8 +1539,7 @@ void NativeModule::SetWireBytes(OwnedVector<const uint8_t> wire_bytes) {
}
void NativeModule::TransferNewOwnedCodeLocked() const {
- // The caller holds the allocation mutex.
- DCHECK(!allocation_mutex_.TryLock());
+ allocation_mutex_.AssertHeld();
DCHECK(!new_owned_code_.empty());
// Sort the {new_owned_code_} vector reversed, such that the position of the
// previously inserted element can be used as a hint for the next element. If
@@ -1524,8 +1563,7 @@ void NativeModule::TransferNewOwnedCodeLocked() const {
}
void NativeModule::InsertToCodeCache(WasmCode* code) {
- // The caller holds {allocation_mutex_}.
- DCHECK(!allocation_mutex_.TryLock());
+ allocation_mutex_.AssertHeld();
DCHECK_NOT_NULL(cached_code_);
if (code->IsAnonymous()) return;
// Only cache Liftoff debugging code or TurboFan code (no breakpoints or
@@ -1541,7 +1579,7 @@ void NativeModule::InsertToCodeCache(WasmCode* code) {
}
WasmCode* NativeModule::Lookup(Address pc) const {
- base::MutexGuard lock(&allocation_mutex_);
+ base::RecursiveMutexGuard lock(&allocation_mutex_);
if (!new_owned_code_.empty()) TransferNewOwnedCodeLocked();
auto iter = owned_code_.upper_bound(pc);
if (iter == owned_code_.begin()) return nullptr;
@@ -1566,8 +1604,9 @@ Address NativeModule::GetCallTargetForFunction(uint32_t func_index) const {
return main_jump_table_->instruction_start() + slot_offset;
}
-NativeModule::JumpTablesRef NativeModule::FindJumpTablesForRegion(
+NativeModule::JumpTablesRef NativeModule::FindJumpTablesForRegionLocked(
base::AddressRegion code_region) const {
+ allocation_mutex_.AssertHeld();
auto jump_table_usable = [code_region](const WasmCode* jump_table) {
Address table_start = jump_table->instruction_start();
Address table_end = table_start + jump_table->instructions().size();
@@ -1583,18 +1622,6 @@ NativeModule::JumpTablesRef NativeModule::FindJumpTablesForRegion(
return max_distance <= WasmCodeAllocator::kMaxCodeSpaceSize;
};
- // Fast path: Try to use {main_jump_table_} and {main_far_jump_table_}.
- // Access to these fields is possible without locking, since these fields are
- // initialized on construction of the {NativeModule}.
- if (main_far_jump_table_ && jump_table_usable(main_far_jump_table_) &&
- (main_jump_table_ == nullptr || jump_table_usable(main_jump_table_))) {
- return {
- main_jump_table_ ? main_jump_table_->instruction_start() : kNullAddress,
- main_far_jump_table_->instruction_start()};
- }
-
- // Otherwise, take the mutex and look for another suitable jump table.
- base::MutexGuard guard(&allocation_mutex_);
for (auto& code_space_data : code_space_data_) {
DCHECK_IMPLIES(code_space_data.jump_table, code_space_data.far_jump_table);
if (!code_space_data.far_jump_table) continue;
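`jump_table_usable` is plain address arithmetic: a jump table serves a code region only if the farthest pair of addresses (region end vs. table start, and table end vs. region begin) stays within near-call range, i.e. at most kMaxCodeSpaceSize (1024 MB in the configuration shown above). A self-contained sketch with a worked example (addresses are made up):

#include <algorithm>
#include <cstdint>

constexpr uint64_t kMaxCodeSpaceSize = 1024ull * 1024 * 1024;  // 1024 MB

// True if every address in [region_begin, region_end) can reach every address
// in [table_start, table_end) within kMaxCodeSpaceSize (near-call range).
bool JumpTableUsable(uint64_t region_begin, uint64_t region_end,
                     uint64_t table_start, uint64_t table_end) {
  uint64_t forward =
      region_end > table_start ? region_end - table_start : uint64_t{0};
  uint64_t backward =
      table_end > region_begin ? table_end - region_begin : uint64_t{0};
  return std::max(forward, backward) <= kMaxCodeSpaceSize;
}

// Example: table [0x10000000, 0x10002000), code region [0x30000000,
// 0x30100000). forward = 0x30100000 - 0x10000000 = 0x20100000 (= 513 MB),
// backward underflows and is clamped to 0, so the table is usable
// (513 MB <= 1024 MB).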
@@ -1645,7 +1672,7 @@ uint32_t NativeModule::GetFunctionIndexFromJumpTableSlot(
}
WasmCode::RuntimeStubId NativeModule::GetRuntimeStubId(Address target) const {
- base::MutexGuard guard(&allocation_mutex_);
+ base::RecursiveMutexGuard guard(&allocation_mutex_);
for (auto& code_space_data : code_space_data_) {
if (code_space_data.far_jump_table != nullptr &&
@@ -1679,10 +1706,22 @@ NativeModule::~NativeModule() {
WasmCodeManager::WasmCodeManager(size_t max_committed)
: max_committed_code_space_(max_committed),
- critical_committed_code_space_(max_committed / 2) {
+ critical_committed_code_space_(max_committed / 2),
+ memory_protection_key_(FLAG_wasm_memory_protection_keys
+ ? AllocateMemoryProtectionKey()
+ : kNoMemoryProtectionKey) {
DCHECK_LE(max_committed, FLAG_wasm_max_code_space * MB);
}
+WasmCodeManager::~WasmCodeManager() {
+ // No more committed code space.
+ DCHECK_EQ(0, total_committed_code_space_.load());
+
+ if (FLAG_wasm_memory_protection_keys) {
+ FreeMemoryProtectionKey(memory_protection_key_);
+ }
+}
+
#if defined(V8_OS_WIN64)
bool WasmCodeManager::CanRegisterUnwindInfoForNonABICompliantCodeRange() const {
return win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
@@ -1711,16 +1750,38 @@ void WasmCodeManager::Commit(base::AddressRegion region) {
break;
}
}
- PageAllocator::Permission permission = FLAG_wasm_write_protect_code_memory
- ? PageAllocator::kReadWrite
- : PageAllocator::kReadWriteExecute;
-
- TRACE_HEAP("Setting rw permissions for 0x%" PRIxPTR ":0x%" PRIxPTR "\n",
- region.begin(), region.end());
+ // Even when we employ W^X with FLAG_wasm_write_protect_code_memory == true,
+ // code pages need to be initially allocated with RWX permission because of
+ // concurrent compilation/execution. For this reason there is no distinction
+ // here based on FLAG_wasm_write_protect_code_memory.
+ // TODO(dlehmann): This allocates initially as writable and executable, and
+ // as such is not safe-by-default. In particular, if
+ // {WasmCodeAllocator::SetWritable(false)} is never called afterwards (e.g.,
+ // because no {NativeModuleModificationScope} is created), the writable
+ // permission is never withdrawn.
+ // One potential fix is to allocate initially with kReadExecute only, which
+ // forces all compilation threads to add the missing
+  // {NativeModuleModificationScope}s before modification; and/or to add
+ // DCHECKs that {NativeModuleModificationScope} is open when calling this
+ // method.
+ PageAllocator::Permission permission = PageAllocator::kReadWriteExecute;
+
+ bool success;
+ if (FLAG_wasm_memory_protection_keys) {
+ TRACE_HEAP(
+ "Setting rwx permissions and memory protection key %d for 0x%" PRIxPTR
+ ":0x%" PRIxPTR "\n",
+ memory_protection_key_, region.begin(), region.end());
+ success = SetPermissionsAndMemoryProtectionKey(
+ GetPlatformPageAllocator(), region, permission, memory_protection_key_);
+ } else {
+ TRACE_HEAP("Setting rwx permissions for 0x%" PRIxPTR ":0x%" PRIxPTR "\n",
+ region.begin(), region.end());
+ success = SetPermissions(GetPlatformPageAllocator(), region.begin(),
+ region.size(), permission);
+ }
- if (!SetPermissions(GetPlatformPageAllocator(), region.begin(), region.size(),
- permission)) {
- // Highly unlikely.
+ if (V8_UNLIKELY(!success)) {
V8::FatalProcessOutOfMemory(
nullptr,
"WasmCodeManager::Commit: Cannot make pre-reserved region writable");
@@ -1739,8 +1800,13 @@ void WasmCodeManager::Decommit(base::AddressRegion region) {
USE(old_committed);
TRACE_HEAP("Discarding system pages 0x%" PRIxPTR ":0x%" PRIxPTR "\n",
region.begin(), region.end());
- CHECK(allocator->SetPermissions(reinterpret_cast<void*>(region.begin()),
- region.size(), PageAllocator::kNoAccess));
+ if (FLAG_wasm_memory_protection_keys) {
+ CHECK(SetPermissionsAndMemoryProtectionKey(
+ allocator, region, PageAllocator::kNoAccess, kNoMemoryProtectionKey));
+ } else {
+ CHECK(SetPermissions(allocator, region.begin(), region.size(),
+ PageAllocator::kNoAccess));
+ }
}
void WasmCodeManager::AssignRange(base::AddressRegion region,
@@ -2002,10 +2068,15 @@ std::vector<std::unique_ptr<WasmCode>> NativeModule::AddCompiledCode(
DCHECK(result.succeeded());
total_code_space += RoundUp<kCodeAlignment>(result.code_desc.instr_size);
}
- Vector<byte> code_space =
- code_allocator_.AllocateForCode(this, total_code_space);
- // Lookup the jump tables to use once, then use for all code objects.
- auto jump_tables = FindJumpTablesForRegion(base::AddressRegionOf(code_space));
+ Vector<byte> code_space;
+ NativeModule::JumpTablesRef jump_tables;
+ {
+ base::RecursiveMutexGuard guard{&allocation_mutex_};
+ code_space = code_allocator_.AllocateForCode(this, total_code_space);
+ // Lookup the jump tables to use once, then use for all code objects.
+ jump_tables =
+ FindJumpTablesForRegionLocked(base::AddressRegionOf(code_space));
+ }
// If we happen to have a {total_code_space} which is bigger than
// {kMaxCodeSpaceSize}, we would not find valid jump tables for the whole
// region. If this ever happens, we need to handle this case (by splitting the
@@ -2017,6 +2088,7 @@ std::vector<std::unique_ptr<WasmCode>> NativeModule::AddCompiledCode(
// Now copy the generated code into the code space and relocate it.
CODE_SPACE_WRITE_SCOPE
+ NativeModuleModificationScope native_module_modification_scope(this);
for (auto& result : results) {
DCHECK_EQ(result.code_desc.buffer, result.instr_buffer.get());
size_t code_size = RoundUp<kCodeAlignment>(result.code_desc.instr_size);
@@ -2039,12 +2111,12 @@ void NativeModule::SetTieringState(TieringState new_tiering_state) {
// Do not tier down asm.js (just never change the tiering state).
if (module()->origin != kWasmOrigin) return;
- base::MutexGuard lock(&allocation_mutex_);
+ base::RecursiveMutexGuard lock(&allocation_mutex_);
tiering_state_ = new_tiering_state;
}
bool NativeModule::IsTieredDown() {
- base::MutexGuard lock(&allocation_mutex_);
+ base::RecursiveMutexGuard lock(&allocation_mutex_);
return tiering_state_ == kTieredDown;
}
@@ -2054,7 +2126,7 @@ void NativeModule::RecompileForTiering() {
// compilation units finish, code installation will handle that correctly.
TieringState current_state;
{
- base::MutexGuard lock(&allocation_mutex_);
+ base::RecursiveMutexGuard lock(&allocation_mutex_);
current_state = tiering_state_;
// Initialize {cached_code_} to signal that this cache should get filled
@@ -2074,7 +2146,7 @@ void NativeModule::RecompileForTiering() {
std::vector<int> NativeModule::FindFunctionsToRecompile(
TieringState new_tiering_state) {
WasmCodeRefScope code_ref_scope;
- base::MutexGuard guard(&allocation_mutex_);
+ base::RecursiveMutexGuard guard(&allocation_mutex_);
std::vector<int> function_indexes;
int imported = module()->num_imported_functions;
int declared = module()->num_declared_functions;
@@ -2110,19 +2182,16 @@ std::vector<int> NativeModule::FindFunctionsToRecompile(
}
void NativeModule::FreeCode(Vector<WasmCode* const> codes) {
+ base::RecursiveMutexGuard guard(&allocation_mutex_);
// Free the code space.
code_allocator_.FreeCode(codes);
- DebugInfo* debug_info = nullptr;
- {
- base::MutexGuard guard(&allocation_mutex_);
- if (!new_owned_code_.empty()) TransferNewOwnedCodeLocked();
- debug_info = debug_info_.get();
- // Free the {WasmCode} objects. This will also unregister trap handler data.
- for (WasmCode* code : codes) {
- DCHECK_EQ(1, owned_code_.count(code->instruction_start()));
- owned_code_.erase(code->instruction_start());
- }
+ if (!new_owned_code_.empty()) TransferNewOwnedCodeLocked();
+ DebugInfo* debug_info = debug_info_.get();
+ // Free the {WasmCode} objects. This will also unregister trap handler data.
+ for (WasmCode* code : codes) {
+ DCHECK_EQ(1, owned_code_.count(code->instruction_start()));
+ owned_code_.erase(code->instruction_start());
}
// Remove debug side tables for all removed code objects, after releasing our
// lock. This is to avoid lock order inversion.
@@ -2130,16 +2199,17 @@ void NativeModule::FreeCode(Vector<WasmCode* const> codes) {
}
size_t NativeModule::GetNumberOfCodeSpacesForTesting() const {
+ base::RecursiveMutexGuard guard{&allocation_mutex_};
return code_allocator_.GetNumCodeSpaces();
}
bool NativeModule::HasDebugInfo() const {
- base::MutexGuard guard(&allocation_mutex_);
+ base::RecursiveMutexGuard guard(&allocation_mutex_);
return debug_info_ != nullptr;
}
DebugInfo* NativeModule::GetDebugInfo() {
- base::MutexGuard guard(&allocation_mutex_);
+ base::RecursiveMutexGuard guard(&allocation_mutex_);
if (!debug_info_) debug_info_ = std::make_unique<DebugInfo>(this);
return debug_info_.get();
}
@@ -2200,17 +2270,30 @@ WasmCode* WasmCodeManager::LookupCode(Address pc) const {
NativeModuleModificationScope::NativeModuleModificationScope(
NativeModule* native_module)
: native_module_(native_module) {
- if (FLAG_wasm_write_protect_code_memory && native_module_ &&
- (native_module_->modification_scope_depth_++) == 0) {
- bool success = native_module_->SetExecutable(false);
+ DCHECK_NOT_NULL(native_module_);
+ if (FLAG_wasm_memory_protection_keys) {
+ bool success = native_module_->SetThreadWritable(true);
+ if (!success && FLAG_wasm_write_protect_code_memory) {
+ // Fallback to mprotect-based write protection (much slower).
+ success = native_module_->SetWritable(true);
+ CHECK(success);
+ }
+ } else if (FLAG_wasm_write_protect_code_memory) {
+ bool success = native_module_->SetWritable(true);
CHECK(success);
}
}
NativeModuleModificationScope::~NativeModuleModificationScope() {
- if (FLAG_wasm_write_protect_code_memory && native_module_ &&
- (native_module_->modification_scope_depth_--) == 1) {
- bool success = native_module_->SetExecutable(true);
+ if (FLAG_wasm_memory_protection_keys) {
+ bool success = native_module_->SetThreadWritable(false);
+ if (!success && FLAG_wasm_write_protect_code_memory) {
+ // Fallback to mprotect-based write protection (much slower).
+ success = native_module_->SetWritable(false);
+ CHECK(success);
+ }
+ } else if (FLAG_wasm_write_protect_code_memory) {
+ bool success = native_module_->SetWritable(false);
CHECK(success);
}
}
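The scope above is ordinary RAII: grab write access on construction, drop it on destruction, preferring the cheap per-thread pkey path and falling back to page-permission flipping. A condensed, self-contained sketch of that shape (CodeSpace and the stub bodies are stand-ins, not V8 types):

#include <cstdio>

// CodeSpace stands in for NativeModule; the bodies are stubs, not real
// permission changes.
struct CodeSpace {
  bool pkeys_supported = false;
  bool SetThreadWritable(bool writable) {
    if (!pkeys_supported) return false;  // tell the caller to fall back
    std::printf("pkey thread-writable: %d\n", writable);
    return true;
  }
  bool SetWritable(bool writable) {
    std::printf("page permissions writable: %d\n", writable);
    return true;
  }
};

class ModificationScope {
 public:
  explicit ModificationScope(CodeSpace* space) : space_(space) {
    // Prefer the cheap per-thread path; otherwise flip page permissions.
    if (!space_->SetThreadWritable(true)) space_->SetWritable(true);
  }
  ~ModificationScope() {
    if (!space_->SetThreadWritable(false)) space_->SetWritable(false);
  }

 private:
  CodeSpace* space_;
};

// Usage: the code space is writable exactly for the lifetime of the scope.
// { ModificationScope scope(&space); /* patch code here */ }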
diff --git a/deps/v8/src/wasm/wasm-code-manager.h b/deps/v8/src/wasm/wasm-code-manager.h
index 2996a6e2c6..312f5346b4 100644
--- a/deps/v8/src/wasm/wasm-code-manager.h
+++ b/deps/v8/src/wasm/wasm-code-manager.h
@@ -26,6 +26,7 @@
#include "src/trap-handler/trap-handler.h"
#include "src/utils/vector.h"
#include "src/wasm/compilation-environment.h"
+#include "src/wasm/memory-protection-key.h"
#include "src/wasm/wasm-features.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module-sourcemap.h"
@@ -94,7 +95,8 @@ struct WasmModule;
V(WasmAllocateArrayWithRtt) \
V(WasmAllocateRtt) \
V(WasmAllocateStructWithRtt) \
- V(WasmSubtypeCheck)
+ V(WasmSubtypeCheck) \
+ V(WasmOnStackReplace)
// Sorted, disjoint and non-overlapping memory regions. A region is of the
// form [start, end). So there's no [start, end), [end, other_end),
@@ -160,11 +162,7 @@ class V8_EXPORT_PRIVATE WasmCode final {
return {reloc_info().end(), static_cast<size_t>(source_positions_size_)};
}
- // TODO(clemensb): Make this return int.
- uint32_t index() const {
- DCHECK_LE(0, index_);
- return index_;
- }
+ int index() const { return index_; }
// Anonymous functions are functions that don't carry an index.
bool IsAnonymous() const { return index_ == kAnonymousFuncIndex; }
Kind kind() const { return KindField::decode(flags_); }
@@ -397,31 +395,11 @@ class WasmCodeAllocator {
static constexpr size_t kMaxCodeSpaceSize = 1024 * MB;
#endif
- // {OptionalLock} is passed between {WasmCodeAllocator} and {NativeModule} to
- // indicate that the lock on the {WasmCodeAllocator} is already taken. It's
- // optional to allow to also call methods without holding the lock.
- class OptionalLock {
- public:
- // External users can only instantiate a non-locked {OptionalLock}.
- OptionalLock() = default;
- ~OptionalLock();
- bool is_locked() const { return allocator_ != nullptr; }
-
- private:
- friend class WasmCodeAllocator;
- // {Lock} is called from the {WasmCodeAllocator} if no locked {OptionalLock}
- // is passed.
- void Lock(WasmCodeAllocator*);
-
- WasmCodeAllocator* allocator_ = nullptr;
- };
-
- WasmCodeAllocator(WasmCodeManager*, VirtualMemory code_space,
- std::shared_ptr<Counters> async_counters);
+ WasmCodeAllocator(WasmCodeManager*, std::shared_ptr<Counters> async_counters);
~WasmCodeAllocator();
// Call before use, after the {NativeModule} is set up completely.
- void Init(NativeModule*);
+ void Init(VirtualMemory code_space);
size_t committed_code_space() const {
return committed_code_space_.load(std::memory_order_acquire);
@@ -434,22 +412,32 @@ class WasmCodeAllocator {
}
// Allocate code space. Returns a valid buffer or fails with OOM (crash).
+ // Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
Vector<byte> AllocateForCode(NativeModule*, size_t size);
// Allocate code space within a specific region. Returns a valid buffer or
// fails with OOM (crash).
+ // Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
Vector<byte> AllocateForCodeInRegion(NativeModule*, size_t size,
- base::AddressRegion,
- const WasmCodeAllocator::OptionalLock&);
+ base::AddressRegion);
+
+ // Sets permissions of all owned code space to read-write or read-only (if
+ // {writable} is false). Returns true on success.
+ // Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
+ V8_EXPORT_PRIVATE bool SetWritable(bool writable);
- // Sets permissions of all owned code space to executable, or read-write (if
- // {executable} is false). Returns true on success.
- V8_EXPORT_PRIVATE bool SetExecutable(bool executable);
+ // Set this thread's permission of all owned code space to read-write or
+ // read-only (if {writable} is false). Uses memory protection keys.
+ // Returns true on success. Since the permission is thread-local, there is no
+ // requirement to hold any lock when calling this method.
+ bool SetThreadWritable(bool writable);
// Free memory pages of all given code objects. Used for wasm code GC.
+ // Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
void FreeCode(Vector<WasmCode* const>);
// Retrieve the number of separately reserved code spaces.
+ // Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
size_t GetNumCodeSpaces() const;
private:
@@ -461,10 +449,8 @@ class WasmCodeAllocator {
// The engine-wide wasm code manager.
WasmCodeManager* const code_manager_;
- mutable base::Mutex mutex_;
-
//////////////////////////////////////////////////////////////////////////////
- // Protected by {mutex_}:
+ // These fields are protected by the mutex in {NativeModule}.
// Code space that was reserved and is available for allocations (subset of
// {owned_code_space_}).
@@ -476,6 +462,8 @@ class WasmCodeAllocator {
DisjointAllocationPool freed_code_space_;
std::vector<VirtualMemory> owned_code_space_;
+ int writers_count_{0};
+
// End of fields protected by {mutex_}.
//////////////////////////////////////////////////////////////////////////////
@@ -483,8 +471,6 @@ class WasmCodeAllocator {
std::atomic<size_t> generated_code_size_{0};
std::atomic<size_t> freed_code_size_{0};
- bool is_executable_ = false;
-
std::shared_ptr<Counters> async_counters_;
};
@@ -523,7 +509,15 @@ class V8_EXPORT_PRIVATE NativeModule final {
// table and jump table via another {PublishCode}.
void ReinstallDebugCode(WasmCode*);
- Vector<uint8_t> AllocateForDeserializedCode(size_t total_code_size);
+ struct JumpTablesRef {
+ Address jump_table_start = kNullAddress;
+ Address far_jump_table_start = kNullAddress;
+
+ bool is_valid() const { return far_jump_table_start != kNullAddress; }
+ };
+
+ std::pair<Vector<uint8_t>, JumpTablesRef> AllocateForDeserializedCode(
+ size_t total_code_size);
std::unique_ptr<WasmCode> AddDeserializedCode(
int index, Vector<byte> instructions, int stack_slots,
@@ -564,26 +558,19 @@ class V8_EXPORT_PRIVATE NativeModule final {
// the first jump table).
Address GetCallTargetForFunction(uint32_t func_index) const;
- struct JumpTablesRef {
- Address jump_table_start = kNullAddress;
- Address far_jump_table_start = kNullAddress;
-
- bool is_valid() const { return far_jump_table_start != kNullAddress; }
- };
-
// Finds the jump tables that should be used for given code region. This
// information is then passed to {GetNearCallTargetForFunction} and
// {GetNearRuntimeStubEntry} to avoid the overhead of looking this information
// up there. Return an empty struct if no suitable jump tables exist.
- JumpTablesRef FindJumpTablesForRegion(base::AddressRegion) const;
+ JumpTablesRef FindJumpTablesForRegionLocked(base::AddressRegion) const;
// Similarly to {GetCallTargetForFunction}, but uses the jump table previously
- // looked up via {FindJumpTablesForRegion}.
+ // looked up via {FindJumpTablesForRegionLocked}.
Address GetNearCallTargetForFunction(uint32_t func_index,
const JumpTablesRef&) const;
// Get a runtime stub entry (which is a far jump table slot) in the jump table
- // previously looked up via {FindJumpTablesForRegion}.
+ // previously looked up via {FindJumpTablesForRegionLocked}.
Address GetNearRuntimeStubEntry(WasmCode::RuntimeStubId index,
const JumpTablesRef&) const;
@@ -591,8 +578,13 @@ class V8_EXPORT_PRIVATE NativeModule final {
// to a function index.
uint32_t GetFunctionIndexFromJumpTableSlot(Address slot_address) const;
- bool SetExecutable(bool executable) {
- return code_allocator_.SetExecutable(executable);
+ bool SetWritable(bool writable) {
+ base::RecursiveMutexGuard guard{&allocation_mutex_};
+ return code_allocator_.SetWritable(writable);
+ }
+
+ bool SetThreadWritable(bool writable) {
+ return code_allocator_.SetThreadWritable(writable);
}
// For cctests, where we build both WasmModule and the runtime objects
@@ -726,9 +718,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
ExecutionTier tier, ForDebugging for_debugging,
Vector<uint8_t> code_space, const JumpTablesRef& jump_tables_ref);
- WasmCode* CreateEmptyJumpTableInRegion(
- int jump_table_size, base::AddressRegion,
- const WasmCodeAllocator::OptionalLock&);
+ WasmCode* CreateEmptyJumpTableInRegionLocked(int jump_table_size,
+ base::AddressRegion);
void UpdateCodeSize(size_t, ExecutionTier, ForDebugging);
@@ -740,8 +731,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
Address target);
// Called by the {WasmCodeAllocator} to register a new code space.
- void AddCodeSpace(base::AddressRegion,
- const WasmCodeAllocator::OptionalLock&);
+ void AddCodeSpaceLocked(base::AddressRegion);
// Hold the {allocation_mutex_} when calling {PublishCodeLocked}.
WasmCode* PublishCodeLocked(std::unique_ptr<WasmCode>);
@@ -806,7 +796,12 @@ class V8_EXPORT_PRIVATE NativeModule final {
std::unique_ptr<uint32_t[]> num_liftoff_function_calls_;
// This mutex protects concurrent calls to {AddCode} and friends.
- mutable base::Mutex allocation_mutex_;
+ // TODO(dlehmann): Revert this to a regular {Mutex} again.
+ // This needs to be a {RecursiveMutex} only because of
+  // {NativeModuleModificationScope} usages, which (1) either occur at places
+  // that already hold the {allocation_mutex_} or (2) are nested due to
+  // multiple open {NativeModuleModificationScope}s in the call hierarchy.
+  // Both are fixable.
+ mutable base::RecursiveMutex allocation_mutex_;
//////////////////////////////////////////////////////////////////////////////
// Protected by {allocation_mutex_}:
@@ -847,7 +842,6 @@ class V8_EXPORT_PRIVATE NativeModule final {
// End of fields protected by {allocation_mutex_}.
//////////////////////////////////////////////////////////////////////////////
- int modification_scope_depth_ = 0;
UseTrapHandler use_trap_handler_ = kNoTrapHandler;
bool lazy_compile_frozen_ = false;
std::atomic<size_t> liftoff_bailout_count_{0};
@@ -861,12 +855,7 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
WasmCodeManager(const WasmCodeManager&) = delete;
WasmCodeManager& operator=(const WasmCodeManager&) = delete;
-#ifdef DEBUG
- ~WasmCodeManager() {
- // No more committed code space.
- DCHECK_EQ(0, total_committed_code_space_.load());
- }
-#endif
+ ~WasmCodeManager();
#if defined(V8_OS_WIN64)
bool CanRegisterUnwindInfoForNonABICompliantCodeRange() const;
@@ -922,6 +911,8 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
// and updated after each GC.
std::atomic<size_t> critical_committed_code_space_;
+ const int memory_protection_key_;
+
mutable base::Mutex native_modules_mutex_;
//////////////////////////////////////////////////////////////////////////////
diff --git a/deps/v8/src/wasm/wasm-constants.h b/deps/v8/src/wasm/wasm-constants.h
index a02ea78e85..f960e7c201 100644
--- a/deps/v8/src/wasm/wasm-constants.h
+++ b/deps/v8/src/wasm/wasm-constants.h
@@ -101,10 +101,11 @@ enum SectionCode : int8_t {
kDebugInfoSectionCode, // DWARF section .debug_info
kExternalDebugInfoSectionCode, // Section encoding the external symbol path
kCompilationHintsSectionCode, // Compilation hints section
+ kBranchHintsSectionCode, // Branch hints section
// Helper values
kFirstSectionInModule = kTypeSectionCode,
- kLastKnownModuleSection = kCompilationHintsSectionCode,
+ kLastKnownModuleSection = kBranchHintsSectionCode,
kFirstUnorderedSection = kDataCountSectionCode,
};
@@ -156,6 +157,10 @@ constexpr int kAnonymousFuncIndex = -1;
// often enough.
constexpr uint32_t kGenericWrapperBudget = 1000;
+#if V8_TARGET_ARCH_X64
+constexpr int32_t kOSRTargetOffset = 3 * kSystemPointerSize;
+#endif
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-debug.cc b/deps/v8/src/wasm/wasm-debug.cc
index 22872f5d88..ad4e7853aa 100644
--- a/deps/v8/src/wasm/wasm-debug.cc
+++ b/deps/v8/src/wasm/wasm-debug.cc
@@ -697,13 +697,19 @@ class DebugInfoImpl {
DCHECK_EQ(frame->function_index(), new_code->index());
DCHECK_EQ(frame->native_module(), new_code->native_module());
DCHECK(frame->wasm_code()->is_liftoff());
+ Address new_pc =
+ FindNewPC(frame, new_code, frame->byte_offset(), return_location);
#ifdef DEBUG
int old_position = frame->position();
#endif
- Address new_pc =
- FindNewPC(frame, new_code, frame->byte_offset(), return_location);
+#if V8_TARGET_ARCH_X64
+ if (frame->wasm_code()->for_debugging()) {
+ base::Memory<Address>(frame->fp() - kOSRTargetOffset) = new_pc;
+ }
+#else
PointerAuthentication::ReplacePC(frame->pc_address(), new_pc,
kSystemPointerSize);
+#endif
// The frame position should still be the same after OSR.
DCHECK_EQ(old_position, frame->position());
}
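On x64 this change routes the OSR target through a dedicated frame slot instead of rewriting the saved PC: the new target is stored at fp - kOSRTargetOffset (3 * kSystemPointerSize = 24 bytes, per the wasm-constants.h hunk above), where the generated debug code and the GC (see ReportLiveCodeFromStackForGC below) can find it. A sketch of just the slot arithmetic, with hypothetical helper names:

#include <cstdint>

constexpr int kSystemPointerSize = 8;                         // x64
constexpr int32_t kOSRTargetOffset = 3 * kSystemPointerSize;  // 24 bytes

// The OSR target lives in a fixed slot below the frame pointer.
inline uint64_t* OSRTargetSlot(uint64_t fp) {
  return reinterpret_cast<uint64_t*>(fp - kOSRTargetOffset);
}

// Writing the slot (as above):      *OSRTargetSlot(frame_fp) = new_pc;
// Reading it back (as the GC does): uint64_t target = *OSRTargetSlot(frame_fp);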
diff --git a/deps/v8/src/wasm/wasm-engine.cc b/deps/v8/src/wasm/wasm-engine.cc
index ed19f89a5e..c38236dc78 100644
--- a/deps/v8/src/wasm/wasm-engine.cc
+++ b/deps/v8/src/wasm/wasm-engine.cc
@@ -18,6 +18,7 @@
#include "src/strings/string-hasher-inl.h"
#include "src/utils/ostreams.h"
#include "src/wasm/function-compiler.h"
+#include "src/wasm/memory-protection-key.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/module-instantiate.h"
@@ -347,7 +348,8 @@ struct WasmEngine::CurrentGCInfo {
struct WasmEngine::IsolateInfo {
explicit IsolateInfo(Isolate* isolate)
: log_codes(WasmCode::ShouldBeLogged(isolate)),
- async_counters(isolate->async_counters()) {
+ async_counters(isolate->async_counters()),
+ wrapper_compilation_barrier_(std::make_shared<OperationsBarrier>()) {
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
v8::Platform* platform = V8::GetCurrentPlatform();
foreground_task_runner = platform->GetForegroundTaskRunner(v8_isolate);
@@ -398,6 +400,12 @@ struct WasmEngine::IsolateInfo {
int throw_count = 0;
int rethrow_count = 0;
int catch_count = 0;
+
+ // Operations barrier to synchronize on wrapper compilation on isolate
+ // shutdown.
+ // TODO(wasm): Remove this once we can use the generic js-to-wasm wrapper
+ // everywhere.
+ std::shared_ptr<OperationsBarrier> wrapper_compilation_barrier_;
};
struct WasmEngine::NativeModuleInfo {
@@ -934,9 +942,10 @@ void WasmEngine::DeleteCompileJobsOnIsolate(Isolate* isolate) {
// Under the mutex get all jobs to delete. Then delete them without holding
// the mutex, such that deletion can reenter the WasmEngine.
std::vector<std::unique_ptr<AsyncCompileJob>> jobs_to_delete;
+ std::vector<std::weak_ptr<NativeModule>> modules_in_isolate;
+ std::shared_ptr<OperationsBarrier> wrapper_compilation_barrier;
{
base::MutexGuard guard(&mutex_);
- DCHECK_EQ(1, isolates_.count(isolate));
for (auto it = async_compile_jobs_.begin();
it != async_compile_jobs_.end();) {
if (it->first->isolate() != isolate) {
@@ -946,7 +955,34 @@ void WasmEngine::DeleteCompileJobsOnIsolate(Isolate* isolate) {
jobs_to_delete.push_back(std::move(it->second));
it = async_compile_jobs_.erase(it);
}
+ DCHECK_EQ(1, isolates_.count(isolate));
+ auto* isolate_info = isolates_[isolate].get();
+ wrapper_compilation_barrier = isolate_info->wrapper_compilation_barrier_;
+ for (auto* native_module : isolate_info->native_modules) {
+ DCHECK_EQ(1, native_modules_.count(native_module));
+ modules_in_isolate.emplace_back(native_modules_[native_module]->weak_ptr);
+ }
}
+
+ // All modules that have not finished initial compilation yet cannot be
+ // shared with other isolates. Hence we cancel their compilation. In
+ // particular, this will cancel wrapper compilation which is bound to this
+ // isolate (this would be a UAF otherwise).
+ for (auto& weak_module : modules_in_isolate) {
+ if (auto shared_module = weak_module.lock()) {
+ shared_module->compilation_state()->CancelInitialCompilation();
+ }
+ }
+
+ // After cancelling, wait for all current wrapper compilation to actually
+ // finish.
+ wrapper_compilation_barrier->CancelAndWait();
+}
+
+OperationsBarrier::Token WasmEngine::StartWrapperCompilation(Isolate* isolate) {
+ base::MutexGuard guard(&mutex_);
+ DCHECK_EQ(1, isolates_.count(isolate));
+ return isolates_[isolate]->wrapper_compilation_barrier_->TryLock();
}
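An OperationsBarrier hands out tokens while it is open; CancelAndWait() stops issuing new tokens and blocks until every outstanding token is released, which is what lets isolate shutdown outlive in-flight wrapper compilation. A simplified sketch of such a barrier (not v8::base's implementation):

#include <condition_variable>
#include <mutex>

class SimpleOperationsBarrier {
 public:
  class Token {
   public:
    Token() = default;
    Token(Token&& other) : barrier_(other.barrier_) { other.barrier_ = nullptr; }
    ~Token() {
      if (barrier_) barrier_->Release();
    }
    explicit operator bool() const { return barrier_ != nullptr; }

   private:
    friend class SimpleOperationsBarrier;
    explicit Token(SimpleOperationsBarrier* barrier) : barrier_(barrier) {}
    SimpleOperationsBarrier* barrier_ = nullptr;
  };

  // Returns an invalid token if the barrier was already cancelled.
  Token TryLock() {
    std::lock_guard<std::mutex> guard(mutex_);
    if (cancelled_) return Token{};
    ++outstanding_;
    return Token{this};
  }

  // Stop issuing tokens and wait until all outstanding ones are released.
  void CancelAndWait() {
    std::unique_lock<std::mutex> lock(mutex_);
    cancelled_ = true;
    cv_.wait(lock, [this] { return outstanding_ == 0; });
  }

 private:
  void Release() {
    std::lock_guard<std::mutex> guard(mutex_);
    if (--outstanding_ == 0) cv_.notify_all();
  }

  std::mutex mutex_;
  std::condition_variable cv_;
  int outstanding_ = 0;
  bool cancelled_ = false;
};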
void WasmEngine::AddIsolate(Isolate* isolate) {
@@ -954,6 +990,15 @@ void WasmEngine::AddIsolate(Isolate* isolate) {
DCHECK_EQ(0, isolates_.count(isolate));
isolates_.emplace(isolate, std::make_unique<IsolateInfo>(isolate));
+ // Record memory protection key support.
+ if (FLAG_wasm_memory_protection_keys) {
+ auto* histogram =
+ isolate->counters()->wasm_memory_protection_keys_support();
+ bool has_mpk =
+ code_manager()->memory_protection_key_ != kNoMemoryProtectionKey;
+ histogram->AddSample(has_mpk ? 1 : 0);
+ }
+
// Install sampling GC callback.
// TODO(v8:7424): For now we sample module sizes in a GC callback. This will
// bias samples towards apps with high memory pressure. We should switch to
@@ -1281,6 +1326,18 @@ void WasmEngine::ReportLiveCodeFromStackForGC(Isolate* isolate) {
StackFrame* const frame = it.frame();
if (frame->type() != StackFrame::WASM) continue;
live_wasm_code.insert(WasmFrame::cast(frame)->wasm_code());
+#if V8_TARGET_ARCH_X64
+ if (WasmFrame::cast(frame)->wasm_code()->for_debugging()) {
+ Address osr_target = base::Memory<Address>(WasmFrame::cast(frame)->fp() -
+ kOSRTargetOffset);
+ if (osr_target) {
+ WasmCode* osr_code =
+ isolate->wasm_engine()->code_manager()->LookupCode(osr_target);
+ DCHECK_NOT_NULL(osr_code);
+ live_wasm_code.insert(osr_code);
+ }
+ }
+#endif
}
CheckNoArchivedThreads(isolate);
@@ -1514,24 +1571,29 @@ void WasmEngine::PotentiallyFinishCurrentGC() {
namespace {
-DEFINE_LAZY_LEAKY_OBJECT_GETTER(std::shared_ptr<WasmEngine>,
- GetSharedWasmEngine)
+WasmEngine* global_wasm_engine = nullptr;
} // namespace
// static
void WasmEngine::InitializeOncePerProcess() {
- *GetSharedWasmEngine() = std::make_shared<WasmEngine>();
+ DCHECK_NULL(global_wasm_engine);
+ global_wasm_engine = new WasmEngine();
}
// static
void WasmEngine::GlobalTearDown() {
- GetSharedWasmEngine()->reset();
+ // Note: This can be called multiple times in a row (see
+ // test-api/InitializeAndDisposeMultiple). This is fine, as
+ // {global_wasm_engine} will be nullptr then.
+ delete global_wasm_engine;
+ global_wasm_engine = nullptr;
}
// static
-std::shared_ptr<WasmEngine> WasmEngine::GetWasmEngine() {
- return *GetSharedWasmEngine();
+WasmEngine* WasmEngine::GetWasmEngine() {
+ DCHECK_NOT_NULL(global_wasm_engine);
+ return global_wasm_engine;
}
// {max_mem_pages} is declared in wasm-limits.h.
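A minimal, self-contained sketch of the process-global lifecycle that the new {global_wasm_engine} code above implements. {FakeEngine}, {g_engine} and the function names below are stand-ins for the real V8 symbols; in V8 the corresponding calls are presumably issued from process-wide initialization and teardown (see the InitializeAndDisposeMultiple note above).

// lifecycle_sketch.cc: standalone illustration only, not V8 code.
#include <cassert>

class FakeEngine {};                    // stand-in for wasm::WasmEngine
static FakeEngine* g_engine = nullptr;  // mirrors {global_wasm_engine}

void InitializeOncePerProcess() {
  assert(g_engine == nullptr);          // mirrors the DCHECK_NULL above
  g_engine = new FakeEngine();
}

void GlobalTearDown() {
  // Calling this twice in a row is fine: deleting a null pointer is a no-op.
  delete g_engine;
  g_engine = nullptr;
}

FakeEngine* GetEngine() {
  assert(g_engine != nullptr);
  return g_engine;
}

int main() {
  InitializeOncePerProcess();
  assert(GetEngine() != nullptr);
  GlobalTearDown();
  GlobalTearDown();  // second teardown is harmless, as noted in the V8 comment
}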
diff --git a/deps/v8/src/wasm/wasm-engine.h b/deps/v8/src/wasm/wasm-engine.h
index 5a26bd4457..d4736036cb 100644
--- a/deps/v8/src/wasm/wasm-engine.h
+++ b/deps/v8/src/wasm/wasm-engine.h
@@ -246,6 +246,12 @@ class V8_EXPORT_PRIVATE WasmEngine {
// for tearing down an isolate, or to clean it up to be reused.
void DeleteCompileJobsOnIsolate(Isolate* isolate);
+ // Get a token for compiling wrappers for an Isolate. The token is used to
+ // synchronize background tasks on isolate shutdown. The caller should only
+ // hold the token while compiling export wrappers. If the isolate is already
+ // shutting down, this method will return an invalid token.
+ OperationsBarrier::Token StartWrapperCompilation(Isolate*);
+
// Manage the set of Isolates that use this WasmEngine.
void AddIsolate(Isolate* isolate);
void RemoveIsolate(Isolate* isolate);
@@ -351,10 +357,8 @@ class V8_EXPORT_PRIVATE WasmEngine {
static void InitializeOncePerProcess();
static void GlobalTearDown();
- // Returns a reference to the WasmEngine shared by the entire process. Try to
- // use {Isolate::wasm_engine} instead if it is available, which encapsulates
- // engine lifetime decisions during Isolate bootstrapping.
- static std::shared_ptr<WasmEngine> GetWasmEngine();
+ // Returns a reference to the WasmEngine shared by the entire process.
+ static WasmEngine* GetWasmEngine();
private:
struct CurrentGCInfo;
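The per-isolate {wrapper_compilation_barrier_} and the new {StartWrapperCompilation} entry point form a token/barrier handshake: a background task holds a token while it compiles export wrappers, and isolate teardown calls {CancelAndWait} so no wrapper compilation outlives the isolate. Below is a self-contained sketch of that protocol in standard C++; {Barrier} and {Token} here are illustrative re-implementations, not V8's OperationsBarrier.

#include <condition_variable>
#include <mutex>

class Barrier {
 public:
  class Token {
   public:
    Token() = default;
    explicit Token(Barrier* b) : barrier_(b) {}
    Token(Token&& other) : barrier_(other.barrier_) { other.barrier_ = nullptr; }
    ~Token() {
      if (barrier_) barrier_->Release();
    }
    explicit operator bool() const { return barrier_ != nullptr; }

   private:
    Barrier* barrier_ = nullptr;
  };

  // Returns an invalid token once the barrier has been cancelled.
  Token TryLock() {
    std::lock_guard<std::mutex> guard(mutex_);
    if (cancelled_) return Token();
    ++operations_;
    return Token(this);
  }

  // Blocks until every outstanding token has been released.
  void CancelAndWait() {
    std::unique_lock<std::mutex> lock(mutex_);
    cancelled_ = true;
    done_.wait(lock, [this] { return operations_ == 0; });
  }

 private:
  void Release() {
    std::lock_guard<std::mutex> guard(mutex_);
    if (--operations_ == 0) done_.notify_all();
  }

  std::mutex mutex_;
  std::condition_variable done_;
  int operations_ = 0;
  bool cancelled_ = false;
};

// Usage sketch: a wrapper-compiling task calls TryLock() and bails out if the
// returned token is invalid; DeleteCompileJobsOnIsolate() above plays the role
// of CancelAndWait().
//   Barrier barrier;
//   if (auto token = barrier.TryLock()) { /* compile export wrappers */ }
//   barrier.CancelAndWait();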
diff --git a/deps/v8/src/wasm/wasm-feature-flags.h b/deps/v8/src/wasm/wasm-feature-flags.h
index 9c790d7c67..9adf3d662f 100644
--- a/deps/v8/src/wasm/wasm-feature-flags.h
+++ b/deps/v8/src/wasm/wasm-feature-flags.h
@@ -37,7 +37,12 @@
/* Relaxed SIMD proposal. */ \
/* https://github.com/WebAssembly/relaxed-simd */ \
/* V8 side owner: zhin */ \
- V(relaxed_simd, "relaxed simd", false)
+ V(relaxed_simd, "relaxed simd", false) \
+ \
+ /* Branch Hinting proposal. */ \
+ /* https://github.com/WebAssembly/branch-hinting */ \
+ /* V8 side owner: jkummerow */ \
+ V(branch_hinting, "branch hinting", false)
// #############################################################################
// Staged features (disabled by default, but enabled via --wasm-staging (also
@@ -76,13 +81,6 @@
// Shipped features (enabled by default). Remove the feature flag once they hit
// stable and are expected to stay enabled.
#define FOREACH_WASM_SHIPPED_FEATURE_FLAG(V) /* (force 80 columns) */ \
- /* Multi-value proposal. */ \
- /* https://github.com/WebAssembly/multi-value */ \
- /* V8 side owner: thibaudm */ \
- /* Shipped in v8.6. */ \
- /* ITS: https://groups.google.com/g/v8-users/c/pv2E4yFWeF0 */ \
- V(mv, "multi-value support", true) \
- \
/* Fixed-width SIMD operations. */ \
/* https://github.com/webassembly/simd */ \
/* V8 side owner: gdeepti, zhin */ \
diff --git a/deps/v8/src/wasm/wasm-init-expr.cc b/deps/v8/src/wasm/wasm-init-expr.cc
new file mode 100644
index 0000000000..6348c58193
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-init-expr.cc
@@ -0,0 +1,57 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/wasm-init-expr.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+std::ostream& operator<<(std::ostream& os, const WasmInitExpr& expr) {
+ os << "(";
+ switch (expr.kind()) {
+ case WasmInitExpr::kNone:
+ UNREACHABLE();
+ case WasmInitExpr::kGlobalGet:
+ os << "global.get " << expr.immediate().index;
+ break;
+ case WasmInitExpr::kI32Const:
+ os << "i32.const " << expr.immediate().i32_const;
+ break;
+ case WasmInitExpr::kI64Const:
+ os << "i64.const " << expr.immediate().i64_const;
+ break;
+ case WasmInitExpr::kF32Const:
+ os << "f32.const " << expr.immediate().f32_const;
+ break;
+ case WasmInitExpr::kF64Const:
+ os << "f64.const " << expr.immediate().f64_const;
+ break;
+ case WasmInitExpr::kS128Const:
+ os << "s128.const 0x" << std::hex;
+ for (uint8_t b : expr.immediate().s128_const) {
+ os << b;
+ }
+ os << std::dec;
+ break;
+ case WasmInitExpr::kRefNullConst:
+ os << "ref.null " << expr.immediate().heap_type;
+ break;
+ case WasmInitExpr::kRefFuncConst:
+ os << "ref.func " << expr.immediate().index;
+ break;
+ case WasmInitExpr::kRttCanon:
+ os << "rtt.canon " << expr.immediate().heap_type;
+ break;
+ case WasmInitExpr::kRttSub:
+ os << "rtt.sub " << *expr.operand();
+ break;
+ }
+ os << ")";
+ return os;
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-init-expr.h b/deps/v8/src/wasm/wasm-init-expr.h
new file mode 100644
index 0000000000..39fc1a7ee6
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-init-expr.h
@@ -0,0 +1,150 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
+#ifndef V8_WASM_WASM_INIT_EXPR_H_
+#define V8_WASM_WASM_INIT_EXPR_H_
+
+#include <memory>
+
+#include "src/wasm/value-type.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// Representation of an initializer expression.
+class WasmInitExpr {
+ public:
+ enum Operator {
+ kNone,
+ kGlobalGet,
+ kI32Const,
+ kI64Const,
+ kF32Const,
+ kF64Const,
+ kS128Const,
+ kRefNullConst,
+ kRefFuncConst,
+ kRttCanon,
+ kRttSub
+ };
+
+ union Immediate {
+ int32_t i32_const;
+ int64_t i64_const;
+ float f32_const;
+ double f64_const;
+ std::array<uint8_t, kSimd128Size> s128_const;
+ uint32_t index;
+ HeapType::Representation heap_type;
+ };
+
+ WasmInitExpr() : kind_(kNone) { immediate_.i32_const = 0; }
+ explicit WasmInitExpr(int32_t v) : kind_(kI32Const) {
+ immediate_.i32_const = v;
+ }
+ explicit WasmInitExpr(int64_t v) : kind_(kI64Const) {
+ immediate_.i64_const = v;
+ }
+ explicit WasmInitExpr(float v) : kind_(kF32Const) {
+ immediate_.f32_const = v;
+ }
+ explicit WasmInitExpr(double v) : kind_(kF64Const) {
+ immediate_.f64_const = v;
+ }
+ explicit WasmInitExpr(uint8_t v[kSimd128Size]) : kind_(kS128Const) {
+ base::Memcpy(immediate_.s128_const.data(), v, kSimd128Size);
+ }
+
+ MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(WasmInitExpr);
+
+ static WasmInitExpr GlobalGet(uint32_t index) {
+ WasmInitExpr expr;
+ expr.kind_ = kGlobalGet;
+ expr.immediate_.index = index;
+ return expr;
+ }
+
+ static WasmInitExpr RefFuncConst(uint32_t index) {
+ WasmInitExpr expr;
+ expr.kind_ = kRefFuncConst;
+ expr.immediate_.index = index;
+ return expr;
+ }
+
+ static WasmInitExpr RefNullConst(HeapType::Representation heap_type) {
+ WasmInitExpr expr;
+ expr.kind_ = kRefNullConst;
+ expr.immediate_.heap_type = heap_type;
+ return expr;
+ }
+
+ static WasmInitExpr RttCanon(uint32_t index) {
+ WasmInitExpr expr;
+ expr.kind_ = kRttCanon;
+ expr.immediate_.index = index;
+ return expr;
+ }
+
+ static WasmInitExpr RttSub(uint32_t index, WasmInitExpr supertype) {
+ WasmInitExpr expr;
+ expr.kind_ = kRttSub;
+ expr.immediate_.index = index;
+ expr.operand_ = std::make_unique<WasmInitExpr>(std::move(supertype));
+ return expr;
+ }
+
+ Immediate immediate() const { return immediate_; }
+ Operator kind() const { return kind_; }
+ WasmInitExpr* operand() const { return operand_.get(); }
+
+ bool operator==(const WasmInitExpr& other) const {
+ if (kind() != other.kind()) return false;
+ switch (kind()) {
+ case kNone:
+ return true;
+ case kGlobalGet:
+ case kRefFuncConst:
+ case kRttCanon:
+ return immediate().index == other.immediate().index;
+ case kI32Const:
+ return immediate().i32_const == other.immediate().i32_const;
+ case kI64Const:
+ return immediate().i64_const == other.immediate().i64_const;
+ case kF32Const:
+ return immediate().f32_const == other.immediate().f32_const;
+ case kF64Const:
+ return immediate().f64_const == other.immediate().f64_const;
+ case kS128Const:
+ return immediate().s128_const == other.immediate().s128_const;
+ case kRefNullConst:
+ return immediate().heap_type == other.immediate().heap_type;
+ case kRttSub:
+ return immediate().index == other.immediate().index &&
+ *operand() == *other.operand();
+ }
+ }
+
+ V8_INLINE bool operator!=(const WasmInitExpr& other) {
+ return !(*this == other);
+ }
+
+ private:
+ Immediate immediate_;
+ Operator kind_;
+ std::unique_ptr<WasmInitExpr> operand_ = nullptr;
+};
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+ const WasmInitExpr& expr);
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_WASM_INIT_EXPR_H_
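A brief usage sketch for the class above (illustrative only; it assumes nothing beyond the constructors, factory methods, and operators declared in this header, plus V8's USE() helper for unused variables). The code would live inside namespace v8::internal::wasm.

// Sketch: building and comparing a few initializer expressions.
void WasmInitExprExample() {
  WasmInitExpr i32(int32_t{42});                     // i32.const 42
  WasmInitExpr global = WasmInitExpr::GlobalGet(3);  // global.get 3
  // The class is move-only; the supertype operand is moved into RttSub.
  WasmInitExpr rtt = WasmInitExpr::RttSub(7, WasmInitExpr::RttCanon(5));
  // operator== compares kind and immediate, and recurses through the operand
  // chain for rtt.sub.
  bool equal = (rtt == WasmInitExpr::RttSub(7, WasmInitExpr::RttCanon(5)));
  // operator<< (wasm-init-expr.cc above) prints e.g. "(rtt.sub (rtt.canon 5))".
  USE(i32, global, equal);
}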
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index 7f1d8e261f..70492135de 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -1475,9 +1475,7 @@ void WebAssemblyFunction(const v8::FunctionCallbackInfo<v8::Value>& args) {
thrower.TypeError("Argument 0 contains results without 'length'");
return;
}
- if (results_len > (enabled_features.has_mv()
- ? i::wasm::kV8MaxWasmFunctionMultiReturns
- : i::wasm::kV8MaxWasmFunctionReturns)) {
+ if (results_len > i::wasm::kV8MaxWasmFunctionReturns) {
thrower.TypeError("Argument 0 contains too many results");
return;
}
@@ -2034,7 +2032,8 @@ Handle<JSFunction> InstallConstructorFunc(Isolate* isolate,
Handle<JSObject> object,
const char* str,
FunctionCallback func) {
- return InstallFunc(isolate, object, str, func, 1, true, DONT_ENUM);
+ return InstallFunc(isolate, object, str, func, 1, true, DONT_ENUM,
+ SideEffectType::kHasNoSideEffect);
}
Handle<String> GetterName(Isolate* isolate, Handle<String> name) {
@@ -2064,7 +2063,8 @@ void InstallGetterSetter(Isolate* isolate, Handle<JSObject> object,
FunctionCallback setter) {
Handle<String> name = v8_str(isolate, str);
Handle<JSFunction> getter_func =
- CreateFunc(isolate, GetterName(isolate, name), getter, false);
+ CreateFunc(isolate, GetterName(isolate, name), getter, false,
+ SideEffectType::kHasNoSideEffect);
Handle<JSFunction> setter_func =
CreateFunc(isolate, SetterName(isolate, name), setter, false);
setter_func->shared().set_length(1);
@@ -2148,11 +2148,12 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
JSFunction::SetInitialMap(isolate, module_constructor, module_map,
module_proto);
InstallFunc(isolate, module_constructor, "imports", WebAssemblyModuleImports,
- 1);
+ 1, false, NONE, SideEffectType::kHasNoSideEffect);
InstallFunc(isolate, module_constructor, "exports", WebAssemblyModuleExports,
- 1);
+ 1, false, NONE, SideEffectType::kHasNoSideEffect);
InstallFunc(isolate, module_constructor, "customSections",
- WebAssemblyModuleCustomSections, 2);
+ WebAssemblyModuleCustomSections, 2, false, NONE,
+ SideEffectType::kHasNoSideEffect);
JSObject::AddProperty(isolate, module_proto, factory->to_string_tag_symbol(),
v8_str(isolate, "WebAssembly.Module"), ro_attributes);
@@ -2192,7 +2193,8 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
JSFunction::SetInitialMap(isolate, table_constructor, table_map, table_proto);
InstallGetter(isolate, table_proto, "length", WebAssemblyTableGetLength);
InstallFunc(isolate, table_proto, "grow", WebAssemblyTableGrow, 1);
- InstallFunc(isolate, table_proto, "get", WebAssemblyTableGet, 1);
+ InstallFunc(isolate, table_proto, "get", WebAssemblyTableGet, 1, false, NONE,
+ SideEffectType::kHasNoSideEffect);
InstallFunc(isolate, table_proto, "set", WebAssemblyTableSet, 2);
if (enabled_features.has_type_reflection()) {
InstallFunc(isolate, table_constructor, "type", WebAssemblyTableType, 1);
@@ -2232,7 +2234,8 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
i::WASM_GLOBAL_OBJECT_TYPE, WasmGlobalObject::kHeaderSize);
JSFunction::SetInitialMap(isolate, global_constructor, global_map,
global_proto);
- InstallFunc(isolate, global_proto, "valueOf", WebAssemblyGlobalValueOf, 0);
+ InstallFunc(isolate, global_proto, "valueOf", WebAssemblyGlobalValueOf, 0,
+ false, NONE, SideEffectType::kHasNoSideEffect);
InstallGetterSetter(isolate, global_proto, "value", WebAssemblyGlobalGetValue,
WebAssemblyGlobalSetValue);
if (enabled_features.has_type_reflection()) {
@@ -2243,15 +2246,8 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
// Setup Exception
if (enabled_features.has_eh()) {
- Handle<String> exception_name = v8_str(isolate, "Exception");
- Handle<JSFunction> exception_constructor =
- CreateFunc(isolate, exception_name, WebAssemblyException, true,
- SideEffectType::kHasSideEffect);
- exception_constructor->shared().set_length(1);
- JSObject::AddProperty(isolate, webassembly, exception_name,
- exception_constructor, DONT_ENUM);
- // Install the constructor on the context unconditionally so that it is also
- // available when the feature is enabled via the origin trial.
+ Handle<JSFunction> exception_constructor = InstallConstructorFunc(
+ isolate, webassembly, "Exception", WebAssemblyException);
context->set_wasm_exception_constructor(*exception_constructor);
SetDummyInstanceTemplate(isolate, exception_constructor);
JSFunction::EnsureHasInitialMap(exception_constructor);
@@ -2332,7 +2328,6 @@ void WasmJs::InstallConditionalFeatures(Isolate* isolate,
Handle<JSObject> webassembly = Handle<JSObject>::cast(webassembly_obj);
// Setup Exception
Handle<String> exception_name = v8_str(isolate, "Exception");
-
if (JSObject::HasOwnProperty(webassembly, exception_name).FromMaybe(true)) {
// The {Exception} constructor already exists, there is nothing more to
// do.
diff --git a/deps/v8/src/wasm/wasm-limits.h b/deps/v8/src/wasm/wasm-limits.h
index 78a8f0afd4..9e565db0e8 100644
--- a/deps/v8/src/wasm/wasm-limits.h
+++ b/deps/v8/src/wasm/wasm-limits.h
@@ -43,8 +43,7 @@ constexpr size_t kV8MaxWasmModuleSize = 1024 * 1024 * 1024; // = 1 GiB
constexpr size_t kV8MaxWasmFunctionSize = 7654321;
constexpr size_t kV8MaxWasmFunctionLocals = 50000;
constexpr size_t kV8MaxWasmFunctionParams = 1000;
-constexpr size_t kV8MaxWasmFunctionMultiReturns = 1000;
-constexpr size_t kV8MaxWasmFunctionReturns = 1;
+constexpr size_t kV8MaxWasmFunctionReturns = 1000;
constexpr size_t kV8MaxWasmFunctionBrTableSize = 65520;
// Don't use this limit directly, but use the value of FLAG_wasm_max_table_size.
constexpr size_t kV8MaxWasmTableSize = 10000000;
@@ -57,7 +56,7 @@ constexpr size_t kV8MaxWasmStructFields = 999;
constexpr uint32_t kV8MaxRttSubtypingDepth = 31;
// Maximum supported by implementation: ((1<<27)-3).
// Reason: total object size in bytes must fit into a Smi, for filler objects.
-constexpr size_t kV8MaxWasmArrayLength = 1u << 24;
+constexpr size_t kV8MaxWasmArrayLength = 1u << 26;
static_assert(kV8MaxWasmTableSize <= 4294967295, // 2^32 - 1
"v8 should not exceed WebAssembly's non-web embedding limits");
diff --git a/deps/v8/src/wasm/wasm-module-builder.cc b/deps/v8/src/wasm/wasm-module-builder.cc
index 67f826f2fd..41fa4f6b6b 100644
--- a/deps/v8/src/wasm/wasm-module-builder.cc
+++ b/deps/v8/src/wasm/wasm-module-builder.cc
@@ -13,7 +13,6 @@
#include "src/wasm/leb-helper.h"
#include "src/wasm/wasm-constants.h"
#include "src/wasm/wasm-module.h"
-#include "src/wasm/wasm-opcodes.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -301,6 +300,11 @@ uint32_t WasmModuleBuilder::AddArrayType(ArrayType* type) {
return index;
}
+// static
+const uint32_t WasmModuleBuilder::kNullIndex =
+ std::numeric_limits<uint32_t>::max();
+
+// TODO(9495): Add support for typed function tables and more init. expressions.
uint32_t WasmModuleBuilder::AllocateIndirectFunctions(uint32_t count) {
DCHECK(allocating_indirect_functions_allowed_);
uint32_t index = static_cast<uint32_t>(indirect_functions_.size());
@@ -310,7 +314,7 @@ uint32_t WasmModuleBuilder::AllocateIndirectFunctions(uint32_t count) {
}
uint32_t new_size = static_cast<uint32_t>(indirect_functions_.size()) + count;
DCHECK(max_table_size_ == 0 || new_size <= max_table_size_);
- indirect_functions_.resize(new_size, WasmElemSegment::kNullIndex);
+ indirect_functions_.resize(new_size, kNullIndex);
uint32_t max = max_table_size_ > 0 ? max_table_size_ : new_size;
if (tables_.empty()) {
// This cannot use {AddTable} because that would flip the
@@ -710,13 +714,13 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
buffer->write_u8(0); // table index
uint32_t first_element = 0;
while (first_element < indirect_functions_.size() &&
- indirect_functions_[first_element] == WasmElemSegment::kNullIndex) {
+ indirect_functions_[first_element] == kNullIndex) {
first_element++;
}
uint32_t last_element =
static_cast<uint32_t>(indirect_functions_.size() - 1);
while (last_element >= first_element &&
- indirect_functions_[last_element] == WasmElemSegment::kNullIndex) {
+ indirect_functions_[last_element] == kNullIndex) {
last_element--;
}
buffer->write_u8(kExprI32Const); // offset
diff --git a/deps/v8/src/wasm/wasm-module-builder.h b/deps/v8/src/wasm/wasm-module-builder.h
index f7b5ff1b76..c1d15a834e 100644
--- a/deps/v8/src/wasm/wasm-module-builder.h
+++ b/deps/v8/src/wasm/wasm-module-builder.h
@@ -297,6 +297,8 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
return types_[exceptions_[index]].sig;
}
+ static const uint32_t kNullIndex;
+
private:
struct Type {
enum Kind { kFunctionSig, kStructType, kArrayType };
diff --git a/deps/v8/src/wasm/wasm-module.cc b/deps/v8/src/wasm/wasm-module.cc
index c336dc5f7d..acebe8d0e5 100644
--- a/deps/v8/src/wasm/wasm-module.cc
+++ b/deps/v8/src/wasm/wasm-module.cc
@@ -21,25 +21,23 @@
#include "src/snapshot/snapshot.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-code-manager.h"
+#include "src/wasm/wasm-init-expr.h"
#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-result.h"
+#include "src/wasm/wasm-subtyping.h"
namespace v8 {
namespace internal {
namespace wasm {
-// static
-const uint32_t WasmElemSegment::kNullIndex;
-
WireBytesRef LazilyGeneratedNames::LookupFunctionName(
- const ModuleWireBytes& wire_bytes, uint32_t function_index,
- Vector<const WasmExport> export_table) const {
+ const ModuleWireBytes& wire_bytes, uint32_t function_index) const {
base::MutexGuard lock(&mutex_);
if (!function_names_) {
function_names_.reset(new std::unordered_map<uint32_t, WireBytesRef>());
DecodeFunctionNames(wire_bytes.start(), wire_bytes.end(),
- function_names_.get(), export_table);
+ function_names_.get());
}
auto it = function_names_->find(function_index);
if (it == function_names_->end()) return WireBytesRef();
@@ -180,7 +178,7 @@ WasmName ModuleWireBytes::GetNameOrNull(WireBytesRef ref) const {
WasmName ModuleWireBytes::GetNameOrNull(const WasmFunction* function,
const WasmModule* module) const {
return GetNameOrNull(module->lazily_generated_names.LookupFunctionName(
- *this, function->func_index, VectorOf(module->export_table)));
+ *this, function->func_index));
}
std::ostream& operator<<(std::ostream& os, const WasmFunctionName& name) {
@@ -199,6 +197,8 @@ std::ostream& operator<<(std::ostream& os, const WasmFunctionName& name) {
WasmModule::WasmModule(std::unique_ptr<Zone> signature_zone)
: signature_zone(std::move(signature_zone)) {}
+WasmModule::~WasmModule() { DeleteCachedTypeJudgementsForModule(this); }
+
bool IsWasmCodegenAllowed(Isolate* isolate, Handle<Context> context) {
// TODO(wasm): Once wasm has its own CSP policy, we should introduce a
// separate callback that includes information about the module about to be
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index bbfcf9623b..d185e67341 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -16,10 +16,11 @@
#include "src/common/globals.h"
#include "src/handles/handles.h"
#include "src/utils/vector.h"
+#include "src/wasm/branch-hint-map.h"
#include "src/wasm/signature-map.h"
#include "src/wasm/struct-types.h"
#include "src/wasm/wasm-constants.h"
-#include "src/wasm/wasm-opcodes.h"
+#include "src/wasm/wasm-init-expr.h"
namespace v8 {
@@ -107,30 +108,34 @@ struct WasmDataSegment {
// Static representation of wasm element segment (table initializer).
struct WasmElemSegment {
- MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(WasmElemSegment);
-
// Construct an active segment.
- WasmElemSegment(uint32_t table_index, WasmInitExpr offset)
- : type(kWasmFuncRef),
+ WasmElemSegment(ValueType type, uint32_t table_index, WasmInitExpr offset)
+ : type(type),
table_index(table_index),
offset(std::move(offset)),
status(kStatusActive) {}
// Construct a passive or declarative segment, which has no table index or
// offset.
- explicit WasmElemSegment(bool declarative)
- : type(kWasmFuncRef),
+ WasmElemSegment(ValueType type, bool declarative)
+ : type(type),
table_index(0),
status(declarative ? kStatusDeclarative : kStatusPassive) {}
- // Used in the {entries} vector to represent a `ref.null` entry in a passive
- // segment.
- V8_EXPORT_PRIVATE static const uint32_t kNullIndex = ~0u;
+ // Default constructor: constructs a placeholder segment of bottom type with
+ // no entries.
+ WasmElemSegment()
+ : type(kWasmBottom), table_index(0), status(kStatusActive) {}
+
+ WasmElemSegment(const WasmElemSegment&) = delete;
+ WasmElemSegment(WasmElemSegment&&) V8_NOEXCEPT = default;
+ WasmElemSegment& operator=(const WasmElemSegment&) = delete;
+ WasmElemSegment& operator=(WasmElemSegment&&) V8_NOEXCEPT = default;
ValueType type;
uint32_t table_index;
WasmInitExpr offset;
- std::vector<uint32_t> entries;
+ std::vector<WasmInitExpr> entries;
enum Status {
kStatusActive, // copied automatically during instantiation.
kStatusPassive, // copied explicitly after instantiation.
@@ -188,8 +193,7 @@ struct ModuleWireBytes;
class V8_EXPORT_PRIVATE LazilyGeneratedNames {
public:
WireBytesRef LookupFunctionName(const ModuleWireBytes& wire_bytes,
- uint32_t function_index,
- Vector<const WasmExport> export_table) const;
+ uint32_t function_index) const;
void AddForTesting(int function_index, WireBytesRef name);
@@ -335,6 +339,7 @@ struct V8_EXPORT_PRIVATE WasmModule {
std::vector<WasmException> exceptions;
std::vector<WasmElemSegment> elem_segments;
std::vector<WasmCompilationHint> compilation_hints;
+ BranchHintInfo branch_hints;
SignatureMap signature_map; // canonicalizing map for signature indexes.
ModuleOrigin origin = kWasmOrigin; // origin of the module
@@ -347,6 +352,7 @@ struct V8_EXPORT_PRIVATE WasmModule {
explicit WasmModule(std::unique_ptr<Zone> signature_zone = nullptr);
WasmModule(const WasmModule&) = delete;
+ ~WasmModule();
WasmModule& operator=(const WasmModule&) = delete;
};
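With the new representation, an element segment carries its own element type and stores full init expressions instead of bare function indices (the old kNullIndex sentinel). A hedged sketch of how such a segment could be populated; the helper name and the concrete indices are hypothetical, and it would live inside namespace v8::internal::wasm.

void BuildActiveSegmentExample(WasmModule* module) {
  // Active segment for table 0, starting at offset 0, element type funcref.
  WasmElemSegment segment(kWasmFuncRef, /*table_index=*/0,
                          WasmInitExpr(int32_t{0}));
  // Entries are init expressions now, so a "null" slot is a ref.null entry
  // rather than the removed kNullIndex sentinel.
  segment.entries.push_back(WasmInitExpr::RefFuncConst(/*function index*/ 5));
  segment.entries.push_back(WasmInitExpr::RefNullConst(HeapType::kFunc));
  module->elem_segments.push_back(std::move(segment));
}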
diff --git a/deps/v8/src/wasm/wasm-objects-inl.h b/deps/v8/src/wasm/wasm-objects-inl.h
index 3da7e1650a..e102fbd97f 100644
--- a/deps/v8/src/wasm/wasm-objects-inl.h
+++ b/deps/v8/src/wasm/wasm-objects-inl.h
@@ -36,13 +36,14 @@ namespace internal {
OBJECT_CONSTRUCTORS_IMPL(WasmExceptionObject, JSObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(WasmExceptionTag)
-OBJECT_CONSTRUCTORS_IMPL(WasmExportedFunctionData, Struct)
+OBJECT_CONSTRUCTORS_IMPL(WasmExportedFunctionData, WasmFunctionData)
OBJECT_CONSTRUCTORS_IMPL(WasmGlobalObject, JSObject)
OBJECT_CONSTRUCTORS_IMPL(WasmInstanceObject, JSObject)
OBJECT_CONSTRUCTORS_IMPL(WasmMemoryObject, JSObject)
OBJECT_CONSTRUCTORS_IMPL(WasmModuleObject, JSObject)
OBJECT_CONSTRUCTORS_IMPL(WasmTableObject, JSObject)
OBJECT_CONSTRUCTORS_IMPL(AsmWasmData, Struct)
+TQ_OBJECT_CONSTRUCTORS_IMPL(WasmFunctionData)
TQ_OBJECT_CONSTRUCTORS_IMPL(WasmTypeInfo)
TQ_OBJECT_CONSTRUCTORS_IMPL(WasmStruct)
TQ_OBJECT_CONSTRUCTORS_IMPL(WasmArray)
@@ -55,6 +56,7 @@ CAST_ACCESSOR(WasmMemoryObject)
CAST_ACCESSOR(WasmModuleObject)
CAST_ACCESSOR(WasmTableObject)
CAST_ACCESSOR(AsmWasmData)
+CAST_ACCESSOR(WasmFunctionData)
CAST_ACCESSOR(WasmTypeInfo)
CAST_ACCESSOR(WasmStruct)
CAST_ACCESSOR(WasmArray)
@@ -329,18 +331,17 @@ WasmExportedFunction::WasmExportedFunction(Address ptr) : JSFunction(ptr) {
}
CAST_ACCESSOR(WasmExportedFunction)
+// WasmFunctionData
+ACCESSORS(WasmFunctionData, ref, Object, kRefOffset)
+
// WasmExportedFunctionData
ACCESSORS(WasmExportedFunctionData, wrapper_code, Code, kWrapperCodeOffset)
ACCESSORS(WasmExportedFunctionData, instance, WasmInstanceObject,
kInstanceOffset)
-SMI_ACCESSORS(WasmExportedFunctionData, jump_table_offset,
- kJumpTableOffsetOffset)
SMI_ACCESSORS(WasmExportedFunctionData, function_index, kFunctionIndexOffset)
ACCESSORS(WasmExportedFunctionData, signature, Foreign, kSignatureOffset)
SMI_ACCESSORS(WasmExportedFunctionData, wrapper_budget, kWrapperBudgetOffset)
ACCESSORS(WasmExportedFunctionData, c_wrapper_code, Object, kCWrapperCodeOffset)
-ACCESSORS(WasmExportedFunctionData, wasm_call_target, Object,
- kWasmCallTargetOffset)
SMI_ACCESSORS(WasmExportedFunctionData, packed_args_size, kPackedArgsSizeOffset)
wasm::FunctionSig* WasmExportedFunctionData::sig() const {
@@ -354,7 +355,7 @@ WasmJSFunction::WasmJSFunction(Address ptr) : JSFunction(ptr) {
CAST_ACCESSOR(WasmJSFunction)
// WasmJSFunctionData
-OBJECT_CONSTRUCTORS_IMPL(WasmJSFunctionData, Struct)
+OBJECT_CONSTRUCTORS_IMPL(WasmJSFunctionData, WasmFunctionData)
CAST_ACCESSOR(WasmJSFunctionData)
SMI_ACCESSORS(WasmJSFunctionData, serialized_return_count,
kSerializedReturnCountOffset)
@@ -362,7 +363,6 @@ SMI_ACCESSORS(WasmJSFunctionData, serialized_parameter_count,
kSerializedParameterCountOffset)
ACCESSORS(WasmJSFunctionData, serialized_signature, PodArray<wasm::ValueType>,
kSerializedSignatureOffset)
-ACCESSORS(WasmJSFunctionData, callable, JSReceiver, kCallableOffset)
ACCESSORS(WasmJSFunctionData, wrapper_code, Code, kWrapperCodeOffset)
ACCESSORS(WasmJSFunctionData, wasm_to_js_wrapper_code, Code,
kWasmToJsWrapperCodeOffset)
@@ -416,13 +416,27 @@ wasm::StructType* WasmStruct::type(Map map) {
wasm::StructType* WasmStruct::GcSafeType(Map map) {
DCHECK_EQ(WASM_STRUCT_TYPE, map.instance_type());
HeapObject raw = HeapObject::cast(map.constructor_or_back_pointer());
- MapWord map_word = raw.map_word();
+ MapWord map_word = raw.map_word(kRelaxedLoad);
HeapObject forwarded =
map_word.IsForwardingAddress() ? map_word.ToForwardingAddress() : raw;
Foreign foreign = Foreign::cast(forwarded);
return reinterpret_cast<wasm::StructType*>(foreign.foreign_address());
}
+int WasmStruct::Size(const wasm::StructType* type) {
+ // Object size must fit into a Smi (because of filler objects), and its
+ // computation must not overflow.
+ STATIC_ASSERT(Smi::kMaxValue <= kMaxInt);
+ DCHECK_LE(type->total_fields_size(), Smi::kMaxValue - kHeaderSize);
+ return std::max(kHeaderSize + static_cast<int>(type->total_fields_size()),
+ Heap::kMinObjectSizeInTaggedWords * kTaggedSize);
+}
+
+int WasmStruct::GcSafeSize(Map map) {
+ wasm::StructType* type = GcSafeType(map);
+ return Size(type);
+}
+
wasm::StructType* WasmStruct::type() const { return type(map()); }
ObjectSlot WasmStruct::RawField(int raw_offset) {
@@ -439,7 +453,7 @@ wasm::ArrayType* WasmArray::type(Map map) {
wasm::ArrayType* WasmArray::GcSafeType(Map map) {
DCHECK_EQ(WASM_ARRAY_TYPE, map.instance_type());
HeapObject raw = HeapObject::cast(map.constructor_or_back_pointer());
- MapWord map_word = raw.map_word();
+ MapWord map_word = raw.map_word(kRelaxedLoad);
HeapObject forwarded =
map_word.IsForwardingAddress() ? map_word.ToForwardingAddress() : raw;
Foreign foreign = Foreign::cast(forwarded);
@@ -460,12 +474,6 @@ int WasmArray::GcSafeSizeFor(Map map, int length) {
void WasmTypeInfo::clear_foreign_address(Isolate* isolate) {
#ifdef V8_HEAP_SANDBOX
-
- // TODO(syg): V8_HEAP_SANDBOX doesn't work with pointer cage
-#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
-#error "V8_HEAP_SANDBOX requires per-Isolate pointer compression cage"
-#endif
-
// Due to the type-specific pointer tags for external pointers, we need to
// allocate an entry in the table here even though it will just store nullptr.
AllocateExternalPointerEntries(isolate);
diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc
index d4e7cb65a1..718124debf 100644
--- a/deps/v8/src/wasm/wasm-objects.cc
+++ b/deps/v8/src/wasm/wasm-objects.cc
@@ -226,33 +226,21 @@ MaybeHandle<String> WasmModuleObject::GetFunctionNameOrNull(
wasm::WireBytesRef name =
module_object->module()->lazily_generated_names.LookupFunctionName(
wasm::ModuleWireBytes(module_object->native_module()->wire_bytes()),
- func_index, VectorOf(module_object->module()->export_table));
+ func_index);
if (!name.is_set()) return {};
return ExtractUtf8StringFromModuleBytes(isolate, module_object, name,
kNoInternalize);
}
-Handle<String> WasmModuleObject::GetFunctionName(
- Isolate* isolate, Handle<WasmModuleObject> module_object,
- uint32_t func_index) {
- MaybeHandle<String> name =
- GetFunctionNameOrNull(isolate, module_object, func_index);
- if (!name.is_null()) return name.ToHandleChecked();
- EmbeddedVector<char, 32> buffer;
- DCHECK_GE(func_index, module_object->module()->num_imported_functions);
- int length = SNPrintF(buffer, "func%u", func_index);
- return isolate->factory()
- ->NewStringFromOneByte(Vector<uint8_t>::cast(buffer.SubVector(0, length)))
- .ToHandleChecked();
-}
-
-Vector<const uint8_t> WasmModuleObject::GetRawFunctionName(
- uint32_t func_index) {
+Vector<const uint8_t> WasmModuleObject::GetRawFunctionName(int func_index) {
+ if (func_index == wasm::kAnonymousFuncIndex) {
+ return Vector<const uint8_t>({nullptr, 0});
+ }
DCHECK_GT(module()->functions.size(), func_index);
wasm::ModuleWireBytes wire_bytes(native_module()->wire_bytes());
wasm::WireBytesRef name_ref =
- module()->lazily_generated_names.LookupFunctionName(
- wire_bytes, func_index, VectorOf(module()->export_table));
+ module()->lazily_generated_names.LookupFunctionName(wire_bytes,
+ func_index);
wasm::WasmName name = wire_bytes.GetNameOrNull(name_ref);
return Vector<const uint8_t>::cast(name);
}
@@ -454,21 +442,18 @@ void WasmTableObject::Set(Isolate* isolate, Handle<WasmTableObject> table,
case wasm::HeapType::kEq:
case wasm::HeapType::kData:
case wasm::HeapType::kI31:
- // TODO(7748): Implement once we have a story for struct/arrays/i31ref in
- // JS.
- UNIMPLEMENTED();
+ // TODO(7748): Implement once we have struct/arrays/i31ref tables.
+ UNREACHABLE();
case wasm::HeapType::kBottom:
UNREACHABLE();
default:
DCHECK(!table->instance().IsUndefined());
- if (WasmInstanceObject::cast(table->instance())
- .module()
- ->has_signature(entry_index)) {
- SetFunctionTableEntry(isolate, table, entries, entry_index, entry);
- return;
- }
- // TODO(7748): Implement once we have a story for struct/arrays in JS.
- UNIMPLEMENTED();
+ // TODO(7748): Relax this once we have struct/array/i31ref tables.
+ DCHECK(WasmInstanceObject::cast(table->instance())
+ .module()
+ ->has_signature(table->type().ref_index()));
+ SetFunctionTableEntry(isolate, table, entries, entry_index, entry);
+ return;
}
}
@@ -509,18 +494,16 @@ Handle<Object> WasmTableObject::Get(Isolate* isolate,
UNREACHABLE();
default:
DCHECK(!table->instance().IsUndefined());
- if (WasmInstanceObject::cast(table->instance())
- .module()
- ->has_signature(entry_index)) {
- if (WasmExportedFunction::IsWasmExportedFunction(*entry) ||
- WasmJSFunction::IsWasmJSFunction(*entry) ||
- WasmCapiFunction::IsWasmCapiFunction(*entry)) {
- return entry;
- }
- break;
+ // TODO(7748): Relax this once we have struct/array/i31ref tables.
+ DCHECK(WasmInstanceObject::cast(table->instance())
+ .module()
+ ->has_signature(table->type().ref_index()));
+ if (WasmExportedFunction::IsWasmExportedFunction(*entry) ||
+ WasmJSFunction::IsWasmJSFunction(*entry) ||
+ WasmCapiFunction::IsWasmCapiFunction(*entry)) {
+ return entry;
}
- // TODO(7748): Implement once we have a story for struct/arrays in JS.
- UNIMPLEMENTED();
+ break;
}
// {entry} is not a valid entry in the table. It has to be a placeholder
@@ -1898,30 +1881,18 @@ Handle<WasmExportedFunction> WasmExportedFunction::New(
(export_wrapper->is_builtin() &&
export_wrapper->builtin_index() == Builtins::kGenericJSToWasmWrapper));
int num_imported_functions = instance->module()->num_imported_functions;
- int jump_table_offset = -1;
- if (func_index >= num_imported_functions) {
- uint32_t jump_table_diff =
- instance->module_object().native_module()->GetJumpTableOffset(
- func_index);
- DCHECK_GE(kMaxInt, jump_table_diff);
- jump_table_offset = static_cast<int>(jump_table_diff);
- }
+ Handle<Object> ref =
+ func_index >= num_imported_functions
+ ? instance
+ : handle(instance->imported_function_refs().get(func_index), isolate);
+
Factory* factory = isolate->factory();
const wasm::FunctionSig* sig = instance->module()->functions[func_index].sig;
- Handle<Foreign> sig_foreign =
- factory->NewForeign(reinterpret_cast<Address>(sig));
+ Address call_target = instance->GetCallTarget(func_index);
Handle<WasmExportedFunctionData> function_data =
- Handle<WasmExportedFunctionData>::cast(factory->NewStruct(
- WASM_EXPORTED_FUNCTION_DATA_TYPE, AllocationType::kOld));
- function_data->set_wrapper_code(*export_wrapper);
- function_data->set_instance(*instance);
- function_data->set_jump_table_offset(jump_table_offset);
- function_data->set_function_index(func_index);
- function_data->set_signature(*sig_foreign);
- function_data->set_wrapper_budget(wasm::kGenericWrapperBudget);
- function_data->set_c_wrapper_code(Smi::zero(), SKIP_WRITE_BARRIER);
- function_data->set_wasm_call_target(Smi::zero(), SKIP_WRITE_BARRIER);
- function_data->set_packed_args_size(0);
+ factory->NewWasmExportedFunctionData(
+ export_wrapper, instance, call_target, ref, func_index,
+ reinterpret_cast<Address>(sig), wasm::kGenericWrapperBudget);
MaybeHandle<String> maybe_name;
bool is_asm_js_module = instance->module_object().is_asm_js();
@@ -2042,18 +2013,18 @@ Handle<WasmJSFunction> WasmJSFunction::New(Isolate* isolate,
Handle<Code> wrapper_code =
compiler::CompileJSToJSWrapper(isolate, sig, nullptr).ToHandleChecked();
+ // WasmJSFunctions use on-heap Code objects as call targets, so we can't
+ // cache the target address, unless the WasmJSFunction wraps a
+ // WasmExportedFunction.
+ Address call_target = kNullAddress;
+ if (WasmExportedFunction::IsWasmExportedFunction(*callable)) {
+ call_target = WasmExportedFunction::cast(*callable).GetWasmCallTarget();
+ }
+
Factory* factory = isolate->factory();
- Handle<WasmJSFunctionData> function_data = Handle<WasmJSFunctionData>::cast(
- factory->NewStruct(WASM_JS_FUNCTION_DATA_TYPE, AllocationType::kOld));
- function_data->set_serialized_return_count(return_count);
- function_data->set_serialized_parameter_count(parameter_count);
- function_data->set_serialized_signature(*serialized_sig);
- function_data->set_callable(*callable);
- function_data->set_wrapper_code(*wrapper_code);
- // Use Abort() as a default value (it will never be called if not overwritten
- // below).
- function_data->set_wasm_to_js_wrapper_code(
- isolate->heap()->builtin(Builtins::kAbort));
+ Handle<WasmJSFunctionData> function_data = factory->NewWasmJSFunctionData(
+ call_target, callable, return_count, parameter_count, serialized_sig,
+ wrapper_code);
if (wasm::WasmFeatures::FromIsolate(isolate).has_typed_funcref()) {
using CK = compiler::WasmImportCallKind;
@@ -2094,7 +2065,8 @@ Handle<WasmJSFunction> WasmJSFunction::New(Isolate* isolate,
}
JSReceiver WasmJSFunction::GetCallable() const {
- return shared().wasm_js_function_data().callable();
+ return JSReceiver::cast(
+ Tuple2::cast(shared().wasm_js_function_data().ref()).value2());
}
const wasm::FunctionSig* WasmJSFunction::GetSignature(Zone* zone) {
@@ -2256,7 +2228,8 @@ bool TypecheckJSObject(Isolate* isolate, const WasmModule* module,
if (WasmJSFunction::IsWasmJSFunction(*value)) {
// Since a WasmJSFunction cannot refer to indexed types (definable
- // only in a module), we do not need to use EquivalentTypes().
+ // only in a module), we do not need full function subtyping.
+ // TODO(manoskouk): Change this if wasm types can be exported.
if (!WasmJSFunction::cast(*value).MatchesSignature(
module->signature(expected.ref_index()))) {
*error_message =
@@ -2268,11 +2241,12 @@ bool TypecheckJSObject(Isolate* isolate, const WasmModule* module,
}
if (WasmCapiFunction::IsWasmCapiFunction(*value)) {
+ // Since a WasmCapiFunction cannot refer to indexed types
+ // (definable only in a module), we do not need full function
+ // subtyping.
+ // TODO(manoskouk): Change this if wasm types can be exported.
if (!WasmCapiFunction::cast(*value).MatchesSignature(
module->signature(expected.ref_index()))) {
- // Since a WasmCapiFunction cannot refer to indexed types
- // (definable in a module), we don't need to invoke
- // IsEquivalentType();
*error_message =
"assigned WasmCapiFunction has to be a subtype of the "
"expected type";
diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h
index 47bef60ac7..473c4725cc 100644
--- a/deps/v8/src/wasm/wasm-objects.h
+++ b/deps/v8/src/wasm/wasm-objects.h
@@ -166,17 +166,11 @@ class WasmModuleObject : public JSObject {
Handle<WasmModuleObject>,
uint32_t func_index);
- // Get the function name of the function identified by the given index.
- // Returns "func[func_index]" if the function is unnamed or the
- // name is not a valid UTF-8 string.
- static Handle<String> GetFunctionName(Isolate*, Handle<WasmModuleObject>,
- uint32_t func_index);
-
// Get the raw bytes of the function name of the function identified by the
// given index.
// Meant to be used for debugging or frame printing.
// Does not allocate, hence gc-safe.
- Vector<const uint8_t> GetRawFunctionName(uint32_t func_index);
+ Vector<const uint8_t> GetRawFunctionName(int func_index);
// Extract a portion of the wire bytes as UTF-8 string, optionally
// internalized. (Prefer to internalize early if the string will be used for a
@@ -747,19 +741,31 @@ class WasmIndirectFunctionTable : public Struct {
OBJECT_CONSTRUCTORS(WasmIndirectFunctionTable, Struct);
};
+class WasmFunctionData
+ : public TorqueGeneratedWasmFunctionData<WasmFunctionData, Foreign> {
+ public:
+ DECL_ACCESSORS(ref, Object)
+
+ DECL_CAST(WasmFunctionData)
+ DECL_PRINTER(WasmFunctionData)
+
+ TQ_OBJECT_CONSTRUCTORS(WasmFunctionData)
+};
+
// Information for a WasmExportedFunction which is referenced as the function
// data of the SharedFunctionInfo underlying the function. For details please
// see the {SharedFunctionInfo::HasWasmExportedFunctionData} predicate.
-class WasmExportedFunctionData : public Struct {
+class WasmExportedFunctionData : public WasmFunctionData {
public:
DECL_ACCESSORS(wrapper_code, Code)
+ // This is the instance that exported the function (which in case of
+ // imported and re-exported functions is different from the instance
+ // where the function is defined -- for the latter see WasmFunctionData::ref).
DECL_ACCESSORS(instance, WasmInstanceObject)
- DECL_INT_ACCESSORS(jump_table_offset)
DECL_INT_ACCESSORS(function_index)
DECL_ACCESSORS(signature, Foreign)
DECL_INT_ACCESSORS(wrapper_budget)
DECL_ACCESSORS(c_wrapper_code, Object)
- DECL_ACCESSORS(wasm_call_target, Object)
DECL_INT_ACCESSORS(packed_args_size)
inline wasm::FunctionSig* sig() const;
@@ -772,21 +778,22 @@ class WasmExportedFunctionData : public Struct {
// Layout description.
DEFINE_FIELD_OFFSET_CONSTANTS(
- HeapObject::kHeaderSize,
+ WasmFunctionData::kSize,
TORQUE_GENERATED_WASM_EXPORTED_FUNCTION_DATA_FIELDS)
- OBJECT_CONSTRUCTORS(WasmExportedFunctionData, Struct);
+ class BodyDescriptor;
+
+ OBJECT_CONSTRUCTORS(WasmExportedFunctionData, WasmFunctionData);
};
// Information for a WasmJSFunction which is referenced as the function data of
// the SharedFunctionInfo underlying the function. For details please see the
// {SharedFunctionInfo::HasWasmJSFunctionData} predicate.
-class WasmJSFunctionData : public Struct {
+class WasmJSFunctionData : public WasmFunctionData {
public:
DECL_INT_ACCESSORS(serialized_return_count)
DECL_INT_ACCESSORS(serialized_parameter_count)
DECL_ACCESSORS(serialized_signature, PodArray<wasm::ValueType>)
- DECL_ACCESSORS(callable, JSReceiver)
DECL_ACCESSORS(wrapper_code, Code)
DECL_ACCESSORS(wasm_to_js_wrapper_code, Code)
@@ -797,10 +804,12 @@ class WasmJSFunctionData : public Struct {
DECL_VERIFIER(WasmJSFunctionData)
// Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
+ DEFINE_FIELD_OFFSET_CONSTANTS(WasmFunctionData::kSize,
TORQUE_GENERATED_WASM_JS_FUNCTION_DATA_FIELDS)
- OBJECT_CONSTRUCTORS(WasmJSFunctionData, Struct);
+ class BodyDescriptor;
+
+ OBJECT_CONSTRUCTORS(WasmJSFunctionData, WasmFunctionData);
};
class WasmScript : public AllStatic {
@@ -915,6 +924,8 @@ class WasmStruct : public TorqueGeneratedWasmStruct<WasmStruct, HeapObject> {
static inline wasm::StructType* type(Map map);
inline wasm::StructType* type() const;
static inline wasm::StructType* GcSafeType(Map map);
+ static inline int Size(const wasm::StructType* type);
+ static inline int GcSafeSize(Map map);
inline ObjectSlot RawField(int raw_offset);
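The callable of a WasmJSFunction now travels in the shared {ref} slot introduced on WasmFunctionData above, stored as a Tuple2 rather than in a dedicated field; that is what the updated WasmJSFunction::GetCallable in wasm-objects.cc unwraps. A short sketch of reading {ref} back, for illustration only: value2() is what GetCallable() reads, while value1() holding the instance is assumed from the Torque comment on WasmFunctionData further below.

// Illustrative helper, not part of the patch.
void InspectRef(WasmFunctionData function_data) {
  Object ref = function_data.ref();
  if (ref.IsTuple2()) {  // JS / C-API function: (instance, callable) pair
    Tuple2 pair = Tuple2::cast(ref);
    JSReceiver callable = JSReceiver::cast(pair.value2());
    USE(callable);
  } else {  // plain wasm function: {ref} is the target instance itself
    WasmInstanceObject instance = WasmInstanceObject::cast(ref);
    USE(instance);
  }
}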
diff --git a/deps/v8/src/wasm/wasm-objects.tq b/deps/v8/src/wasm/wasm-objects.tq
index adcf63ba87..13911e590d 100644
--- a/deps/v8/src/wasm/wasm-objects.tq
+++ b/deps/v8/src/wasm/wasm-objects.tq
@@ -11,22 +11,31 @@ type ManagedWasmNativeModule extends Foreign
extern class WasmInstanceObject extends JSObject;
-extern class WasmExportedFunctionData extends Struct {
+@generateCppClass
+extern class WasmFunctionData extends Foreign {
+ // This is the "reference" value that must be passed along in the "instance"
+ // register when calling the given function. It is either the target instance,
+ // or a pair holding the target instance and the callable; currently the
+ // latter is the case when the function being called is defined in JavaScript
+ // or via the C-API.
+ // For imported functions, this value equals the respective entry in
+ // the module's imported_function_refs array.
+ ref: WasmInstanceObject|Tuple2;
+}
+
+extern class WasmExportedFunctionData extends WasmFunctionData {
wrapper_code: Code;
instance: WasmInstanceObject;
- jump_table_offset: Smi;
function_index: Smi;
signature: Foreign;
wrapper_budget: Smi;
// The remaining fields are for fast calling from C++. The contract is
// that they are lazily populated, and either all will be present or none.
c_wrapper_code: Object;
- wasm_call_target: Smi|Foreign;
packed_args_size: Smi;
}
-extern class WasmJSFunctionData extends Struct {
- callable: JSReceiver;
+extern class WasmJSFunctionData extends WasmFunctionData {
wrapper_code: Code;
wasm_to_js_wrapper_code: Code;
serialized_return_count: Smi;
@@ -34,6 +43,7 @@ extern class WasmJSFunctionData extends Struct {
serialized_signature: PodArrayOfWasmValueType;
}
+// TODO(jkummerow): Derive from WasmFunctionData.
@export
class WasmCapiFunctionData extends HeapObject {
call_target: RawPtr;
@@ -107,6 +117,8 @@ extern class AsmWasmData extends Struct {
extern class WasmTypeInfo extends Foreign {
supertypes: FixedArray;
subtypes: ArrayList;
+ // In bytes, used for struct allocation.
+ instance_size: Smi;
}
@generateCppClass
diff --git a/deps/v8/src/wasm/wasm-opcodes-inl.h b/deps/v8/src/wasm/wasm-opcodes-inl.h
index 6b124b2dbc..bc14a4adef 100644
--- a/deps/v8/src/wasm/wasm-opcodes-inl.h
+++ b/deps/v8/src/wasm/wasm-opcodes-inl.h
@@ -162,6 +162,7 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_OP(CallRef, "call_ref")
CASE_OP(ReturnCallRef, "return_call_ref")
CASE_OP(BrOnNull, "br_on_null")
+ CASE_OP(BrOnNonNull, "br_on_non_null")
CASE_OP(Drop, "drop")
CASE_OP(Select, "select")
CASE_OP(SelectWithType, "select")
@@ -400,6 +401,7 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_OP(RefTest, "ref.test")
CASE_OP(RefCast, "ref.cast")
CASE_OP(BrOnCast, "br_on_cast")
+ CASE_OP(BrOnCastFail, "br_on_cast_fail")
CASE_OP(RefIsFunc, "ref.is_func")
CASE_OP(RefIsData, "ref.is_data")
CASE_OP(RefIsI31, "ref.is_i31")
@@ -629,16 +631,12 @@ constexpr const FunctionSig* WasmOpcodes::Signature(WasmOpcode opcode) {
case kNumericPrefix:
return impl::kCachedSigs[impl::kNumericExprSigTable[opcode & 0xFF]];
default:
-#if V8_HAS_CXX14_CONSTEXPR
UNREACHABLE(); // invalid prefix.
-#else
- return nullptr;
-#endif
}
}
constexpr const FunctionSig* WasmOpcodes::AsmjsSignature(WasmOpcode opcode) {
- CONSTEXPR_DCHECK(opcode < impl::kSimpleAsmjsExprSigTable.size());
+ DCHECK_GT(impl::kSimpleAsmjsExprSigTable.size(), opcode);
return impl::kCachedSigs[impl::kSimpleAsmjsExprSigTable[opcode]];
}
diff --git a/deps/v8/src/wasm/wasm-opcodes.cc b/deps/v8/src/wasm/wasm-opcodes.cc
index 9461807751..e4c0d19a05 100644
--- a/deps/v8/src/wasm/wasm-opcodes.cc
+++ b/deps/v8/src/wasm/wasm-opcodes.cc
@@ -35,9 +35,6 @@ std::ostream& operator<<(std::ostream& os, const FunctionSig& sig) {
// https://chromium-review.googlesource.com/c/v8/v8/+/2413251).
bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmModule* module,
const WasmFeatures& enabled_features) {
- if (!enabled_features.has_mv() && sig->return_count() > 1) {
- return false;
- }
for (auto type : sig->all()) {
// TODO(7748): Allow structs, arrays, and rtts when their JS-interaction is
// decided on.
diff --git a/deps/v8/src/wasm/wasm-opcodes.h b/deps/v8/src/wasm/wasm-opcodes.h
index 909cacadd2..5de6892124 100644
--- a/deps/v8/src/wasm/wasm-opcodes.h
+++ b/deps/v8/src/wasm/wasm-opcodes.h
@@ -52,6 +52,7 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(Delegate, 0x18, _ /* eh_prototype */) \
V(CatchAll, 0x19, _ /* eh_prototype */) \
V(BrOnNull, 0xd4, _ /* gc prototype */) \
+ V(BrOnNonNull, 0xd6, _ /* gc prototype */) \
V(NopForTestingUnsupportedInLiftoff, 0x16, _)
// Constants, locals, globals, and calls.
@@ -252,37 +253,37 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
// These opcodes are not spec'ed (or visible) externally; the idea is
// to use unused ranges for internal purposes.
#define FOREACH_ASMJS_COMPAT_OPCODE(V) \
- V(F64Acos, 0xc5, d_d) \
- V(F64Asin, 0xc6, d_d) \
- V(F64Atan, 0xc7, d_d) \
- V(F64Cos, 0xc8, d_d) \
- V(F64Sin, 0xc9, d_d) \
- V(F64Tan, 0xca, d_d) \
- V(F64Exp, 0xcb, d_d) \
- V(F64Log, 0xcc, d_d) \
- V(F64Atan2, 0xcd, d_dd) \
- V(F64Pow, 0xce, d_dd) \
- V(F64Mod, 0xcf, d_dd) \
+ V(F64Acos, 0xdc, d_d) \
+ V(F64Asin, 0xdd, d_d) \
+ V(F64Atan, 0xde, d_d) \
+ V(F64Cos, 0xdf, d_d) \
+ V(F64Sin, 0xe0, d_d) \
+ V(F64Tan, 0xe1, d_d) \
+ V(F64Exp, 0xe2, d_d) \
+ V(F64Log, 0xe3, d_d) \
+ V(F64Atan2, 0xe4, d_dd) \
+ V(F64Pow, 0xe5, d_dd) \
+ V(F64Mod, 0xe6, d_dd) \
V(I32AsmjsDivS, 0xe7, i_ii) \
V(I32AsmjsDivU, 0xe8, i_ii) \
V(I32AsmjsRemS, 0xe9, i_ii) \
- V(I32AsmjsRemU, 0xd6, i_ii) \
- V(I32AsmjsLoadMem8S, 0xd7, i_i) \
- V(I32AsmjsLoadMem8U, 0xd8, i_i) \
- V(I32AsmjsLoadMem16S, 0xd9, i_i) \
- V(I32AsmjsLoadMem16U, 0xda, i_i) \
- V(I32AsmjsLoadMem, 0xdb, i_i) \
- V(F32AsmjsLoadMem, 0xdc, f_i) \
- V(F64AsmjsLoadMem, 0xdd, d_i) \
- V(I32AsmjsStoreMem8, 0xde, i_ii) \
- V(I32AsmjsStoreMem16, 0xdf, i_ii) \
- V(I32AsmjsStoreMem, 0xe0, i_ii) \
- V(F32AsmjsStoreMem, 0xe1, f_if) \
- V(F64AsmjsStoreMem, 0xe2, d_id) \
- V(I32AsmjsSConvertF32, 0xe3, i_f) \
- V(I32AsmjsUConvertF32, 0xe4, i_f) \
- V(I32AsmjsSConvertF64, 0xe5, i_d) \
- V(I32AsmjsUConvertF64, 0xe6, i_d)
+ V(I32AsmjsRemU, 0xea, i_ii) \
+ V(I32AsmjsLoadMem8S, 0xeb, i_i) \
+ V(I32AsmjsLoadMem8U, 0xec, i_i) \
+ V(I32AsmjsLoadMem16S, 0xed, i_i) \
+ V(I32AsmjsLoadMem16U, 0xee, i_i) \
+ V(I32AsmjsLoadMem, 0xef, i_i) \
+ V(F32AsmjsLoadMem, 0xf0, f_i) \
+ V(F64AsmjsLoadMem, 0xf1, d_i) \
+ V(I32AsmjsStoreMem8, 0xf2, i_ii) \
+ V(I32AsmjsStoreMem16, 0xf3, i_ii) \
+ V(I32AsmjsStoreMem, 0xf4, i_ii) \
+ V(F32AsmjsStoreMem, 0xf5, f_if) \
+ V(F64AsmjsStoreMem, 0xf6, d_id) \
+ V(I32AsmjsSConvertF32, 0xf7, i_f) \
+ V(I32AsmjsUConvertF32, 0xf8, i_f) \
+ V(I32AsmjsSConvertF64, 0xf9, i_d) \
+ V(I32AsmjsUConvertF64, 0xfa, i_d)
#define FOREACH_SIMD_MEM_OPCODE(V) \
V(S128LoadMem, 0xfd00, s_i) \
@@ -670,6 +671,7 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(RefTest, 0xfb40, _) \
V(RefCast, 0xfb41, _) \
V(BrOnCast, 0xfb42, _) \
+ V(BrOnCastFail, 0xfb43, _) \
V(RefIsFunc, 0xfb50, _) \
V(RefIsData, 0xfb51, _) \
V(RefIsI31, 0xfb52, _) \
@@ -759,10 +761,10 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(s_is, kWasmS128, kWasmI32, kWasmS128)
#define FOREACH_PREFIX(V) \
+ V(GC, 0xfb) \
V(Numeric, 0xfc) \
V(Simd, 0xfd) \
- V(Atomic, 0xfe) \
- V(GC, 0xfb)
+ V(Atomic, 0xfe)
enum WasmOpcode {
// Declare expression opcodes.
@@ -801,129 +803,6 @@ class V8_EXPORT_PRIVATE WasmOpcodes {
static inline const char* TrapReasonMessage(TrapReason);
};
-// Representation of an initializer expression.
-class WasmInitExpr {
- public:
- enum Operator {
- kNone,
- kGlobalGet,
- kI32Const,
- kI64Const,
- kF32Const,
- kF64Const,
- kS128Const,
- kRefNullConst,
- kRefFuncConst,
- kRttCanon,
- kRttSub
- };
-
- union Immediate {
- int32_t i32_const;
- int64_t i64_const;
- float f32_const;
- double f64_const;
- std::array<uint8_t, kSimd128Size> s128_const;
- uint32_t index;
- HeapType::Representation heap_type;
- };
-
- WasmInitExpr() : kind_(kNone) { immediate_.i32_const = 0; }
- explicit WasmInitExpr(int32_t v) : kind_(kI32Const) {
- immediate_.i32_const = v;
- }
- explicit WasmInitExpr(int64_t v) : kind_(kI64Const) {
- immediate_.i64_const = v;
- }
- explicit WasmInitExpr(float v) : kind_(kF32Const) {
- immediate_.f32_const = v;
- }
- explicit WasmInitExpr(double v) : kind_(kF64Const) {
- immediate_.f64_const = v;
- }
- explicit WasmInitExpr(uint8_t v[kSimd128Size]) : kind_(kS128Const) {
- base::Memcpy(immediate_.s128_const.data(), v, kSimd128Size);
- }
-
- MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(WasmInitExpr);
-
- static WasmInitExpr GlobalGet(uint32_t index) {
- WasmInitExpr expr;
- expr.kind_ = kGlobalGet;
- expr.immediate_.index = index;
- return expr;
- }
-
- static WasmInitExpr RefFuncConst(uint32_t index) {
- WasmInitExpr expr;
- expr.kind_ = kRefFuncConst;
- expr.immediate_.index = index;
- return expr;
- }
-
- static WasmInitExpr RefNullConst(HeapType::Representation heap_type) {
- WasmInitExpr expr;
- expr.kind_ = kRefNullConst;
- expr.immediate_.heap_type = heap_type;
- return expr;
- }
-
- static WasmInitExpr RttCanon(uint32_t index) {
- WasmInitExpr expr;
- expr.kind_ = kRttCanon;
- expr.immediate_.index = index;
- return expr;
- }
-
- static WasmInitExpr RttSub(uint32_t index, WasmInitExpr supertype) {
- WasmInitExpr expr;
- expr.kind_ = kRttSub;
- expr.immediate_.index = index;
- expr.operand_ = std::make_unique<WasmInitExpr>(std::move(supertype));
- return expr;
- }
-
- Immediate immediate() const { return immediate_; }
- Operator kind() const { return kind_; }
- WasmInitExpr* operand() const { return operand_.get(); }
-
- bool operator==(const WasmInitExpr& other) const {
- if (kind() != other.kind()) return false;
- switch (kind()) {
- case kNone:
- return true;
- case kGlobalGet:
- case kRefFuncConst:
- case kRttCanon:
- return immediate().index == other.immediate().index;
- case kI32Const:
- return immediate().i32_const == other.immediate().i32_const;
- case kI64Const:
- return immediate().i64_const == other.immediate().i64_const;
- case kF32Const:
- return immediate().f32_const == other.immediate().f32_const;
- case kF64Const:
- return immediate().f64_const == other.immediate().f64_const;
- case kS128Const:
- return immediate().s128_const == other.immediate().s128_const;
- case kRefNullConst:
- return immediate().heap_type == other.immediate().heap_type;
- case kRttSub:
- return immediate().index == other.immediate().index &&
- *operand() == *other.operand();
- }
- }
-
- V8_INLINE bool operator!=(const WasmInitExpr& other) {
- return !(*this == other);
- }
-
- private:
- Immediate immediate_;
- Operator kind_;
- std::unique_ptr<WasmInitExpr> operand_ = nullptr;
-};
-
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-serialization.cc b/deps/v8/src/wasm/wasm-serialization.cc
index b2e6f0c4d8..a47e420cb1 100644
--- a/deps/v8/src/wasm/wasm-serialization.cc
+++ b/deps/v8/src/wasm/wasm-serialization.cc
@@ -567,6 +567,8 @@ class CopyAndRelocTask : public JobTask {
void Run(JobDelegate* delegate) override {
CODE_SPACE_WRITE_SCOPE
+ NativeModuleModificationScope native_module_modification_scope(
+ deserializer_->native_module_);
do {
auto batch = from_queue_->Pop();
if (batch.empty()) break;
@@ -712,11 +714,9 @@ DeserializationUnit NativeModuleDeserializer::ReadCode(int fn_index,
constexpr size_t kMaxReservation =
RoundUp<kCodeAlignment>(WasmCodeAllocator::kMaxCodeSpaceSize * 9 / 10);
size_t code_space_size = std::min(kMaxReservation, remaining_code_size_);
- current_code_space_ =
+ std::tie(current_code_space_, current_jump_tables_) =
native_module_->AllocateForDeserializedCode(code_space_size);
DCHECK_EQ(current_code_space_.size(), code_space_size);
- current_jump_tables_ = native_module_->FindJumpTablesForRegion(
- base::AddressRegionOf(current_code_space_));
DCHECK(current_jump_tables_.is_valid());
}
diff --git a/deps/v8/src/wasm/wasm-subtyping.cc b/deps/v8/src/wasm/wasm-subtyping.cc
index b0e8105a60..d2b7e9fe31 100644
--- a/deps/v8/src/wasm/wasm-subtyping.cc
+++ b/deps/v8/src/wasm/wasm-subtyping.cc
@@ -91,6 +91,26 @@ class TypeJudgementCache {
type_equivalence_cache_.erase(
std::make_tuple(type1, type2, module1, module2));
}
+ void delete_module(const WasmModule* module) {
+ for (auto iterator = type_equivalence_cache_.begin();
+ iterator != type_equivalence_cache_.end();) {
+ if (std::get<2>(*iterator) == module ||
+ std::get<3>(*iterator) == module) {
+ iterator = type_equivalence_cache_.erase(iterator);
+ } else {
+ iterator++;
+ }
+ }
+ for (auto iterator = subtyping_cache_.begin();
+ iterator != subtyping_cache_.end();) {
+ if (std::get<2>(*iterator) == module ||
+ std::get<3>(*iterator) == module) {
+ iterator = subtyping_cache_.erase(iterator);
+ } else {
+ iterator++;
+ }
+ }
+ }
private:
Zone zone_;
@@ -258,14 +278,46 @@ bool ArrayIsSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
}
}
-// TODO(7748): Expand this with function subtyping when it is introduced.
bool FunctionIsSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
const WasmModule* sub_module,
const WasmModule* super_module) {
- return FunctionEquivalentIndices(subtype_index, supertype_index, sub_module,
- super_module);
-}
+ if (!FLAG_experimental_wasm_gc) {
+ return FunctionEquivalentIndices(subtype_index, supertype_index, sub_module,
+ super_module);
+ }
+ const FunctionSig* sub_func = sub_module->types[subtype_index].function_sig;
+ const FunctionSig* super_func =
+ super_module->types[supertype_index].function_sig;
+
+ if (sub_func->parameter_count() != super_func->parameter_count() ||
+ sub_func->return_count() != super_func->return_count()) {
+ return false;
+ }
+
+ TypeJudgementCache::instance()->cache_subtype(subtype_index, supertype_index,
+ sub_module, super_module);
+
+ for (uint32_t i = 0; i < sub_func->parameter_count(); i++) {
+ // Contravariance for params.
+ if (!IsSubtypeOf(super_func->parameters()[i], sub_func->parameters()[i],
+ super_module, sub_module)) {
+ TypeJudgementCache::instance()->uncache_subtype(
+ subtype_index, supertype_index, sub_module, super_module);
+ return false;
+ }
+ }
+ for (uint32_t i = 0; i < sub_func->return_count(); i++) {
+ // Covariance for returns.
+ if (!IsSubtypeOf(sub_func->returns()[i], super_func->returns()[i],
+ sub_module, super_module)) {
+ TypeJudgementCache::instance()->uncache_subtype(
+ subtype_index, supertype_index, sub_module, super_module);
+ return false;
+ }
+ }
+ return true;
+}
} // namespace
V8_NOINLINE V8_EXPORT_PRIVATE bool IsSubtypeOfImpl(
@@ -403,11 +455,12 @@ V8_NOINLINE bool EquivalentTypes(ValueType type1, ValueType type2,
module2);
}
-ValueType CommonSubtype(ValueType a, ValueType b, const WasmModule* module) {
- if (a == b) return a;
- if (IsSubtypeOf(a, b, module)) return a;
- if (IsSubtypeOf(b, a, module)) return b;
- return kWasmBottom;
+void DeleteCachedTypeJudgementsForModule(const WasmModule* module) {
+ // Accessing the caches for subtyping and equivalence from multiple background
+ // threads is protected by a lock.
+ base::RecursiveMutexGuard type_cache_access(
+ TypeJudgementCache::instance()->type_cache_mutex());
+ TypeJudgementCache::instance()->delete_module(module);
}
} // namespace wasm
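A small worked example of the new variance rules implemented in FunctionIsSubtypeOf above (hedged: it assumes the standard wasm-gc fact that a non-null reference to a concrete struct type $t is a subtype of eqref).

// Given two signatures in the same module:
//
//   $sub   : [eqref]    -> [(ref $t)]
//   $super : [(ref $t)] -> [eqref]
//
// FunctionIsSubtypeOf($sub, $super, ...) holds with --experimental-wasm-gc:
//   - parameters are checked contravariantly: (ref $t) <: eqref
//     (the supertype's parameter must be a subtype of the subtype's), and
//   - results are checked covariantly:        (ref $t) <: eqref
//     (the subtype's result must be a subtype of the supertype's).
// Without --experimental-wasm-gc, only the old equivalence check applies and
// the two signatures would not be related.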
diff --git a/deps/v8/src/wasm/wasm-subtyping.h b/deps/v8/src/wasm/wasm-subtyping.h
index 0c35f7c470..59e7935d1f 100644
--- a/deps/v8/src/wasm/wasm-subtyping.h
+++ b/deps/v8/src/wasm/wasm-subtyping.h
@@ -60,8 +60,10 @@ V8_NOINLINE bool EquivalentTypes(ValueType type1, ValueType type2,
// - Struct subtyping: Subtype must have at least as many fields as supertype,
// covariance for immutable fields, equivalence for mutable fields.
// - Array subtyping (mutable only) is the equivalence relation.
-// - Function subtyping is the equivalence relation (note: this rule might
-// change in the future to include type variance).
+// - Function subtyping depends on the enabled wasm features: if
+// --experimental-wasm-gc is enabled, then subtyping is computed
+// contravariantly for parameter types and covariantly for return types.
+// Otherwise, the subtyping relation is the equivalence relation.
V8_INLINE bool IsSubtypeOf(ValueType subtype, ValueType supertype,
const WasmModule* sub_module,
const WasmModule* super_module) {
@@ -91,11 +93,9 @@ V8_INLINE bool IsHeapSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
ValueType::Ref(supertype_index, kNonNullable), module);
}
-// Returns the weakest type that is a subtype of both a and b
-// (which is currently always one of a, b, or kWasmBottom).
-// TODO(manoskouk): Update this once we have settled on a type system for
-// reference types.
-ValueType CommonSubtype(ValueType a, ValueType b, const WasmModule* module);
+// Call this function in {module}'s destructor to avoid spurious cache hits in
+// case another WasmModule gets allocated in the same address later.
+void DeleteCachedTypeJudgementsForModule(const WasmModule* module);
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/wasm-value.h b/deps/v8/src/wasm/wasm-value.h
index 0a1d2b69e2..faaad18076 100644
--- a/deps/v8/src/wasm/wasm-value.h
+++ b/deps/v8/src/wasm/wasm-value.h
@@ -12,7 +12,7 @@
#include "src/base/memory.h"
#include "src/handles/handles.h"
#include "src/utils/boxed-float.h"
-#include "src/wasm/wasm-opcodes.h"
+#include "src/wasm/value-type.h"
#include "src/zone/zone-containers.h"
namespace v8 {