summaryrefslogtreecommitdiff
path: root/deps/v8/src/wasm/baseline
diff options
context:
space:
mode:
authorRefael Ackermann <refack@gmail.com>2019-05-28 08:46:21 -0400
committerRefael Ackermann <refack@gmail.com>2019-06-01 09:55:12 -0400
commited74896b1fae1c163b3906163f3bf46326618ddb (patch)
tree7fb05c5a19808e0c5cd95837528e9005999cf540 /deps/v8/src/wasm/baseline
parent2a850cd0664a4eee51f44d0bb8c2f7a3fe444154 (diff)
downloadnode-new-ed74896b1fae1c163b3906163f3bf46326618ddb.tar.gz
deps: update V8 to 7.5.288.22
PR-URL: https://github.com/nodejs/node/pull/27375 Reviewed-By: Michaël Zasso <targos@protonmail.com> Reviewed-By: Ujjwal Sharma <usharma1998@gmail.com> Reviewed-By: Refael Ackermann <refack@gmail.com> Reviewed-By: Matteo Collina <matteo.collina@gmail.com> Reviewed-By: Colin Ihrig <cjihrig@gmail.com> Reviewed-By: Rich Trott <rtrott@gmail.com>
Diffstat (limited to 'deps/v8/src/wasm/baseline')
-rw-r--r--deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h30
-rw-r--r--deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h25
-rw-r--r--deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h43
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler.h11
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-compiler.cc72
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-compiler.h9
-rw-r--r--deps/v8/src/wasm/baseline/mips/OWNERS4
-rw-r--r--deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h11
-rw-r--r--deps/v8/src/wasm/baseline/mips64/OWNERS4
-rw-r--r--deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h9
-rw-r--r--deps/v8/src/wasm/baseline/ppc/OWNERS4
-rw-r--r--deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h9
-rw-r--r--deps/v8/src/wasm/baseline/s390/OWNERS4
-rw-r--r--deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h9
-rw-r--r--deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h17
15 files changed, 237 insertions, 24 deletions
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index 26f63ea302..21ec7fdeff 100644
--- a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -139,6 +139,27 @@ inline void I64Binop(LiftoffAssembler* assm, LiftoffRegister dst,
}
}
+template <void (Assembler::*op)(Register, Register, const Operand&, SBit,
+ Condition),
+ void (Assembler::*op_with_carry)(Register, Register, const Operand&,
+ SBit, Condition)>
+inline void I64BinopI(LiftoffAssembler* assm, LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t imm) {
+ UseScratchRegisterScope temps(assm);
+ Register scratch = dst.low_gp();
+ bool can_use_dst = dst.low_gp() != lhs.high_gp();
+ if (!can_use_dst) {
+ scratch = temps.Acquire();
+ }
+ (assm->*op)(scratch, lhs.low_gp(), Operand(imm), SetCC, al);
+ // Top half of the immediate sign extended, either 0 or -1.
+ (assm->*op_with_carry)(dst.high_gp(), lhs.high_gp(),
+ Operand(imm < 0 ? -1 : 0), LeaveCC, al);
+ if (!can_use_dst) {
+ assm->mov(dst.low_gp(), scratch);
+ }
+}
+
template <void (TurboAssembler::*op)(Register, Register, Register, Register,
Register),
bool is_left_shift>
@@ -658,6 +679,10 @@ FP64_UNOP(f64_sqrt, vsqrt)
#undef FP64_UNOP
#undef FP64_BINOP
+void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, int32_t imm) {
+ add(dst, lhs, Operand(imm));
+}
+
bool LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
clz(dst, src);
return true;
@@ -790,6 +815,11 @@ void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
liftoff::I64Binop<&Assembler::add, &Assembler::adc>(this, dst, lhs, rhs);
}
+void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t imm) {
+ liftoff::I64BinopI<&Assembler::add, &Assembler::adc>(this, dst, lhs, imm);
+}
+
void LiftoffAssembler::emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::I64Binop<&Assembler::sub, &Assembler::sbc>(this, dst, lhs, rhs);
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index 0c093f2dcd..0fe0237653 100644
--- a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -345,12 +345,20 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
CPURegister src = CPURegister::no_reg();
switch (value.type()) {
case kWasmI32:
- src = temps.AcquireW();
- Mov(src.W(), value.to_i32());
+ if (value.to_i32() == 0) {
+ src = wzr;
+ } else {
+ src = temps.AcquireW();
+ Mov(src.W(), value.to_i32());
+ }
break;
case kWasmI64:
- src = temps.AcquireX();
- Mov(src.X(), value.to_i64());
+ if (value.to_i64() == 0) {
+ src = xzr;
+ } else {
+ src = temps.AcquireX();
+ Mov(src.X(), value.to_i64());
+ }
break;
default:
// We do not track f32 and f64 constants, hence they are unreachable.
@@ -572,6 +580,15 @@ void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
Msub(dst_w, scratch, rhs_w, lhs_w);
}
+void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t imm) {
+ Add(dst.gp().X(), lhs.gp().X(), Immediate(imm));
+}
+
+void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, int32_t imm) {
+ Add(dst.W(), lhs.W(), Immediate(imm));
+}
+
bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero,
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index 91e2139d44..8c5d8c918d 100644
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -523,6 +523,14 @@ void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {
}
}
+void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, int32_t imm) {
+ if (lhs != dst) {
+ lea(dst, Operand(lhs, imm));
+ } else {
+ add(dst, Immediate(imm));
+ }
+}
+
void LiftoffAssembler::emit_i32_sub(Register dst, Register lhs, Register rhs) {
if (dst != rhs) {
// Default path.
@@ -793,6 +801,36 @@ inline void OpWithCarry(LiftoffAssembler* assm, LiftoffRegister dst,
LiftoffRegister tmp_result = LiftoffRegister::ForPair(dst_low, dst_high);
if (tmp_result != dst) assm->Move(dst, tmp_result, kWasmI64);
}
+
+template <void (Assembler::*op)(Register, const Immediate&),
+ void (Assembler::*op_with_carry)(Register, int32_t)>
+inline void OpWithCarryI(LiftoffAssembler* assm, LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t imm) {
+ // First, compute the low half of the result, potentially into a temporary dst
+ // register if {dst.low_gp()} equals any register we need to
+ // keep alive for computing the upper half.
+ LiftoffRegList keep_alive = LiftoffRegList::ForRegs(lhs.high_gp());
+ Register dst_low = keep_alive.has(dst.low_gp())
+ ? assm->GetUnusedRegister(kGpReg, keep_alive).gp()
+ : dst.low_gp();
+
+ if (dst_low != lhs.low_gp()) assm->mov(dst_low, lhs.low_gp());
+ (assm->*op)(dst_low, Immediate(imm));
+
+ // Now compute the upper half, while keeping alive the previous result.
+ keep_alive = LiftoffRegList::ForRegs(dst_low);
+ Register dst_high = keep_alive.has(dst.high_gp())
+ ? assm->GetUnusedRegister(kGpReg, keep_alive).gp()
+ : dst.high_gp();
+
+ if (dst_high != lhs.high_gp()) assm->mov(dst_high, lhs.high_gp());
+ // Top half of the immediate sign extended, either 0 or -1.
+ (assm->*op_with_carry)(dst_high, imm < 0 ? -1 : 0);
+
+ // If necessary, move result into the right registers.
+ LiftoffRegister tmp_result = LiftoffRegister::ForPair(dst_low, dst_high);
+ if (tmp_result != dst) assm->Move(dst, tmp_result, kWasmI64);
+}
} // namespace liftoff
void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
@@ -800,6 +838,11 @@ void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
liftoff::OpWithCarry<&Assembler::add, &Assembler::adc>(this, dst, lhs, rhs);
}
+void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t imm) {
+ liftoff::OpWithCarryI<&Assembler::add, &Assembler::adc>(this, dst, lhs, imm);
+}
+
void LiftoffAssembler::emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::OpWithCarry<&Assembler::sub, &Assembler::sbb>(this, dst, lhs, rhs);
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h
index 7ac25bf252..a3e4e4ce07 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h
@@ -389,6 +389,7 @@ class LiftoffAssembler : public TurboAssembler {
// i32 binops.
inline void emit_i32_add(Register dst, Register lhs, Register rhs);
+ inline void emit_i32_add(Register dst, Register lhs, int32_t imm);
inline void emit_i32_sub(Register dst, Register lhs, Register rhs);
inline void emit_i32_mul(Register dst, Register lhs, Register rhs);
inline void emit_i32_divs(Register dst, Register lhs, Register rhs,
@@ -419,6 +420,8 @@ class LiftoffAssembler : public TurboAssembler {
// i64 binops.
inline void emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
+ inline void emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t imm);
inline void emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
@@ -481,6 +484,14 @@ class LiftoffAssembler : public TurboAssembler {
}
}
+ inline void emit_ptrsize_add(Register dst, Register lhs, int32_t imm) {
+ if (kSystemPointerSize == 8) {
+ emit_i64_add(LiftoffRegister(dst), LiftoffRegister(lhs), imm);
+ } else {
+ emit_i32_add(dst, lhs, imm);
+ }
+ }
+
// f32 binops.
inline void emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs);
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index 5ad9dc7315..d539fe481e 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -785,6 +785,37 @@ class LiftoffCompiler {
#undef CASE_TYPE_CONVERSION
}
+ template <ValueType src_type, ValueType result_type, typename EmitFn,
+ typename EmitFnImm>
+ void EmitBinOpImm(EmitFn fn, EmitFnImm fnImm) {
+ static constexpr RegClass src_rc = reg_class_for(src_type);
+ static constexpr RegClass result_rc = reg_class_for(result_type);
+
+ LiftoffAssembler::VarState rhs_slot = __ cache_state()->stack_state.back();
+ // Check if the RHS is an immediate.
+ if (rhs_slot.loc() == LiftoffAssembler::VarState::kIntConst) {
+ __ cache_state()->stack_state.pop_back();
+ int32_t imm = rhs_slot.i32_const();
+
+ LiftoffRegister lhs = __ PopToRegister();
+ LiftoffRegister dst = src_rc == result_rc
+ ? __ GetUnusedRegister(result_rc, {lhs})
+ : __ GetUnusedRegister(result_rc);
+
+ fnImm(dst, lhs, imm);
+ __ PushRegister(result_type, dst);
+ } else {
+ // The RHS was not an immediate.
+ LiftoffRegister rhs = __ PopToRegister();
+ LiftoffRegister lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs));
+ LiftoffRegister dst = src_rc == result_rc
+ ? __ GetUnusedRegister(result_rc, {lhs, rhs})
+ : __ GetUnusedRegister(result_rc);
+ fn(dst, lhs, rhs);
+ __ PushRegister(result_type, dst);
+ }
+ }
+
template <ValueType src_type, ValueType result_type, typename EmitFn>
void EmitBinOp(EmitFn fn) {
static constexpr RegClass src_rc = reg_class_for(src_type);
@@ -830,12 +861,30 @@ class LiftoffCompiler {
[=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
__ emit_##fn(dst.gp(), lhs.gp(), rhs.gp()); \
});
+#define CASE_I32_BINOPI(opcode, fn) \
+ case WasmOpcode::kExpr##opcode: \
+ return EmitBinOpImm<kWasmI32, kWasmI32>( \
+ [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
+ __ emit_##fn(dst.gp(), lhs.gp(), rhs.gp()); \
+ }, \
+ [=](LiftoffRegister dst, LiftoffRegister lhs, int32_t imm) { \
+ __ emit_##fn(dst.gp(), lhs.gp(), imm); \
+ });
#define CASE_I64_BINOP(opcode, fn) \
case WasmOpcode::kExpr##opcode: \
return EmitBinOp<kWasmI64, kWasmI64>( \
[=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
__ emit_##fn(dst, lhs, rhs); \
});
+#define CASE_I64_BINOPI(opcode, fn) \
+ case WasmOpcode::kExpr##opcode: \
+ return EmitBinOpImm<kWasmI64, kWasmI64>( \
+ [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
+ __ emit_##fn(dst, lhs, rhs); \
+ }, \
+ [=](LiftoffRegister dst, LiftoffRegister lhs, int32_t imm) { \
+ __ emit_##fn(dst, lhs, imm); \
+ });
#define CASE_FLOAT_BINOP(opcode, type, fn) \
case WasmOpcode::kExpr##opcode: \
return EmitBinOp<kWasm##type, kWasm##type>( \
@@ -891,7 +940,7 @@ class LiftoffCompiler {
GenerateCCall(&dst, &sig_i_ii, kWasmStmt, args, ext_ref); \
});
switch (opcode) {
- CASE_I32_BINOP(I32Add, i32_add)
+ CASE_I32_BINOPI(I32Add, i32_add)
CASE_I32_BINOP(I32Sub, i32_sub)
CASE_I32_BINOP(I32Mul, i32_mul)
CASE_I32_BINOP(I32And, i32_and)
@@ -910,7 +959,7 @@ class LiftoffCompiler {
CASE_I32_CMPOP(I32LeU, kUnsignedLessEqual)
CASE_I32_CMPOP(I32GeS, kSignedGreaterEqual)
CASE_I32_CMPOP(I32GeU, kUnsignedGreaterEqual)
- CASE_I64_BINOP(I64Add, i64_add)
+ CASE_I64_BINOPI(I64Add, i64_add)
CASE_I64_BINOP(I64Sub, i64_sub)
CASE_I64_BINOP(I64Mul, i64_mul)
CASE_I64_CMPOP(I64Eq, kEqual)
@@ -1060,7 +1109,9 @@ class LiftoffCompiler {
return unsupported(decoder, WasmOpcodes::OpcodeName(opcode));
}
#undef CASE_I32_BINOP
+#undef CASE_I32_BINOPI
#undef CASE_I64_BINOP
+#undef CASE_I64_BINOPI
#undef CASE_FLOAT_BINOP
#undef CASE_I32_CMPOP
#undef CASE_I64_CMPOP
@@ -1553,8 +1604,7 @@ class LiftoffCompiler {
if (index != old_index) __ Move(index, old_index, kWasmI32);
}
Register tmp = __ GetUnusedRegister(kGpReg, pinned).gp();
- __ LoadConstant(LiftoffRegister(tmp), WasmValue(*offset));
- __ emit_ptrsize_add(index, index, tmp);
+ __ emit_ptrsize_add(index, index, *offset);
LOAD_INSTANCE_FIELD(tmp, MemoryMask, kSystemPointerSize);
__ emit_ptrsize_and(index, index, tmp);
*offset = 0;
@@ -1736,6 +1786,9 @@ class LiftoffCompiler {
if (imm.sig->return_count() > 1) {
return unsupported(decoder, "multi-return");
}
+ if (imm.table_index != 0) {
+ return unsupported(decoder, "table index != 0");
+ }
if (imm.sig->return_count() == 1 &&
!CheckSupportedType(decoder, kSupportedTypes, imm.sig->GetReturn(0),
"return")) {
@@ -1980,8 +2033,8 @@ class LiftoffCompiler {
} // namespace
WasmCompilationResult LiftoffCompilationUnit::ExecuteCompilation(
- CompilationEnv* env, const FunctionBody& func_body, Counters* counters,
- WasmFeatures* detected) {
+ AccountingAllocator* allocator, CompilationEnv* env,
+ const FunctionBody& func_body, Counters* counters, WasmFeatures* detected) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
"ExecuteLiftoffCompilation");
base::ElapsedTimer compile_timer;
@@ -1989,7 +2042,7 @@ WasmCompilationResult LiftoffCompilationUnit::ExecuteCompilation(
compile_timer.Start();
}
- Zone zone(wasm_unit_->wasm_engine_->allocator(), "LiftoffCompilationZone");
+ Zone zone(allocator, "LiftoffCompilationZone");
const WasmModule* module = env ? env->module : nullptr;
auto call_descriptor = compiler::GetWasmCallDescriptor(&zone, func_body.sig);
base::Optional<TimedHistogramScope> liftoff_compile_time_scope(
@@ -2004,12 +2057,12 @@ WasmCompilationResult LiftoffCompilationUnit::ExecuteCompilation(
LiftoffCompiler* compiler = &decoder.interface();
if (decoder.failed()) {
compiler->OnFirstError(&decoder);
- return WasmCompilationResult{decoder.error()};
+ return WasmCompilationResult{};
}
if (!compiler->ok()) {
// Liftoff compilation failed.
counters->liftoff_unsupported_functions()->Increment();
- return WasmCompilationResult{WasmError{0, "Liftoff bailout"}};
+ return WasmCompilationResult{};
}
counters->liftoff_compiled_functions()->Increment();
@@ -2029,6 +2082,7 @@ WasmCompilationResult LiftoffCompilationUnit::ExecuteCompilation(
result.protected_instructions = compiler->GetProtectedInstructions();
result.frame_slot_count = compiler->GetTotalFrameSlotCount();
result.tagged_parameter_slots = call_descriptor->GetTaggedParameterSlots();
+ result.result_tier = ExecutionTier::kLiftoff;
DCHECK(result.succeeded());
return result;
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.h b/deps/v8/src/wasm/baseline/liftoff-compiler.h
index e1fb79138f..1ae0b8e83a 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.h
@@ -11,6 +11,7 @@
namespace v8 {
namespace internal {
+class AccountingAllocator;
class Counters;
namespace wasm {
@@ -22,16 +23,14 @@ struct WasmFeatures;
class LiftoffCompilationUnit final {
public:
- explicit LiftoffCompilationUnit(WasmCompilationUnit* wasm_unit)
- : wasm_unit_(wasm_unit) {}
+ LiftoffCompilationUnit() = default;
- WasmCompilationResult ExecuteCompilation(CompilationEnv*, const FunctionBody&,
+ WasmCompilationResult ExecuteCompilation(AccountingAllocator*,
+ CompilationEnv*, const FunctionBody&,
Counters*,
WasmFeatures* detected_features);
private:
- WasmCompilationUnit* const wasm_unit_;
-
DISALLOW_COPY_AND_ASSIGN(LiftoffCompilationUnit);
};
diff --git a/deps/v8/src/wasm/baseline/mips/OWNERS b/deps/v8/src/wasm/baseline/mips/OWNERS
index b455d9ef29..cab3679d65 100644
--- a/deps/v8/src/wasm/baseline/mips/OWNERS
+++ b/deps/v8/src/wasm/baseline/mips/OWNERS
@@ -1,3 +1 @@
-arikalo@wavecomp.com
-prudic@wavecomp.com
-skovacevic@wavecomp.com
+xwafish@gmail.com
diff --git a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index 4fecffb97d..530118c526 100644
--- a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -585,6 +585,10 @@ void LiftoffAssembler::FillI64Half(Register reg, uint32_t index,
lw(reg, liftoff::GetHalfStackSlot(index, half));
}
+void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, int32_t imm) {
+ Addu(dst, lhs, imm);
+}
+
void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
TurboAssembler::Mul(dst, lhs, rhs);
}
@@ -675,6 +679,13 @@ I32_SHIFTOP_I(shr, srl)
#undef I32_SHIFTOP
#undef I32_SHIFTOP_I
+void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t imm) {
+ TurboAssembler::AddPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(),
+ lhs.high_gp(), imm,
+ kScratchReg, kScratchReg2);
+}
+
void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
TurboAssembler::MulPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(),
diff --git a/deps/v8/src/wasm/baseline/mips64/OWNERS b/deps/v8/src/wasm/baseline/mips64/OWNERS
index b455d9ef29..cab3679d65 100644
--- a/deps/v8/src/wasm/baseline/mips64/OWNERS
+++ b/deps/v8/src/wasm/baseline/mips64/OWNERS
@@ -1,3 +1 @@
-arikalo@wavecomp.com
-prudic@wavecomp.com
-skovacevic@wavecomp.com
+xwafish@gmail.com
diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index 3a963cefd6..7bfa172def 100644
--- a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -500,6 +500,10 @@ void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
UNREACHABLE();
}
+void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, int32_t imm) {
+ Addu(dst, lhs, Operand(imm));
+}
+
void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
TurboAssembler::Mul(dst, lhs, rhs);
}
@@ -590,6 +594,11 @@ I32_SHIFTOP_I(shr, srl)
#undef I32_SHIFTOP
#undef I32_SHIFTOP_I
+void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t imm) {
+ Daddu(dst.gp(), lhs.gp(), Operand(imm));
+}
+
void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
TurboAssembler::Dmul(dst.gp(), lhs.gp(), rhs.gp());
diff --git a/deps/v8/src/wasm/baseline/ppc/OWNERS b/deps/v8/src/wasm/baseline/ppc/OWNERS
new file mode 100644
index 0000000000..85b6cb38f0
--- /dev/null
+++ b/deps/v8/src/wasm/baseline/ppc/OWNERS
@@ -0,0 +1,4 @@
+jyan@ca.ibm.com
+joransiu@ca.ibm.com
+michael_dawson@ca.ibm.com
+miladfar@ca.ibm.com
diff --git a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index d6c372e80f..b7b17afcfb 100644
--- a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -231,6 +231,15 @@ void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
BAILOUT("i32_remu");
}
+void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t imm) {
+ BAILOUT("i64_add");
+}
+
+void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, int32_t imm) {
+ BAILOUT("i32_add");
+}
+
void LiftoffAssembler::emit_i32_shr(Register dst, Register lhs, int amount) {
BAILOUT("i32_shr");
}
diff --git a/deps/v8/src/wasm/baseline/s390/OWNERS b/deps/v8/src/wasm/baseline/s390/OWNERS
new file mode 100644
index 0000000000..85b6cb38f0
--- /dev/null
+++ b/deps/v8/src/wasm/baseline/s390/OWNERS
@@ -0,0 +1,4 @@
+jyan@ca.ibm.com
+joransiu@ca.ibm.com
+michael_dawson@ca.ibm.com
+miladfar@ca.ibm.com
diff --git a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index 9680d9664f..1cb8e97d89 100644
--- a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -231,6 +231,15 @@ void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
BAILOUT("i32_remu");
}
+void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t imm) {
+ BAILOUT("i64_add");
+}
+
+void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, int32_t imm) {
+ BAILOUT("i32_add");
+}
+
void LiftoffAssembler::emit_i32_shr(Register dst, Register lhs, int amount) {
BAILOUT("i32_shr");
}
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index 60924bfc1a..ccd352df7e 100644
--- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -434,6 +434,14 @@ void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {
}
}
+void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, int32_t imm) {
+ if (lhs != dst) {
+ leal(dst, Operand(lhs, imm));
+ } else {
+ addl(dst, Immediate(imm));
+ }
+}
+
void LiftoffAssembler::emit_i32_sub(Register dst, Register lhs, Register rhs) {
if (dst != rhs) {
// Default path.
@@ -704,6 +712,15 @@ void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
}
}
+void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t imm) {
+ if (lhs.gp() != dst.gp()) {
+ leaq(dst.gp(), Operand(lhs.gp(), imm));
+ } else {
+ addq(dst.gp(), Immediate(imm));
+ }
+}
+
void LiftoffAssembler::emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
if (dst.gp() == rhs.gp()) {