Diffstat (limited to 'deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc')
-rw-r--r--  deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc  721
1 file changed, 437 insertions(+), 284 deletions(-)
diff --git a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
index daae9502ce..22f52dfcb6 100644
--- a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/codegen/arm64/assembler-arm64-inl.h"
+#include "src/codegen/arm64/constants-arm64.h"
#include "src/codegen/arm64/macro-assembler-arm64-inl.h"
#include "src/codegen/machine-type.h"
#include "src/codegen/optimized-compilation-info.h"
@@ -23,7 +24,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-#define __ tasm()->
+#define __ masm()->
// Adds Arm64-specific methods to convert InstructionOperands.
class Arm64OperandConverter final : public InstructionOperandConverter {
@@ -223,12 +224,25 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
#endif // V8_ENABLE_WEBASSEMBLY
return Operand(constant.ToInt64());
case Constant::kFloat32:
- return Operand(Operand::EmbeddedNumber(constant.ToFloat32()));
+ return Operand::EmbeddedNumber(constant.ToFloat32());
case Constant::kFloat64:
- return Operand(Operand::EmbeddedNumber(constant.ToFloat64().value()));
+ return Operand::EmbeddedNumber(constant.ToFloat64().value());
case Constant::kExternalReference:
return Operand(constant.ToExternalReference());
- case Constant::kCompressedHeapObject: // Fall through.
+ case Constant::kCompressedHeapObject: {
+ RootIndex root_index;
+ if (gen_->isolate()->roots_table().IsRootHandle(constant.ToHeapObject(),
+ &root_index)) {
+ CHECK(COMPRESS_POINTERS_BOOL);
+ CHECK(V8_STATIC_ROOTS_BOOL || !gen_->isolate()->bootstrapper());
+ Tagged_t ptr =
+ MacroAssemblerBase::ReadOnlyRootPtr(root_index, gen_->isolate());
+ CHECK(Assembler::IsImmAddSub(ptr));
+ return Immediate(ptr);
+ }
+
+ return Operand(constant.ToHeapObject());
+ }
case Constant::kHeapObject:
return Operand(constant.ToHeapObject());
case Constant::kRpoNumber:
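
[Annotation, not part of the patch.] The new kCompressedHeapObject branch above emits a read-only root as a raw add/sub immediate, guarded by Assembler::IsImmAddSub. A minimal sketch of that range check, assuming it mirrors the architectural AArch64 add/sub-immediate encoding (a 12-bit value, optionally shifted left by 12):

#include <cstdint>

// Hypothetical stand-in for Assembler::IsImmAddSub: true if `imm` fits an
// AArch64 add/sub immediate (12 bits, optionally shifted left by 12 bits).
bool IsImmAddSubSketch(uint64_t imm) {
  const bool unshifted = (imm >> 12) == 0;
  const bool shifted = (imm & 0xFFF) == 0 && (imm >> 24) == 0;
  return unshifted || shifted;
}
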
@@ -237,13 +251,13 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
UNREACHABLE();
}
- MemOperand ToMemOperand(InstructionOperand* op, TurboAssembler* tasm) const {
+ MemOperand ToMemOperand(InstructionOperand* op, MacroAssembler* masm) const {
DCHECK_NOT_NULL(op);
DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
- return SlotToMemOperand(AllocatedOperand::cast(op)->index(), tasm);
+ return SlotToMemOperand(AllocatedOperand::cast(op)->index(), masm);
}
- MemOperand SlotToMemOperand(int slot, TurboAssembler* tasm) const {
+ MemOperand SlotToMemOperand(int slot, MacroAssembler* masm) const {
FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
if (offset.from_frame_pointer()) {
int from_sp = offset.offset() + frame_access_state()->GetSPToFPOffset();
@@ -283,17 +297,16 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
void Generate() final {
if (COMPRESS_POINTERS_BOOL) {
- __ DecompressTaggedPointer(value_, value_);
+ __ DecompressTagged(value_, value_);
}
- __ CheckPageFlag(
- value_, MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask,
- ne, exit());
+ __ CheckPageFlag(value_, MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ exit());
SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
? SaveFPRegsMode::kSave
: SaveFPRegsMode::kIgnore;
if (must_save_lr_) {
// We need to save and restore lr if the frame was elided.
- __ Push<TurboAssembler::kSignLR>(lr, padreg);
+ __ Push<MacroAssembler::kSignLR>(lr, padreg);
unwinding_info_writer_->MarkLinkRegisterOnTopOfStack(__ pc_offset(), sp);
}
if (mode_ == RecordWriteMode::kValueIsEphemeronKey) {
@@ -310,7 +323,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ CallRecordWriteStubSaveRegisters(object_, offset_, save_fp_mode);
}
if (must_save_lr_) {
- __ Pop<TurboAssembler::kAuthLR>(padreg, lr);
+ __ Pop<MacroAssembler::kAuthLR>(padreg, lr);
unwinding_info_writer_->MarkPopLinkRegisterFromTopOfStack(__ pc_offset());
}
}
@@ -416,7 +429,15 @@ class WasmOutOfLineTrap : public OutOfLineCode {
// A direct call to a wasm runtime stub defined in this module.
// Just encode the stub index. This will be patched when the code
// is added to the native module and copied into wasm code space.
- __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
+ if (gen_->IsWasm() || PointerCompressionIsEnabled()) {
+ __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
+ } else {
+ // For wasm traps inlined into JavaScript force indirect call if pointer
+ // compression is disabled as it can't be guaranteed that the built-in's
+ // address is close enough for a near call.
+ __ IndirectCall(static_cast<Address>(trap_id),
+ RelocInfo::WASM_STUB_CALL);
+ }
ReferenceMap* reference_map =
gen_->zone()->New<ReferenceMap>(gen_->zone());
gen_->RecordSafepoint(reference_map);
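
[Annotation, not part of the patch.] The near/indirect choice above hinges on whether the builtin lies within the reach of an AArch64 BL instruction, which encodes a signed 26-bit word offset, i.e. +/-128 MB. A sketch of that range test, with the constant derived from the architecture rather than from any V8 identifier:

#include <cstdint>

// BL's offset is a signed 26-bit count of 4-byte instructions: +/-2^27 bytes.
bool FitsNearCallRangeSketch(int64_t pc_relative_offset) {
  constexpr int64_t kRange = int64_t{1} << 27;  // 128 MB
  return pc_relative_offset >= -kRange && pc_relative_offset < kRange;
}
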
@@ -429,43 +450,49 @@ class WasmOutOfLineTrap : public OutOfLineCode {
class WasmProtectedInstructionTrap final : public WasmOutOfLineTrap {
public:
- WasmProtectedInstructionTrap(CodeGenerator* gen, int pc, Instruction* instr)
- : WasmOutOfLineTrap(gen, instr), pc_(pc) {}
+ WasmProtectedInstructionTrap(CodeGenerator* gen, int pc, Instruction* instr,
+ TrapId trap_id)
+ : WasmOutOfLineTrap(gen, instr), pc_(pc), trap_id_(trap_id) {}
void Generate() override {
DCHECK(v8_flags.wasm_bounds_checks && !v8_flags.wasm_enforce_bounds_checks);
gen_->AddProtectedInstructionLanding(pc_, __ pc_offset());
- GenerateWithTrapId(TrapId::kTrapMemOutOfBounds);
+ GenerateWithTrapId(trap_id_);
}
private:
int pc_;
+ TrapId trap_id_;
};
void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr, int pc) {
const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
- if (access_mode == kMemoryAccessProtected) {
- zone->New<WasmProtectedInstructionTrap>(codegen, pc, instr);
+ if (access_mode == kMemoryAccessProtectedMemOutOfBounds) {
+ zone->New<WasmProtectedInstructionTrap>(codegen, pc, instr,
+ TrapId::kTrapMemOutOfBounds);
+ } else if (access_mode == kMemoryAccessProtectedNullDereference) {
+ zone->New<WasmProtectedInstructionTrap>(codegen, pc, instr,
+ TrapId::kTrapNullDereference);
}
}
#else
void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr, int pc) {
- DCHECK_NE(kMemoryAccessProtected, AccessModeField::decode(opcode));
+ DCHECK_EQ(kMemoryAccessDirect, AccessModeField::decode(opcode));
}
#endif // V8_ENABLE_WEBASSEMBLY
// Handles unary ops that work for float (scalar), double (scalar), or NEON.
template <typename Fn>
-void EmitFpOrNeonUnop(TurboAssembler* tasm, Fn fn, Instruction* instr,
+void EmitFpOrNeonUnop(MacroAssembler* masm, Fn fn, Instruction* instr,
Arm64OperandConverter i, VectorFormat scalar,
VectorFormat vector) {
VectorFormat f = instr->InputAt(0)->IsSimd128Register() ? vector : scalar;
VRegister output = VRegister::Create(i.OutputDoubleRegister().code(), f);
VRegister input = VRegister::Create(i.InputDoubleRegister(0).code(), f);
- (tasm->*fn)(output, input);
+ (masm->*fn)(output, input);
}
} // namespace
@@ -497,54 +524,124 @@ void EmitFpOrNeonUnop(TurboAssembler* tasm, Fn fn, Instruction* instr,
__ asm_instr(i.Input##reg(2), i.TempRegister(0)); \
} while (0)
-#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_instr, store_instr, reg) \
- do { \
- Label exchange; \
- __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
- __ Bind(&exchange); \
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
- __ load_instr(i.Output##reg(), i.TempRegister(0)); \
- __ store_instr(i.TempRegister32(1), i.Input##reg(2), i.TempRegister(0)); \
- __ Cbnz(i.TempRegister32(1), &exchange); \
+#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(suffix, reg) \
+ do { \
+ __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ if (CpuFeatures::IsSupported(LSE)) { \
+ CpuFeatureScope scope(masm(), LSE); \
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
+ __ Swpal##suffix(i.Input##reg(2), i.Output##reg(), \
+ MemOperand(i.TempRegister(0))); \
+ } else { \
+ Label exchange; \
+ __ Bind(&exchange); \
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
+ __ ldaxr##suffix(i.Output##reg(), i.TempRegister(0)); \
+ __ stlxr##suffix(i.TempRegister32(1), i.Input##reg(2), \
+ i.TempRegister(0)); \
+ __ Cbnz(i.TempRegister32(1), &exchange); \
+ } \
} while (0)
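
[Annotation, not part of the patch.] A portable sketch of the two paths above using std::atomic: on LSE hardware Swpal performs the exchange in one instruction, while the fallback is the classic load-exclusive/store-exclusive retry loop, which compare_exchange_weak models since it, like stlxr, may fail spuriously:

#include <atomic>
#include <cstdint>

uint32_t ExchangeSketch(std::atomic<uint32_t>& cell, uint32_t desired) {
  uint32_t old = cell.load(std::memory_order_relaxed);
  // Retry until the store-exclusive succeeds, like the Cbnz loop above.
  while (!cell.compare_exchange_weak(old, desired, std::memory_order_seq_cst,
                                     std::memory_order_relaxed)) {
  }
  return old;  // Swpal and ldaxr both yield the previous memory contents.
}
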
-#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_instr, store_instr, ext, \
- reg) \
- do { \
- Label compareExchange; \
- Label exit; \
- __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
- __ Bind(&compareExchange); \
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
- __ load_instr(i.Output##reg(), i.TempRegister(0)); \
- __ Cmp(i.Output##reg(), Operand(i.Input##reg(2), ext)); \
- __ B(ne, &exit); \
- __ store_instr(i.TempRegister32(1), i.Input##reg(3), i.TempRegister(0)); \
- __ Cbnz(i.TempRegister32(1), &compareExchange); \
- __ Bind(&exit); \
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(suffix, ext, reg) \
+ do { \
+ __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ if (CpuFeatures::IsSupported(LSE)) { \
+ DCHECK_EQ(i.OutputRegister(), i.InputRegister(2)); \
+ CpuFeatureScope scope(masm(), LSE); \
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
+ __ Casal##suffix(i.Output##reg(), i.Input##reg(3), \
+ MemOperand(i.TempRegister(0))); \
+ } else { \
+ Label compareExchange; \
+ Label exit; \
+ __ Bind(&compareExchange); \
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
+ __ ldaxr##suffix(i.Output##reg(), i.TempRegister(0)); \
+ __ Cmp(i.Output##reg(), Operand(i.Input##reg(2), ext)); \
+ __ B(ne, &exit); \
+ __ stlxr##suffix(i.TempRegister32(1), i.Input##reg(3), \
+ i.TempRegister(0)); \
+ __ Cbnz(i.TempRegister32(1), &compareExchange); \
+ __ Bind(&exit); \
+ } \
} while (0)
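
[Annotation, not part of the patch.] The DCHECK_EQ above pins down the Casal register convention: the comparison value lives in the register that also receives the value observed in memory. std::atomic's compare-exchange has the same in/out shape, as this sketch shows:

#include <atomic>
#include <cstdint>

uint32_t CompareExchangeSketch(std::atomic<uint32_t>& cell, uint32_t expected,
                               uint32_t desired) {
  // `expected` is both an input (comparison value) and an output (the value
  // actually observed), matching Casal's single in/out register.
  cell.compare_exchange_strong(expected, desired, std::memory_order_seq_cst);
  return expected;
}
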
-#define ASSEMBLE_ATOMIC_BINOP(load_instr, store_instr, bin_instr, reg) \
- do { \
- Label binop; \
- __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
- __ Bind(&binop); \
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
- __ load_instr(i.Output##reg(), i.TempRegister(0)); \
- __ bin_instr(i.Temp##reg(1), i.Output##reg(), Operand(i.Input##reg(2))); \
- __ store_instr(i.TempRegister32(2), i.Temp##reg(1), i.TempRegister(0)); \
- __ Cbnz(i.TempRegister32(2), &binop); \
+#define ASSEMBLE_ATOMIC_SUB(suffix, reg) \
+ do { \
+ __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ if (CpuFeatures::IsSupported(LSE)) { \
+ CpuFeatureScope scope(masm(), LSE); \
+ UseScratchRegisterScope temps(masm()); \
+ Register scratch = temps.AcquireSameSizeAs(i.Input##reg(2)); \
+ __ Neg(scratch, i.Input##reg(2)); \
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
+ __ Ldaddal##suffix(scratch, i.Output##reg(), \
+ MemOperand(i.TempRegister(0))); \
+ } else { \
+ Label binop; \
+ __ Bind(&binop); \
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
+ __ ldaxr##suffix(i.Output##reg(), i.TempRegister(0)); \
+ __ Sub(i.Temp##reg(1), i.Output##reg(), Operand(i.Input##reg(2))); \
+ __ stlxr##suffix(i.TempRegister32(2), i.Temp##reg(1), \
+ i.TempRegister(0)); \
+ __ Cbnz(i.TempRegister32(2), &binop); \
+ } \
+ } while (0)
+
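[Annotation, not part of the patch.] LSE has no atomic subtract, so the macro above negates the operand and uses the atomic add Ldaddal. In portable terms:

#include <atomic>
#include <cstdint>

uint32_t AtomicSubSketch(std::atomic<uint32_t>& cell, uint32_t value) {
  // Neg + Ldaddal: adding the two's-complement negation subtracts, and the
  // returned value is the pre-update contents, as with the ldaxr fallback.
  return cell.fetch_add(0u - value, std::memory_order_seq_cst);
}
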
+#define ASSEMBLE_ATOMIC_AND(suffix, reg) \
+ do { \
+ __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ if (CpuFeatures::IsSupported(LSE)) { \
+ CpuFeatureScope scope(masm(), LSE); \
+ UseScratchRegisterScope temps(masm()); \
+ Register scratch = temps.AcquireSameSizeAs(i.Input##reg(2)); \
+ __ Mvn(scratch, i.Input##reg(2)); \
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
+ __ Ldclral##suffix(scratch, i.Output##reg(), \
+ MemOperand(i.TempRegister(0))); \
+ } else { \
+ Label binop; \
+ __ Bind(&binop); \
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
+ __ ldaxr##suffix(i.Output##reg(), i.TempRegister(0)); \
+ __ And(i.Temp##reg(1), i.Output##reg(), Operand(i.Input##reg(2))); \
+ __ stlxr##suffix(i.TempRegister32(2), i.Temp##reg(1), \
+ i.TempRegister(0)); \
+ __ Cbnz(i.TempRegister32(2), &binop); \
+ } \
+ } while (0)
+
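[Annotation, not part of the patch.] Similarly, LSE has no atomic AND, only an atomic bit-clear (Ldclral), so the macro inverts the operand first: ANDing with m is the same as clearing the bits of ~m. Sketch:

#include <atomic>
#include <cstdint>

// Model of Ldclral's semantics: old = *cell; *cell = old & ~bits; return old.
uint32_t FetchClearSketch(std::atomic<uint32_t>& cell, uint32_t bits) {
  return cell.fetch_and(~bits, std::memory_order_seq_cst);
}

uint32_t AtomicAndSketch(std::atomic<uint32_t>& cell, uint32_t mask) {
  return FetchClearSketch(cell, ~mask);  // Mvn + Ldclral == atomic AND
}
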
+#define ASSEMBLE_ATOMIC_BINOP(suffix, bin_instr, lse_instr, reg) \
+ do { \
+ __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ if (CpuFeatures::IsSupported(LSE)) { \
+ CpuFeatureScope scope(masm(), LSE); \
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
+ __ lse_instr##suffix(i.Input##reg(2), i.Output##reg(), \
+ MemOperand(i.TempRegister(0))); \
+ } else { \
+ Label binop; \
+ __ Bind(&binop); \
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
+ __ ldaxr##suffix(i.Output##reg(), i.TempRegister(0)); \
+ __ bin_instr(i.Temp##reg(1), i.Output##reg(), Operand(i.Input##reg(2))); \
+ __ stlxr##suffix(i.TempRegister32(2), i.Temp##reg(1), \
+ i.TempRegister(0)); \
+ __ Cbnz(i.TempRegister32(2), &binop); \
+ } \
} while (0)
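
[Annotation, not part of the patch.] The remaining binops routed through this macro (see ATOMIC_BINOP_CASE below) map one-to-one onto LSE load-op instructions, which is why only Sub and And needed the rewrites above. A sketch of the correspondences, assuming sequentially consistent ordering throughout:

#include <atomic>
#include <cstdint>

void LseMappingsSketch(std::atomic<uint64_t>& cell, uint64_t v) {
  cell.fetch_add(v);  // Add -> Ldaddal
  cell.fetch_or(v);   // Or  -> Ldsetal
  cell.fetch_xor(v);  // Xor -> Ldeoral
}
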
#define ASSEMBLE_IEEE754_BINOP(name) \
do { \
- FrameScope scope(tasm(), StackFrame::MANUAL); \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
__ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2); \
} while (0)
#define ASSEMBLE_IEEE754_UNOP(name) \
do { \
- FrameScope scope(tasm(), StackFrame::MANUAL); \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
__ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \
} while (0)
@@ -557,7 +654,7 @@ void EmitFpOrNeonUnop(TurboAssembler* tasm, Fn fn, Instruction* instr,
__ asm_imm(i.OutputSimd128Register().format(), \
i.InputSimd128Register(0).format(), i.InputInt##width(1)); \
} else { \
- UseScratchRegisterScope temps(tasm()); \
+ UseScratchRegisterScope temps(masm()); \
VRegister tmp = temps.AcquireQ(); \
Register shift = temps.Acquire##gp(); \
constexpr int mask = (1 << width) - 1; \
@@ -577,7 +674,7 @@ void EmitFpOrNeonUnop(TurboAssembler* tasm, Fn fn, Instruction* instr,
__ asm_imm(i.OutputSimd128Register().format(), \
i.InputSimd128Register(0).format(), i.InputInt##width(1)); \
} else { \
- UseScratchRegisterScope temps(tasm()); \
+ UseScratchRegisterScope temps(masm()); \
VRegister tmp = temps.AcquireQ(); \
Register shift = temps.Acquire##gp(); \
constexpr int mask = (1 << width) - 1; \
@@ -591,7 +688,7 @@ void EmitFpOrNeonUnop(TurboAssembler* tasm, Fn fn, Instruction* instr,
void CodeGenerator::AssembleDeconstructFrame() {
__ Mov(sp, fp);
- __ Pop<TurboAssembler::kAuthLR>(fp, lr);
+ __ Pop<MacroAssembler::kAuthLR>(fp, lr);
unwinding_info_writer_.MarkFrameDeconstructed(__ pc_offset());
}
@@ -605,7 +702,7 @@ void CodeGenerator::AssemblePrepareTailCall() {
namespace {
-void AdjustStackPointerForTailCall(TurboAssembler* tasm,
+void AdjustStackPointerForTailCall(MacroAssembler* masm,
FrameAccessState* state,
int new_slot_above_sp,
bool allow_shrinkage = true) {
@@ -614,10 +711,10 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm,
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
DCHECK_EQ(stack_slot_delta % 2, 0);
if (stack_slot_delta > 0) {
- tasm->Claim(stack_slot_delta);
+ masm->Claim(stack_slot_delta);
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
- tasm->Drop(-stack_slot_delta);
+ masm->Drop(-stack_slot_delta);
state->IncreaseSPDelta(stack_slot_delta);
}
}
@@ -626,14 +723,14 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm,
void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
int first_unused_slot_offset) {
- AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
first_unused_slot_offset, false);
}
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
int first_unused_slot_offset) {
DCHECK_EQ(first_unused_slot_offset % 2, 0);
- AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
first_unused_slot_offset);
DCHECK(instr->IsTailCall());
InstructionOperandConverter g(this, instr);
@@ -645,34 +742,14 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
// Check that {kJavaScriptCallCodeStartRegister} is correct.
void CodeGenerator::AssembleCodeStartRegisterCheck() {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Register scratch = temps.AcquireX();
__ ComputeCodeStartAddress(scratch);
__ cmp(scratch, kJavaScriptCallCodeStartRegister);
__ Assert(eq, AbortReason::kWrongFunctionCodeStart);
}
-// Check if the code object is marked for deoptimization. If it is, then it
-// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
-// to:
-// 1. read from memory the word that contains that bit, which can be found in
-// the flags in the referenced {CodeDataContainer} object;
-// 2. test kMarkedForDeoptimizationBit in those flags; and
-// 3. if it is not zero then it jumps to the builtin.
-void CodeGenerator::BailoutIfDeoptimized() {
- UseScratchRegisterScope temps(tasm());
- Register scratch = temps.AcquireX();
- int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
- __ LoadTaggedPointerField(
- scratch, MemOperand(kJavaScriptCallCodeStartRegister, offset));
- __ Ldr(scratch.W(),
- FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
- Label not_deoptimized;
- __ Tbz(scratch.W(), Code::kMarkedForDeoptimizationBit, &not_deoptimized);
- __ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
- RelocInfo::CODE_TARGET);
- __ Bind(&not_deoptimized);
-}
+void CodeGenerator::BailoutIfDeoptimized() { __ BailoutIfDeoptimized(); }
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
@@ -724,7 +801,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Jump(wasm_code, constant.rmode());
} else {
Register target = i.InputRegister(0);
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
temps.Exclude(x17);
__ Mov(x17, target);
__ Jump(x17);
@@ -756,7 +833,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
temps.Exclude(x17);
__ Mov(x17, reg);
__ Jump(x17);
@@ -769,17 +846,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
if (v8_flags.debug_code) {
// Check the function's context matches the context argument.
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
Register temp = scope.AcquireX();
- __ LoadTaggedPointerField(
- temp, FieldMemOperand(func, JSFunction::kContextOffset));
+ __ LoadTaggedField(temp,
+ FieldMemOperand(func, JSFunction::kContextOffset));
__ cmp(cp, temp);
__ Assert(eq, AbortReason::kWrongFunctionContext);
}
static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
- __ LoadTaggedPointerField(x2,
- FieldMemOperand(func, JSFunction::kCodeOffset));
- __ CallCodeTObject(x2);
+ __ LoadTaggedField(x2, FieldMemOperand(func, JSFunction::kCodeOffset));
+ __ CallCodeObject(x2);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
@@ -879,7 +955,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+ FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE);
__ Call(BUILTIN_CODE(isolate(), AbortCSADcheck),
RelocInfo::CODE_TARGET);
}
@@ -947,8 +1023,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArchStoreWithWriteBarrier: {
- RecordWriteMode mode =
- static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ RecordWriteMode mode = RecordWriteModeField::decode(instr->opcode());
AddressingMode addressing_mode =
AddressingModeField::decode(instr->opcode());
Register object = i.InputRegister(0);
@@ -971,19 +1046,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
auto ool = zone()->New<OutOfLineRecordWrite>(
this, object, offset, value, mode, DetermineStubCallMode(),
&unwinding_info_writer_);
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ StoreTaggedField(value, MemOperand(object, offset));
if (mode > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value, ool->exit());
}
__ CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask,
- eq, ool->entry());
+ ne, ool->entry());
__ Bind(ool->exit());
break;
}
case kArchAtomicStoreWithWriteBarrier: {
DCHECK_EQ(AddressingModeField::decode(instr->opcode()), kMode_MRR);
- RecordWriteMode mode =
- static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ RecordWriteMode mode = RecordWriteModeField::decode(instr->opcode());
Register object = i.InputRegister(0);
Register offset = i.InputRegister(1);
Register value = i.InputRegister(2);
@@ -995,7 +1070,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ JumpIfSmi(value, ool->exit());
}
__ CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask,
- eq, ool->entry());
+ ne, ool->entry());
__ Bind(ool->exit());
break;
}
@@ -1070,39 +1145,39 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_IEEE754_UNOP(tanh);
break;
case kArm64Float32RoundDown:
- EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintm, instr, i, kFormatS,
+ EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintm, instr, i, kFormatS,
kFormat4S);
break;
case kArm64Float64RoundDown:
- EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintm, instr, i, kFormatD,
+ EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintm, instr, i, kFormatD,
kFormat2D);
break;
case kArm64Float32RoundUp:
- EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintp, instr, i, kFormatS,
+ EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintp, instr, i, kFormatS,
kFormat4S);
break;
case kArm64Float64RoundUp:
- EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintp, instr, i, kFormatD,
+ EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintp, instr, i, kFormatD,
kFormat2D);
break;
case kArm64Float64RoundTiesAway:
- EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frinta, instr, i, kFormatD,
+ EmitFpOrNeonUnop(masm(), &MacroAssembler::Frinta, instr, i, kFormatD,
kFormat2D);
break;
case kArm64Float32RoundTruncate:
- EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintz, instr, i, kFormatS,
+ EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintz, instr, i, kFormatS,
kFormat4S);
break;
case kArm64Float64RoundTruncate:
- EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintz, instr, i, kFormatD,
+ EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintz, instr, i, kFormatD,
kFormat2D);
break;
case kArm64Float32RoundTiesEven:
- EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintn, instr, i, kFormatS,
+ EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintn, instr, i, kFormatS,
kFormat4S);
break;
case kArm64Float64RoundTiesEven:
- EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintn, instr, i, kFormatD,
+ EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintn, instr, i, kFormatD,
kFormat2D);
break;
case kArm64Add:
@@ -1333,14 +1408,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Udiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
break;
case kArm64Imod: {
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
Register temp = scope.AcquireX();
__ Sdiv(temp, i.InputRegister(0), i.InputRegister(1));
__ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
break;
}
case kArm64Imod32: {
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
Register temp = scope.AcquireW();
__ Sdiv(temp, i.InputRegister32(0), i.InputRegister32(1));
__ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
@@ -1348,14 +1423,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArm64Umod: {
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
Register temp = scope.AcquireX();
__ Udiv(temp, i.InputRegister(0), i.InputRegister(1));
__ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
break;
}
case kArm64Umod32: {
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
Register temp = scope.AcquireW();
__ Udiv(temp, i.InputRegister32(0), i.InputRegister32(1));
__ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
@@ -1669,7 +1744,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArm64Float64Mod: {
// TODO(turbofan): implement directly.
- FrameScope scope(tasm(), StackFrame::MANUAL);
+ FrameScope scope(masm(), StackFrame::MANUAL);
DCHECK_EQ(d0, i.InputDoubleRegister(0));
DCHECK_EQ(d1, i.InputDoubleRegister(1));
DCHECK_EQ(d0, i.OutputDoubleRegister());
@@ -1907,25 +1982,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Ldr(i.OutputRegister(), i.MemoryOperand());
break;
case kArm64LdrDecompressTaggedSigned:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ DecompressTaggedSigned(i.OutputRegister(), i.MemoryOperand());
break;
- case kArm64LdrDecompressTaggedPointer:
- __ DecompressTaggedPointer(i.OutputRegister(), i.MemoryOperand());
- break;
- case kArm64LdrDecompressAnyTagged:
- __ DecompressAnyTagged(i.OutputRegister(), i.MemoryOperand());
+ case kArm64LdrDecompressTagged:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
+ __ DecompressTagged(i.OutputRegister(), i.MemoryOperand());
break;
case kArm64LdarDecompressTaggedSigned:
__ AtomicDecompressTaggedSigned(i.OutputRegister(), i.InputRegister(0),
i.InputRegister(1), i.TempRegister(0));
break;
- case kArm64LdarDecompressTaggedPointer:
- __ AtomicDecompressTaggedPointer(i.OutputRegister(), i.InputRegister(0),
- i.InputRegister(1), i.TempRegister(0));
- break;
- case kArm64LdarDecompressAnyTagged:
- __ AtomicDecompressAnyTagged(i.OutputRegister(), i.InputRegister(0),
- i.InputRegister(1), i.TempRegister(0));
+ case kArm64LdarDecompressTagged:
+ __ AtomicDecompressTagged(i.OutputRegister(), i.InputRegister(0),
+ i.InputRegister(1), i.TempRegister(0));
break;
case kArm64LdrDecodeSandboxedPointer:
__ LoadSandboxedPointerField(i.OutputRegister(), i.MemoryOperand());
@@ -1935,6 +2005,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Str(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
break;
case kArm64StrCompressTagged:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ StoreTaggedField(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
break;
case kArm64StlrCompressTagged:
@@ -2013,75 +2084,109 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_ATOMIC_STORE_INTEGER(Stlr, Register);
break;
case kAtomicExchangeInt8:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrb, stlxrb, Register32);
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(b, Register32);
__ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
case kAtomicExchangeUint8:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrb, stlxrb, Register32);
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(b, Register32);
break;
case kAtomicExchangeInt16:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrh, stlxrh, Register32);
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(h, Register32);
__ Sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
case kAtomicExchangeUint16:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrh, stlxrh, Register32);
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(h, Register32);
break;
case kAtomicExchangeWord32:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxr, stlxr, Register32);
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(, Register32);
break;
case kArm64Word64AtomicExchangeUint64:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxr, stlxr, Register);
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(, Register);
break;
case kAtomicCompareExchangeInt8:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrb, stlxrb, UXTB,
- Register32);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(b, UXTB, Register32);
__ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
case kAtomicCompareExchangeUint8:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrb, stlxrb, UXTB,
- Register32);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(b, UXTB, Register32);
break;
case kAtomicCompareExchangeInt16:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrh, stlxrh, UXTH,
- Register32);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(h, UXTH, Register32);
__ Sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
case kAtomicCompareExchangeUint16:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrh, stlxrh, UXTH,
- Register32);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(h, UXTH, Register32);
break;
case kAtomicCompareExchangeWord32:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxr, stlxr, UXTW, Register32);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(, UXTW, Register32);
break;
case kArm64Word64AtomicCompareExchangeUint64:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxr, stlxr, UXTX, Register);
- break;
-#define ATOMIC_BINOP_CASE(op, inst) \
- case kAtomic##op##Int8: \
- ASSEMBLE_ATOMIC_BINOP(ldaxrb, stlxrb, inst, Register32); \
- __ Sxtb(i.OutputRegister(0), i.OutputRegister(0)); \
- break; \
- case kAtomic##op##Uint8: \
- ASSEMBLE_ATOMIC_BINOP(ldaxrb, stlxrb, inst, Register32); \
- break; \
- case kAtomic##op##Int16: \
- ASSEMBLE_ATOMIC_BINOP(ldaxrh, stlxrh, inst, Register32); \
- __ Sxth(i.OutputRegister(0), i.OutputRegister(0)); \
- break; \
- case kAtomic##op##Uint16: \
- ASSEMBLE_ATOMIC_BINOP(ldaxrh, stlxrh, inst, Register32); \
- break; \
- case kAtomic##op##Word32: \
- ASSEMBLE_ATOMIC_BINOP(ldaxr, stlxr, inst, Register32); \
- break; \
- case kArm64Word64Atomic##op##Uint64: \
- ASSEMBLE_ATOMIC_BINOP(ldaxr, stlxr, inst, Register); \
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(, UXTX, Register);
+ break;
+ case kAtomicSubInt8:
+ ASSEMBLE_ATOMIC_SUB(b, Register32);
+ __ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
+ break;
+ case kAtomicSubUint8:
+ ASSEMBLE_ATOMIC_SUB(b, Register32);
+ break;
+ case kAtomicSubInt16:
+ ASSEMBLE_ATOMIC_SUB(h, Register32);
+ __ Sxth(i.OutputRegister(0), i.OutputRegister(0));
+ break;
+ case kAtomicSubUint16:
+ ASSEMBLE_ATOMIC_SUB(h, Register32);
+ break;
+ case kAtomicSubWord32:
+ ASSEMBLE_ATOMIC_SUB(, Register32);
+ break;
+ case kArm64Word64AtomicSubUint64:
+ ASSEMBLE_ATOMIC_SUB(, Register);
+ break;
+ case kAtomicAndInt8:
+ ASSEMBLE_ATOMIC_AND(b, Register32);
+ __ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
+ break;
+ case kAtomicAndUint8:
+ ASSEMBLE_ATOMIC_AND(b, Register32);
+ break;
+ case kAtomicAndInt16:
+ ASSEMBLE_ATOMIC_AND(h, Register32);
+ __ Sxth(i.OutputRegister(0), i.OutputRegister(0));
+ break;
+ case kAtomicAndUint16:
+ ASSEMBLE_ATOMIC_AND(h, Register32);
+ break;
+ case kAtomicAndWord32:
+ ASSEMBLE_ATOMIC_AND(, Register32);
+ break;
+ case kArm64Word64AtomicAndUint64:
+ ASSEMBLE_ATOMIC_AND(, Register);
+ break;
+#define ATOMIC_BINOP_CASE(op, inst, lse_instr) \
+ case kAtomic##op##Int8: \
+ ASSEMBLE_ATOMIC_BINOP(b, inst, lse_instr, Register32); \
+ __ Sxtb(i.OutputRegister(0), i.OutputRegister(0)); \
+ break; \
+ case kAtomic##op##Uint8: \
+ ASSEMBLE_ATOMIC_BINOP(b, inst, lse_instr, Register32); \
+ break; \
+ case kAtomic##op##Int16: \
+ ASSEMBLE_ATOMIC_BINOP(h, inst, lse_instr, Register32); \
+ __ Sxth(i.OutputRegister(0), i.OutputRegister(0)); \
+ break; \
+ case kAtomic##op##Uint16: \
+ ASSEMBLE_ATOMIC_BINOP(h, inst, lse_instr, Register32); \
+ break; \
+ case kAtomic##op##Word32: \
+ ASSEMBLE_ATOMIC_BINOP(, inst, lse_instr, Register32); \
+ break; \
+ case kArm64Word64Atomic##op##Uint64: \
+ ASSEMBLE_ATOMIC_BINOP(, inst, lse_instr, Register); \
break;
- ATOMIC_BINOP_CASE(Add, Add)
- ATOMIC_BINOP_CASE(Sub, Sub)
- ATOMIC_BINOP_CASE(And, And)
- ATOMIC_BINOP_CASE(Or, Orr)
- ATOMIC_BINOP_CASE(Xor, Eor)
+ ATOMIC_BINOP_CASE(Add, Add, Ldaddal)
+ ATOMIC_BINOP_CASE(Or, Orr, Ldsetal)
+ ATOMIC_BINOP_CASE(Xor, Eor, Ldeoral)
#undef ATOMIC_BINOP_CASE
#undef ASSEMBLE_SHIFT
#undef ASSEMBLE_ATOMIC_LOAD_INTEGER
@@ -2178,6 +2283,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(2).Format(f)); \
break; \
}
+#define SIMD_DESTRUCTIVE_RELAXED_FUSED_CASE(Op, Instr, FORMAT) \
+ case Op: { \
+ VRegister dst = i.OutputSimd128Register().V##FORMAT(); \
+ DCHECK_EQ(dst, i.InputSimd128Register(2).V##FORMAT()); \
+ __ Instr(dst, i.InputSimd128Register(0).V##FORMAT(), \
+ i.InputSimd128Register(1).V##FORMAT()); \
+ break; \
+ }
SIMD_BINOP_LANE_SIZE_CASE(kArm64FMin, Fmin);
SIMD_BINOP_LANE_SIZE_CASE(kArm64FMax, Fmax);
SIMD_UNOP_LANE_SIZE_CASE(kArm64FAbs, Fabs);
@@ -2293,8 +2406,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_FCM_L_CASE(kArm64FLe, le, ge);
SIMD_FCM_G_CASE(kArm64FGt, gt);
SIMD_FCM_G_CASE(kArm64FGe, ge);
- SIMD_DESTRUCTIVE_BINOP_CASE(kArm64F64x2Qfma, Fmla, 2D);
- SIMD_DESTRUCTIVE_BINOP_CASE(kArm64F64x2Qfms, Fmls, 2D);
+ SIMD_DESTRUCTIVE_RELAXED_FUSED_CASE(kArm64F64x2Qfma, Fmla, 2D);
+ SIMD_DESTRUCTIVE_RELAXED_FUSED_CASE(kArm64F64x2Qfms, Fmls, 2D);
case kArm64F64x2Pmin: {
VRegister dst = i.OutputSimd128Register().V2D();
VRegister lhs = i.InputSimd128Register(0).V2D();
@@ -2327,8 +2440,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1).Format(s_f), i.InputInt8(2));
break;
}
- SIMD_DESTRUCTIVE_BINOP_CASE(kArm64F32x4Qfma, Fmla, 4S);
- SIMD_DESTRUCTIVE_BINOP_CASE(kArm64F32x4Qfms, Fmls, 4S);
+ SIMD_DESTRUCTIVE_RELAXED_FUSED_CASE(kArm64F32x4Qfma, Fmla, 4S);
+ SIMD_DESTRUCTIVE_RELAXED_FUSED_CASE(kArm64F32x4Qfms, Fmls, 4S);
case kArm64F32x4Pmin: {
VRegister dst = i.OutputSimd128Register().V4S();
VRegister lhs = i.InputSimd128Register(0).V4S();
@@ -2380,7 +2493,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_BINOP_LANE_SIZE_CASE(kArm64IAdd, Add);
SIMD_BINOP_LANE_SIZE_CASE(kArm64ISub, Sub);
case kArm64I64x2Mul: {
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
VRegister dst = i.OutputSimd128Register();
VRegister src1 = i.InputSimd128Register(0);
VRegister src2 = i.InputSimd128Register(1);
@@ -2481,7 +2594,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_BINOP_LANE_SIZE_CASE(kArm64IGtU, Cmhi);
SIMD_BINOP_LANE_SIZE_CASE(kArm64IGeU, Cmhs);
case kArm64I32x4BitMask: {
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
Register dst = i.OutputRegister32();
VRegister src = i.InputSimd128Register(0);
VRegister tmp = scope.AcquireQ();
@@ -2497,7 +2610,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArm64I32x4DotI16x8S: {
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
VRegister lhs = i.InputSimd128Register(0);
VRegister rhs = i.InputSimd128Register(1);
VRegister tmp1 = scope.AcquireV(kFormat4S);
@@ -2508,7 +2621,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArm64I16x8DotI8x16S: {
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
VRegister lhs = i.InputSimd128Register(0);
VRegister rhs = i.InputSimd128Register(1);
VRegister tmp1 = scope.AcquireV(kFormat8H);
@@ -2519,17 +2632,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArm64I32x4DotI8x16AddS: {
- UseScratchRegisterScope scope(tasm());
- VRegister lhs = i.InputSimd128Register(0);
- VRegister rhs = i.InputSimd128Register(1);
- VRegister tmp1 = scope.AcquireV(kFormat8H);
- VRegister tmp2 = scope.AcquireV(kFormat8H);
- __ Smull(tmp1, lhs.V8B(), rhs.V8B());
- __ Smull2(tmp2, lhs.V16B(), rhs.V16B());
- __ Addp(tmp1, tmp1, tmp2);
- __ Saddlp(tmp1.V4S(), tmp1);
- __ Add(i.OutputSimd128Register().V4S(), tmp1.V4S(),
- i.InputSimd128Register(2).V4S());
+ if (CpuFeatures::IsSupported(DOTPROD)) {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(2));
+ __ Sdot(i.InputSimd128Register(2).V4S(),
+ i.InputSimd128Register(0).V16B(),
+ i.InputSimd128Register(1).V16B());
+
+ } else {
+ UseScratchRegisterScope scope(masm());
+ VRegister lhs = i.InputSimd128Register(0);
+ VRegister rhs = i.InputSimd128Register(1);
+ VRegister tmp1 = scope.AcquireV(kFormat8H);
+ VRegister tmp2 = scope.AcquireV(kFormat8H);
+ __ Smull(tmp1, lhs.V8B(), rhs.V8B());
+ __ Smull2(tmp2, lhs.V16B(), rhs.V16B());
+ __ Addp(tmp1, tmp1, tmp2);
+ __ Saddlp(tmp1.V4S(), tmp1);
+ __ Add(i.OutputSimd128Register().V4S(), tmp1.V4S(),
+ i.InputSimd128Register(2).V4S());
+ }
break;
}
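
[Annotation, not part of the patch.] Reference semantics for kArm64I32x4DotI8x16AddS, which both the single-instruction Sdot path and the Smull/Smull2/Addp/Saddlp fallback above compute: each i32 lane accumulates a 4-way signed i8 dot product. A scalar sketch:

#include <array>
#include <cstdint>

std::array<int32_t, 4> DotI8x16AddSSketch(const std::array<int8_t, 16>& a,
                                          const std::array<int8_t, 16>& b,
                                          std::array<int32_t, 4> acc) {
  for (int lane = 0; lane < 4; ++lane) {
    int32_t dot = 0;
    for (int j = 0; j < 4; ++j) {
      dot += int32_t{a[4 * lane + j]} * int32_t{b[4 * lane + j]};
    }
    acc[lane] += dot;  // the final Add into InputSimd128Register(2)
  }
  return acc;
}
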
case kArm64IExtractLaneU: {
@@ -2556,7 +2677,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
VRegister dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
VRegister temp = scope.AcquireV(kFormat4S);
if (dst == src1) {
__ Mov(temp, src1.V4S());
@@ -2577,7 +2698,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
VRegister dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
VRegister temp = scope.AcquireV(kFormat4S);
if (dst == src1) {
__ Mov(temp, src1.V4S());
@@ -2591,7 +2712,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_BINOP_LANE_SIZE_CASE(kArm64ISubSatU, Uqsub);
SIMD_BINOP_CASE(kArm64I16x8Q15MulRSatS, Sqrdmulh, 8H);
case kArm64I16x8BitMask: {
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
Register dst = i.OutputRegister32();
VRegister src = i.InputSimd128Register(0);
VRegister tmp = scope.AcquireQ();
@@ -2618,7 +2739,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
VRegister dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
VRegister temp = scope.AcquireV(kFormat8H);
if (dst == src1) {
__ Mov(temp, src1.V8H());
@@ -2636,7 +2757,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
VRegister dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
VRegister temp = scope.AcquireV(kFormat8H);
if (dst == src1) {
__ Mov(temp, src1.V8H());
@@ -2647,7 +2768,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArm64I8x16BitMask: {
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
Register dst = i.OutputRegister32();
VRegister src = i.InputSimd128Register(0);
VRegister tmp = scope.AcquireQ();
@@ -2736,7 +2857,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
src1 = i.InputSimd128Register(1).V4S();
// Check for in-place shuffles.
// If dst == src0 == src1, then the shuffle is unary and we only use src0.
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
VRegister temp = scope.AcquireV(kFormat4S);
if (dst == src0) {
__ Mov(temp, src0);
@@ -2802,7 +2923,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(0, (imm1 | imm2) & (src0 == src1 ? 0xF0F0F0F0F0F0F0F0
: 0xE0E0E0E0E0E0E0E0));
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
VRegister temp = scope.AcquireV(kFormat16B);
__ Movi(temp, imm2, imm1);
@@ -2881,7 +3002,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArm64V128AnyTrue: {
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
// For AnyTrue, the format does not matter; also, we would like to avoid
// an expensive horizontal reduction.
VRegister temp = scope.AcquireV(kFormat4S);
@@ -2894,7 +3015,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
#define SIMD_REDUCE_OP_CASE(Op, Instr, format, FORMAT) \
case Op: { \
- UseScratchRegisterScope scope(tasm()); \
+ UseScratchRegisterScope scope(masm()); \
VRegister temp = scope.AcquireV(format); \
__ Instr(temp, i.InputSimd128Register(0).V##FORMAT()); \
__ Umov(i.OutputRegister32(), temp, 0); \
@@ -2915,6 +3036,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#undef SIMD_BINOP_LANE_SIZE_CASE
#undef SIMD_DESTRUCTIVE_BINOP_CASE
#undef SIMD_DESTRUCTIVE_BINOP_LANE_SIZE_CASE
+#undef SIMD_DESTRUCTIVE_RELAXED_FUSED_CASE
#undef SIMD_REDUCE_OP_CASE
#undef ASSEMBLE_SIMD_SHIFT_LEFT
#undef ASSEMBLE_SIMD_SHIFT_RIGHT
@@ -3047,7 +3169,7 @@ void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
Arm64OperandConverter i(this, instr);
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
Register input = i.InputRegister32(0);
Register temp = scope.AcquireX();
size_t const case_count = instr->InputCount() - 2;
@@ -3058,12 +3180,18 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
int entry_size_log2 = 2;
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
++entry_size_log2; // Account for BTI.
+ constexpr int instructions_per_jump_target = 1;
+#else
+ constexpr int instructions_per_jump_target = 0;
#endif
+ constexpr int instructions_per_case = 1 + instructions_per_jump_target;
__ Add(temp, temp, Operand(input, UXTW, entry_size_log2));
__ Br(temp);
{
- TurboAssembler::BlockPoolsScope block_pools(tasm(),
- case_count * kInstrSize);
+ const size_t instruction_count =
+ case_count * instructions_per_case + instructions_per_jump_target;
+ MacroAssembler::BlockPoolsScope block_pools(masm(),
+ instruction_count * kInstrSize);
__ Bind(&table);
for (size_t index = 0; index < case_count; ++index) {
__ JumpTarget();
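
[Annotation, not part of the patch.] The pool-blocking window above must now cover one branch per case plus one extra instruction per jump target when BTI landing pads are emitted. A sketch of the byte count, using the 4-byte AArch64 instruction size:

#include <cstddef>

constexpr size_t JumpTableBytesSketch(size_t case_count, bool bti_enabled) {
  const size_t per_jump_target = bti_enabled ? 1 : 0;  // BTI landing pad
  const size_t per_case = 1 + per_jump_target;         // branch + optional BTI
  const size_t kInstrBytes = 4;
  return (case_count * per_case + per_jump_target) * kInstrBytes;
}
static_assert(JumpTableBytesSketch(3, true) == 28, "(3*2 + 1) * 4");
static_assert(JumpTableBytesSketch(3, false) == 12, "(3*1 + 0) * 4");
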
@@ -3121,10 +3249,10 @@ void CodeGenerator::AssembleConstructFrame() {
DCHECK_EQ(required_slots % 2, 1);
__ Prologue();
// Update required_slots count since we have just claimed one extra slot.
- static_assert(TurboAssembler::kExtraSlotClaimedByPrologue == 1);
- required_slots -= TurboAssembler::kExtraSlotClaimedByPrologue;
+ static_assert(MacroAssembler::kExtraSlotClaimedByPrologue == 1);
+ required_slots -= MacroAssembler::kExtraSlotClaimedByPrologue;
} else {
- __ Push<TurboAssembler::kSignLR>(lr, fp);
+ __ Push<MacroAssembler::kSignLR>(lr, fp);
__ Mov(fp, sp);
}
unwinding_info_writer_.MarkFrameConstructed(__ pc_offset());
@@ -3147,7 +3275,7 @@ void CodeGenerator::AssembleConstructFrame() {
// One unoptimized frame slot has already been claimed when the actual
// arguments count was pushed.
required_slots -=
- unoptimized_frame_slots - TurboAssembler::kExtraSlotClaimedByPrologue;
+ unoptimized_frame_slots - MacroAssembler::kExtraSlotClaimedByPrologue;
}
#if V8_ENABLE_WEBASSEMBLY
@@ -3161,7 +3289,7 @@ void CodeGenerator::AssembleConstructFrame() {
// exception unconditionally. Thereby we can avoid the integer overflow
// check in the condition code.
if (required_slots * kSystemPointerSize < v8_flags.stack_size * KB) {
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
Register scratch = scope.AcquireX();
__ Ldr(scratch, FieldMemOperand(
kWasmInstanceRegister,
@@ -3174,7 +3302,7 @@ void CodeGenerator::AssembleConstructFrame() {
{
// Finish the frame that hasn't been fully built yet.
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Register scratch = temps.AcquireX();
__ Mov(scratch,
StackFrame::TypeToMarker(info()->GetOutputStackFrameType()));
@@ -3205,7 +3333,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ Claim(required_slots);
break;
case CallDescriptor::kCallCodeObject: {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Register scratch = temps.AcquireX();
__ Mov(scratch,
StackFrame::TypeToMarker(info()->GetOutputStackFrameType()));
@@ -3221,7 +3349,7 @@ void CodeGenerator::AssembleConstructFrame() {
}
#if V8_ENABLE_WEBASSEMBLY
case CallDescriptor::kCallWasmFunction: {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Register scratch = temps.AcquireX();
__ Mov(scratch,
StackFrame::TypeToMarker(info()->GetOutputStackFrameType()));
@@ -3231,10 +3359,13 @@ void CodeGenerator::AssembleConstructFrame() {
}
case CallDescriptor::kCallWasmImportWrapper:
case CallDescriptor::kCallWasmCapiFunction: {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Register scratch = temps.AcquireX();
__ Mov(scratch,
StackFrame::TypeToMarker(info()->GetOutputStackFrameType()));
+ // This stack slot is only used for printing stack traces in V8. Also,
+ // it holds a WasmApiFunctionRef instead of the instance itself, which
+ // is taken care of in the frames accessors.
__ Push(scratch, kWasmInstanceRegister);
int extra_slots =
call_descriptor->kind() == CallDescriptor::kCallWasmImportWrapper
@@ -3247,7 +3378,7 @@ void CodeGenerator::AssembleConstructFrame() {
case CallDescriptor::kCallAddress:
#if V8_ENABLE_WEBASSEMBLY
if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
- UseScratchRegisterScope temps(tasm());
+ UseScratchRegisterScope temps(masm());
Register scratch = temps.AcquireX();
__ Mov(scratch, StackFrame::TypeToMarker(StackFrame::C_WASM_ENTRY));
__ Push(scratch, padreg);
@@ -3377,15 +3508,15 @@ void CodeGenerator::PrepareForDeoptimizationExits(
false, false,
static_cast<int>(exits->size()) * Deoptimizer::kLazyDeoptExitSize);
- // Check which deopt kinds exist in this Code object, to avoid emitting jumps
- // to unused entries.
+ // Check which deopt kinds exist in this InstructionStream object, to avoid
+ // emitting jumps to unused entries.
bool saw_deopt_kind[kDeoptimizeKindCount] = {false};
for (auto exit : *exits) {
saw_deopt_kind[static_cast<int>(exit->kind())] = true;
}
// Emit the jumps to deoptimization entries.
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
Register scratch = scope.AcquireX();
static_assert(static_cast<int>(kFirstDeoptimizeKind) == 0);
for (int i = 0; i < kDeoptimizeKindCount; i++) {
@@ -3397,11 +3528,71 @@ void CodeGenerator::PrepareForDeoptimizationExits(
}
}
-void CodeGenerator::MoveToTempLocation(InstructionOperand* source) {
+AllocatedOperand CodeGenerator::Push(InstructionOperand* source) {
+ auto rep = LocationOperand::cast(source)->representation();
+ int new_slots = RoundUp<2>(ElementSizeInPointers(rep));
+ Arm64OperandConverter g(this, nullptr);
+ int last_frame_slot_id =
+ frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
+ int sp_delta = frame_access_state_->sp_delta();
+ int slot_id = last_frame_slot_id + sp_delta + new_slots;
+ AllocatedOperand stack_slot(LocationOperand::STACK_SLOT, rep, slot_id);
+ if (source->IsRegister()) {
+ __ Push(padreg, g.ToRegister(source));
+ frame_access_state()->IncreaseSPDelta(new_slots);
+ } else if (source->IsStackSlot()) {
+ UseScratchRegisterScope temps(masm());
+ Register scratch = temps.AcquireX();
+ __ Ldr(scratch, g.ToMemOperand(source, masm()));
+ __ Push(padreg, scratch);
+ frame_access_state()->IncreaseSPDelta(new_slots);
+ } else {
+ // No push instruction for this operand type. Bump the stack pointer and
+ // assemble the move.
+ __ Sub(sp, sp, Operand(new_slots * kSystemPointerSize));
+ frame_access_state()->IncreaseSPDelta(new_slots);
+ AssembleMove(source, &stack_slot);
+ }
+ temp_slots_ += new_slots;
+ return stack_slot;
+}
+
+void CodeGenerator::Pop(InstructionOperand* dest, MachineRepresentation rep) {
+ int new_slots = RoundUp<2>(ElementSizeInPointers(rep));
+ frame_access_state()->IncreaseSPDelta(-new_slots);
+ Arm64OperandConverter g(this, nullptr);
+ if (dest->IsRegister()) {
+ __ Pop(g.ToRegister(dest), padreg);
+ } else if (dest->IsStackSlot()) {
+ UseScratchRegisterScope temps(masm());
+ Register scratch = temps.AcquireX();
+ __ Pop(scratch, padreg);
+ __ Str(scratch, g.ToMemOperand(dest, masm()));
+ } else {
+ int last_frame_slot_id =
+ frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
+ int sp_delta = frame_access_state_->sp_delta();
+ int slot_id = last_frame_slot_id + sp_delta + new_slots;
+ AllocatedOperand stack_slot(LocationOperand::STACK_SLOT, rep, slot_id);
+ AssembleMove(&stack_slot, dest);
+ __ Add(sp, sp, Operand(new_slots * kSystemPointerSize));
+ }
+ temp_slots_ -= new_slots;
+}
+
+void CodeGenerator::PopTempStackSlots() {
+ if (temp_slots_ > 0) {
+ frame_access_state()->IncreaseSPDelta(-temp_slots_);
+ __ add(sp, sp, Operand(temp_slots_ * kSystemPointerSize));
+ temp_slots_ = 0;
+ }
+}
+
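[Annotation, not part of the patch.] Push and Pop above round every temporary allocation up to an even slot count because AArch64 requires sp to stay 16-byte aligned while slots are 8 bytes wide; that is also why each Push/Pop pairs the value with padreg. A sketch of the rounding rule behind RoundUp<2>:

#include <cstdint>

constexpr int RoundUpToEvenSlotsSketch(int slots) { return (slots + 1) & ~1; }
static_assert(RoundUpToEvenSlotsSketch(1) == 2);  // one X register + padreg
static_assert(RoundUpToEvenSlotsSketch(2) == 2);  // a Q register needs no pad
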
+void CodeGenerator::MoveToTempLocation(InstructionOperand* source,
+ MachineRepresentation rep) {
// Must be kept in sync with {MoveTempLocationTo}.
DCHECK(!source->IsImmediate());
- auto rep = LocationOperand::cast(source)->representation();
- move_cycle_.temps.emplace(tasm());
+ move_cycle_.temps.emplace(masm());
auto& temps = *move_cycle_.temps;
// Temporarily exclude the reserved scratch registers while we pick one to
// resolve the move cycle. Re-include them immediately afterwards as they
@@ -3439,7 +3630,7 @@ void CodeGenerator::MoveToTempLocation(InstructionOperand* source) {
scratch_reg.code());
Arm64OperandConverter g(this, nullptr);
if (source->IsStackSlot()) {
- __ Ldr(g.ToDoubleRegister(&scratch), g.ToMemOperand(source, tasm()));
+ __ Ldr(g.ToDoubleRegister(&scratch), g.ToMemOperand(source, masm()));
} else {
DCHECK(source->IsRegister());
__ fmov(g.ToDoubleRegister(&scratch), g.ToRegister(source));
@@ -3452,27 +3643,7 @@ void CodeGenerator::MoveToTempLocation(InstructionOperand* source) {
} else {
// The scratch registers are blocked by pending moves. Use the stack
// instead.
- int new_slots = RoundUp<2>(ElementSizeInPointers(rep));
- Arm64OperandConverter g(this, nullptr);
- if (source->IsRegister()) {
- __ Push(g.ToRegister(source), padreg);
- } else if (source->IsStackSlot()) {
- UseScratchRegisterScope temps2(tasm());
- Register scratch = temps2.AcquireX();
- __ Ldr(scratch, g.ToMemOperand(source, tasm()));
- __ Push(scratch, padreg);
- } else {
- // No push instruction for this operand type. Bump the stack pointer and
- // assemble the move.
- int last_frame_slot_id =
- frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
- int sp_delta = frame_access_state_->sp_delta();
- int temp_slot = last_frame_slot_id + sp_delta + new_slots;
- __ Sub(sp, sp, Operand(new_slots * kSystemPointerSize));
- AllocatedOperand temp(LocationOperand::STACK_SLOT, rep, temp_slot);
- AssembleMove(source, &temp);
- }
- frame_access_state()->IncreaseSPDelta(new_slots);
+ Push(source);
}
}
@@ -3488,7 +3659,7 @@ void CodeGenerator::MoveTempLocationTo(InstructionOperand* dest,
move_cycle_.scratch_reg->code());
Arm64OperandConverter g(this, nullptr);
if (dest->IsStackSlot()) {
- __ Str(g.ToDoubleRegister(&scratch), g.ToMemOperand(dest, tasm()));
+ __ Str(g.ToDoubleRegister(&scratch), g.ToMemOperand(dest, masm()));
} else {
DCHECK(dest->IsRegister());
__ fmov(g.ToRegister(dest), g.ToDoubleRegister(&scratch));
@@ -3499,25 +3670,7 @@ void CodeGenerator::MoveTempLocationTo(InstructionOperand* dest,
AssembleMove(&scratch, dest);
}
} else {
- int new_slots = RoundUp<2>(ElementSizeInPointers(rep));
- frame_access_state()->IncreaseSPDelta(-new_slots);
- Arm64OperandConverter g(this, nullptr);
- if (dest->IsRegister()) {
- __ Pop(padreg, g.ToRegister(dest));
- } else if (dest->IsStackSlot()) {
- UseScratchRegisterScope temps2(tasm());
- Register scratch = temps2.AcquireX();
- __ Pop(padreg, scratch);
- __ Str(scratch, g.ToMemOperand(dest, tasm()));
- } else {
- int last_frame_slot_id =
- frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
- int sp_delta = frame_access_state_->sp_delta();
- int temp_slot = last_frame_slot_id + sp_delta + new_slots;
- AllocatedOperand temp(LocationOperand::STACK_SLOT, rep, temp_slot);
- AssembleMove(&temp, dest);
- __ Add(sp, sp, Operand(new_slots * kSystemPointerSize));
- }
+ Pop(dest, rep);
}
// Restore the default state to release the {UseScratchRegisterScope} and to
// prepare for the next cycle.
@@ -3528,9 +3681,9 @@ void CodeGenerator::SetPendingMove(MoveOperands* move) {
auto move_type = MoveType::InferMove(&move->source(), &move->destination());
if (move_type == MoveType::kStackToStack) {
Arm64OperandConverter g(this, nullptr);
- MemOperand src = g.ToMemOperand(&move->source(), tasm());
- MemOperand dst = g.ToMemOperand(&move->destination(), tasm());
- UseScratchRegisterScope temps(tasm());
+ MemOperand src = g.ToMemOperand(&move->source(), masm());
+ MemOperand dst = g.ToMemOperand(&move->destination(), masm());
+ UseScratchRegisterScope temps(masm());
if (move->source().IsSimd128StackSlot()) {
VRegister temp = temps.AcquireQ();
move_cycle_.scratch_fp_regs.set(temp);
@@ -3545,11 +3698,11 @@ void CodeGenerator::SetPendingMove(MoveOperands* move) {
// Offset doesn't fit into the immediate field so the assembler will emit
// two instructions and use a second temp register.
if ((src.IsImmediateOffset() &&
- !tasm()->IsImmLSScaled(src_offset, src_size) &&
- !tasm()->IsImmLSUnscaled(src_offset)) ||
+ !masm()->IsImmLSScaled(src_offset, src_size) &&
+ !masm()->IsImmLSUnscaled(src_offset)) ||
(dst.IsImmediateOffset() &&
- !tasm()->IsImmLSScaled(dst_offset, dst_size) &&
- !tasm()->IsImmLSUnscaled(dst_offset))) {
+ !masm()->IsImmLSScaled(dst_offset, dst_size) &&
+ !masm()->IsImmLSUnscaled(dst_offset))) {
Register temp = temps.AcquireX();
move_cycle_.scratch_regs.set(temp);
}
@@ -3573,7 +3726,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
Handle<HeapObject> src_object = src.ToHeapObject();
RootIndex index;
if (IsMaterializableFromRoot(src_object, &index)) {
- __ LoadRoot(dst, index);
+ __ LoadTaggedRoot(dst, index);
} else {
// TODO(v8:8977): Even though this mov happens on 32 bits (Note the
// .W()) and we are passing along the RelocInfo, we still haven't made
@@ -3598,7 +3751,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
return;
case MoveType::kRegisterToStack: {
- MemOperand dst = g.ToMemOperand(destination, tasm());
+ MemOperand dst = g.ToMemOperand(destination, masm());
if (source->IsRegister()) {
__ Str(g.ToRegister(source), dst);
} else {
@@ -3613,7 +3766,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
return;
}
case MoveType::kStackToRegister: {
- MemOperand src = g.ToMemOperand(source, tasm());
+ MemOperand src = g.ToMemOperand(source, masm());
if (destination->IsRegister()) {
__ Ldr(g.ToRegister(destination), src);
} else {
@@ -3628,15 +3781,15 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
return;
}
case MoveType::kStackToStack: {
- MemOperand src = g.ToMemOperand(source, tasm());
- MemOperand dst = g.ToMemOperand(destination, tasm());
+ MemOperand src = g.ToMemOperand(source, masm());
+ MemOperand dst = g.ToMemOperand(destination, masm());
if (source->IsSimd128StackSlot()) {
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
VRegister temp = scope.AcquireQ();
__ Ldr(temp, src);
__ Str(temp, dst);
} else {
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
Register temp = scope.AcquireX();
__ Ldr(temp, src);
__ Str(temp, dst);
@@ -3660,9 +3813,9 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
case MoveType::kConstantToStack: {
Constant src = g.ToConstant(source);
- MemOperand dst = g.ToMemOperand(destination, tasm());
+ MemOperand dst = g.ToMemOperand(destination, masm());
if (destination->IsStackSlot()) {
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
Register temp = scope.AcquireX();
MoveConstantToRegister(temp, src);
__ Str(temp, dst);
@@ -3670,7 +3823,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (base::bit_cast<int32_t>(src.ToFloat32()) == 0) {
__ Str(wzr, dst);
} else {
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
VRegister temp = scope.AcquireS();
__ Fmov(temp, src.ToFloat32());
__ Str(temp, dst);
@@ -3680,7 +3833,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (src.ToFloat64().AsUint64() == 0) {
__ Str(xzr, dst);
} else {
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
VRegister temp = scope.AcquireD();
__ Fmov(temp, src.ToFloat64().value());
__ Str(temp, dst);
@@ -3711,8 +3864,8 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
}
return;
case MoveType::kRegisterToStack: {
- UseScratchRegisterScope scope(tasm());
- MemOperand dst = g.ToMemOperand(destination, tasm());
+ UseScratchRegisterScope scope(masm());
+ MemOperand dst = g.ToMemOperand(destination, masm());
if (source->IsRegister()) {
Register temp = scope.AcquireX();
Register src = g.ToRegister(source);
@@ -3720,7 +3873,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ Ldr(src, dst);
__ Str(temp, dst);
} else {
- UseScratchRegisterScope scope(tasm());
+ UseScratchRegisterScope scope(masm());
VRegister src = g.ToDoubleRegister(source);
if (source->IsFloatRegister() || source->IsDoubleRegister()) {
VRegister temp = scope.AcquireD();
@@ -3738,9 +3891,9 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
return;
}
case MoveType::kStackToStack: {
- UseScratchRegisterScope scope(tasm());
- MemOperand src = g.ToMemOperand(source, tasm());
- MemOperand dst = g.ToMemOperand(destination, tasm());
+ UseScratchRegisterScope scope(masm());
+ MemOperand src = g.ToMemOperand(source, masm());
+ MemOperand dst = g.ToMemOperand(destination, masm());
VRegister temp_0 = scope.AcquireD();
VRegister temp_1 = scope.AcquireD();
if (source->IsSimd128StackSlot()) {