Diffstat (limited to 'deps/v8/src/codegen/s390/macro-assembler-s390.cc')
-rw-r--r--  deps/v8/src/codegen/s390/macro-assembler-s390.cc  | 1298
1 file changed, 627 insertions(+), 671 deletions(-)
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.cc b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
index 79c9cd8ec6..7f88a15259 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
@@ -55,7 +55,7 @@ constexpr int kStackSavedSavedFPSizeInBytes =
} // namespace
-void TurboAssembler::DoubleMax(DoubleRegister result_reg,
+void MacroAssembler::DoubleMax(DoubleRegister result_reg,
DoubleRegister left_reg,
DoubleRegister right_reg) {
if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1)) {
@@ -101,7 +101,7 @@ void TurboAssembler::DoubleMax(DoubleRegister result_reg,
bind(&done);
}
-void TurboAssembler::DoubleMin(DoubleRegister result_reg,
+void MacroAssembler::DoubleMin(DoubleRegister result_reg,
DoubleRegister left_reg,
DoubleRegister right_reg) {
if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1)) {
@@ -152,7 +152,7 @@ void TurboAssembler::DoubleMin(DoubleRegister result_reg,
bind(&done);
}
-void TurboAssembler::FloatMax(DoubleRegister result_reg,
+void MacroAssembler::FloatMax(DoubleRegister result_reg,
DoubleRegister left_reg,
DoubleRegister right_reg) {
if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1)) {
@@ -197,7 +197,7 @@ void TurboAssembler::FloatMax(DoubleRegister result_reg,
bind(&done);
}
-void TurboAssembler::FloatMin(DoubleRegister result_reg,
+void MacroAssembler::FloatMin(DoubleRegister result_reg,
DoubleRegister left_reg,
DoubleRegister right_reg) {
if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1)) {
@@ -249,39 +249,39 @@ void TurboAssembler::FloatMin(DoubleRegister result_reg,
bind(&done);
}
-void TurboAssembler::CeilF32(DoubleRegister dst, DoubleRegister src) {
+void MacroAssembler::CeilF32(DoubleRegister dst, DoubleRegister src) {
fiebra(ROUND_TOWARD_POS_INF, dst, src);
}
-void TurboAssembler::CeilF64(DoubleRegister dst, DoubleRegister src) {
+void MacroAssembler::CeilF64(DoubleRegister dst, DoubleRegister src) {
fidbra(ROUND_TOWARD_POS_INF, dst, src);
}
-void TurboAssembler::FloorF32(DoubleRegister dst, DoubleRegister src) {
+void MacroAssembler::FloorF32(DoubleRegister dst, DoubleRegister src) {
fiebra(ROUND_TOWARD_NEG_INF, dst, src);
}
-void TurboAssembler::FloorF64(DoubleRegister dst, DoubleRegister src) {
+void MacroAssembler::FloorF64(DoubleRegister dst, DoubleRegister src) {
fidbra(ROUND_TOWARD_NEG_INF, dst, src);
}
-void TurboAssembler::TruncF32(DoubleRegister dst, DoubleRegister src) {
+void MacroAssembler::TruncF32(DoubleRegister dst, DoubleRegister src) {
fiebra(ROUND_TOWARD_0, dst, src);
}
-void TurboAssembler::TruncF64(DoubleRegister dst, DoubleRegister src) {
+void MacroAssembler::TruncF64(DoubleRegister dst, DoubleRegister src) {
fidbra(ROUND_TOWARD_0, dst, src);
}
-void TurboAssembler::NearestIntF32(DoubleRegister dst, DoubleRegister src) {
+void MacroAssembler::NearestIntF32(DoubleRegister dst, DoubleRegister src) {
fiebra(ROUND_TO_NEAREST_TO_EVEN, dst, src);
}
-void TurboAssembler::NearestIntF64(DoubleRegister dst, DoubleRegister src) {
+void MacroAssembler::NearestIntF64(DoubleRegister dst, DoubleRegister src) {
fidbra(ROUND_TO_NEAREST_TO_EVEN, dst, src);
}
-int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
+int MacroAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion1,
Register exclusion2,
Register exclusion3) const {
@@ -298,7 +298,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
return bytes;
}
-int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register scratch,
+int MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register scratch,
Register exclusion1, Register exclusion2,
Register exclusion3) {
int bytes = 0;
@@ -316,7 +316,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register scratch,
return bytes;
}
-int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register scratch,
+int MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register scratch,
Register exclusion1, Register exclusion2,
Register exclusion3) {
int bytes = 0;
@@ -333,7 +333,7 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register scratch,
return bytes;
}
-void TurboAssembler::LoadFromConstantsTable(Register destination,
+void MacroAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
@@ -343,18 +343,17 @@ void TurboAssembler::LoadFromConstantsTable(Register destination,
CHECK(is_uint19(offset));
DCHECK_NE(destination, r0);
LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
- LoadTaggedPointerField(
- destination,
- FieldMemOperand(destination,
- FixedArray::OffsetOfElementAt(constant_index)),
- r1);
+ LoadTaggedField(destination,
+ FieldMemOperand(destination, FixedArray::OffsetOfElementAt(
+ constant_index)),
+ r1);
}
-void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
+void MacroAssembler::LoadRootRelative(Register destination, int32_t offset) {
LoadU64(destination, MemOperand(kRootRegister, offset));
}
-void TurboAssembler::LoadRootRegisterOffset(Register destination,
+void MacroAssembler::LoadRootRegisterOffset(Register destination,
intptr_t offset) {
if (offset == 0) {
mov(destination, kRootRegister);
@@ -366,7 +365,7 @@ void TurboAssembler::LoadRootRegisterOffset(Register destination,
}
}
-MemOperand TurboAssembler::ExternalReferenceAsOperand(
+MemOperand MacroAssembler::ExternalReferenceAsOperand(
ExternalReference reference, Register scratch) {
if (root_array_available_ && options().enable_root_relative_access) {
int64_t offset =
@@ -396,9 +395,9 @@ MemOperand TurboAssembler::ExternalReferenceAsOperand(
return MemOperand(scratch, 0);
}
-void TurboAssembler::Jump(Register target, Condition cond) { b(cond, target); }
+void MacroAssembler::Jump(Register target, Condition cond) { b(cond, target); }
-void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
+void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
Condition cond) {
Label skip;
@@ -410,13 +409,13 @@ void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
bind(&skip);
}
-void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode,
+void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(!RelocInfo::IsCodeTarget(rmode));
Jump(static_cast<intptr_t>(target), rmode, cond);
}
-void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
+void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
DCHECK_IMPLIES(options().isolate_independent_code,
@@ -431,14 +430,14 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
jump(code, RelocInfo::RELATIVE_CODE_TARGET, cond);
}
-void TurboAssembler::Jump(const ExternalReference& reference) {
+void MacroAssembler::Jump(const ExternalReference& reference) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
Move(scratch, reference);
Jump(scratch);
}
-void TurboAssembler::Call(Register target) {
+void MacroAssembler::Call(Register target) {
// Branch to target via indirect branch
basr(r14, target);
}
@@ -461,7 +460,7 @@ int MacroAssembler::CallSizeNotPredictableCodeSize(Address target,
return size;
}
-void TurboAssembler::Call(Address target, RelocInfo::Mode rmode,
+void MacroAssembler::Call(Address target, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(cond == al);
@@ -469,7 +468,7 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode,
basr(r14, ip);
}
-void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
+void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(RelocInfo::IsCodeTarget(rmode) && cond == al);
@@ -481,12 +480,11 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
CallBuiltin(builtin);
return;
}
- DCHECK(code->IsExecutable());
DCHECK(RelocInfo::IsCodeTarget(rmode));
call(code, rmode);
}
-void TurboAssembler::CallBuiltin(Builtin builtin, Condition cond) {
+void MacroAssembler::CallBuiltin(Builtin builtin, Condition cond) {
ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin));
// Use ip directly instead of using UseScratchRegisterScope, as we do not
// preserve scratch registers across calls.
@@ -503,14 +501,14 @@ void TurboAssembler::CallBuiltin(Builtin builtin, Condition cond) {
Call(ip);
break;
case BuiltinCallJumpMode::kForMksnapshot: {
- Handle<CodeT> code = isolate()->builtins()->code_handle(builtin);
+ Handle<Code> code = isolate()->builtins()->code_handle(builtin);
call(code, RelocInfo::CODE_TARGET);
break;
}
}
}
-void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cond) {
+void MacroAssembler::TailCallBuiltin(Builtin builtin, Condition cond) {
ASM_CODE_COMMENT_STRING(this,
CommentForOffHeapTrampoline("tail call", builtin));
// Use ip directly instead of using UseScratchRegisterScope, as we do not
@@ -529,7 +527,7 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cond) {
break;
case BuiltinCallJumpMode::kForMksnapshot: {
if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
- Handle<CodeT> code = isolate()->builtins()->code_handle(builtin);
+ Handle<Code> code = isolate()->builtins()->code_handle(builtin);
jump(code, RelocInfo::RELATIVE_CODE_TARGET, cond);
} else {
LoadU64(ip, EntryFromBuiltinAsOperand(builtin));
@@ -540,7 +538,7 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cond) {
}
}
-void TurboAssembler::Drop(int count) {
+void MacroAssembler::Drop(int count) {
if (count > 0) {
int total = count * kSystemPointerSize;
if (is_uint12(total)) {
@@ -553,17 +551,14 @@ void TurboAssembler::Drop(int count) {
}
}
-void TurboAssembler::Drop(Register count, Register scratch) {
+void MacroAssembler::Drop(Register count, Register scratch) {
ShiftLeftU64(scratch, count, Operand(kSystemPointerSizeLog2));
AddS64(sp, sp, scratch);
}
-void MacroAssembler::TestCodeTIsMarkedForDeoptimization(Register codet,
- Register scratch) {
- LoadTaggedPointerField(
- scratch, FieldMemOperand(codet, Code::kCodeDataContainerOffset));
- LoadS32(scratch, FieldMemOperand(
- scratch, CodeDataContainer::kKindSpecificFlagsOffset));
+void MacroAssembler::TestCodeIsMarkedForDeoptimization(Register code,
+ Register scratch) {
+ LoadU16(scratch, FieldMemOperand(code, Code::kKindSpecificFlagsOffset));
TestBit(scratch, Code::kMarkedForDeoptimizationBit, scratch);
}
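
With Code and CodeDataContainer merged, the kind-specific flags now sit directly on the Code object as a 16-bit field, so the hunk above replaces a two-load sequence with a single LoadU16 plus a bit test. A minimal C++ sketch of that test, with the bit position assumed for illustration:

    #include <cstdint>

    constexpr int kMarkedForDeoptimizationBit = 0;  // illustrative position

    // Mirrors the LoadU16 + TestBit pair above: read the 16-bit flags
    // word and test a single bit.
    bool IsMarkedForDeoptimization(uint16_t kind_specific_flags) {
      return (kind_specific_flags >> kMarkedForDeoptimizationBit) & 1;
    }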
@@ -572,19 +567,19 @@ Operand MacroAssembler::ClearedValue() const {
static_cast<int32_t>(HeapObjectReference::ClearedValue(isolate()).ptr()));
}
-void TurboAssembler::Call(Label* target) { b(r14, target); }
+void MacroAssembler::Call(Label* target) { b(r14, target); }
-void TurboAssembler::Push(Handle<HeapObject> handle) {
+void MacroAssembler::Push(Handle<HeapObject> handle) {
mov(r0, Operand(handle));
push(r0);
}
-void TurboAssembler::Push(Smi smi) {
+void MacroAssembler::Push(Smi smi) {
mov(r0, Operand(smi));
push(r0);
}
-void TurboAssembler::Move(Register dst, Handle<HeapObject> value,
+void MacroAssembler::Move(Register dst, Handle<HeapObject> value,
RelocInfo::Mode rmode) {
// TODO(jgruber,v8:8887): Also consider a root-relative load when generating
// non-isolate-independent code. In many cases it might be cheaper than
@@ -602,7 +597,7 @@ void TurboAssembler::Move(Register dst, Handle<HeapObject> value,
}
}
-void TurboAssembler::Move(Register dst, ExternalReference reference) {
+void MacroAssembler::Move(Register dst, ExternalReference reference) {
// TODO(jgruber,v8:8887): Also consider a root-relative load when generating
// non-isolate-independent code. In many cases it might be cheaper than
// embedding the relocatable value.
@@ -613,7 +608,7 @@ void TurboAssembler::Move(Register dst, ExternalReference reference) {
mov(dst, Operand(reference));
}
-void TurboAssembler::Move(Register dst, Register src, Condition cond) {
+void MacroAssembler::Move(Register dst, Register src, Condition cond) {
if (dst != src) {
if (cond == al) {
mov(dst, src);
@@ -623,38 +618,38 @@ void TurboAssembler::Move(Register dst, Register src, Condition cond) {
}
}
-void TurboAssembler::Move(DoubleRegister dst, DoubleRegister src) {
+void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
if (dst != src) {
ldr(dst, src);
}
}
-void TurboAssembler::Move(Register dst, const MemOperand& src) {
+void MacroAssembler::Move(Register dst, const MemOperand& src) {
LoadU64(dst, src);
}
// Wrapper around Assembler::mvc (SS-a format)
-void TurboAssembler::MoveChar(const MemOperand& opnd1, const MemOperand& opnd2,
+void MacroAssembler::MoveChar(const MemOperand& opnd1, const MemOperand& opnd2,
const Operand& length) {
mvc(opnd1, opnd2, Operand(static_cast<intptr_t>(length.immediate() - 1)));
}
// Wrapper around Assembler::clc (SS-a format)
-void TurboAssembler::CompareLogicalChar(const MemOperand& opnd1,
+void MacroAssembler::CompareLogicalChar(const MemOperand& opnd1,
const MemOperand& opnd2,
const Operand& length) {
clc(opnd1, opnd2, Operand(static_cast<intptr_t>(length.immediate() - 1)));
}
// Wrapper around Assembler::xc (SS-a format)
-void TurboAssembler::ExclusiveOrChar(const MemOperand& opnd1,
+void MacroAssembler::ExclusiveOrChar(const MemOperand& opnd1,
const MemOperand& opnd2,
const Operand& length) {
xc(opnd1, opnd2, Operand(static_cast<intptr_t>(length.immediate() - 1)));
}
// Wrapper around Assembler::risbg(n) (RIE-f)
-void TurboAssembler::RotateInsertSelectBits(Register dst, Register src,
+void MacroAssembler::RotateInsertSelectBits(Register dst, Register src,
const Operand& startBit,
const Operand& endBit,
const Operand& shiftAmt,
@@ -667,7 +662,7 @@ void TurboAssembler::RotateInsertSelectBits(Register dst, Register src,
risbg(dst, src, startBit, endBit, shiftAmt);
}
-void TurboAssembler::BranchRelativeOnIdxHighP(Register dst, Register inc,
+void MacroAssembler::BranchRelativeOnIdxHighP(Register dst, Register inc,
Label* L) {
#if V8_TARGET_ARCH_S390X
brxhg(dst, inc, L);
@@ -676,7 +671,7 @@ void TurboAssembler::BranchRelativeOnIdxHighP(Register dst, Register inc,
#endif // V8_TARGET_ARCH_S390X
}
-void TurboAssembler::PushArray(Register array, Register size, Register scratch,
+void MacroAssembler::PushArray(Register array, Register size, Register scratch,
Register scratch2, PushArrayOrder order) {
Label loop, done;
@@ -707,7 +702,7 @@ void TurboAssembler::PushArray(Register array, Register size, Register scratch,
}
}
-void TurboAssembler::MultiPush(RegList regs, Register location) {
+void MacroAssembler::MultiPush(RegList regs, Register location) {
int16_t num_to_push = regs.Count();
int16_t stack_offset = num_to_push * kSystemPointerSize;
@@ -720,7 +715,7 @@ void TurboAssembler::MultiPush(RegList regs, Register location) {
}
}
-void TurboAssembler::MultiPop(RegList regs, Register location) {
+void MacroAssembler::MultiPop(RegList regs, Register location) {
int16_t stack_offset = 0;
for (int16_t i = 0; i < Register::kNumRegisters; i++) {
@@ -732,7 +727,7 @@ void TurboAssembler::MultiPop(RegList regs, Register location) {
AddS64(location, location, Operand(stack_offset));
}
-void TurboAssembler::MultiPushDoubles(DoubleRegList dregs, Register location) {
+void MacroAssembler::MultiPushDoubles(DoubleRegList dregs, Register location) {
int16_t num_to_push = dregs.Count();
int16_t stack_offset = num_to_push * kDoubleSize;
@@ -746,7 +741,7 @@ void TurboAssembler::MultiPushDoubles(DoubleRegList dregs, Register location) {
}
}
-void TurboAssembler::MultiPushV128(DoubleRegList dregs, Register scratch,
+void MacroAssembler::MultiPushV128(DoubleRegList dregs, Register scratch,
Register location) {
int16_t num_to_push = dregs.Count();
int16_t stack_offset = num_to_push * kSimd128Size;
@@ -761,7 +756,7 @@ void TurboAssembler::MultiPushV128(DoubleRegList dregs, Register scratch,
}
}
-void TurboAssembler::MultiPopDoubles(DoubleRegList dregs, Register location) {
+void MacroAssembler::MultiPopDoubles(DoubleRegList dregs, Register location) {
int16_t stack_offset = 0;
for (int16_t i = 0; i < DoubleRegister::kNumRegisters; i++) {
@@ -774,7 +769,7 @@ void TurboAssembler::MultiPopDoubles(DoubleRegList dregs, Register location) {
AddS64(location, location, Operand(stack_offset));
}
-void TurboAssembler::MultiPopV128(DoubleRegList dregs, Register scratch,
+void MacroAssembler::MultiPopV128(DoubleRegList dregs, Register scratch,
Register location) {
int16_t stack_offset = 0;
@@ -788,7 +783,7 @@ void TurboAssembler::MultiPopV128(DoubleRegList dregs, Register scratch,
AddS64(location, location, Operand(stack_offset));
}
-void TurboAssembler::MultiPushF64OrV128(DoubleRegList dregs, Register scratch,
+void MacroAssembler::MultiPushF64OrV128(DoubleRegList dregs, Register scratch,
Register location) {
#if V8_ENABLE_WEBASSEMBLY
bool generating_bultins =
@@ -822,7 +817,7 @@ void TurboAssembler::MultiPushF64OrV128(DoubleRegList dregs, Register scratch,
#endif
}
-void TurboAssembler::MultiPopF64OrV128(DoubleRegList dregs, Register scratch,
+void MacroAssembler::MultiPopF64OrV128(DoubleRegList dregs, Register scratch,
Register location) {
#if V8_ENABLE_WEBASSEMBLY
bool generating_bultins =
@@ -854,33 +849,36 @@ void TurboAssembler::MultiPopF64OrV128(DoubleRegList dregs, Register scratch,
#endif
}
-void TurboAssembler::LoadRoot(Register destination, RootIndex index,
- Condition) {
- LoadU64(destination,
- MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)), r0);
+void MacroAssembler::LoadTaggedRoot(Register destination, RootIndex index) {
+ ASM_CODE_COMMENT(this);
+ if (CanBeImmediate(index)) {
+ mov(destination, Operand(ReadOnlyRootPtr(index), RelocInfo::Mode::NO_INFO));
+ return;
+ }
+ LoadRoot(destination, index);
}
-void TurboAssembler::LoadTaggedPointerField(const Register& destination,
- const MemOperand& field_operand,
- const Register& scratch) {
- if (COMPRESS_POINTERS_BOOL) {
- DecompressTaggedPointer(destination, field_operand);
- } else {
- LoadU64(destination, field_operand, scratch);
+void MacroAssembler::LoadRoot(Register destination, RootIndex index,
+ Condition) {
+ if (CanBeImmediate(index)) {
+ DecompressTagged(destination, ReadOnlyRootPtr(index));
+ return;
}
+ LoadU64(destination,
+ MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)), r0);
}
-void TurboAssembler::LoadAnyTaggedField(const Register& destination,
- const MemOperand& field_operand,
- const Register& scratch) {
+void MacroAssembler::LoadTaggedField(const Register& destination,
+ const MemOperand& field_operand,
+ const Register& scratch) {
if (COMPRESS_POINTERS_BOOL) {
- DecompressAnyTagged(destination, field_operand);
+ DecompressTagged(destination, field_operand);
} else {
LoadU64(destination, field_operand, scratch);
}
}
-void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
+void MacroAssembler::SmiUntag(Register dst, const MemOperand& src) {
if (SmiValuesAre31Bits()) {
LoadS32(dst, src);
} else {
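
The new LoadTaggedRoot/LoadRoot fast path above relies on read-only roots having fixed compressed addresses, so a root that satisfies CanBeImmediate can be materialized with a plain mov instead of a load through kRootRegister. A hedged sketch of the idea (the constant is illustrative, not the real root layout):

    #include <cstdint>

    constexpr uint32_t kAssumedReadOnlyRootPtr = 0x11;  // illustrative value

    // Compressed form: just the immediate (LoadTaggedRoot).
    uint32_t LoadTaggedRootImm() { return kAssumedReadOnlyRootPtr; }

    // Full form: immediate plus cage base (LoadRoot -> DecompressTagged).
    uint64_t LoadRootImm(uint64_t cage_base) {
      return cage_base + kAssumedReadOnlyRootPtr;
    }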
@@ -889,11 +887,11 @@ void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
SmiUntag(dst);
}
-void TurboAssembler::SmiUntagField(Register dst, const MemOperand& src) {
+void MacroAssembler::SmiUntagField(Register dst, const MemOperand& src) {
SmiUntag(dst, src);
}
-void TurboAssembler::StoreTaggedField(const Register& value,
+void MacroAssembler::StoreTaggedField(const Register& value,
const MemOperand& dst_field_operand,
const Register& scratch) {
if (COMPRESS_POINTERS_BOOL) {
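
SmiUntag above picks its load width from the Smi layout. A short sketch of the two layouts it handles, with shift amounts per the usual V8 scheme:

    #include <cstdint>

    // 31-bit Smis (pointer compression): payload sits above a 1-bit tag.
    int32_t SmiUntag31(int32_t tagged) { return tagged >> 1; }

    // 32-bit Smis: payload sits in the upper half of the 64-bit word.
    int64_t SmiUntag32(int64_t tagged) { return tagged >> 32; }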
@@ -905,53 +903,43 @@ void TurboAssembler::StoreTaggedField(const Register& value,
}
}
-void TurboAssembler::DecompressTaggedSigned(Register destination,
+void MacroAssembler::DecompressTaggedSigned(Register destination,
Register src) {
RecordComment("[ DecompressTaggedSigned");
llgfr(destination, src);
RecordComment("]");
}
-void TurboAssembler::DecompressTaggedSigned(Register destination,
+void MacroAssembler::DecompressTaggedSigned(Register destination,
MemOperand field_operand) {
RecordComment("[ DecompressTaggedSigned");
llgf(destination, field_operand);
RecordComment("]");
}
-void TurboAssembler::DecompressTaggedPointer(Register destination,
- Register source) {
- RecordComment("[ DecompressTaggedPointer");
+void MacroAssembler::DecompressTagged(Register destination, Register source) {
+ RecordComment("[ DecompressTagged");
llgfr(destination, source);
agr(destination, kRootRegister);
RecordComment("]");
}
-void TurboAssembler::DecompressTaggedPointer(Register destination,
- MemOperand field_operand) {
- RecordComment("[ DecompressTaggedPointer");
- llgf(destination, field_operand);
- agr(destination, kRootRegister);
- RecordComment("]");
-}
-
-void TurboAssembler::DecompressAnyTagged(Register destination,
- MemOperand field_operand) {
- RecordComment("[ DecompressAnyTagged");
+void MacroAssembler::DecompressTagged(Register destination,
+ MemOperand field_operand) {
+ RecordComment("[ DecompressTagged");
llgf(destination, field_operand);
agr(destination, kRootRegister);
RecordComment("]");
}
-void TurboAssembler::DecompressAnyTagged(Register destination,
- Register source) {
- RecordComment("[ DecompressAnyTagged");
- llgfr(destination, source);
+void MacroAssembler::DecompressTagged(const Register& destination,
+ Tagged_t immediate) {
+ ASM_CODE_COMMENT(this);
+ mov(destination, Operand(immediate, RelocInfo::NO_INFO));
agr(destination, kRootRegister);
- RecordComment("]");
}
-void TurboAssembler::LoadTaggedSignedField(Register destination,
+void MacroAssembler::LoadTaggedSignedField(Register destination,
MemOperand field_operand) {
if (COMPRESS_POINTERS_BOOL) {
DecompressTaggedSigned(destination, field_operand);
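
All three DecompressTagged overloads above share one pattern: zero-extend the 32-bit compressed value (llgf/llgfr, or a mov of the immediate) and add the cage base held in kRootRegister. A minimal sketch:

    #include <cstdint>

    // llgf/llgfr zero-extends; agr adds the cage base from kRootRegister.
    uint64_t DecompressTagged(uint64_t cage_base, uint32_t compressed) {
      return cage_base + compressed;
    }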
@@ -999,17 +987,17 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
}
-void TurboAssembler::MaybeSaveRegisters(RegList registers) {
+void MacroAssembler::MaybeSaveRegisters(RegList registers) {
if (registers.is_empty()) return;
MultiPush(registers);
}
-void TurboAssembler::MaybeRestoreRegisters(RegList registers) {
+void MacroAssembler::MaybeRestoreRegisters(RegList registers) {
if (registers.is_empty()) return;
MultiPop(registers);
}
-void TurboAssembler::CallEphemeronKeyBarrier(Register object,
+void MacroAssembler::CallEphemeronKeyBarrier(Register object,
Register slot_address,
SaveFPRegsMode fp_mode) {
DCHECK(!AreAliased(object, slot_address));
@@ -1032,7 +1020,7 @@ void TurboAssembler::CallEphemeronKeyBarrier(Register object,
MaybeRestoreRegisters(registers);
}
-void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object,
+void MacroAssembler::CallRecordWriteStubSaveRegisters(Register object,
Register slot_address,
SaveFPRegsMode fp_mode,
StubCallMode mode) {
@@ -1055,7 +1043,7 @@ void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object,
MaybeRestoreRegisters(registers);
}
-void TurboAssembler::CallRecordWriteStub(Register object, Register slot_address,
+void MacroAssembler::CallRecordWriteStub(Register object, Register slot_address,
SaveFPRegsMode fp_mode,
StubCallMode mode) {
// Use CallRecordWriteStubSaveRegisters if the object and slot registers
@@ -1083,7 +1071,7 @@ void MacroAssembler::RecordWrite(Register object, Register slot_address,
SaveFPRegsMode fp_mode, SmiCheck smi_check) {
DCHECK(!AreAliased(object, slot_address, value));
if (v8_flags.debug_code) {
- LoadTaggedPointerField(r0, MemOperand(slot_address));
+ LoadTaggedField(r0, MemOperand(slot_address));
CmpS64(value, r0);
Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
}
@@ -1101,8 +1089,7 @@ void MacroAssembler::RecordWrite(Register object, Register slot_address,
CheckPageFlag(value,
value, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask,
- eq, &done);
+ MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
CheckPageFlag(object,
value, // Used as scratch.
MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
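
CheckPageFlag, used twice above, exploits the power-of-two alignment of V8 pages: masking any object address yields its page header, whose flag word is then tested against the mask. A sketch under an assumed page size and header layout:

    #include <cstdint>

    constexpr uintptr_t kAssumedPageSize = uintptr_t{1} << 18;  // illustrative
    constexpr uintptr_t kAssumedFlagsOffset = 8;                // illustrative

    bool PageFlagSet(uintptr_t object, uintptr_t mask) {
      uintptr_t page = object & ~(kAssumedPageSize - 1);
      uintptr_t flags =
          *reinterpret_cast<const uintptr_t*>(page + kAssumedFlagsOffset);
      return (flags & mask) != 0;
    }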
@@ -1128,7 +1115,7 @@ void MacroAssembler::RecordWrite(Register object, Register slot_address,
}
}
-void TurboAssembler::PushCommonFrame(Register marker_reg) {
+void MacroAssembler::PushCommonFrame(Register marker_reg) {
ASM_CODE_COMMENT(this);
int fp_delta = 0;
CleanseP(r14);
@@ -1142,7 +1129,7 @@ void TurboAssembler::PushCommonFrame(Register marker_reg) {
la(fp, MemOperand(sp, fp_delta * kSystemPointerSize));
}
-void TurboAssembler::PopCommonFrame(Register marker_reg) {
+void MacroAssembler::PopCommonFrame(Register marker_reg) {
if (marker_reg.is_valid()) {
Pop(r14, fp, marker_reg);
} else {
@@ -1150,7 +1137,7 @@ void TurboAssembler::PopCommonFrame(Register marker_reg) {
}
}
-void TurboAssembler::PushStandardFrame(Register function_reg) {
+void MacroAssembler::PushStandardFrame(Register function_reg) {
int fp_delta = 0;
CleanseP(r14);
if (function_reg.is_valid()) {
@@ -1164,7 +1151,7 @@ void TurboAssembler::PushStandardFrame(Register function_reg) {
Push(kJavaScriptCallArgCountRegister);
}
-void TurboAssembler::RestoreFrameStateForTailCall() {
+void MacroAssembler::RestoreFrameStateForTailCall() {
// if (V8_EMBEDDED_CONSTANT_POOL_BOOL) {
// LoadU64(kConstantPoolRegister,
// MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
@@ -1175,7 +1162,7 @@ void TurboAssembler::RestoreFrameStateForTailCall() {
LoadU64(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
}
-void TurboAssembler::CanonicalizeNaN(const DoubleRegister dst,
+void MacroAssembler::CanonicalizeNaN(const DoubleRegister dst,
const DoubleRegister src) {
// Turn potential sNaN into qNaN
if (dst != src) ldr(dst, src);
@@ -1183,11 +1170,11 @@ void TurboAssembler::CanonicalizeNaN(const DoubleRegister dst,
sdbr(dst, kDoubleRegZero);
}
-void TurboAssembler::ConvertIntToDouble(DoubleRegister dst, Register src) {
+void MacroAssembler::ConvertIntToDouble(DoubleRegister dst, Register src) {
cdfbr(dst, src);
}
-void TurboAssembler::ConvertUnsignedIntToDouble(DoubleRegister dst,
+void MacroAssembler::ConvertUnsignedIntToDouble(DoubleRegister dst,
Register src) {
if (CpuFeatures::IsSupported(FLOATING_POINT_EXT)) {
cdlfbr(Condition(5), Condition(0), dst, src);
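
CanonicalizeNaN above works because sdbr is a genuine arithmetic operation: subtracting 0.0 quietens a signalling NaN and leaves every other value, including -0.0 under the default rounding mode, unchanged. The same trick in plain C++:

    // Subtracting zero converts sNaN -> qNaN and is the identity otherwise.
    double CanonicalizeNaN(double x) { return x - 0.0; }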
@@ -1199,36 +1186,36 @@ void TurboAssembler::ConvertUnsignedIntToDouble(DoubleRegister dst,
}
}
-void TurboAssembler::ConvertIntToFloat(DoubleRegister dst, Register src) {
+void MacroAssembler::ConvertIntToFloat(DoubleRegister dst, Register src) {
cefbra(Condition(4), dst, src);
}
-void TurboAssembler::ConvertUnsignedIntToFloat(DoubleRegister dst,
+void MacroAssembler::ConvertUnsignedIntToFloat(DoubleRegister dst,
Register src) {
celfbr(Condition(4), Condition(0), dst, src);
}
-void TurboAssembler::ConvertInt64ToFloat(DoubleRegister double_dst,
+void MacroAssembler::ConvertInt64ToFloat(DoubleRegister double_dst,
Register src) {
cegbr(double_dst, src);
}
-void TurboAssembler::ConvertInt64ToDouble(DoubleRegister double_dst,
+void MacroAssembler::ConvertInt64ToDouble(DoubleRegister double_dst,
Register src) {
cdgbr(double_dst, src);
}
-void TurboAssembler::ConvertUnsignedInt64ToFloat(DoubleRegister double_dst,
+void MacroAssembler::ConvertUnsignedInt64ToFloat(DoubleRegister double_dst,
Register src) {
celgbr(Condition(0), Condition(0), double_dst, src);
}
-void TurboAssembler::ConvertUnsignedInt64ToDouble(DoubleRegister double_dst,
+void MacroAssembler::ConvertUnsignedInt64ToDouble(DoubleRegister double_dst,
Register src) {
cdlgbr(Condition(0), Condition(0), double_dst, src);
}
-void TurboAssembler::ConvertFloat32ToInt64(const Register dst,
+void MacroAssembler::ConvertFloat32ToInt64(const Register dst,
const DoubleRegister double_input,
FPRoundingMode rounding_mode) {
Condition m = Condition(0);
@@ -1250,7 +1237,7 @@ void TurboAssembler::ConvertFloat32ToInt64(const Register dst,
cgebr(m, dst, double_input);
}
-void TurboAssembler::ConvertDoubleToInt64(const Register dst,
+void MacroAssembler::ConvertDoubleToInt64(const Register dst,
const DoubleRegister double_input,
FPRoundingMode rounding_mode) {
Condition m = Condition(0);
@@ -1272,7 +1259,7 @@ void TurboAssembler::ConvertDoubleToInt64(const Register dst,
cgdbr(m, dst, double_input);
}
-void TurboAssembler::ConvertDoubleToInt32(const Register dst,
+void MacroAssembler::ConvertDoubleToInt32(const Register dst,
const DoubleRegister double_input,
FPRoundingMode rounding_mode) {
Condition m = Condition(0);
@@ -1298,7 +1285,7 @@ void TurboAssembler::ConvertDoubleToInt32(const Register dst,
cfdbr(m, dst, double_input);
}
-void TurboAssembler::ConvertFloat32ToInt32(const Register result,
+void MacroAssembler::ConvertFloat32ToInt32(const Register result,
const DoubleRegister double_input,
FPRoundingMode rounding_mode) {
Condition m = Condition(0);
@@ -1324,7 +1311,7 @@ void TurboAssembler::ConvertFloat32ToInt32(const Register result,
cfebr(m, result, double_input);
}
-void TurboAssembler::ConvertFloat32ToUnsignedInt32(
+void MacroAssembler::ConvertFloat32ToUnsignedInt32(
const Register result, const DoubleRegister double_input,
FPRoundingMode rounding_mode) {
Condition m = Condition(0);
@@ -1349,7 +1336,7 @@ void TurboAssembler::ConvertFloat32ToUnsignedInt32(
clfebr(m, Condition(0), result, double_input);
}
-void TurboAssembler::ConvertFloat32ToUnsignedInt64(
+void MacroAssembler::ConvertFloat32ToUnsignedInt64(
const Register result, const DoubleRegister double_input,
FPRoundingMode rounding_mode) {
Condition m = Condition(0);
@@ -1371,7 +1358,7 @@ void TurboAssembler::ConvertFloat32ToUnsignedInt64(
clgebr(m, Condition(0), result, double_input);
}
-void TurboAssembler::ConvertDoubleToUnsignedInt64(
+void MacroAssembler::ConvertDoubleToUnsignedInt64(
const Register dst, const DoubleRegister double_input,
FPRoundingMode rounding_mode) {
Condition m = Condition(0);
@@ -1393,7 +1380,7 @@ void TurboAssembler::ConvertDoubleToUnsignedInt64(
clgdbr(m, Condition(0), dst, double_input);
}
-void TurboAssembler::ConvertDoubleToUnsignedInt32(
+void MacroAssembler::ConvertDoubleToUnsignedInt32(
const Register dst, const DoubleRegister double_input,
FPRoundingMode rounding_mode) {
Condition m = Condition(0);
@@ -1418,15 +1405,15 @@ void TurboAssembler::ConvertDoubleToUnsignedInt32(
clfdbr(m, Condition(0), dst, double_input);
}
-void TurboAssembler::MovDoubleToInt64(Register dst, DoubleRegister src) {
+void MacroAssembler::MovDoubleToInt64(Register dst, DoubleRegister src) {
lgdr(dst, src);
}
-void TurboAssembler::MovInt64ToDouble(DoubleRegister dst, Register src) {
+void MacroAssembler::MovInt64ToDouble(DoubleRegister dst, Register src) {
ldgr(dst, src);
}
-void TurboAssembler::StubPrologue(StackFrame::Type type, Register base,
+void MacroAssembler::StubPrologue(StackFrame::Type type, Register base,
int prologue_offset) {
{
ConstantPoolUnavailableScope constant_pool_unavailable(this);
@@ -1435,12 +1422,12 @@ void TurboAssembler::StubPrologue(StackFrame::Type type, Register base,
}
}
-void TurboAssembler::Prologue(Register base, int prologue_offset) {
+void MacroAssembler::Prologue(Register base, int prologue_offset) {
DCHECK(base != no_reg);
PushStandardFrame(r3);
}
-void TurboAssembler::DropArguments(Register count, ArgumentsCountType type,
+void MacroAssembler::DropArguments(Register count, ArgumentsCountType type,
ArgumentsCountMode mode) {
int receiver_bytes =
(mode == kCountExcludesReceiver) ? kSystemPointerSize : 0;
@@ -1466,7 +1453,7 @@ void TurboAssembler::DropArguments(Register count, ArgumentsCountType type,
}
}
-void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
+void MacroAssembler::DropArgumentsAndPushNewReceiver(Register argc,
Register receiver,
ArgumentsCountType type,
ArgumentsCountMode mode) {
@@ -1481,7 +1468,7 @@ void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
}
}
-void TurboAssembler::EnterFrame(StackFrame::Type type,
+void MacroAssembler::EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg) {
ASM_CODE_COMMENT(this);
// We create a stack frame with:
@@ -1502,7 +1489,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type,
#endif // V8_ENABLE_WEBASSEMBLY
}
-int TurboAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
+int MacroAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
ASM_CODE_COMMENT(this);
// Drop the execution stack down to the frame pointer and restore
// the caller frame pointer, return address and constant pool pointer.
@@ -1542,7 +1529,7 @@ int TurboAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
// gaps
// Args
// ABIRes <- newSP
-void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
+void MacroAssembler::EnterExitFrame(int stack_space,
StackFrame::Type frame_type) {
DCHECK(frame_type == StackFrame::EXIT ||
frame_type == StackFrame::BUILTIN_EXIT);
@@ -1574,20 +1561,11 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
StoreU64(cp, MemOperand(r1));
- // Optionally save all volatile double registers.
- if (save_doubles) {
- MultiPushDoubles(kCallerSavedDoubles);
- // Note that d0 will be accessible at
- // fp - ExitFrameConstants::kFrameSize -
- // kNumCallerSavedDoubles * kDoubleSize,
- // since the sp slot and code slot were pushed after the fp.
- }
-
lay(sp, MemOperand(sp, -stack_space * kSystemPointerSize));
// Allocate and align the frame preparing for calling the runtime
// function.
- const int frame_alignment = TurboAssembler::ActivationFrameAlignment();
+ const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
if (frame_alignment > 0) {
DCHECK_EQ(frame_alignment, 8);
ClearRightImm(sp, sp, Operand(3)); // equivalent to &= -8
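
The ClearRightImm above implements the 8-byte activation-frame alignment that the surrounding DCHECK_EQ insists on; clearing the low three bits rounds sp down to a multiple of 8:

    #include <cstdint>

    // ClearRightImm(sp, sp, Operand(3)) == sp &= ~7, i.e. align down to 8.
    uintptr_t AlignFrame(uintptr_t sp) { return sp & ~uintptr_t{7}; }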
@@ -1601,7 +1579,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
StoreU64(r1, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
-int TurboAssembler::ActivationFrameAlignment() {
+int MacroAssembler::ActivationFrameAlignment() {
#if !defined(USE_SIMULATOR)
// Running on the real platform. Use the alignment as mandated by the local
// environment.
@@ -1617,17 +1595,8 @@ int TurboAssembler::ActivationFrameAlignment() {
#endif
}
-void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
+void MacroAssembler::LeaveExitFrame(Register argument_count,
bool argument_count_is_length) {
- // Optionally restore all double registers.
- if (save_doubles) {
- // Calculate the stack location of the saved doubles and restore them.
- const int kNumRegs = kNumCallerSavedDoubles;
- lay(r5, MemOperand(fp, -(ExitFrameConstants::kFixedFrameSizeFromFp +
- kNumRegs * kDoubleSize)));
- MultiPopDoubles(kCallerSavedDoubles, r5);
- }
-
// Clear top frame.
Move(ip, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
isolate()));
@@ -1657,11 +1626,11 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
}
}
-void TurboAssembler::MovFromFloatResult(const DoubleRegister dst) {
+void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
Move(dst, d0);
}
-void TurboAssembler::MovFromFloatParameter(const DoubleRegister dst) {
+void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
Move(dst, d0);
}
@@ -1672,10 +1641,10 @@ MemOperand MacroAssembler::StackLimitAsMemOperand(StackLimitKind kind) {
kind == StackLimitKind::kRealStackLimit
? ExternalReference::address_of_real_jslimit(isolate)
: ExternalReference::address_of_jslimit(isolate);
- DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
+ DCHECK(MacroAssembler::IsAddressableThroughRootRegister(isolate, limit));
intptr_t offset =
- TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+ MacroAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
CHECK(is_int32(offset));
return MemOperand(kRootRegister, offset);
}
@@ -1829,8 +1798,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// allow recompilation to take effect without changing any of the
// call sites.
Register code = kJavaScriptCallCodeStartRegister;
- LoadTaggedPointerField(code,
- FieldMemOperand(function, JSFunction::kCodeOffset));
+ LoadTaggedField(code, FieldMemOperand(function, JSFunction::kCodeOffset));
switch (type) {
case InvokeType::kCall:
CallCodeObject(code);
@@ -1855,9 +1823,9 @@ void MacroAssembler::InvokeFunctionWithNewTarget(
Register expected_reg = r4;
Register temp_reg = r6;
- LoadTaggedPointerField(cp, FieldMemOperand(fun, JSFunction::kContextOffset));
- LoadTaggedPointerField(
- temp_reg, FieldMemOperand(fun, JSFunction::kSharedFunctionInfoOffset));
+ LoadTaggedField(cp, FieldMemOperand(fun, JSFunction::kContextOffset));
+ LoadTaggedField(temp_reg,
+ FieldMemOperand(fun, JSFunction::kSharedFunctionInfoOffset));
LoadU16(
expected_reg,
FieldMemOperand(temp_reg,
@@ -1878,8 +1846,7 @@ void MacroAssembler::InvokeFunction(Register function,
DCHECK_EQ(function, r3);
// Get the function and setup the context.
- LoadTaggedPointerField(cp,
- FieldMemOperand(function, JSFunction::kContextOffset));
+ LoadTaggedField(cp, FieldMemOperand(function, JSFunction::kContextOffset));
InvokeFunctionCode(r3, no_reg, expected_parameter_count,
actual_parameter_count, type);
@@ -1961,6 +1928,10 @@ void MacroAssembler::CompareInstanceTypeRange(Register map, Register type_reg,
}
void MacroAssembler::CompareRoot(Register obj, RootIndex index) {
+ if (CanBeImmediate(index)) {
+ CompareTagged(obj, Operand(ReadOnlyRootPtr(index)));
+ return;
+ }
int32_t offset = RootRegisterOffsetForRootIndex(index);
#ifdef V8_TARGET_BIG_ENDIAN
offset += (COMPRESS_POINTERS_BOOL ? kTaggedSize : 0);
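
The endianness adjustment above accounts for compressed pointers occupying only the low-order half of each 64-bit root slot; on big-endian s390 that half sits kTaggedSize bytes into the slot. Equivalently:

    constexpr int kTaggedSize = 4;  // compressed pointer width

    int RootSlotOffset(int offset, bool compressed, bool big_endian) {
      return offset + ((compressed && big_endian) ? kTaggedSize : 0);
    }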
@@ -1975,7 +1946,7 @@ void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
ble(on_in_range);
}
-void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
+void MacroAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
Register result,
DoubleRegister double_input,
StubCallMode stub_mode) {
@@ -2007,7 +1978,7 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
bind(&done);
}
-void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
+void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
DoubleRegister double_input,
Label* done) {
ConvertDoubleToInt64(result, double_input);
@@ -2040,7 +2011,7 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm,
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
{
- __ TestCodeTIsMarkedForDeoptimization(optimized_code_entry, scratch);
+ __ TestCodeIsMarkedForDeoptimization(optimized_code_entry, scratch);
__ bne(&heal_optimized_code_slot);
}
@@ -2049,7 +2020,7 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm,
__ ReplaceClosureCodeWithOptimizedCode(optimized_code_entry, closure, scratch,
r7);
static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
- __ LoadCodeObjectEntry(r4, optimized_code_entry);
+ __ LoadCodeEntry(r4, optimized_code_entry);
__ Jump(r4);
// Optimized code slot contains deoptimized code or code is cleared and
@@ -2156,15 +2127,14 @@ void MacroAssembler::OptimizeCodeOrTailCallOptimizedCodeSlot(
bind(&maybe_has_optimized_code);
Register optimized_code_entry = flags;
- LoadAnyTaggedField(
- optimized_code_entry,
- FieldMemOperand(feedback_vector,
- FeedbackVector::kMaybeOptimizedCodeOffset));
+ LoadTaggedField(optimized_code_entry,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kMaybeOptimizedCodeOffset));
TailCallOptimizedCodeSlot(this, optimized_code_entry, r8);
}
-void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
- SaveFPRegsMode save_doubles) {
+void MacroAssembler::CallRuntime(const Runtime::Function* f,
+ int num_arguments) {
// All parameters are on the stack. r2 has the return value after call.
// If the expected number of arguments of the runtime function is
@@ -2179,10 +2149,9 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
mov(r2, Operand(num_arguments));
Move(r3, ExternalReference::Create(f));
#if V8_TARGET_ARCH_S390X
- Handle<Code> code =
- CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
+ Handle<Code> code = CodeFactory::CEntry(isolate(), f->result_size);
#else
- Handle<Code> code = CodeFactory::CEntry(isolate(), 1, save_doubles);
+ Handle<Code> code = CodeFactory::CEntry(isolate(), 1);
#endif
Call(code, RelocInfo::CODE_TARGET);
@@ -2200,16 +2169,11 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame) {
Move(r3, builtin);
- Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
- ArgvMode::kStack, builtin_exit_frame);
+ Handle<Code> code =
+ CodeFactory::CEntry(isolate(), 1, ArgvMode::kStack, builtin_exit_frame);
Jump(code, RelocInfo::CODE_TARGET);
}
-void MacroAssembler::JumpToOffHeapInstructionStream(Address entry) {
- mov(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
- Jump(kOffHeapTrampolineRegister);
-}
-
void MacroAssembler::LoadWeakValue(Register out, Register in,
Label* target_if_cleared) {
CmpS32(in, Operand(kClearedWeakHeapObjectLower32));
@@ -2244,7 +2208,7 @@ void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value,
}
}
-void TurboAssembler::Check(Condition cond, AbortReason reason, CRegister cr) {
+void MacroAssembler::Check(Condition cond, AbortReason reason, CRegister cr) {
Label L;
b(cond, &L);
Abort(reason);
@@ -2252,7 +2216,7 @@ void TurboAssembler::Check(Condition cond, AbortReason reason, CRegister cr) {
bind(&L);
}
-void TurboAssembler::Abort(AbortReason reason) {
+void MacroAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
if (v8_flags.code_comments) {
@@ -2299,29 +2263,28 @@ void TurboAssembler::Abort(AbortReason reason) {
// will not return here
}
-void TurboAssembler::LoadMap(Register destination, Register object) {
- LoadTaggedPointerField(destination,
- FieldMemOperand(object, HeapObject::kMapOffset));
+void MacroAssembler::LoadMap(Register destination, Register object) {
+ LoadTaggedField(destination, FieldMemOperand(object, HeapObject::kMapOffset));
}
void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
LoadMap(dst, cp);
- LoadTaggedPointerField(
+ LoadTaggedField(
dst, FieldMemOperand(
dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
- LoadTaggedPointerField(dst, MemOperand(dst, Context::SlotOffset(index)));
+ LoadTaggedField(dst, MemOperand(dst, Context::SlotOffset(index)));
}
#ifdef V8_ENABLE_DEBUG_CODE
-void TurboAssembler::Assert(Condition cond, AbortReason reason, CRegister cr) {
+void MacroAssembler::Assert(Condition cond, AbortReason reason, CRegister cr) {
if (v8_flags.debug_code) Check(cond, reason, cr);
}
-void TurboAssembler::AssertUnreachable(AbortReason reason) {
+void MacroAssembler::AssertUnreachable(AbortReason reason) {
if (v8_flags.debug_code) Abort(reason);
}
-void TurboAssembler::AssertNotSmi(Register object) {
+void MacroAssembler::AssertNotSmi(Register object) {
if (v8_flags.debug_code) {
static_assert(kSmiTag == 0);
TestIfSmi(object);
@@ -2329,7 +2292,7 @@ void TurboAssembler::AssertNotSmi(Register object) {
}
}
-void TurboAssembler::AssertSmi(Register object) {
+void MacroAssembler::AssertSmi(Register object) {
if (v8_flags.debug_code) {
static_assert(kSmiTag == 0);
TestIfSmi(object);
@@ -2425,7 +2388,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
static const int kRegisterPassedArguments = 5;
-int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
+int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments) {
int stack_passed_words = 0;
if (num_double_arguments > DoubleRegister::kNumRegisters) {
@@ -2439,7 +2402,7 @@ int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
return stack_passed_words;
}
-void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
int num_double_arguments,
Register scratch) {
int frame_alignment = ActivationFrameAlignment();
@@ -2462,16 +2425,16 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
lay(sp, MemOperand(sp, (-stack_space) * kSystemPointerSize));
}
-void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
Register scratch) {
PrepareCallCFunction(num_reg_arguments, 0, scratch);
}
-void TurboAssembler::MovToFloatParameter(DoubleRegister src) { Move(d0, src); }
+void MacroAssembler::MovToFloatParameter(DoubleRegister src) { Move(d0, src); }
-void TurboAssembler::MovToFloatResult(DoubleRegister src) { Move(d0, src); }
+void MacroAssembler::MovToFloatResult(DoubleRegister src) { Move(d0, src); }
-void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
+void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
DoubleRegister src2) {
if (src2 == d0) {
DCHECK(src1 != d2);
@@ -2483,53 +2446,60 @@ void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
}
}
-void TurboAssembler::CallCFunction(ExternalReference function,
+void MacroAssembler::CallCFunction(ExternalReference function,
int num_reg_arguments,
- int num_double_arguments) {
+ int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots) {
Move(ip, function);
- CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
+ CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments,
+ set_isolate_data_slots);
}
-void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
- int num_double_arguments) {
- CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
+void MacroAssembler::CallCFunction(Register function, int num_reg_arguments,
+ int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots) {
+ CallCFunctionHelper(function, num_reg_arguments, num_double_arguments,
+ set_isolate_data_slots);
}
-void TurboAssembler::CallCFunction(ExternalReference function,
- int num_arguments) {
- CallCFunction(function, num_arguments, 0);
+void MacroAssembler::CallCFunction(ExternalReference function,
+ int num_arguments,
+ SetIsolateDataSlots set_isolate_data_slots) {
+ CallCFunction(function, num_arguments, 0, set_isolate_data_slots);
}
-void TurboAssembler::CallCFunction(Register function, int num_arguments) {
- CallCFunction(function, num_arguments, 0);
+void MacroAssembler::CallCFunction(Register function, int num_arguments,
+ SetIsolateDataSlots set_isolate_data_slots) {
+ CallCFunction(function, num_arguments, 0, set_isolate_data_slots);
}
-void TurboAssembler::CallCFunctionHelper(Register function,
- int num_reg_arguments,
- int num_double_arguments) {
+void MacroAssembler::CallCFunctionHelper(
+ Register function, int num_reg_arguments, int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots) {
DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
DCHECK(has_frame());
- // Save the frame pointer and PC so that the stack layout remains iterable,
- // even without an ExitFrame which normally exists between JS and C frames.
- Register addr_scratch = r1;
- // See x64 code for reasoning about how to address the isolate data fields.
- if (root_array_available()) {
- LoadPC(r0);
- StoreU64(r0, MemOperand(kRootRegister,
- IsolateData::fast_c_call_caller_pc_offset()));
- StoreU64(fp, MemOperand(kRootRegister,
- IsolateData::fast_c_call_caller_fp_offset()));
- } else {
- DCHECK_NOT_NULL(isolate());
-
- Move(addr_scratch,
- ExternalReference::fast_c_call_caller_pc_address(isolate()));
- LoadPC(r0);
- StoreU64(r0, MemOperand(addr_scratch));
- Move(addr_scratch,
- ExternalReference::fast_c_call_caller_fp_address(isolate()));
- StoreU64(fp, MemOperand(addr_scratch));
+ if (set_isolate_data_slots == SetIsolateDataSlots::kYes) {
+ // Save the frame pointer and PC so that the stack layout remains iterable,
+ // even without an ExitFrame which normally exists between JS and C frames.
+ // See x64 code for reasoning about how to address the isolate data fields.
+ if (root_array_available()) {
+ LoadPC(r0);
+ StoreU64(r0, MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_pc_offset()));
+ StoreU64(fp, MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_fp_offset()));
+ } else {
+ DCHECK_NOT_NULL(isolate());
+ Register addr_scratch = r1;
+ Move(addr_scratch,
+ ExternalReference::fast_c_call_caller_pc_address(isolate()));
+ LoadPC(r0);
+ StoreU64(r0, MemOperand(addr_scratch));
+ Move(addr_scratch,
+ ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ StoreU64(fp, MemOperand(addr_scratch));
+ }
}
// Just call directly. The function called cannot cause a GC, or
@@ -2543,19 +2513,22 @@ void TurboAssembler::CallCFunctionHelper(Register function,
Call(dest);
- // We don't unset the PC; the FP is the source of truth.
- Register zero_scratch = r0;
- lghi(zero_scratch, Operand::Zero());
+ if (set_isolate_data_slots == SetIsolateDataSlots::kYes) {
+ // We don't unset the PC; the FP is the source of truth.
+ Register zero_scratch = r0;
+ lghi(zero_scratch, Operand::Zero());
- if (root_array_available()) {
- StoreU64(
- zero_scratch,
- MemOperand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset()));
- } else {
- DCHECK_NOT_NULL(isolate());
- Move(addr_scratch,
- ExternalReference::fast_c_call_caller_fp_address(isolate()));
- StoreU64(zero_scratch, MemOperand(addr_scratch));
+ if (root_array_available()) {
+ StoreU64(zero_scratch,
+ MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_fp_offset()));
+ } else {
+ DCHECK_NOT_NULL(isolate());
+ Register addr_scratch = r1;
+ Move(addr_scratch,
+ ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ StoreU64(zero_scratch, MemOperand(addr_scratch));
+ }
}
int stack_passed_arguments =
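
The SetIsolateDataSlots bracketing above makes the PC/FP bookkeeping optional: before a fast C call the caller records where it left JS so the profiler can walk stacks that have no ExitFrame, and afterwards only the FP is cleared, since it is the source of truth. A hedged sketch of the protocol (the struct and names stand in for the real IsolateData fields):

    #include <cstdint>

    struct FastCCallSlots {  // stands in for the IsolateData fields
      uintptr_t caller_pc = 0;
      uintptr_t caller_fp = 0;
    };

    void CallCSketch(FastCCallSlots& slots, uintptr_t pc, uintptr_t fp,
                     void (*fn)(), bool set_isolate_data_slots) {
      if (set_isolate_data_slots) {
        slots.caller_pc = pc;  // LoadPC + StoreU64 above
        slots.caller_fp = fp;
      }
      fn();
      if (set_isolate_data_slots) {
        slots.caller_fp = 0;  // PC is left stale; FP is authoritative.
      }
    }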
@@ -2569,7 +2542,7 @@ void TurboAssembler::CallCFunctionHelper(Register function,
}
}
-void TurboAssembler::CheckPageFlag(
+void MacroAssembler::CheckPageFlag(
Register object,
Register scratch, // scratch may be same register as object
int mask, Condition cc, Label* condition_met) {
@@ -2629,9 +2602,9 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
UNREACHABLE();
}
-void TurboAssembler::mov(Register dst, Register src) { lgr(dst, src); }
+void MacroAssembler::mov(Register dst, Register src) { lgr(dst, src); }
-void TurboAssembler::mov(Register dst, const Operand& src) {
+void MacroAssembler::mov(Register dst, const Operand& src) {
int64_t value = 0;
if (src.is_heap_number_request()) {
@@ -2676,7 +2649,7 @@ void TurboAssembler::mov(Register dst, const Operand& src) {
iilf(dst, Operand(lo_32));
}
-void TurboAssembler::MulS32(Register dst, const MemOperand& src1) {
+void MacroAssembler::MulS32(Register dst, const MemOperand& src1) {
if (is_uint12(src1.offset())) {
ms(dst, src1);
} else if (is_int20(src1.offset())) {
@@ -2686,9 +2659,9 @@ void TurboAssembler::MulS32(Register dst, const MemOperand& src1) {
}
}
-void TurboAssembler::MulS32(Register dst, Register src1) { msr(dst, src1); }
+void MacroAssembler::MulS32(Register dst, Register src1) { msr(dst, src1); }
-void TurboAssembler::MulS32(Register dst, const Operand& src1) {
+void MacroAssembler::MulS32(Register dst, const Operand& src1) {
msfi(dst, src1);
}
@@ -2699,19 +2672,19 @@ void TurboAssembler::MulS32(Register dst, const Operand& src1) {
srlg(dst, dst, Operand(32)); \
}
-void TurboAssembler::MulHighS32(Register dst, Register src1,
+void MacroAssembler::MulHighS32(Register dst, Register src1,
const MemOperand& src2) {
Generate_MulHigh32(msgf);
}
-void TurboAssembler::MulHighS32(Register dst, Register src1, Register src2) {
+void MacroAssembler::MulHighS32(Register dst, Register src1, Register src2) {
if (dst == src2) {
std::swap(src1, src2);
}
Generate_MulHigh32(msgfr);
}
-void TurboAssembler::MulHighS32(Register dst, Register src1,
+void MacroAssembler::MulHighS32(Register dst, Register src1,
const Operand& src2) {
Generate_MulHigh32(msgfi);
}
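
The Generate_MulHigh32 macro used by the overloads above widens to a 64-bit signed product (the msgf* family) and shifts the upper half down (srlg). Equivalently in C++:

    #include <cstdint>

    int32_t MulHighS32(int32_t a, int32_t b) {
      int64_t product = int64_t{a} * int64_t{b};
      return static_cast<int32_t>(product >> 32);
    }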
@@ -2725,16 +2698,16 @@ void TurboAssembler::MulHighS32(Register dst, Register src1,
LoadU32(dst, r0); \
}
-void TurboAssembler::MulHighU32(Register dst, Register src1,
+void MacroAssembler::MulHighU32(Register dst, Register src1,
const MemOperand& src2) {
Generate_MulHighU32(ml);
}
-void TurboAssembler::MulHighU32(Register dst, Register src1, Register src2) {
+void MacroAssembler::MulHighU32(Register dst, Register src1, Register src2) {
Generate_MulHighU32(mlr);
}
-void TurboAssembler::MulHighU32(Register dst, Register src1,
+void MacroAssembler::MulHighU32(Register dst, Register src1,
const Operand& src2) {
USE(dst);
USE(src1);
@@ -2751,7 +2724,7 @@ void TurboAssembler::MulHighU32(Register dst, Register src1,
cgfr(dst, dst); \
}
-void TurboAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
+void MacroAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
const MemOperand& src2) {
Register result = dst;
if (src2.rx() == dst || src2.rb() == dst) dst = r0;
@@ -2759,7 +2732,7 @@ void TurboAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
if (result != dst) llgfr(result, dst);
}
-void TurboAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
+void MacroAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
Register src2) {
if (dst == src2) {
std::swap(src1, src2);
@@ -2767,7 +2740,7 @@ void TurboAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
Generate_Mul32WithOverflowIfCCUnequal(msgfr);
}
-void TurboAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
+void MacroAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
const Operand& src2) {
Generate_Mul32WithOverflowIfCCUnequal(msgfi);
}
@@ -2781,12 +2754,12 @@ void TurboAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
LoadU32(dst, r1); \
}
-void TurboAssembler::DivS32(Register dst, Register src1,
+void MacroAssembler::DivS32(Register dst, Register src1,
const MemOperand& src2) {
Generate_Div32(dsgf);
}
-void TurboAssembler::DivS32(Register dst, Register src1, Register src2) {
+void MacroAssembler::DivS32(Register dst, Register src1, Register src2) {
Generate_Div32(dsgfr);
}
@@ -2800,12 +2773,12 @@ void TurboAssembler::DivS32(Register dst, Register src1, Register src2) {
LoadU32(dst, r1); \
}
-void TurboAssembler::DivU32(Register dst, Register src1,
+void MacroAssembler::DivU32(Register dst, Register src1,
const MemOperand& src2) {
Generate_DivU32(dl);
}
-void TurboAssembler::DivU32(Register dst, Register src1, Register src2) {
+void MacroAssembler::DivU32(Register dst, Register src1, Register src2) {
Generate_DivU32(dlr);
}
@@ -2818,12 +2791,12 @@ void TurboAssembler::DivU32(Register dst, Register src1, Register src2) {
lgr(dst, r1); \
}
-void TurboAssembler::DivS64(Register dst, Register src1,
+void MacroAssembler::DivS64(Register dst, Register src1,
const MemOperand& src2) {
Generate_Div64(dsg);
}
-void TurboAssembler::DivS64(Register dst, Register src1, Register src2) {
+void MacroAssembler::DivS64(Register dst, Register src1, Register src2) {
Generate_Div64(dsgr);
}
@@ -2837,12 +2810,12 @@ void TurboAssembler::DivS64(Register dst, Register src1, Register src2) {
lgr(dst, r1); \
}
-void TurboAssembler::DivU64(Register dst, Register src1,
+void MacroAssembler::DivU64(Register dst, Register src1,
const MemOperand& src2) {
Generate_DivU64(dlg);
}
-void TurboAssembler::DivU64(Register dst, Register src1, Register src2) {
+void MacroAssembler::DivU64(Register dst, Register src1, Register src2) {
Generate_DivU64(dlgr);
}
@@ -2855,12 +2828,12 @@ void TurboAssembler::DivU64(Register dst, Register src1, Register src2) {
LoadU32(dst, r0); \
}
-void TurboAssembler::ModS32(Register dst, Register src1,
+void MacroAssembler::ModS32(Register dst, Register src1,
const MemOperand& src2) {
Generate_Mod32(dsgf);
}
-void TurboAssembler::ModS32(Register dst, Register src1, Register src2) {
+void MacroAssembler::ModS32(Register dst, Register src1, Register src2) {
Generate_Mod32(dsgfr);
}
@@ -2874,12 +2847,12 @@ void TurboAssembler::ModS32(Register dst, Register src1, Register src2) {
LoadU32(dst, r0); \
}
-void TurboAssembler::ModU32(Register dst, Register src1,
+void MacroAssembler::ModU32(Register dst, Register src1,
const MemOperand& src2) {
Generate_ModU32(dl);
}
-void TurboAssembler::ModU32(Register dst, Register src1, Register src2) {
+void MacroAssembler::ModU32(Register dst, Register src1, Register src2) {
Generate_ModU32(dlr);
}
@@ -2892,12 +2865,12 @@ void TurboAssembler::ModU32(Register dst, Register src1, Register src2) {
lgr(dst, r0); \
}
-void TurboAssembler::ModS64(Register dst, Register src1,
+void MacroAssembler::ModS64(Register dst, Register src1,
const MemOperand& src2) {
Generate_Mod64(dsg);
}
-void TurboAssembler::ModS64(Register dst, Register src1, Register src2) {
+void MacroAssembler::ModS64(Register dst, Register src1, Register src2) {
Generate_Mod64(dsgr);
}
@@ -2911,54 +2884,54 @@ void TurboAssembler::ModS64(Register dst, Register src1, Register src2) {
lgr(dst, r0); \
}
-void TurboAssembler::ModU64(Register dst, Register src1,
+void MacroAssembler::ModU64(Register dst, Register src1,
const MemOperand& src2) {
Generate_ModU64(dlg);
}
-void TurboAssembler::ModU64(Register dst, Register src1, Register src2) {
+void MacroAssembler::ModU64(Register dst, Register src1, Register src2) {
Generate_ModU64(dlgr);
}
#undef Generate_ModU64
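// A host-side sketch of the pattern shared by the Generate_Div*/Generate_Mod*
// macros above (illustration only, not part of this change): the divide
// instructions operate on the implicit r0:r1 register pair, leaving the
// quotient in r1 and the remainder in r0, and each helper copies out the
// half it needs.
#include <cstdint>
#include <utility>

// {quotient, remainder}, mirroring the r1/r0 results of dsgfr and friends.
inline std::pair<int64_t, int64_t> DivModPairModel(int64_t dividend,
                                                   int64_t divisor) {
  return {dividend / divisor, dividend % divisor};
}

inline int64_t DivS32Model(int32_t a, int32_t b) {
  return DivModPairModel(a, b).first;   // DivS32: LoadU32(dst, r1)
}
inline int64_t ModS32Model(int32_t a, int32_t b) {
  return DivModPairModel(a, b).second;  // ModS32: LoadU32(dst, r0)
}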
-void TurboAssembler::MulS64(Register dst, const Operand& opnd) {
+void MacroAssembler::MulS64(Register dst, const Operand& opnd) {
msgfi(dst, opnd);
}
-void TurboAssembler::MulS64(Register dst, Register src) { msgr(dst, src); }
+void MacroAssembler::MulS64(Register dst, Register src) { msgr(dst, src); }
-void TurboAssembler::MulS64(Register dst, const MemOperand& opnd) {
+void MacroAssembler::MulS64(Register dst, const MemOperand& opnd) {
msg(dst, opnd);
}
-void TurboAssembler::MulHighS64(Register dst, Register src1, Register src2) {
+void MacroAssembler::MulHighS64(Register dst, Register src1, Register src2) {
mgrk(r0, src1, src2);
lgr(dst, r0);
}
-void TurboAssembler::MulHighS64(Register dst, Register src1,
+void MacroAssembler::MulHighS64(Register dst, Register src1,
const MemOperand& src2) {
// TODO(v8): implement this.
UNIMPLEMENTED();
}
-void TurboAssembler::MulHighU64(Register dst, Register src1, Register src2) {
+void MacroAssembler::MulHighU64(Register dst, Register src1, Register src2) {
lgr(r1, src1);
mlgr(r0, src2);
lgr(dst, r0);
}
-void TurboAssembler::MulHighU64(Register dst, Register src1,
+void MacroAssembler::MulHighU64(Register dst, Register src1,
const MemOperand& src2) {
// TODO(v8): implement this.
UNIMPLEMENTED();
}
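// A host-side sketch of what the MulHigh* helpers above compute (illustration
// only): mgrk/mlgr form the full 128-bit product in an even/odd register
// pair, and the helper keeps the high half from r0. Modeled with the
// GCC/Clang __int128 extension.
#include <cstdint>

inline int64_t MulHighS64Model(int64_t a, int64_t b) {
  return static_cast<int64_t>((static_cast<__int128>(a) * b) >> 64);
}
inline uint64_t MulHighU64Model(uint64_t a, uint64_t b) {
  return static_cast<uint64_t>((static_cast<unsigned __int128>(a) * b) >> 64);
}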
-void TurboAssembler::Sqrt(DoubleRegister result, DoubleRegister input) {
+void MacroAssembler::Sqrt(DoubleRegister result, DoubleRegister input) {
sqdbr(result, input);
}
-void TurboAssembler::Sqrt(DoubleRegister result, const MemOperand& input) {
+void MacroAssembler::Sqrt(DoubleRegister result, const MemOperand& input) {
if (is_uint12(input.offset())) {
sqdb(result, input);
} else {
@@ -2971,7 +2944,7 @@ void TurboAssembler::Sqrt(DoubleRegister result, const MemOperand& input) {
//----------------------------------------------------------------------------
// Add 32-bit (Register dst = Register dst + Immediate opnd)
-void TurboAssembler::AddS32(Register dst, const Operand& opnd) {
+void MacroAssembler::AddS32(Register dst, const Operand& opnd) {
if (is_int16(opnd.immediate()))
ahi(dst, opnd);
else
@@ -2979,19 +2952,19 @@ void TurboAssembler::AddS32(Register dst, const Operand& opnd) {
}
// Add Pointer Size (Register dst = Register dst + Immediate opnd)
-void TurboAssembler::AddS64(Register dst, const Operand& opnd) {
+void MacroAssembler::AddS64(Register dst, const Operand& opnd) {
if (is_int16(opnd.immediate()))
aghi(dst, opnd);
else
agfi(dst, opnd);
}
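// A sketch of the encoding dispatch AddS64 performs above (illustration
// only): a constant that fits 16 signed bits can use the 4-byte RI form
// (aghi); wider constants fall back to the 6-byte RIL form (agfi).
#include <cstdint>

enum class AddImmForm { kRI_aghi, kRIL_agfi };

inline AddImmForm SelectAddS64Form(int64_t imm) {
  bool fits_int16 = imm >= -32768 && imm <= 32767;  // is_int16(imm)
  return fits_int16 ? AddImmForm::kRI_aghi : AddImmForm::kRIL_agfi;
}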
-void TurboAssembler::AddS32(Register dst, Register src, int32_t opnd) {
+void MacroAssembler::AddS32(Register dst, Register src, int32_t opnd) {
AddS32(dst, src, Operand(opnd));
}
// Add 32-bit (Register dst = Register src + Immediate opnd)
-void TurboAssembler::AddS32(Register dst, Register src, const Operand& opnd) {
+void MacroAssembler::AddS32(Register dst, Register src, const Operand& opnd) {
if (dst != src) {
if (CpuFeatures::IsSupported(DISTINCT_OPS) && is_int16(opnd.immediate())) {
ahik(dst, src, opnd);
@@ -3002,12 +2975,12 @@ void TurboAssembler::AddS32(Register dst, Register src, const Operand& opnd) {
AddS32(dst, opnd);
}
-void TurboAssembler::AddS64(Register dst, Register src, int32_t opnd) {
+void MacroAssembler::AddS64(Register dst, Register src, int32_t opnd) {
AddS64(dst, src, Operand(opnd));
}
// Add Pointer Size (Register dst = Register src + Immediate opnd)
-void TurboAssembler::AddS64(Register dst, Register src, const Operand& opnd) {
+void MacroAssembler::AddS64(Register dst, Register src, const Operand& opnd) {
if (dst != src) {
if (CpuFeatures::IsSupported(DISTINCT_OPS) && is_int16(opnd.immediate())) {
aghik(dst, src, opnd);
@@ -3019,13 +2992,13 @@ void TurboAssembler::AddS64(Register dst, Register src, const Operand& opnd) {
}
// Add 32-bit (Register dst = Register dst + Register src)
-void TurboAssembler::AddS32(Register dst, Register src) { ar(dst, src); }
+void MacroAssembler::AddS32(Register dst, Register src) { ar(dst, src); }
// Add Pointer Size (Register dst = Register dst + Register src)
-void TurboAssembler::AddS64(Register dst, Register src) { agr(dst, src); }
+void MacroAssembler::AddS64(Register dst, Register src) { agr(dst, src); }
// Add 32-bit (Register dst = Register src1 + Register src2)
-void TurboAssembler::AddS32(Register dst, Register src1, Register src2) {
+void MacroAssembler::AddS32(Register dst, Register src1, Register src2) {
if (dst != src1 && dst != src2) {
// We prefer to generate AR over the non-clobbering ARK,
// as AR is a smaller instruction
@@ -3042,7 +3015,7 @@ void TurboAssembler::AddS32(Register dst, Register src1, Register src2) {
}
// Add Pointer Size (Register dst = Register src1 + Register src2)
-void TurboAssembler::AddS64(Register dst, Register src1, Register src2) {
+void MacroAssembler::AddS64(Register dst, Register src1, Register src2) {
if (dst != src1 && dst != src2) {
// We prefer to generate AGR over the non-clobbering AGRK,
// as AGR is a smaller instruction
@@ -3059,7 +3032,7 @@ void TurboAssembler::AddS64(Register dst, Register src1, Register src2) {
}
// Add 32-bit (Register-Memory)
-void TurboAssembler::AddS32(Register dst, const MemOperand& opnd) {
+void MacroAssembler::AddS32(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
if (is_uint12(opnd.offset()))
a(dst, opnd);
@@ -3068,13 +3041,13 @@ void TurboAssembler::AddS32(Register dst, const MemOperand& opnd) {
}
// Add Pointer Size (Register-Memory)
-void TurboAssembler::AddS64(Register dst, const MemOperand& opnd) {
+void MacroAssembler::AddS64(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
ag(dst, opnd);
}
// Add 32-bit (Memory - Immediate)
-void TurboAssembler::AddS32(const MemOperand& opnd, const Operand& imm) {
+void MacroAssembler::AddS32(const MemOperand& opnd, const Operand& imm) {
DCHECK(is_int8(imm.immediate()));
DCHECK(is_int20(opnd.offset()));
DCHECK(CpuFeatures::IsSupported(GENERAL_INSTR_EXT));
@@ -3082,7 +3055,7 @@ void TurboAssembler::AddS32(const MemOperand& opnd, const Operand& imm) {
}
// Add Pointer-sized (Memory - Immediate)
-void TurboAssembler::AddS64(const MemOperand& opnd, const Operand& imm) {
+void MacroAssembler::AddS64(const MemOperand& opnd, const Operand& imm) {
DCHECK(is_int8(imm.immediate()));
DCHECK(is_int20(opnd.offset()));
DCHECK(CpuFeatures::IsSupported(GENERAL_INSTR_EXT));
@@ -3094,7 +3067,7 @@ void TurboAssembler::AddS64(const MemOperand& opnd, const Operand& imm) {
//----------------------------------------------------------------------------
// Add Logical 32-bit (Register dst = Register src1 + Register src2)
-void TurboAssembler::AddU32(Register dst, Register src1, Register src2) {
+void MacroAssembler::AddU32(Register dst, Register src1, Register src2) {
if (dst != src2 && dst != src1) {
lr(dst, src1);
alr(dst, src2);
@@ -3110,16 +3083,16 @@ void TurboAssembler::AddU32(Register dst, Register src1, Register src2) {
}
// Add Logical 32-bit (Register dst = Register dst + Immediate opnd)
-void TurboAssembler::AddU32(Register dst, const Operand& imm) {
+void MacroAssembler::AddU32(Register dst, const Operand& imm) {
alfi(dst, imm);
}
// Add Logical Pointer Size (Register dst = Register dst + Immediate opnd)
-void TurboAssembler::AddU64(Register dst, const Operand& imm) {
+void MacroAssembler::AddU64(Register dst, const Operand& imm) {
algfi(dst, imm);
}
-void TurboAssembler::AddU64(Register dst, Register src1, Register src2) {
+void MacroAssembler::AddU64(Register dst, Register src1, Register src2) {
if (dst != src2 && dst != src1) {
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
algrk(dst, src1, src2);
@@ -3139,7 +3112,7 @@ void TurboAssembler::AddU64(Register dst, Register src1, Register src2) {
}
// Add Logical 32-bit (Register-Memory)
-void TurboAssembler::AddU32(Register dst, const MemOperand& opnd) {
+void MacroAssembler::AddU32(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
if (is_uint12(opnd.offset()))
al_z(dst, opnd);
@@ -3148,7 +3121,7 @@ void TurboAssembler::AddU32(Register dst, const MemOperand& opnd) {
}
// Add Logical Pointer Size (Register-Memory)
-void TurboAssembler::AddU64(Register dst, const MemOperand& opnd) {
+void MacroAssembler::AddU64(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
alg(dst, opnd);
}
@@ -3158,7 +3131,7 @@ void TurboAssembler::AddU64(Register dst, const MemOperand& opnd) {
//----------------------------------------------------------------------------
// Subtract Logical 32-bit (Register dst = Register src1 - Register src2)
-void TurboAssembler::SubU32(Register dst, Register src1, Register src2) {
+void MacroAssembler::SubU32(Register dst, Register src1, Register src2) {
if (dst != src2 && dst != src1) {
lr(dst, src1);
slr(dst, src2);
@@ -3175,41 +3148,41 @@ void TurboAssembler::SubU32(Register dst, Register src1, Register src2) {
}
// Subtract 32-bit (Register dst = Register dst - Immediate opnd)
-void TurboAssembler::SubS32(Register dst, const Operand& imm) {
+void MacroAssembler::SubS32(Register dst, const Operand& imm) {
AddS32(dst, Operand(-(imm.immediate())));
}
// Subtract Pointer Size (Register dst = Register dst - Immediate opnd)
-void TurboAssembler::SubS64(Register dst, const Operand& imm) {
+void MacroAssembler::SubS64(Register dst, const Operand& imm) {
AddS64(dst, Operand(-(imm.immediate())));
}
-void TurboAssembler::SubS32(Register dst, Register src, int32_t imm) {
+void MacroAssembler::SubS32(Register dst, Register src, int32_t imm) {
SubS32(dst, src, Operand(imm));
}
// Subtract 32-bit (Register dst = Register src - Immediate opnd)
-void TurboAssembler::SubS32(Register dst, Register src, const Operand& imm) {
+void MacroAssembler::SubS32(Register dst, Register src, const Operand& imm) {
AddS32(dst, src, Operand(-(imm.immediate())));
}
-void TurboAssembler::SubS64(Register dst, Register src, int32_t imm) {
+void MacroAssembler::SubS64(Register dst, Register src, int32_t imm) {
SubS64(dst, src, Operand(imm));
}
// Subtract Pointer Sized (Register dst = Register src - Immediate opnd)
-void TurboAssembler::SubS64(Register dst, Register src, const Operand& imm) {
+void MacroAssembler::SubS64(Register dst, Register src, const Operand& imm) {
AddS64(dst, src, Operand(-(imm.immediate())));
}
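// The immediate-form subtracts above reuse the add path with a negated
// constant, i.e. dst = src + (-imm). A host-side sketch (illustration only);
// unsigned arithmetic keeps the wraparound of -INT64_MIN well defined,
// matching the machine's two's-complement behavior.
#include <cstdint>

inline int64_t SubS64Model(int64_t src, int64_t imm) {
  uint64_t neg = 0u - static_cast<uint64_t>(imm);  // Operand(-imm)
  return static_cast<int64_t>(static_cast<uint64_t>(src) + neg);
}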
// Subtract 32-bit (Register dst = Register dst - Register src)
-void TurboAssembler::SubS32(Register dst, Register src) { sr(dst, src); }
+void MacroAssembler::SubS32(Register dst, Register src) { sr(dst, src); }
// Subtract Pointer Size (Register dst = Register dst - Register src)
-void TurboAssembler::SubS64(Register dst, Register src) { sgr(dst, src); }
+void MacroAssembler::SubS64(Register dst, Register src) { sgr(dst, src); }
// Subtract 32-bit (Register = Register - Register)
-void TurboAssembler::SubS32(Register dst, Register src1, Register src2) {
+void MacroAssembler::SubS32(Register dst, Register src1, Register src2) {
// Use non-clobbering version if possible
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
srk(dst, src1, src2);
@@ -3229,7 +3202,7 @@ void TurboAssembler::SubS32(Register dst, Register src1, Register src2) {
}
// Subtract Pointer Sized (Register = Register - Register)
-void TurboAssembler::SubS64(Register dst, Register src1, Register src2) {
+void MacroAssembler::SubS64(Register dst, Register src1, Register src2) {
// Use non-clobbering version if possible
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
sgrk(dst, src1, src2);
@@ -3249,7 +3222,7 @@ void TurboAssembler::SubS64(Register dst, Register src1, Register src2) {
}
// Subtract 32-bit (Register-Memory)
-void TurboAssembler::SubS32(Register dst, const MemOperand& opnd) {
+void MacroAssembler::SubS32(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
if (is_uint12(opnd.offset()))
s(dst, opnd);
@@ -3258,7 +3231,7 @@ void TurboAssembler::SubS32(Register dst, const MemOperand& opnd) {
}
// Subtract Pointer Sized (Register - Memory)
-void TurboAssembler::SubS64(Register dst, const MemOperand& opnd) {
+void MacroAssembler::SubS64(Register dst, const MemOperand& opnd) {
#if V8_TARGET_ARCH_S390X
sg(dst, opnd);
#else
@@ -3266,24 +3239,24 @@ void TurboAssembler::SubS64(Register dst, const MemOperand& opnd) {
#endif
}
-void TurboAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
+void MacroAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
sllg(r0, src, Operand(32));
ldgr(dst, r0);
}
-void TurboAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
+void MacroAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
lgdr(dst, src);
srlg(dst, dst, Operand(32));
}
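// A host-side sketch of the bit moves above (illustration only): ldgr/lgdr
// transfer a full 64-bit GPR to/from an FPR, and a 32-bit float occupies
// the upper half of the 64-bit register, hence the shifts by 32.
#include <cstdint>

inline uint64_t MovIntToFloatModel(uint32_t float_bits) {
  return static_cast<uint64_t>(float_bits) << 32;  // sllg ..., 32; then ldgr
}
inline uint32_t MovFloatToIntModel(uint64_t fpr_bits) {
  return static_cast<uint32_t>(fpr_bits >> 32);    // lgdr; then srlg ..., 32
}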
// Load And Subtract 32-bit (similar to laa/lan/lao/lax)
-void TurboAssembler::LoadAndSub32(Register dst, Register src,
+void MacroAssembler::LoadAndSub32(Register dst, Register src,
const MemOperand& opnd) {
lcr(dst, src);
laa(dst, dst, opnd);
}
-void TurboAssembler::LoadAndSub64(Register dst, Register src,
+void MacroAssembler::LoadAndSub64(Register dst, Register src,
const MemOperand& opnd) {
lcgr(dst, src);
laag(dst, dst, opnd);
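// laa/laag are atomic load-and-ADD, so the LoadAndSub* helpers above first
// negate the operand (lcr/lcgr) and then add it; the register result is the
// value the memory held *before* the update. A host-side sketch using
// std::atomic (illustration only):
#include <atomic>
#include <cstdint>

inline int32_t LoadAndSub32Model(std::atomic<int32_t>* mem, int32_t src) {
  return mem->fetch_sub(src);  // returns the old value, like laa with -src
}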
@@ -3294,7 +3267,7 @@ void TurboAssembler::LoadAndSub64(Register dst, Register src,
//----------------------------------------------------------------------------
// Subtract Logical 32-bit (Register - Memory)
-void TurboAssembler::SubU32(Register dst, const MemOperand& opnd) {
+void MacroAssembler::SubU32(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
if (is_uint12(opnd.offset()))
sl(dst, opnd);
@@ -3303,7 +3276,7 @@ void TurboAssembler::SubU32(Register dst, const MemOperand& opnd) {
}
// Subtract Logical Pointer Sized (Register - Memory)
-void TurboAssembler::SubU64(Register dst, const MemOperand& opnd) {
+void MacroAssembler::SubU64(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
#if V8_TARGET_ARCH_S390X
slgf(dst, opnd);
@@ -3317,13 +3290,13 @@ void TurboAssembler::SubU64(Register dst, const MemOperand& opnd) {
//----------------------------------------------------------------------------
// AND 32-bit - dst = dst & src
-void TurboAssembler::And(Register dst, Register src) { nr(dst, src); }
+void MacroAssembler::And(Register dst, Register src) { nr(dst, src); }
// AND Pointer Size - dst = dst & src
-void TurboAssembler::AndP(Register dst, Register src) { ngr(dst, src); }
+void MacroAssembler::AndP(Register dst, Register src) { ngr(dst, src); }
// Non-clobbering AND 32-bit - dst = src1 & src2
-void TurboAssembler::And(Register dst, Register src1, Register src2) {
+void MacroAssembler::And(Register dst, Register src1, Register src2) {
if (dst != src1 && dst != src2) {
// We prefer to generate NR over the non-clobbering NRK,
// as NR is a smaller instruction
@@ -3340,7 +3313,7 @@ void TurboAssembler::And(Register dst, Register src1, Register src2) {
}
// Non-clobbering AND pointer size - dst = src1 & src2
-void TurboAssembler::AndP(Register dst, Register src1, Register src2) {
+void MacroAssembler::AndP(Register dst, Register src1, Register src2) {
if (dst != src1 && dst != src2) {
// We prefer to generate NGR over the non-clobbering NGRK,
// as NGR is a smaller instruction
@@ -3357,7 +3330,7 @@ void TurboAssembler::AndP(Register dst, Register src1, Register src2) {
}
// AND 32-bit (Reg - Mem)
-void TurboAssembler::And(Register dst, const MemOperand& opnd) {
+void MacroAssembler::And(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
if (is_uint12(opnd.offset()))
n(dst, opnd);
@@ -3366,7 +3339,7 @@ void TurboAssembler::And(Register dst, const MemOperand& opnd) {
}
// AND Pointer Size (Reg - Mem)
-void TurboAssembler::AndP(Register dst, const MemOperand& opnd) {
+void MacroAssembler::AndP(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
#if V8_TARGET_ARCH_S390X
ng(dst, opnd);
@@ -3376,10 +3349,10 @@ void TurboAssembler::AndP(Register dst, const MemOperand& opnd) {
}
// AND 32-bit - dst = dst & imm
-void TurboAssembler::And(Register dst, const Operand& opnd) { nilf(dst, opnd); }
+void MacroAssembler::And(Register dst, const Operand& opnd) { nilf(dst, opnd); }
// AND Pointer Size - dst = dst & imm
-void TurboAssembler::AndP(Register dst, const Operand& opnd) {
+void MacroAssembler::AndP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X
intptr_t value = opnd.immediate();
if (value >> 32 != -1) {
@@ -3393,13 +3366,13 @@ void TurboAssembler::AndP(Register dst, const Operand& opnd) {
}
// AND 32-bit - dst = src & imm
-void TurboAssembler::And(Register dst, Register src, const Operand& opnd) {
+void MacroAssembler::And(Register dst, Register src, const Operand& opnd) {
if (dst != src) lr(dst, src);
nilf(dst, opnd);
}
// AND Pointer Size - dst = src & imm
-void TurboAssembler::AndP(Register dst, Register src, const Operand& opnd) {
+void MacroAssembler::AndP(Register dst, Register src, const Operand& opnd) {
// Try to exploit RISBG first
intptr_t value = opnd.immediate();
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
@@ -3440,13 +3413,13 @@ void TurboAssembler::AndP(Register dst, Register src, const Operand& opnd) {
}
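// The "exploit RISBG" path above works because rotate-then-insert-selected-
// bits can implement AND with any mask that is a single contiguous run of
// ones in one instruction. A host-side test for such masks (non-wrapping
// case; illustration only, using the GCC/Clang __builtin_ctzll intrinsic):
#include <cstdint>

inline bool IsContiguousOnesMask(uint64_t mask) {
  if (mask == 0) return false;
  uint64_t shifted = mask >> __builtin_ctzll(mask);  // strip trailing zeros
  return (shifted & (shifted + 1)) == 0;             // now of the form 2^k-1?
}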
// OR 32-bit - dst = dst | src
-void TurboAssembler::Or(Register dst, Register src) { or_z(dst, src); }
+void MacroAssembler::Or(Register dst, Register src) { or_z(dst, src); }
// OR Pointer Size - dst = dst | src
-void TurboAssembler::OrP(Register dst, Register src) { ogr(dst, src); }
+void MacroAssembler::OrP(Register dst, Register src) { ogr(dst, src); }
// Non-clobbering OR 32-bit - dst = src1 | src2
-void TurboAssembler::Or(Register dst, Register src1, Register src2) {
+void MacroAssembler::Or(Register dst, Register src1, Register src2) {
if (dst != src1 && dst != src2) {
// We prefer to generate OR over the non-clobbering ORK,
// as OR is a smaller instruction
@@ -3463,7 +3436,7 @@ void TurboAssembler::Or(Register dst, Register src1, Register src2) {
}
// Non-clobbering OR pointer size - dst = src1 | src2
-void TurboAssembler::OrP(Register dst, Register src1, Register src2) {
+void MacroAssembler::OrP(Register dst, Register src1, Register src2) {
if (dst != src1 && dst != src2) {
// We prefer to generate OGR over the non-clobbering OGRK,
// as OGR is a smaller instruction
@@ -3480,7 +3453,7 @@ void TurboAssembler::OrP(Register dst, Register src1, Register src2) {
}
// OR 32-bit (Reg - Mem)
-void TurboAssembler::Or(Register dst, const MemOperand& opnd) {
+void MacroAssembler::Or(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
if (is_uint12(opnd.offset()))
o(dst, opnd);
@@ -3489,7 +3462,7 @@ void TurboAssembler::Or(Register dst, const MemOperand& opnd) {
}
// OR Pointer Size (Reg - Mem)
-void TurboAssembler::OrP(Register dst, const MemOperand& opnd) {
+void MacroAssembler::OrP(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
#if V8_TARGET_ARCH_S390X
og(dst, opnd);
@@ -3499,10 +3472,10 @@ void TurboAssembler::OrP(Register dst, const MemOperand& opnd) {
}
// OR 32-bit - dst = dst | imm
-void TurboAssembler::Or(Register dst, const Operand& opnd) { oilf(dst, opnd); }
+void MacroAssembler::Or(Register dst, const Operand& opnd) { oilf(dst, opnd); }
// OR Pointer Size - dst = dst | imm
-void TurboAssembler::OrP(Register dst, const Operand& opnd) {
+void MacroAssembler::OrP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X
intptr_t value = opnd.immediate();
if (value >> 32 != 0) {
@@ -3516,25 +3489,25 @@ void TurboAssembler::OrP(Register dst, const Operand& opnd) {
}
// OR 32-bit - dst = src | imm
-void TurboAssembler::Or(Register dst, Register src, const Operand& opnd) {
+void MacroAssembler::Or(Register dst, Register src, const Operand& opnd) {
if (dst != src) lr(dst, src);
oilf(dst, opnd);
}
// OR Pointer Size - dst = src | imm
-void TurboAssembler::OrP(Register dst, Register src, const Operand& opnd) {
+void MacroAssembler::OrP(Register dst, Register src, const Operand& opnd) {
if (dst != src) mov(dst, src);
OrP(dst, opnd);
}
// XOR 32-bit - dst = dst ^ src
-void TurboAssembler::Xor(Register dst, Register src) { xr(dst, src); }
+void MacroAssembler::Xor(Register dst, Register src) { xr(dst, src); }
// XOR Pointer Size - dst = dst ^ src
-void TurboAssembler::XorP(Register dst, Register src) { xgr(dst, src); }
+void MacroAssembler::XorP(Register dst, Register src) { xgr(dst, src); }
// Non-clobbering XOR 32-bit - dst = src1 ^ src2
-void TurboAssembler::Xor(Register dst, Register src1, Register src2) {
+void MacroAssembler::Xor(Register dst, Register src1, Register src2) {
if (dst != src1 && dst != src2) {
// We prefer to generate XR over the non-clobbering XRK,
// as XR is a smaller instruction
@@ -3551,7 +3524,7 @@ void TurboAssembler::Xor(Register dst, Register src1, Register src2) {
}
// Non-clobbering XOR pointer size - dst = src1 ^ src2
-void TurboAssembler::XorP(Register dst, Register src1, Register src2) {
+void MacroAssembler::XorP(Register dst, Register src1, Register src2) {
if (dst != src1 && dst != src2) {
// We prefer to generate XGR over the non-clobbering XGRK,
// as XGR is a smaller instruction
@@ -3568,7 +3541,7 @@ void TurboAssembler::XorP(Register dst, Register src1, Register src2) {
}
// XOR 32-bit (Reg - Mem)
-void TurboAssembler::Xor(Register dst, const MemOperand& opnd) {
+void MacroAssembler::Xor(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
if (is_uint12(opnd.offset()))
x(dst, opnd);
@@ -3577,7 +3550,7 @@ void TurboAssembler::Xor(Register dst, const MemOperand& opnd) {
}
// XOR Pointer Size (Reg - Mem)
-void TurboAssembler::XorP(Register dst, const MemOperand& opnd) {
+void MacroAssembler::XorP(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
#if V8_TARGET_ARCH_S390X
xg(dst, opnd);
@@ -3587,10 +3560,10 @@ void TurboAssembler::XorP(Register dst, const MemOperand& opnd) {
}
// XOR 32-bit - dst = dst ^ imm
-void TurboAssembler::Xor(Register dst, const Operand& opnd) { xilf(dst, opnd); }
+void MacroAssembler::Xor(Register dst, const Operand& opnd) { xilf(dst, opnd); }
// XOR Pointer Size - dst = dst ^ imm
-void TurboAssembler::XorP(Register dst, const Operand& opnd) {
+void MacroAssembler::XorP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X
intptr_t value = opnd.immediate();
xihf(dst, Operand(value >> 32));
@@ -3601,29 +3574,29 @@ void TurboAssembler::XorP(Register dst, const Operand& opnd) {
}
// XOR 32-bit - dst = src ^ imm
-void TurboAssembler::Xor(Register dst, Register src, const Operand& opnd) {
+void MacroAssembler::Xor(Register dst, Register src, const Operand& opnd) {
if (dst != src) lr(dst, src);
xilf(dst, opnd);
}
// XOR Pointer Size - dst = src ^ imm
-void TurboAssembler::XorP(Register dst, Register src, const Operand& opnd) {
+void MacroAssembler::XorP(Register dst, Register src, const Operand& opnd) {
if (dst != src) mov(dst, src);
XorP(dst, opnd);
}
-void TurboAssembler::Not32(Register dst, Register src) {
+void MacroAssembler::Not32(Register dst, Register src) {
if (src != no_reg && src != dst) lr(dst, src);
xilf(dst, Operand(0xFFFFFFFF));
}
-void TurboAssembler::Not64(Register dst, Register src) {
+void MacroAssembler::Not64(Register dst, Register src) {
if (src != no_reg && src != dst) lgr(dst, src);
xihf(dst, Operand(0xFFFFFFFF));
xilf(dst, Operand(0xFFFFFFFF));
}
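// Not64 above flips the two 32-bit halves separately, since the immediate
// XORs (xihf for the high word, xilf for the low word) each cover 32 bits.
// A host-side sketch (illustration only):
#include <cstdint>

inline uint64_t Not64Model(uint64_t v) {
  v ^= 0xFFFFFFFF00000000u;  // xihf(dst, Operand(0xFFFFFFFF))
  v ^= 0x00000000FFFFFFFFu;  // xilf(dst, Operand(0xFFFFFFFF))
  return v;                  // == ~original
}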
-void TurboAssembler::NotP(Register dst, Register src) {
+void MacroAssembler::NotP(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
Not64(dst, src);
#else
@@ -3631,7 +3604,7 @@ void TurboAssembler::NotP(Register dst, Register src) {
#endif
}
-void TurboAssembler::LoadPositiveP(Register result, Register input) {
+void MacroAssembler::LoadPositiveP(Register result, Register input) {
#if V8_TARGET_ARCH_S390X
lpgr(result, input);
#else
@@ -3639,7 +3612,7 @@ void TurboAssembler::LoadPositiveP(Register result, Register input) {
#endif
}
-void TurboAssembler::LoadPositive32(Register result, Register input) {
+void MacroAssembler::LoadPositive32(Register result, Register input) {
lpr(result, input);
lgfr(result, result);
}
@@ -3649,14 +3622,14 @@ void TurboAssembler::LoadPositive32(Register result, Register input) {
//-----------------------------------------------------------------------------
// Compare 32-bit Register vs Register
-void TurboAssembler::CmpS32(Register src1, Register src2) { cr_z(src1, src2); }
+void MacroAssembler::CmpS32(Register src1, Register src2) { cr_z(src1, src2); }
// Compare Pointer Sized Register vs Register
-void TurboAssembler::CmpS64(Register src1, Register src2) { cgr(src1, src2); }
+void MacroAssembler::CmpS64(Register src1, Register src2) { cgr(src1, src2); }
// Compare 32-bit Register vs Immediate
// This helper will set up proper relocation entries if required.
-void TurboAssembler::CmpS32(Register dst, const Operand& opnd) {
+void MacroAssembler::CmpS32(Register dst, const Operand& opnd) {
if (opnd.rmode() == RelocInfo::NO_INFO) {
intptr_t value = opnd.immediate();
if (is_int16(value))
@@ -3672,7 +3645,7 @@ void TurboAssembler::CmpS32(Register dst, const Operand& opnd) {
// Compare Pointer Sized Register vs Immediate
// This helper will set up proper relocation entries if required.
-void TurboAssembler::CmpS64(Register dst, const Operand& opnd) {
+void MacroAssembler::CmpS64(Register dst, const Operand& opnd) {
if (opnd.rmode() == RelocInfo::NO_INFO) {
cgfi(dst, opnd);
} else {
@@ -3682,7 +3655,7 @@ void TurboAssembler::CmpS64(Register dst, const Operand& opnd) {
}
// Compare 32-bit Register vs Memory
-void TurboAssembler::CmpS32(Register dst, const MemOperand& opnd) {
+void MacroAssembler::CmpS32(Register dst, const MemOperand& opnd) {
// Make sure offset is within the 20-bit range.
DCHECK(is_int20(opnd.offset()));
if (is_uint12(opnd.offset()))
@@ -3692,14 +3665,14 @@ void TurboAssembler::CmpS32(Register dst, const MemOperand& opnd) {
}
// Compare Pointer Size Register vs Memory
-void TurboAssembler::CmpS64(Register dst, const MemOperand& opnd) {
+void MacroAssembler::CmpS64(Register dst, const MemOperand& opnd) {
// Make sure offset is within the 20-bit range.
DCHECK(is_int20(opnd.offset()));
cg(dst, opnd);
}
// Using cs or csy based on the offset
-void TurboAssembler::CmpAndSwap(Register old_val, Register new_val,
+void MacroAssembler::CmpAndSwap(Register old_val, Register new_val,
const MemOperand& opnd) {
if (is_uint12(opnd.offset())) {
cs(old_val, new_val, opnd);
@@ -3708,7 +3681,7 @@ void TurboAssembler::CmpAndSwap(Register old_val, Register new_val,
}
}
-void TurboAssembler::CmpAndSwap64(Register old_val, Register new_val,
+void MacroAssembler::CmpAndSwap64(Register old_val, Register new_val,
const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
csg(old_val, new_val, opnd);
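// cs/csg semantics, sketched with std::atomic (illustration only): compare
// old_val with memory; if equal, store new_val, otherwise load the current
// memory contents into old_val. The condition code records which case
// occurred, just like compare_exchange_strong's boolean result.
#include <atomic>
#include <cstdint>

inline bool CmpAndSwap64Model(std::atomic<int64_t>* mem, int64_t& old_val,
                              int64_t new_val) {
  return mem->compare_exchange_strong(old_val, new_val);
}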
@@ -3719,10 +3692,10 @@ void TurboAssembler::CmpAndSwap64(Register old_val, Register new_val,
//-----------------------------------------------------------------------------
// Compare Logical 32-bit Register vs Register
-void TurboAssembler::CmpU32(Register dst, Register src) { clr(dst, src); }
+void MacroAssembler::CmpU32(Register dst, Register src) { clr(dst, src); }
// Compare Logical Pointer Sized Register vs Register
-void TurboAssembler::CmpU64(Register dst, Register src) {
+void MacroAssembler::CmpU64(Register dst, Register src) {
#ifdef V8_TARGET_ARCH_S390X
clgr(dst, src);
#else
@@ -3731,12 +3704,12 @@ void TurboAssembler::CmpU64(Register dst, Register src) {
}
// Compare Logical 32-bit Register vs Immediate
-void TurboAssembler::CmpU32(Register dst, const Operand& opnd) {
+void MacroAssembler::CmpU32(Register dst, const Operand& opnd) {
clfi(dst, opnd);
}
// Compare Logical Pointer Sized Register vs Immediate
-void TurboAssembler::CmpU64(Register dst, const Operand& opnd) {
+void MacroAssembler::CmpU64(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X
DCHECK_EQ(static_cast<uint32_t>(opnd.immediate() >> 32), 0);
clgfi(dst, opnd);
@@ -3746,7 +3719,7 @@ void TurboAssembler::CmpU64(Register dst, const Operand& opnd) {
}
// Compare Logical 32-bit Register vs Memory
-void TurboAssembler::CmpU32(Register dst, const MemOperand& opnd) {
+void MacroAssembler::CmpU32(Register dst, const MemOperand& opnd) {
// Make sure offset is within the 20-bit range.
DCHECK(is_int20(opnd.offset()));
if (is_uint12(opnd.offset()))
@@ -3756,7 +3729,7 @@ void TurboAssembler::CmpU32(Register dst, const MemOperand& opnd) {
}
// Compare Logical Pointer Sized Register vs Memory
-void TurboAssembler::CmpU64(Register dst, const MemOperand& opnd) {
+void MacroAssembler::CmpU64(Register dst, const MemOperand& opnd) {
// Make sure offset is within the 20-bit range.
DCHECK(is_int20(opnd.offset()));
#if V8_TARGET_ARCH_S390X
@@ -3766,7 +3739,7 @@ void TurboAssembler::CmpU64(Register dst, const MemOperand& opnd) {
#endif
}
-void TurboAssembler::Branch(Condition c, const Operand& opnd) {
+void MacroAssembler::Branch(Condition c, const Operand& opnd) {
intptr_t value = opnd.immediate();
if (is_int16(value))
brc(c, opnd);
@@ -3775,7 +3748,7 @@ void TurboAssembler::Branch(Condition c, const Operand& opnd) {
}
// Branch On Count. Decrement R1, and branch if R1 != 0.
-void TurboAssembler::BranchOnCount(Register r1, Label* l) {
+void MacroAssembler::BranchOnCount(Register r1, Label* l) {
int32_t offset = branch_offset(l);
if (is_int16(offset)) {
#if V8_TARGET_ARCH_S390X
@@ -3789,7 +3762,7 @@ void TurboAssembler::BranchOnCount(Register r1, Label* l) {
}
}
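// brct/brctg encode the classic count-down loop: decrement r1, then branch
// while it is still nonzero. A host-side sketch of the loop shape the
// helper supports (illustration only, assumes n >= 1):
#include <cstdint>

inline int64_t SumFirstN(int64_t n) {
  int64_t sum = 0;
  int64_t r1 = n;
  do {
    sum += r1;          // loop body
  } while (--r1 != 0);  // BranchOnCount(r1, &loop)
  return sum;
}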
-void TurboAssembler::LoadSmiLiteral(Register dst, Smi smi) {
+void MacroAssembler::LoadSmiLiteral(Register dst, Smi smi) {
intptr_t value = static_cast<intptr_t>(smi.ptr());
#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
llilf(dst, Operand(value));
@@ -3800,7 +3773,7 @@ void TurboAssembler::LoadSmiLiteral(Register dst, Smi smi) {
#endif
}
-void TurboAssembler::CmpSmiLiteral(Register src1, Smi smi, Register scratch) {
+void MacroAssembler::CmpSmiLiteral(Register src1, Smi smi, Register scratch) {
#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
// CFI takes 32-bit immediate.
cfi(src1, Operand(smi));
@@ -3814,7 +3787,7 @@ void TurboAssembler::CmpSmiLiteral(Register src1, Smi smi, Register scratch) {
#endif
}
-void TurboAssembler::LoadU64(Register dst, const MemOperand& mem,
+void MacroAssembler::LoadU64(Register dst, const MemOperand& mem,
Register scratch) {
int offset = mem.offset();
@@ -3829,7 +3802,7 @@ void TurboAssembler::LoadU64(Register dst, const MemOperand& mem,
}
// Store a "pointer" sized value to the memory location
-void TurboAssembler::StoreU64(Register src, const MemOperand& mem,
+void MacroAssembler::StoreU64(Register src, const MemOperand& mem,
Register scratch) {
if (!is_int20(mem.offset())) {
DCHECK(scratch != no_reg);
@@ -3842,7 +3815,7 @@ void TurboAssembler::StoreU64(Register src, const MemOperand& mem,
}
// Store a "pointer" sized constant to the memory location
-void TurboAssembler::StoreU64(const MemOperand& mem, const Operand& opnd,
+void MacroAssembler::StoreU64(const MemOperand& mem, const Operand& opnd,
Register scratch) {
// Relocations not supported
DCHECK_EQ(opnd.rmode(), RelocInfo::NO_INFO);
@@ -3857,7 +3830,7 @@ void TurboAssembler::StoreU64(const MemOperand& mem, const Operand& opnd,
}
}
-void TurboAssembler::LoadMultipleP(Register dst1, Register dst2,
+void MacroAssembler::LoadMultipleP(Register dst1, Register dst2,
const MemOperand& mem) {
#if V8_TARGET_ARCH_S390X
DCHECK(is_int20(mem.offset()));
@@ -3872,7 +3845,7 @@ void TurboAssembler::LoadMultipleP(Register dst1, Register dst2,
#endif
}
-void TurboAssembler::StoreMultipleP(Register src1, Register src2,
+void MacroAssembler::StoreMultipleP(Register src1, Register src2,
const MemOperand& mem) {
#if V8_TARGET_ARCH_S390X
DCHECK(is_int20(mem.offset()));
@@ -3887,7 +3860,7 @@ void TurboAssembler::StoreMultipleP(Register src1, Register src2,
#endif
}
-void TurboAssembler::LoadMultipleW(Register dst1, Register dst2,
+void MacroAssembler::LoadMultipleW(Register dst1, Register dst2,
const MemOperand& mem) {
if (is_uint12(mem.offset())) {
lm(dst1, dst2, mem);
@@ -3897,7 +3870,7 @@ void TurboAssembler::LoadMultipleW(Register dst1, Register dst2,
}
}
-void TurboAssembler::StoreMultipleW(Register src1, Register src2,
+void MacroAssembler::StoreMultipleW(Register src1, Register src2,
const MemOperand& mem) {
if (is_uint12(mem.offset())) {
stm(src1, src2, mem);
@@ -3908,7 +3881,7 @@ void TurboAssembler::StoreMultipleW(Register src1, Register src2,
}
// Load 32 bits and sign extend if necessary.
-void TurboAssembler::LoadS32(Register dst, Register src) {
+void MacroAssembler::LoadS32(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
lgfr(dst, src);
#else
@@ -3917,8 +3890,8 @@ void TurboAssembler::LoadS32(Register dst, Register src) {
}
// Load 32 bits and sign extend if necessary.
-void TurboAssembler::LoadS32(Register dst, const MemOperand& mem,
- Register scratch) {
+void MacroAssembler::LoadS32(Register dst, const MemOperand& mem,
+ Register scratch) {
int offset = mem.offset();
if (!is_int20(offset)) {
@@ -3943,7 +3916,7 @@ void TurboAssembler::LoadS32(Register dst, const MemOperand& mem,
}
// Load 32 bits and zero extend if necessary.
-void TurboAssembler::LoadU32(Register dst, Register src) {
+void MacroAssembler::LoadU32(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
llgfr(dst, src);
#else
@@ -3953,8 +3926,8 @@ void TurboAssembler::LoadU32(Register dst, Register src) {
// Variable length depending on whether offset fits into immediate field
// MemOperand of RX or RXY format
-void TurboAssembler::LoadU32(Register dst, const MemOperand& mem,
- Register scratch) {
+void MacroAssembler::LoadU32(Register dst, const MemOperand& mem,
+ Register scratch) {
Register base = mem.rb();
int offset = mem.offset();
@@ -3994,7 +3967,7 @@ void TurboAssembler::LoadU32(Register dst, const MemOperand& mem,
#endif
}
-void TurboAssembler::LoadU16(Register dst, const MemOperand& mem) {
+void MacroAssembler::LoadU16(Register dst, const MemOperand& mem) {
// TODO(s390x): Add scratch reg
#if V8_TARGET_ARCH_S390X
llgh(dst, mem);
@@ -4003,7 +3976,7 @@ void TurboAssembler::LoadU16(Register dst, const MemOperand& mem) {
#endif
}
-void TurboAssembler::LoadU16(Register dst, Register src) {
+void MacroAssembler::LoadU16(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
llghr(dst, src);
#else
@@ -4011,7 +3984,7 @@ void TurboAssembler::LoadU16(Register dst, Register src) {
#endif
}
-void TurboAssembler::LoadS8(Register dst, const MemOperand& mem) {
+void MacroAssembler::LoadS8(Register dst, const MemOperand& mem) {
// TODO(s390x): Add scratch reg
#if V8_TARGET_ARCH_S390X
lgb(dst, mem);
@@ -4020,7 +3993,7 @@ void TurboAssembler::LoadS8(Register dst, const MemOperand& mem) {
#endif
}
-void TurboAssembler::LoadS8(Register dst, Register src) {
+void MacroAssembler::LoadS8(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
lgbr(dst, src);
#else
@@ -4028,7 +4001,7 @@ void TurboAssembler::LoadS8(Register dst, Register src) {
#endif
}
-void TurboAssembler::LoadU8(Register dst, const MemOperand& mem) {
+void MacroAssembler::LoadU8(Register dst, const MemOperand& mem) {
// TODO(s390x): Add scratch reg
#if V8_TARGET_ARCH_S390X
llgc(dst, mem);
@@ -4037,7 +4010,7 @@ void TurboAssembler::LoadU8(Register dst, const MemOperand& mem) {
#endif
}
-void TurboAssembler::LoadU8(Register dst, Register src) {
+void MacroAssembler::LoadU8(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
llgcr(dst, src);
#else
@@ -4046,34 +4019,34 @@ void TurboAssembler::LoadU8(Register dst, Register src) {
}
#ifdef V8_TARGET_BIG_ENDIAN
-void TurboAssembler::LoadU64LE(Register dst, const MemOperand& mem,
+void MacroAssembler::LoadU64LE(Register dst, const MemOperand& mem,
Register scratch) {
lrvg(dst, mem);
}
-void TurboAssembler::LoadS32LE(Register dst, const MemOperand& opnd,
+void MacroAssembler::LoadS32LE(Register dst, const MemOperand& opnd,
Register scratch) {
lrv(dst, opnd);
LoadS32(dst, dst);
}
-void TurboAssembler::LoadU32LE(Register dst, const MemOperand& opnd,
+void MacroAssembler::LoadU32LE(Register dst, const MemOperand& opnd,
Register scratch) {
lrv(dst, opnd);
LoadU32(dst, dst);
}
-void TurboAssembler::LoadU16LE(Register dst, const MemOperand& opnd) {
+void MacroAssembler::LoadU16LE(Register dst, const MemOperand& opnd) {
lrvh(dst, opnd);
LoadU16(dst, dst);
}
-void TurboAssembler::LoadS16LE(Register dst, const MemOperand& opnd) {
+void MacroAssembler::LoadS16LE(Register dst, const MemOperand& opnd) {
lrvh(dst, opnd);
LoadS16(dst, dst);
}
-void TurboAssembler::LoadV128LE(DoubleRegister dst, const MemOperand& opnd,
+void MacroAssembler::LoadV128LE(DoubleRegister dst, const MemOperand& opnd,
Register scratch0, Register scratch1) {
bool use_vlbr = CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2) &&
is_uint12(opnd.offset());
@@ -4087,20 +4060,20 @@ void TurboAssembler::LoadV128LE(DoubleRegister dst, const MemOperand& opnd,
}
}
-void TurboAssembler::LoadF64LE(DoubleRegister dst, const MemOperand& opnd,
+void MacroAssembler::LoadF64LE(DoubleRegister dst, const MemOperand& opnd,
Register scratch) {
lrvg(scratch, opnd);
ldgr(dst, scratch);
}
-void TurboAssembler::LoadF32LE(DoubleRegister dst, const MemOperand& opnd,
+void MacroAssembler::LoadF32LE(DoubleRegister dst, const MemOperand& opnd,
Register scratch) {
lrv(scratch, opnd);
ShiftLeftU64(scratch, scratch, Operand(32));
ldgr(dst, scratch);
}
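// On big-endian s390, the *LE helpers byte-reverse through a GPR: lrv loads
// a byte-swapped 32-bit word, which is shifted into the high half of the
// 64-bit value before ldgr moves it into the FPR (where 32-bit floats
// live). A host-side sketch (illustration only, using the GCC/Clang
// __builtin_bswap32 intrinsic):
#include <cstdint>

inline uint64_t LoadF32LEModel(uint32_t raw_le_word) {
  uint32_t swapped = __builtin_bswap32(raw_le_word);  // lrv
  return static_cast<uint64_t>(swapped) << 32;        // sllg ..., 32; ldgr
}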
-void TurboAssembler::StoreU64LE(Register src, const MemOperand& mem,
+void MacroAssembler::StoreU64LE(Register src, const MemOperand& mem,
Register scratch) {
if (!is_int20(mem.offset())) {
DCHECK(scratch != no_reg);
@@ -4112,7 +4085,7 @@ void TurboAssembler::StoreU64LE(Register src, const MemOperand& mem,
}
}
-void TurboAssembler::StoreU32LE(Register src, const MemOperand& mem,
+void MacroAssembler::StoreU32LE(Register src, const MemOperand& mem,
Register scratch) {
if (!is_int20(mem.offset())) {
DCHECK(scratch != no_reg);
@@ -4124,7 +4097,7 @@ void TurboAssembler::StoreU32LE(Register src, const MemOperand& mem,
}
}
-void TurboAssembler::StoreU16LE(Register src, const MemOperand& mem,
+void MacroAssembler::StoreU16LE(Register src, const MemOperand& mem,
Register scratch) {
if (!is_int20(mem.offset())) {
DCHECK(scratch != no_reg);
@@ -4136,14 +4109,14 @@ void TurboAssembler::StoreU16LE(Register src, const MemOperand& mem,
}
}
-void TurboAssembler::StoreF64LE(DoubleRegister src, const MemOperand& opnd,
+void MacroAssembler::StoreF64LE(DoubleRegister src, const MemOperand& opnd,
Register scratch) {
DCHECK(is_uint12(opnd.offset()));
lgdr(scratch, src);
strvg(scratch, opnd);
}
-void TurboAssembler::StoreF32LE(DoubleRegister src, const MemOperand& opnd,
+void MacroAssembler::StoreF32LE(DoubleRegister src, const MemOperand& opnd,
Register scratch) {
DCHECK(is_uint12(opnd.offset()));
lgdr(scratch, src);
@@ -4151,7 +4124,7 @@ void TurboAssembler::StoreF32LE(DoubleRegister src, const MemOperand& opnd,
strv(scratch, opnd);
}
-void TurboAssembler::StoreV128LE(Simd128Register src, const MemOperand& mem,
+void MacroAssembler::StoreV128LE(Simd128Register src, const MemOperand& mem,
Register scratch1, Register scratch2) {
bool use_vstbr = CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2) &&
is_uint12(mem.offset());
@@ -4167,73 +4140,73 @@ void TurboAssembler::StoreV128LE(Simd128Register src, const MemOperand& mem,
}
#else
-void TurboAssembler::LoadU64LE(Register dst, const MemOperand& mem,
+void MacroAssembler::LoadU64LE(Register dst, const MemOperand& mem,
Register scratch) {
LoadU64(dst, mem, scratch);
}
-void TurboAssembler::LoadS32LE(Register dst, const MemOperand& opnd,
+void MacroAssembler::LoadS32LE(Register dst, const MemOperand& opnd,
Register scratch) {
LoadS32(dst, opnd, scratch);
}
-void TurboAssembler::LoadU32LE(Register dst, const MemOperand& opnd,
+void MacroAssembler::LoadU32LE(Register dst, const MemOperand& opnd,
Register scratch) {
LoadU32(dst, opnd, scratch);
}
-void TurboAssembler::LoadU16LE(Register dst, const MemOperand& opnd) {
+void MacroAssembler::LoadU16LE(Register dst, const MemOperand& opnd) {
LoadU16(dst, opnd);
}
-void TurboAssembler::LoadS16LE(Register dst, const MemOperand& opnd) {
+void MacroAssembler::LoadS16LE(Register dst, const MemOperand& opnd) {
LoadS16(dst, opnd);
}
-void TurboAssembler::LoadV128LE(DoubleRegister dst, const MemOperand& opnd,
+void MacroAssembler::LoadV128LE(DoubleRegister dst, const MemOperand& opnd,
Register scratch0, Register scratch1) {
USE(scratch1);
LoadV128(dst, opnd, scratch0);
}
-void TurboAssembler::LoadF64LE(DoubleRegister dst, const MemOperand& opnd,
+void MacroAssembler::LoadF64LE(DoubleRegister dst, const MemOperand& opnd,
Register scratch) {
USE(scratch);
LoadF64(dst, opnd);
}
-void TurboAssembler::LoadF32LE(DoubleRegister dst, const MemOperand& opnd,
+void MacroAssembler::LoadF32LE(DoubleRegister dst, const MemOperand& opnd,
Register scratch) {
USE(scratch);
LoadF32(dst, opnd);
}
-void TurboAssembler::StoreU64LE(Register src, const MemOperand& mem,
+void MacroAssembler::StoreU64LE(Register src, const MemOperand& mem,
Register scratch) {
StoreU64(src, mem, scratch);
}
-void TurboAssembler::StoreU32LE(Register src, const MemOperand& mem,
+void MacroAssembler::StoreU32LE(Register src, const MemOperand& mem,
Register scratch) {
StoreU32(src, mem, scratch);
}
-void TurboAssembler::StoreU16LE(Register src, const MemOperand& mem,
+void MacroAssembler::StoreU16LE(Register src, const MemOperand& mem,
Register scratch) {
StoreU16(src, mem, scratch);
}
-void TurboAssembler::StoreF64LE(DoubleRegister src, const MemOperand& opnd,
+void MacroAssembler::StoreF64LE(DoubleRegister src, const MemOperand& opnd,
Register scratch) {
StoreF64(src, opnd);
}
-void TurboAssembler::StoreF32LE(DoubleRegister src, const MemOperand& opnd,
+void MacroAssembler::StoreF32LE(DoubleRegister src, const MemOperand& opnd,
Register scratch) {
StoreF32(src, opnd);
}
-void TurboAssembler::StoreV128LE(Simd128Register src, const MemOperand& mem,
+void MacroAssembler::StoreV128LE(Simd128Register src, const MemOperand& mem,
Register scratch1, Register scratch2) {
StoreV128(src, mem, scratch1);
}
@@ -4241,12 +4214,12 @@ void TurboAssembler::StoreV128LE(Simd128Register src, const MemOperand& mem,
#endif
// Load And Test (Reg <- Reg)
-void TurboAssembler::LoadAndTest32(Register dst, Register src) {
+void MacroAssembler::LoadAndTest32(Register dst, Register src) {
ltr(dst, src);
}
// Load And Test Pointer Sized (Reg <- Reg)
-void TurboAssembler::LoadAndTestP(Register dst, Register src) {
+void MacroAssembler::LoadAndTestP(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
ltgr(dst, src);
#else
@@ -4255,12 +4228,12 @@ void TurboAssembler::LoadAndTestP(Register dst, Register src) {
}
// Load And Test 32-bit (Reg <- Mem)
-void TurboAssembler::LoadAndTest32(Register dst, const MemOperand& mem) {
+void MacroAssembler::LoadAndTest32(Register dst, const MemOperand& mem) {
lt_z(dst, mem);
}
// Load And Test Pointer Sized (Reg <- Mem)
-void TurboAssembler::LoadAndTestP(Register dst, const MemOperand& mem) {
+void MacroAssembler::LoadAndTestP(Register dst, const MemOperand& mem) {
#if V8_TARGET_ARCH_S390X
ltg(dst, mem);
#else
@@ -4269,7 +4242,7 @@ void TurboAssembler::LoadAndTestP(Register dst, const MemOperand& mem) {
}
// Load On Condition Pointer Sized (Reg <- Reg)
-void TurboAssembler::LoadOnConditionP(Condition cond, Register dst,
+void MacroAssembler::LoadOnConditionP(Condition cond, Register dst,
Register src) {
#if V8_TARGET_ARCH_S390X
locgr(cond, dst, src);
@@ -4279,7 +4252,7 @@ void TurboAssembler::LoadOnConditionP(Condition cond, Register dst,
}
// Load Double Precision (64-bit) Floating Point number from memory
-void TurboAssembler::LoadF64(DoubleRegister dst, const MemOperand& mem) {
+void MacroAssembler::LoadF64(DoubleRegister dst, const MemOperand& mem) {
// For both 32-bit and 64-bit we use 64-bit floating point regs.
if (is_uint12(mem.offset())) {
ld(dst, mem);
@@ -4289,7 +4262,7 @@ void TurboAssembler::LoadF64(DoubleRegister dst, const MemOperand& mem) {
}
// Load Single Precision (32-bit) Floating Point number from memory
-void TurboAssembler::LoadF32(DoubleRegister dst, const MemOperand& mem) {
+void MacroAssembler::LoadF32(DoubleRegister dst, const MemOperand& mem) {
if (is_uint12(mem.offset())) {
le_z(dst, mem);
} else {
@@ -4298,7 +4271,7 @@ void TurboAssembler::LoadF32(DoubleRegister dst, const MemOperand& mem) {
}
}
-void TurboAssembler::LoadV128(Simd128Register dst, const MemOperand& mem,
+void MacroAssembler::LoadV128(Simd128Register dst, const MemOperand& mem,
Register scratch) {
DCHECK(scratch != r0);
if (is_uint12(mem.offset())) {
@@ -4311,7 +4284,7 @@ void TurboAssembler::LoadV128(Simd128Register dst, const MemOperand& mem,
}
// Store Double Precision (64-bit) Floating Point number to memory
-void TurboAssembler::StoreF64(DoubleRegister dst, const MemOperand& mem) {
+void MacroAssembler::StoreF64(DoubleRegister dst, const MemOperand& mem) {
if (is_uint12(mem.offset())) {
std(dst, mem);
} else {
@@ -4320,7 +4293,7 @@ void TurboAssembler::StoreF64(DoubleRegister dst, const MemOperand& mem) {
}
// Store Single Precision (32-bit) Floating Point number to memory
-void TurboAssembler::StoreF32(DoubleRegister src, const MemOperand& mem) {
+void MacroAssembler::StoreF32(DoubleRegister src, const MemOperand& mem) {
if (is_uint12(mem.offset())) {
ste(src, mem);
} else {
@@ -4328,7 +4301,7 @@ void TurboAssembler::StoreF32(DoubleRegister src, const MemOperand& mem) {
}
}
-void TurboAssembler::StoreV128(Simd128Register src, const MemOperand& mem,
+void MacroAssembler::StoreV128(Simd128Register src, const MemOperand& mem,
Register scratch) {
DCHECK(scratch != r0);
if (is_uint12(mem.offset())) {
@@ -4340,7 +4313,7 @@ void TurboAssembler::StoreV128(Simd128Register src, const MemOperand& mem,
}
}
-void TurboAssembler::AddF32(DoubleRegister dst, DoubleRegister lhs,
+void MacroAssembler::AddF32(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
if (dst == lhs) {
aebr(dst, rhs);
@@ -4352,7 +4325,7 @@ void TurboAssembler::AddF32(DoubleRegister dst, DoubleRegister lhs,
}
}
-void TurboAssembler::SubF32(DoubleRegister dst, DoubleRegister lhs,
+void MacroAssembler::SubF32(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
if (dst == lhs) {
sebr(dst, rhs);
@@ -4365,7 +4338,7 @@ void TurboAssembler::SubF32(DoubleRegister dst, DoubleRegister lhs,
}
}
-void TurboAssembler::MulF32(DoubleRegister dst, DoubleRegister lhs,
+void MacroAssembler::MulF32(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
if (dst == lhs) {
meebr(dst, rhs);
@@ -4377,7 +4350,7 @@ void TurboAssembler::MulF32(DoubleRegister dst, DoubleRegister lhs,
}
}
-void TurboAssembler::DivF32(DoubleRegister dst, DoubleRegister lhs,
+void MacroAssembler::DivF32(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
if (dst == lhs) {
debr(dst, rhs);
@@ -4393,7 +4366,7 @@ void TurboAssembler::DivF32(DoubleRegister dst, DoubleRegister lhs,
}
}
-void TurboAssembler::AddF64(DoubleRegister dst, DoubleRegister lhs,
+void MacroAssembler::AddF64(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
if (dst == lhs) {
adbr(dst, rhs);
@@ -4405,7 +4378,7 @@ void TurboAssembler::AddF64(DoubleRegister dst, DoubleRegister lhs,
}
}
-void TurboAssembler::SubF64(DoubleRegister dst, DoubleRegister lhs,
+void MacroAssembler::SubF64(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
if (dst == lhs) {
sdbr(dst, rhs);
@@ -4418,7 +4391,7 @@ void TurboAssembler::SubF64(DoubleRegister dst, DoubleRegister lhs,
}
}
-void TurboAssembler::MulF64(DoubleRegister dst, DoubleRegister lhs,
+void MacroAssembler::MulF64(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
if (dst == lhs) {
mdbr(dst, rhs);
@@ -4430,7 +4403,7 @@ void TurboAssembler::MulF64(DoubleRegister dst, DoubleRegister lhs,
}
}
-void TurboAssembler::DivF64(DoubleRegister dst, DoubleRegister lhs,
+void MacroAssembler::DivF64(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
if (dst == lhs) {
ddbr(dst, rhs);
@@ -4446,7 +4419,7 @@ void TurboAssembler::DivF64(DoubleRegister dst, DoubleRegister lhs,
}
}
-void TurboAssembler::AddFloat32(DoubleRegister dst, const MemOperand& opnd,
+void MacroAssembler::AddFloat32(DoubleRegister dst, const MemOperand& opnd,
DoubleRegister scratch) {
if (is_uint12(opnd.offset())) {
aeb(dst, opnd);
@@ -4456,7 +4429,7 @@ void TurboAssembler::AddFloat32(DoubleRegister dst, const MemOperand& opnd,
}
}
-void TurboAssembler::AddFloat64(DoubleRegister dst, const MemOperand& opnd,
+void MacroAssembler::AddFloat64(DoubleRegister dst, const MemOperand& opnd,
DoubleRegister scratch) {
if (is_uint12(opnd.offset())) {
adb(dst, opnd);
@@ -4466,7 +4439,7 @@ void TurboAssembler::AddFloat64(DoubleRegister dst, const MemOperand& opnd,
}
}
-void TurboAssembler::SubFloat32(DoubleRegister dst, const MemOperand& opnd,
+void MacroAssembler::SubFloat32(DoubleRegister dst, const MemOperand& opnd,
DoubleRegister scratch) {
if (is_uint12(opnd.offset())) {
seb(dst, opnd);
@@ -4476,7 +4449,7 @@ void TurboAssembler::SubFloat32(DoubleRegister dst, const MemOperand& opnd,
}
}
-void TurboAssembler::SubFloat64(DoubleRegister dst, const MemOperand& opnd,
+void MacroAssembler::SubFloat64(DoubleRegister dst, const MemOperand& opnd,
DoubleRegister scratch) {
if (is_uint12(opnd.offset())) {
sdb(dst, opnd);
@@ -4486,7 +4459,7 @@ void TurboAssembler::SubFloat64(DoubleRegister dst, const MemOperand& opnd,
}
}
-void TurboAssembler::MulFloat32(DoubleRegister dst, const MemOperand& opnd,
+void MacroAssembler::MulFloat32(DoubleRegister dst, const MemOperand& opnd,
DoubleRegister scratch) {
if (is_uint12(opnd.offset())) {
meeb(dst, opnd);
@@ -4496,7 +4469,7 @@ void TurboAssembler::MulFloat32(DoubleRegister dst, const MemOperand& opnd,
}
}
-void TurboAssembler::MulFloat64(DoubleRegister dst, const MemOperand& opnd,
+void MacroAssembler::MulFloat64(DoubleRegister dst, const MemOperand& opnd,
DoubleRegister scratch) {
if (is_uint12(opnd.offset())) {
mdb(dst, opnd);
@@ -4506,7 +4479,7 @@ void TurboAssembler::MulFloat64(DoubleRegister dst, const MemOperand& opnd,
}
}
-void TurboAssembler::DivFloat32(DoubleRegister dst, const MemOperand& opnd,
+void MacroAssembler::DivFloat32(DoubleRegister dst, const MemOperand& opnd,
DoubleRegister scratch) {
if (is_uint12(opnd.offset())) {
deb(dst, opnd);
@@ -4516,7 +4489,7 @@ void TurboAssembler::DivFloat32(DoubleRegister dst, const MemOperand& opnd,
}
}
-void TurboAssembler::DivFloat64(DoubleRegister dst, const MemOperand& opnd,
+void MacroAssembler::DivFloat64(DoubleRegister dst, const MemOperand& opnd,
DoubleRegister scratch) {
if (is_uint12(opnd.offset())) {
ddb(dst, opnd);
@@ -4526,7 +4499,7 @@ void TurboAssembler::DivFloat64(DoubleRegister dst, const MemOperand& opnd,
}
}
-void TurboAssembler::LoadF32AsF64(DoubleRegister dst, const MemOperand& opnd,
+void MacroAssembler::LoadF32AsF64(DoubleRegister dst, const MemOperand& opnd,
DoubleRegister scratch) {
if (is_uint12(opnd.offset())) {
ldeb(dst, opnd);
@@ -4538,7 +4511,7 @@ void TurboAssembler::LoadF32AsF64(DoubleRegister dst, const MemOperand& opnd,
// Variable length depending on whether offset fits into immediate field
// MemOperand of RX or RXY format
-void TurboAssembler::StoreU32(Register src, const MemOperand& mem,
+void MacroAssembler::StoreU32(Register src, const MemOperand& mem,
Register scratch) {
Register base = mem.rb();
int offset = mem.offset();
@@ -4569,7 +4542,7 @@ void TurboAssembler::StoreU32(Register src, const MemOperand& mem,
}
}
-void TurboAssembler::LoadS16(Register dst, Register src) {
+void MacroAssembler::LoadS16(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
lghr(dst, src);
#else
@@ -4579,8 +4552,8 @@ void TurboAssembler::LoadS16(Register dst, Register src) {
// Loads a 16-bit halfword value from memory and sign extends it to a
// pointer-sized register.
-void TurboAssembler::LoadS16(Register dst, const MemOperand& mem,
- Register scratch) {
+void MacroAssembler::LoadS16(Register dst, const MemOperand& mem,
+ Register scratch) {
Register base = mem.rb();
int offset = mem.offset();
@@ -4607,7 +4580,7 @@ void TurboAssembler::LoadS16(Register dst, const MemOperand& mem,
// Variable length depending on whether offset fits into immediate field
// MemOperand currently only supports d-form
-void TurboAssembler::StoreU16(Register src, const MemOperand& mem,
+void MacroAssembler::StoreU16(Register src, const MemOperand& mem,
Register scratch) {
Register base = mem.rb();
int offset = mem.offset();
@@ -4625,7 +4598,7 @@ void TurboAssembler::StoreU16(Register src, const MemOperand& mem,
// Variable length depending on whether offset fits into immediate field
// MemOperand currently only supports d-form
-void TurboAssembler::StoreU8(Register src, const MemOperand& mem,
+void MacroAssembler::StoreU8(Register src, const MemOperand& mem,
Register scratch) {
Register base = mem.rb();
int offset = mem.offset();
@@ -4642,13 +4615,13 @@ void TurboAssembler::StoreU8(Register src, const MemOperand& mem,
}
// Shift left logical for 32-bit integer types.
-void TurboAssembler::ShiftLeftU32(Register dst, Register src,
+void MacroAssembler::ShiftLeftU32(Register dst, Register src,
const Operand& val) {
ShiftLeftU32(dst, src, r0, val);
}
// Shift left logical for 32-bit integer types.
-void TurboAssembler::ShiftLeftU32(Register dst, Register src, Register val,
+void MacroAssembler::ShiftLeftU32(Register dst, Register src, Register val,
const Operand& val2) {
if (dst == src) {
sll(dst, val, val2);
@@ -4662,25 +4635,25 @@ void TurboAssembler::ShiftLeftU32(Register dst, Register src, Register val,
}
// Shift left logical for 64-bit integer types.
-void TurboAssembler::ShiftLeftU64(Register dst, Register src,
+void MacroAssembler::ShiftLeftU64(Register dst, Register src,
const Operand& val) {
ShiftLeftU64(dst, src, r0, val);
}
// Shift left logical for 64-bit integer types.
-void TurboAssembler::ShiftLeftU64(Register dst, Register src, Register val,
+void MacroAssembler::ShiftLeftU64(Register dst, Register src, Register val,
const Operand& val2) {
sllg(dst, src, val, val2);
}
// Shift right logical for 32-bit integer types.
-void TurboAssembler::ShiftRightU32(Register dst, Register src,
+void MacroAssembler::ShiftRightU32(Register dst, Register src,
const Operand& val) {
ShiftRightU32(dst, src, r0, val);
}
// Shift right logical for 32-bit integer types.
-void TurboAssembler::ShiftRightU32(Register dst, Register src, Register val,
+void MacroAssembler::ShiftRightU32(Register dst, Register src, Register val,
const Operand& val2) {
if (dst == src) {
srl(dst, val, val2);
@@ -4693,25 +4666,25 @@ void TurboAssembler::ShiftRightU32(Register dst, Register src, Register val,
}
}
-void TurboAssembler::ShiftRightU64(Register dst, Register src, Register val,
+void MacroAssembler::ShiftRightU64(Register dst, Register src, Register val,
const Operand& val2) {
srlg(dst, src, val, val2);
}
// Shift right logical for 64-bit integer types.
-void TurboAssembler::ShiftRightU64(Register dst, Register src,
+void MacroAssembler::ShiftRightU64(Register dst, Register src,
const Operand& val) {
ShiftRightU64(dst, src, r0, val);
}
// Shift right arithmetic for 32-bit integer types.
-void TurboAssembler::ShiftRightS32(Register dst, Register src,
+void MacroAssembler::ShiftRightS32(Register dst, Register src,
const Operand& val) {
ShiftRightS32(dst, src, r0, val);
}
// Shift right arithmetic for 32-bit integer types.
-void TurboAssembler::ShiftRightS32(Register dst, Register src, Register val,
+void MacroAssembler::ShiftRightS32(Register dst, Register src, Register val,
const Operand& val2) {
if (dst == src) {
sra(dst, val, val2);
@@ -4725,19 +4698,19 @@ void TurboAssembler::ShiftRightS32(Register dst, Register src, Register val,
}
// Shift right arithmetic for 64-bit integer types.
-void TurboAssembler::ShiftRightS64(Register dst, Register src,
+void MacroAssembler::ShiftRightS64(Register dst, Register src,
const Operand& val) {
ShiftRightS64(dst, src, r0, val);
}
// Shift right arithmetic for 64-bit integer types.
-void TurboAssembler::ShiftRightS64(Register dst, Register src, Register val,
+void MacroAssembler::ShiftRightS64(Register dst, Register src, Register val,
const Operand& val2) {
srag(dst, src, val, val2);
}
// Clear rightmost # of bits
-void TurboAssembler::ClearRightImm(Register dst, Register src,
+void MacroAssembler::ClearRightImm(Register dst, Register src,
const Operand& val) {
int numBitsToClear = val.immediate() % (kSystemPointerSize * 8);
@@ -4764,7 +4737,7 @@ void TurboAssembler::ClearRightImm(Register dst, Register src,
}
}
-void TurboAssembler::Popcnt32(Register dst, Register src) {
+void MacroAssembler::Popcnt32(Register dst, Register src) {
DCHECK(src != r0);
DCHECK(dst != r0);
@@ -4777,7 +4750,7 @@ void TurboAssembler::Popcnt32(Register dst, Register src) {
}
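// The z/Architecture POPULATION COUNT instruction produces a per-byte
// bit count; the elided body presumably folds the byte counts into a
// single sum, using r0 as scratch (hence the DCHECKs above).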
#ifdef V8_TARGET_ARCH_S390X
-void TurboAssembler::Popcnt64(Register dst, Register src) {
+void MacroAssembler::Popcnt64(Register dst, Register src) {
DCHECK(src != r0);
DCHECK(dst != r0);
@@ -4792,7 +4765,7 @@ void TurboAssembler::Popcnt64(Register dst, Register src) {
}
#endif
-void TurboAssembler::SwapP(Register src, Register dst, Register scratch) {
+void MacroAssembler::SwapP(Register src, Register dst, Register scratch) {
if (src == dst) return;
DCHECK(!AreAliased(src, dst, scratch));
mov(scratch, src);
@@ -4800,7 +4773,7 @@ void TurboAssembler::SwapP(Register src, Register dst, Register scratch) {
mov(dst, scratch);
}
-void TurboAssembler::SwapP(Register src, MemOperand dst, Register scratch) {
+void MacroAssembler::SwapP(Register src, MemOperand dst, Register scratch) {
if (dst.rx() != r0) DCHECK(!AreAliased(src, dst.rx(), scratch));
if (dst.rb() != r0) DCHECK(!AreAliased(src, dst.rb(), scratch));
DCHECK(!AreAliased(src, scratch));
@@ -4809,7 +4782,7 @@ void TurboAssembler::SwapP(Register src, MemOperand dst, Register scratch) {
StoreU64(scratch, dst);
}
-void TurboAssembler::SwapP(MemOperand src, MemOperand dst, Register scratch_0,
+void MacroAssembler::SwapP(MemOperand src, MemOperand dst, Register scratch_0,
Register scratch_1) {
if (src.rx() != r0) DCHECK(!AreAliased(src.rx(), scratch_0, scratch_1));
if (src.rb() != r0) DCHECK(!AreAliased(src.rb(), scratch_0, scratch_1));
@@ -4822,7 +4795,7 @@ void TurboAssembler::SwapP(MemOperand src, MemOperand dst, Register scratch_0,
StoreU64(scratch_1, src);
}
-void TurboAssembler::SwapFloat32(DoubleRegister src, DoubleRegister dst,
+void MacroAssembler::SwapFloat32(DoubleRegister src, DoubleRegister dst,
DoubleRegister scratch) {
if (src == dst) return;
DCHECK(!AreAliased(src, dst, scratch));
@@ -4831,7 +4804,7 @@ void TurboAssembler::SwapFloat32(DoubleRegister src, DoubleRegister dst,
ldr(dst, scratch);
}
-void TurboAssembler::SwapFloat32(DoubleRegister src, MemOperand dst,
+void MacroAssembler::SwapFloat32(DoubleRegister src, MemOperand dst,
DoubleRegister scratch) {
DCHECK(!AreAliased(src, scratch));
ldr(scratch, src);
@@ -4839,7 +4812,7 @@ void TurboAssembler::SwapFloat32(DoubleRegister src, MemOperand dst,
StoreF32(scratch, dst);
}
-void TurboAssembler::SwapFloat32(MemOperand src, MemOperand dst,
+void MacroAssembler::SwapFloat32(MemOperand src, MemOperand dst,
DoubleRegister scratch) {
// push d0, to be used as scratch
lay(sp, MemOperand(sp, -kDoubleSize));
@@ -4853,7 +4826,7 @@ void TurboAssembler::SwapFloat32(MemOperand src, MemOperand dst,
lay(sp, MemOperand(sp, kDoubleSize));
}
-void TurboAssembler::SwapDouble(DoubleRegister src, DoubleRegister dst,
+void MacroAssembler::SwapDouble(DoubleRegister src, DoubleRegister dst,
DoubleRegister scratch) {
if (src == dst) return;
DCHECK(!AreAliased(src, dst, scratch));
@@ -4862,7 +4835,7 @@ void TurboAssembler::SwapDouble(DoubleRegister src, DoubleRegister dst,
ldr(dst, scratch);
}
-void TurboAssembler::SwapDouble(DoubleRegister src, MemOperand dst,
+void MacroAssembler::SwapDouble(DoubleRegister src, MemOperand dst,
DoubleRegister scratch) {
DCHECK(!AreAliased(src, scratch));
ldr(scratch, src);
@@ -4870,7 +4843,7 @@ void TurboAssembler::SwapDouble(DoubleRegister src, MemOperand dst,
StoreF64(scratch, dst);
}
-void TurboAssembler::SwapDouble(MemOperand src, MemOperand dst,
+void MacroAssembler::SwapDouble(MemOperand src, MemOperand dst,
DoubleRegister scratch) {
// push d0, to be used as scratch
lay(sp, MemOperand(sp, -kDoubleSize));
@@ -4884,7 +4857,7 @@ void TurboAssembler::SwapDouble(MemOperand src, MemOperand dst,
lay(sp, MemOperand(sp, kDoubleSize));
}
-void TurboAssembler::SwapSimd128(Simd128Register src, Simd128Register dst,
+void MacroAssembler::SwapSimd128(Simd128Register src, Simd128Register dst,
Simd128Register scratch) {
if (src == dst) return;
vlr(scratch, src, Condition(0), Condition(0), Condition(0));
@@ -4892,7 +4865,7 @@ void TurboAssembler::SwapSimd128(Simd128Register src, Simd128Register dst,
vlr(dst, scratch, Condition(0), Condition(0), Condition(0));
}
-void TurboAssembler::SwapSimd128(Simd128Register src, MemOperand dst,
+void MacroAssembler::SwapSimd128(Simd128Register src, MemOperand dst,
Simd128Register scratch) {
DCHECK(!AreAliased(src, scratch));
vlr(scratch, src, Condition(0), Condition(0), Condition(0));
@@ -4900,7 +4873,7 @@ void TurboAssembler::SwapSimd128(Simd128Register src, MemOperand dst,
StoreV128(scratch, dst, ip);
}
-void TurboAssembler::SwapSimd128(MemOperand src, MemOperand dst,
+void MacroAssembler::SwapSimd128(MemOperand src, MemOperand dst,
Simd128Register scratch) {
// push d0, to be used as scratch
lay(sp, MemOperand(sp, -kSimd128Size));
@@ -4914,27 +4887,27 @@ void TurboAssembler::SwapSimd128(MemOperand src, MemOperand dst,
lay(sp, MemOperand(sp, kSimd128Size));
}
-void TurboAssembler::ComputeCodeStartAddress(Register dst) {
+void MacroAssembler::ComputeCodeStartAddress(Register dst) {
larl(dst, Operand(-pc_offset() / 2));
}
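// larl encodes its relative immediate in halfword (2-byte) units, so
// -pc_offset() / 2 makes dst point back at pc offset 0, the start of the
// code object currently being generated.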
-void TurboAssembler::LoadPC(Register dst) {
+void MacroAssembler::LoadPC(Register dst) {
Label current_pc;
larl(dst, &current_pc);
bind(&current_pc);
}
-void TurboAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) {
+void MacroAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) {
CmpS32(x, Operand(y));
beq(dest);
}
-void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
+void MacroAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
CmpS32(x, Operand(y));
blt(dest);
}
-void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
+void MacroAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
static_assert(kSystemPointerSize == 8);
static_assert(kSmiTagSize == 1);
static_assert(kSmiTag == 0);
@@ -4951,91 +4924,50 @@ void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
IsolateData::builtin_entry_table_offset()));
}
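// The builtin index arrives as a Smi; because kSmiTag == 0 (asserted
// above), a single shift turns the tagged value directly into a byte
// offset into the isolate's builtin entry table off kRootRegister.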
-void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
+void MacroAssembler::CallBuiltinByIndex(Register builtin_index) {
LoadEntryFromBuiltinIndex(builtin_index);
Call(builtin_index);
}
-void TurboAssembler::LoadEntryFromBuiltin(Builtin builtin,
+void MacroAssembler::LoadEntryFromBuiltin(Builtin builtin,
Register destination) {
ASM_CODE_COMMENT(this);
LoadU64(destination, EntryFromBuiltinAsOperand(builtin));
}
-MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
+MemOperand MacroAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
ASM_CODE_COMMENT(this);
DCHECK(root_array_available());
return MemOperand(kRootRegister,
IsolateData::BuiltinEntrySlotOffset(builtin));
}
-void TurboAssembler::LoadCodeObjectEntry(Register destination,
- Register code_object) {
- // Code objects are called differently depending on whether we are generating
- // builtin code (which will later be embedded into the binary) or compiling
- // user JS code at runtime.
- // * Builtin code runs in --jitless mode and thus must not call into on-heap
- // Code targets. Instead, we dispatch through the builtins entry table.
- // * Codegen at runtime does not have this restriction and we can use the
- // shorter, branchless instruction sequence. The assumption here is that
- // targets are usually generated code and not builtin Code objects.
-
- if (options().isolate_independent_code) {
- DCHECK(root_array_available());
- Label if_code_is_off_heap, out;
-
- Register scratch = r1;
-
- DCHECK(!AreAliased(destination, scratch));
- DCHECK(!AreAliased(code_object, scratch));
-
- // Check whether the Code object is an off-heap trampoline. If so, call its
- // (off-heap) entry point directly without going through the (on-heap)
- // trampoline. Otherwise, just call the Code object as always.
- LoadS32(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
- tmlh(scratch, Operand(Code::IsOffHeapTrampoline::kMask >> 16));
- bne(&if_code_is_off_heap);
-
- // Not an off-heap trampoline, the entry point is at
- // Code::raw_instruction_start().
- AddS64(destination, code_object,
- Operand(Code::kHeaderSize - kHeapObjectTag));
- b(&out);
-
- // An off-heap trampoline, the entry point is loaded from the builtin entry
- // table.
- bind(&if_code_is_off_heap);
- LoadS32(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
- ShiftLeftU64(destination, scratch, Operand(kSystemPointerSizeLog2));
- AddS64(destination, destination, kRootRegister);
- LoadU64(destination,
- MemOperand(destination, IsolateData::builtin_entry_table_offset()));
-
- bind(&out);
- } else {
- AddS64(destination, code_object,
- Operand(Code::kHeaderSize - kHeapObjectTag));
- }
+void MacroAssembler::LoadCodeEntry(Register destination, Register code_object) {
+ ASM_CODE_COMMENT(this);
+ LoadU64(destination,
+ FieldMemOperand(code_object, Code::kCodeEntryPointOffset));
}
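// With the Code/InstructionStream split, a Code object caches its entry
// point in the kCodeEntryPointOffset field, so one load replaces the
// branchy off-heap-trampoline check that the removed LoadCodeObjectEntry
// had to perform.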
-void TurboAssembler::CallCodeObject(Register code_object) {
- LoadCodeObjectEntry(code_object, code_object);
+void MacroAssembler::CallCodeObject(Register code_object) {
+ ASM_CODE_COMMENT(this);
+ LoadCodeEntry(code_object, code_object);
Call(code_object);
}
-void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
+void MacroAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
+ ASM_CODE_COMMENT(this);
DCHECK_EQ(JumpMode::kJump, jump_mode);
- LoadCodeObjectEntry(code_object, code_object);
+ LoadCodeEntry(code_object, code_object);
Jump(code_object);
}
-void TurboAssembler::StoreReturnAddressAndCall(Register target) {
+void MacroAssembler::StoreReturnAddressAndCall(Register target) {
// This generates the final instruction sequence for calls to C functions
// once an exit frame has been constructed.
//
- // Note that this assumes the caller code (i.e. the Code object currently
- // being generated) is immovable or that the callee function cannot trigger
- // GC, since the callee function will return to it.
+ // Note that this assumes the caller code (i.e. the InstructionStream object
+ // currently being generated) is immovable or that the callee function cannot
+ // trigger GC, since the callee function will return to it.
Label return_label;
larl(r14, &return_label); // Generate the return addr of call later.
@@ -5047,7 +4979,7 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
bind(&return_label);
}
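// larl materializes the address of return_label into r14 up front; the
// elided hunk then presumably branches to target, so the callee's normal
// return through r14 lands at return_label. This is why the caller must
// be immovable, per the note above.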
-void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
+void MacroAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DeoptimizeKind kind, Label* ret,
Label*) {
ASM_CODE_COMMENT(this);
@@ -5059,10 +4991,10 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
: Deoptimizer::kEagerDeoptExitSize);
}
-void TurboAssembler::Trap() { stop(); }
-void TurboAssembler::DebugBreak() { stop(); }
+void MacroAssembler::Trap() { stop(); }
+void MacroAssembler::DebugBreak() { stop(); }
-void TurboAssembler::CountLeadingZerosU32(Register dst, Register src,
+void MacroAssembler::CountLeadingZerosU32(Register dst, Register src,
Register scratch_pair) {
llgfr(dst, src);
flogr(scratch_pair,
@@ -5070,14 +5002,14 @@ void TurboAssembler::CountLeadingZerosU32(Register dst, Register src,
AddS32(dst, scratch_pair, Operand(-32));
}
-void TurboAssembler::CountLeadingZerosU64(Register dst, Register src,
+void MacroAssembler::CountLeadingZerosU64(Register dst, Register src,
Register scratch_pair) {
flogr(scratch_pair,
src); // will modify a register pair scratch and scratch + 1
mov(dst, scratch_pair);
}
-void TurboAssembler::CountTrailingZerosU32(Register dst, Register src,
+void MacroAssembler::CountTrailingZerosU32(Register dst, Register src,
Register scratch_pair) {
Register scratch0 = scratch_pair;
Register scratch1 = Register::from_code(scratch_pair.code() + 1);
@@ -5098,7 +5030,7 @@ void TurboAssembler::CountTrailingZerosU32(Register dst, Register src,
bind(&done);
}
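// The elided body presumably isolates the lowest set bit (src & -src)
// and counts leading zeros with flogr, giving ctz(x) = 63 - clz(x & -x);
// flogr writes an even/odd register pair, hence scratch_pair. The 64-bit
// variant below uses the same idea.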
-void TurboAssembler::CountTrailingZerosU64(Register dst, Register src,
+void MacroAssembler::CountTrailingZerosU64(Register dst, Register src,
Register scratch_pair) {
Register scratch0 = scratch_pair;
Register scratch1 = Register::from_code(scratch_pair.code() + 1);
@@ -5118,7 +5050,7 @@ void TurboAssembler::CountTrailingZerosU64(Register dst, Register src,
bind(&done);
}
-void TurboAssembler::AtomicCmpExchangeHelper(Register addr, Register output,
+void MacroAssembler::AtomicCmpExchangeHelper(Register addr, Register output,
Register old_value,
Register new_value, int start,
int end, int shift_amount,
@@ -5136,7 +5068,7 @@ void TurboAssembler::AtomicCmpExchangeHelper(Register addr, Register output,
Operand(64 - shift_amount), true);
}
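// Sub-word compare-and-swap sketch: the containing word is loaded, the
// old and new field values are rotated into position, a word-wide
// compare-and-swap is attempted, and the partially visible call above
// rotates the previous field value back out into output.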
-void TurboAssembler::AtomicCmpExchangeU8(Register addr, Register output,
+void MacroAssembler::AtomicCmpExchangeU8(Register addr, Register output,
Register old_value, Register new_value,
Register temp0, Register temp1) {
#ifdef V8_TARGET_BIG_ENDIAN
@@ -5185,7 +5117,7 @@ void TurboAssembler::AtomicCmpExchangeU8(Register addr, Register output,
bind(&done);
}
-void TurboAssembler::AtomicCmpExchangeU16(Register addr, Register output,
+void MacroAssembler::AtomicCmpExchangeU16(Register addr, Register output,
Register old_value,
Register new_value, Register temp0,
Register temp1) {
@@ -5223,7 +5155,7 @@ void TurboAssembler::AtomicCmpExchangeU16(Register addr, Register output,
bind(&done);
}
-void TurboAssembler::AtomicExchangeHelper(Register addr, Register value,
+void MacroAssembler::AtomicExchangeHelper(Register addr, Register value,
Register output, int start, int end,
int shift_amount, int offset,
Register scratch) {
@@ -5238,7 +5170,7 @@ void TurboAssembler::AtomicExchangeHelper(Register addr, Register value,
srl(output, Operand(shift_amount));
}
-void TurboAssembler::AtomicExchangeU8(Register addr, Register value,
+void MacroAssembler::AtomicExchangeU8(Register addr, Register value,
Register output, Register scratch) {
#ifdef V8_TARGET_BIG_ENDIAN
#define ATOMIC_EXCHANGE_BYTE(i) \
@@ -5290,7 +5222,7 @@ void TurboAssembler::AtomicExchangeU8(Register addr, Register value,
bind(&done);
}
-void TurboAssembler::AtomicExchangeU16(Register addr, Register value,
+void MacroAssembler::AtomicExchangeU16(Register addr, Register value,
Register output, Register scratch) {
#ifdef V8_TARGET_BIG_ENDIAN
#define ATOMIC_EXCHANGE_HALFWORD(i) \
@@ -5331,77 +5263,77 @@ void TurboAssembler::AtomicExchangeU16(Register addr, Register value,
}
// SIMD support.
-void TurboAssembler::F64x2Splat(Simd128Register dst, Simd128Register src) {
+void MacroAssembler::F64x2Splat(Simd128Register dst, Simd128Register src) {
vrep(dst, src, Operand(0), Condition(3));
}
-void TurboAssembler::F32x4Splat(Simd128Register dst, Simd128Register src) {
+void MacroAssembler::F32x4Splat(Simd128Register dst, Simd128Register src) {
vrep(dst, src, Operand(0), Condition(2));
}
-void TurboAssembler::I64x2Splat(Simd128Register dst, Register src) {
+void MacroAssembler::I64x2Splat(Simd128Register dst, Register src) {
vlvg(dst, src, MemOperand(r0, 0), Condition(3));
vrep(dst, dst, Operand(0), Condition(3));
}
-void TurboAssembler::I32x4Splat(Simd128Register dst, Register src) {
+void MacroAssembler::I32x4Splat(Simd128Register dst, Register src) {
vlvg(dst, src, MemOperand(r0, 0), Condition(2));
vrep(dst, dst, Operand(0), Condition(2));
}
-void TurboAssembler::I16x8Splat(Simd128Register dst, Register src) {
+void MacroAssembler::I16x8Splat(Simd128Register dst, Register src) {
vlvg(dst, src, MemOperand(r0, 0), Condition(1));
vrep(dst, dst, Operand(0), Condition(1));
}
-void TurboAssembler::I8x16Splat(Simd128Register dst, Register src) {
+void MacroAssembler::I8x16Splat(Simd128Register dst, Register src) {
vlvg(dst, src, MemOperand(r0, 0), Condition(0));
vrep(dst, dst, Operand(0), Condition(0));
}
-void TurboAssembler::F64x2ExtractLane(DoubleRegister dst, Simd128Register src,
+void MacroAssembler::F64x2ExtractLane(DoubleRegister dst, Simd128Register src,
uint8_t imm_lane_idx, Register) {
vrep(dst, src, Operand(1 - imm_lane_idx), Condition(3));
}
-void TurboAssembler::F32x4ExtractLane(DoubleRegister dst, Simd128Register src,
+void MacroAssembler::F32x4ExtractLane(DoubleRegister dst, Simd128Register src,
uint8_t imm_lane_idx, Register) {
vrep(dst, src, Operand(3 - imm_lane_idx), Condition(2));
}
-void TurboAssembler::I64x2ExtractLane(Register dst, Simd128Register src,
+void MacroAssembler::I64x2ExtractLane(Register dst, Simd128Register src,
uint8_t imm_lane_idx, Register) {
vlgv(dst, src, MemOperand(r0, 1 - imm_lane_idx), Condition(3));
}
-void TurboAssembler::I32x4ExtractLane(Register dst, Simd128Register src,
+void MacroAssembler::I32x4ExtractLane(Register dst, Simd128Register src,
uint8_t imm_lane_idx, Register) {
vlgv(dst, src, MemOperand(r0, 3 - imm_lane_idx), Condition(2));
}
-void TurboAssembler::I16x8ExtractLaneU(Register dst, Simd128Register src,
+void MacroAssembler::I16x8ExtractLaneU(Register dst, Simd128Register src,
uint8_t imm_lane_idx, Register) {
vlgv(dst, src, MemOperand(r0, 7 - imm_lane_idx), Condition(1));
}
-void TurboAssembler::I16x8ExtractLaneS(Register dst, Simd128Register src,
+void MacroAssembler::I16x8ExtractLaneS(Register dst, Simd128Register src,
uint8_t imm_lane_idx, Register scratch) {
vlgv(scratch, src, MemOperand(r0, 7 - imm_lane_idx), Condition(1));
lghr(dst, scratch);
}
-void TurboAssembler::I8x16ExtractLaneU(Register dst, Simd128Register src,
+void MacroAssembler::I8x16ExtractLaneU(Register dst, Simd128Register src,
uint8_t imm_lane_idx, Register) {
vlgv(dst, src, MemOperand(r0, 15 - imm_lane_idx), Condition(0));
}
-void TurboAssembler::I8x16ExtractLaneS(Register dst, Simd128Register src,
+void MacroAssembler::I8x16ExtractLaneS(Register dst, Simd128Register src,
uint8_t imm_lane_idx, Register scratch) {
vlgv(scratch, src, MemOperand(r0, 15 - imm_lane_idx), Condition(0));
lgbr(dst, scratch);
}
-void TurboAssembler::F64x2ReplaceLane(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::F64x2ReplaceLane(Simd128Register dst, Simd128Register src1,
DoubleRegister src2, uint8_t imm_lane_idx,
Register scratch) {
vlgv(scratch, src2, MemOperand(r0, 0), Condition(3));
@@ -5411,7 +5343,7 @@ void TurboAssembler::F64x2ReplaceLane(Simd128Register dst, Simd128Register src1,
vlvg(dst, scratch, MemOperand(r0, 1 - imm_lane_idx), Condition(3));
}
-void TurboAssembler::F32x4ReplaceLane(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::F32x4ReplaceLane(Simd128Register dst, Simd128Register src1,
DoubleRegister src2, uint8_t imm_lane_idx,
Register scratch) {
vlgv(scratch, src2, MemOperand(r0, 0), Condition(2));
@@ -5421,7 +5353,7 @@ void TurboAssembler::F32x4ReplaceLane(Simd128Register dst, Simd128Register src1,
vlvg(dst, scratch, MemOperand(r0, 3 - imm_lane_idx), Condition(2));
}
-void TurboAssembler::I64x2ReplaceLane(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I64x2ReplaceLane(Simd128Register dst, Simd128Register src1,
Register src2, uint8_t imm_lane_idx,
Register) {
if (src1 != dst) {
@@ -5430,7 +5362,7 @@ void TurboAssembler::I64x2ReplaceLane(Simd128Register dst, Simd128Register src1,
vlvg(dst, src2, MemOperand(r0, 1 - imm_lane_idx), Condition(3));
}
-void TurboAssembler::I32x4ReplaceLane(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I32x4ReplaceLane(Simd128Register dst, Simd128Register src1,
Register src2, uint8_t imm_lane_idx,
Register) {
if (src1 != dst) {
@@ -5439,7 +5371,7 @@ void TurboAssembler::I32x4ReplaceLane(Simd128Register dst, Simd128Register src1,
vlvg(dst, src2, MemOperand(r0, 3 - imm_lane_idx), Condition(2));
}
-void TurboAssembler::I16x8ReplaceLane(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I16x8ReplaceLane(Simd128Register dst, Simd128Register src1,
Register src2, uint8_t imm_lane_idx,
Register) {
if (src1 != dst) {
@@ -5448,7 +5380,7 @@ void TurboAssembler::I16x8ReplaceLane(Simd128Register dst, Simd128Register src1,
vlvg(dst, src2, MemOperand(r0, 7 - imm_lane_idx), Condition(1));
}
-void TurboAssembler::I8x16ReplaceLane(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I8x16ReplaceLane(Simd128Register dst, Simd128Register src1,
Register src2, uint8_t imm_lane_idx,
Register) {
if (src1 != dst) {
@@ -5457,19 +5389,19 @@ void TurboAssembler::I8x16ReplaceLane(Simd128Register dst, Simd128Register src1,
vlvg(dst, src2, MemOperand(r0, 15 - imm_lane_idx), Condition(0));
}
-void TurboAssembler::S128Not(Simd128Register dst, Simd128Register src) {
+void MacroAssembler::S128Not(Simd128Register dst, Simd128Register src) {
vno(dst, src, src, Condition(0), Condition(0), Condition(0));
}
-void TurboAssembler::S128Zero(Simd128Register dst, Simd128Register src) {
+void MacroAssembler::S128Zero(Simd128Register dst, Simd128Register src) {
vx(dst, src, src, Condition(0), Condition(0), Condition(0));
}
-void TurboAssembler::S128AllOnes(Simd128Register dst, Simd128Register src) {
+void MacroAssembler::S128AllOnes(Simd128Register dst, Simd128Register src) {
vceq(dst, src, src, Condition(0), Condition(3));
}
-void TurboAssembler::S128Select(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::S128Select(Simd128Register dst, Simd128Register src1,
Simd128Register src2, Simd128Register mask) {
vsel(dst, src1, src2, mask, Condition(0), Condition(0));
}
@@ -5512,7 +5444,7 @@ void TurboAssembler::S128Select(Simd128Register dst, Simd128Register src1,
V(I8x16Popcnt, vpopct, 0, 0, 0)
#define EMIT_SIMD_UNOP_VRR_A(name, op, c1, c2, c3) \
- void TurboAssembler::name(Simd128Register dst, Simd128Register src) { \
+ void MacroAssembler::name(Simd128Register dst, Simd128Register src) { \
op(dst, src, Condition(c1), Condition(c2), Condition(c3)); \
}
SIMD_UNOP_LIST_VRR_A(EMIT_SIMD_UNOP_VRR_A)
@@ -5533,7 +5465,7 @@ SIMD_UNOP_LIST_VRR_A(EMIT_SIMD_UNOP_VRR_A)
V(I8x16GtU, vchl, 0, 0)
#define EMIT_SIMD_BINOP_VRR_B(name, op, c1, c2) \
- void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \
+ void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \
Simd128Register src2) { \
op(dst, src1, src2, Condition(c1), Condition(c2)); \
}
@@ -5590,7 +5522,7 @@ SIMD_BINOP_LIST_VRR_B(EMIT_SIMD_BINOP_VRR_B)
V(S128AndNot, vnc, 0, 0, 0)
#define EMIT_SIMD_BINOP_VRR_C(name, op, c1, c2, c3) \
- void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \
+ void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \
Simd128Register src2) { \
op(dst, src1, src2, Condition(c1), Condition(c2), Condition(c3)); \
}
@@ -5613,13 +5545,13 @@ SIMD_BINOP_LIST_VRR_C(EMIT_SIMD_BINOP_VRR_C)
V(I8x16ShrU, vesrlv, 0)
#define EMIT_SIMD_SHIFT(name, op, c1) \
- void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \
+ void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \
Register src2, Simd128Register scratch) { \
vlvg(scratch, src2, MemOperand(r0, 0), Condition(c1)); \
vrep(scratch, scratch, Operand(0), Condition(c1)); \
op(dst, src1, scratch, Condition(0), Condition(0), Condition(c1)); \
} \
- void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \
+ void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \
const Operand& src2, Register scratch1, \
Simd128Register scratch2) { \
mov(scratch1, src2); \
@@ -5644,7 +5576,7 @@ SIMD_SHIFT_LIST(EMIT_SIMD_SHIFT)
V(I16x8ExtMulHighI8x16U, vmle, vmlo, vmrh, 0)
#define EMIT_SIMD_EXT_MUL(name, mul_even, mul_odd, merge, mode) \
- void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \
+ void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \
Simd128Register src2, Simd128Register scratch) { \
mul_even(scratch, src1, src2, Condition(0), Condition(0), \
Condition(mode)); \
@@ -5662,7 +5594,7 @@ SIMD_EXT_MUL_LIST(EMIT_SIMD_EXT_MUL)
V(I8x16AllTrue, 0)
#define EMIT_SIMD_ALL_TRUE(name, mode) \
- void TurboAssembler::name(Register dst, Simd128Register src, \
+ void MacroAssembler::name(Register dst, Simd128Register src, \
Register scratch1, Simd128Register scratch2) { \
mov(scratch1, Operand(1)); \
xgr(dst, dst); \
@@ -5683,15 +5615,15 @@ SIMD_ALL_TRUE_LIST(EMIT_SIMD_ALL_TRUE)
V(F32x4Qfms, vfnms, 2)
#define EMIT_SIMD_QFM(name, op, c1) \
- void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \
+ void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \
Simd128Register src2, Simd128Register src3) { \
- op(dst, src2, src3, src1, Condition(c1), Condition(0)); \
+ op(dst, src1, src2, src3, Condition(c1), Condition(0)); \
}
SIMD_QFM_LIST(EMIT_SIMD_QFM)
#undef EMIT_SIMD_QFM
#undef SIMD_QFM_LIST
-void TurboAssembler::I64x2Mul(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I64x2Mul(Simd128Register dst, Simd128Register src1,
Simd128Register src2, Register scratch1,
Register scratch2, Register scratch3) {
Register scratch_1 = scratch1;
@@ -5706,112 +5638,112 @@ void TurboAssembler::I64x2Mul(Simd128Register dst, Simd128Register src1,
vlvgp(dst, scratch1, scratch2);
}
-void TurboAssembler::F64x2Ne(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::F64x2Ne(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
vfce(dst, src1, src2, Condition(0), Condition(0), Condition(3));
vno(dst, dst, dst, Condition(0), Condition(0), Condition(3));
}
-void TurboAssembler::F64x2Lt(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::F64x2Lt(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
vfch(dst, src2, src1, Condition(0), Condition(0), Condition(3));
}
-void TurboAssembler::F64x2Le(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::F64x2Le(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
vfche(dst, src2, src1, Condition(0), Condition(0), Condition(3));
}
-void TurboAssembler::F32x4Ne(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::F32x4Ne(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
vfce(dst, src1, src2, Condition(0), Condition(0), Condition(2));
vno(dst, dst, dst, Condition(0), Condition(0), Condition(2));
}
-void TurboAssembler::F32x4Lt(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::F32x4Lt(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
vfch(dst, src2, src1, Condition(0), Condition(0), Condition(2));
}
-void TurboAssembler::F32x4Le(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::F32x4Le(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
vfche(dst, src2, src1, Condition(0), Condition(0), Condition(2));
}
-void TurboAssembler::I64x2Ne(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I64x2Ne(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
vceq(dst, src1, src2, Condition(0), Condition(3));
vno(dst, dst, dst, Condition(0), Condition(0), Condition(3));
}
-void TurboAssembler::I64x2GeS(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I64x2GeS(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
// Compute !(B > A) which is equal to A >= B.
vch(dst, src2, src1, Condition(0), Condition(3));
vno(dst, dst, dst, Condition(0), Condition(0), Condition(3));
}
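// Worked example for one lane: src1 = 5, src2 = 3. vch computes
// src2 > src1, which is false (an all-zero lane); vno inverts it to all
// ones, i.e. true, matching src1 >= src2. The narrower GeS variants
// below reuse the same vch/vno pattern.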
-void TurboAssembler::I32x4Ne(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I32x4Ne(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
vceq(dst, src1, src2, Condition(0), Condition(2));
vno(dst, dst, dst, Condition(0), Condition(0), Condition(2));
}
-void TurboAssembler::I32x4GeS(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I32x4GeS(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
// Compute !(B > A) which is equal to A >= B.
vch(dst, src2, src1, Condition(0), Condition(2));
vno(dst, dst, dst, Condition(0), Condition(0), Condition(2));
}
-void TurboAssembler::I32x4GeU(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I32x4GeU(Simd128Register dst, Simd128Register src1,
Simd128Register src2, Simd128Register scratch) {
vceq(scratch, src1, src2, Condition(0), Condition(2));
vchl(dst, src1, src2, Condition(0), Condition(2));
vo(dst, dst, scratch, Condition(0), Condition(0), Condition(2));
}
-void TurboAssembler::I16x8Ne(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I16x8Ne(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
vceq(dst, src1, src2, Condition(0), Condition(1));
vno(dst, dst, dst, Condition(0), Condition(0), Condition(1));
}
-void TurboAssembler::I16x8GeS(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I16x8GeS(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
// Compute !(B > A) which is equal to A >= B.
vch(dst, src2, src1, Condition(0), Condition(1));
vno(dst, dst, dst, Condition(0), Condition(0), Condition(1));
}
-void TurboAssembler::I16x8GeU(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I16x8GeU(Simd128Register dst, Simd128Register src1,
Simd128Register src2, Simd128Register scratch) {
vceq(scratch, src1, src2, Condition(0), Condition(1));
vchl(dst, src1, src2, Condition(0), Condition(1));
vo(dst, dst, scratch, Condition(0), Condition(0), Condition(1));
}
-void TurboAssembler::I8x16Ne(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I8x16Ne(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
vceq(dst, src1, src2, Condition(0), Condition(0));
vno(dst, dst, dst, Condition(0), Condition(0), Condition(0));
}
-void TurboAssembler::I8x16GeS(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I8x16GeS(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
// Compute !(B > A) which is equal to A >= B.
vch(dst, src2, src1, Condition(0), Condition(0));
vno(dst, dst, dst, Condition(0), Condition(0), Condition(0));
}
-void TurboAssembler::I8x16GeU(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I8x16GeU(Simd128Register dst, Simd128Register src1,
Simd128Register src2, Simd128Register scratch) {
vceq(scratch, src1, src2, Condition(0), Condition(0));
vchl(dst, src1, src2, Condition(0), Condition(0));
vo(dst, dst, scratch, Condition(0), Condition(0), Condition(0));
}
-void TurboAssembler::I64x2BitMask(Register dst, Simd128Register src,
+void MacroAssembler::I64x2BitMask(Register dst, Simd128Register src,
Register scratch1, Simd128Register scratch2) {
mov(scratch1, Operand(0x8080808080800040));
vlvg(scratch2, scratch1, MemOperand(r0, 1), Condition(3));
@@ -5819,7 +5751,7 @@ void TurboAssembler::I64x2BitMask(Register dst, Simd128Register src,
vlgv(dst, scratch2, MemOperand(r0, 7), Condition(0));
}
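// The constant loaded above is a selector for the vector bit-permute
// step hidden in the hunk: selector byte 0x80 is out of range and
// contributes 0, while 0x00 and 0x40 pick source bits 0 and 64, the sign
// bits of the two i64 lanes; vlgv then reads the gathered mask back as a
// scalar. The narrower BitMask variants below follow the same scheme.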
-void TurboAssembler::I32x4BitMask(Register dst, Simd128Register src,
+void MacroAssembler::I32x4BitMask(Register dst, Simd128Register src,
Register scratch1, Simd128Register scratch2) {
mov(scratch1, Operand(0x8080808000204060));
vlvg(scratch2, scratch1, MemOperand(r0, 1), Condition(3));
@@ -5827,7 +5759,7 @@ void TurboAssembler::I32x4BitMask(Register dst, Simd128Register src,
vlgv(dst, scratch2, MemOperand(r0, 7), Condition(0));
}
-void TurboAssembler::I16x8BitMask(Register dst, Simd128Register src,
+void MacroAssembler::I16x8BitMask(Register dst, Simd128Register src,
Register scratch1, Simd128Register scratch2) {
mov(scratch1, Operand(0x10203040506070));
vlvg(scratch2, scratch1, MemOperand(r0, 1), Condition(3));
@@ -5835,19 +5767,19 @@ void TurboAssembler::I16x8BitMask(Register dst, Simd128Register src,
vlgv(dst, scratch2, MemOperand(r0, 7), Condition(0));
}
-void TurboAssembler::F64x2ConvertLowI32x4S(Simd128Register dst,
+void MacroAssembler::F64x2ConvertLowI32x4S(Simd128Register dst,
Simd128Register src) {
vupl(dst, src, Condition(0), Condition(0), Condition(2));
vcdg(dst, dst, Condition(4), Condition(0), Condition(3));
}
-void TurboAssembler::F64x2ConvertLowI32x4U(Simd128Register dst,
+void MacroAssembler::F64x2ConvertLowI32x4U(Simd128Register dst,
Simd128Register src) {
vupll(dst, src, Condition(0), Condition(0), Condition(2));
vcdlg(dst, dst, Condition(4), Condition(0), Condition(3));
}
-void TurboAssembler::I8x16BitMask(Register dst, Simd128Register src,
+void MacroAssembler::I8x16BitMask(Register dst, Simd128Register src,
Register scratch1, Register scratch2,
Simd128Register scratch3) {
mov(scratch1, Operand(0x4048505860687078));
@@ -5857,7 +5789,7 @@ void TurboAssembler::I8x16BitMask(Register dst, Simd128Register src,
vlgv(dst, scratch3, MemOperand(r0, 3), Condition(1));
}
-void TurboAssembler::V128AnyTrue(Register dst, Simd128Register src,
+void MacroAssembler::V128AnyTrue(Register dst, Simd128Register src,
Register scratch) {
mov(dst, Operand(1));
xgr(scratch, scratch);
@@ -5872,7 +5804,7 @@ void TurboAssembler::V128AnyTrue(Register dst, Simd128Register src,
convert(scratch2, scratch1, kRoundToZero); \
vlvg(dst, scratch2, MemOperand(r0, index), Condition(2)); \
}
-void TurboAssembler::I32x4SConvertF32x4(Simd128Register dst,
+void MacroAssembler::I32x4SConvertF32x4(Simd128Register dst,
Simd128Register src,
Simd128Register scratch1,
Register scratch2) {
@@ -5886,7 +5818,7 @@ void TurboAssembler::I32x4SConvertF32x4(Simd128Register dst,
}
}
-void TurboAssembler::I32x4UConvertF32x4(Simd128Register dst,
+void MacroAssembler::I32x4UConvertF32x4(Simd128Register dst,
Simd128Register src,
Simd128Register scratch1,
Register scratch2) {
@@ -5908,7 +5840,7 @@ void TurboAssembler::I32x4UConvertF32x4(Simd128Register dst,
MovFloatToInt(scratch2, scratch1); \
vlvg(dst, scratch2, MemOperand(r0, index), Condition(2)); \
}
-void TurboAssembler::F32x4SConvertI32x4(Simd128Register dst,
+void MacroAssembler::F32x4SConvertI32x4(Simd128Register dst,
Simd128Register src,
Simd128Register scratch1,
Register scratch2) {
@@ -5918,7 +5850,7 @@ void TurboAssembler::F32x4SConvertI32x4(Simd128Register dst,
CONVERT_INT32_TO_FLOAT(ConvertIntToFloat, dst, src, scratch1, scratch2)
}
}
-void TurboAssembler::F32x4UConvertI32x4(Simd128Register dst,
+void MacroAssembler::F32x4UConvertI32x4(Simd128Register dst,
Simd128Register src,
Simd128Register scratch1,
Register scratch2) {
@@ -5931,13 +5863,13 @@ void TurboAssembler::F32x4UConvertI32x4(Simd128Register dst,
}
#undef CONVERT_INT32_TO_FLOAT
-void TurboAssembler::I16x8SConvertI32x4(Simd128Register dst,
+void MacroAssembler::I16x8SConvertI32x4(Simd128Register dst,
Simd128Register src1,
Simd128Register src2) {
vpks(dst, src2, src1, Condition(0), Condition(2));
}
-void TurboAssembler::I8x16SConvertI16x8(Simd128Register dst,
+void MacroAssembler::I8x16SConvertI16x8(Simd128Register dst,
Simd128Register src1,
Simd128Register src2) {
vpks(dst, src2, src1, Condition(0), Condition(1));
@@ -5949,7 +5881,7 @@ void TurboAssembler::I8x16SConvertI16x8(Simd128Register dst,
vmx(scratch, src1, kDoubleRegZero, Condition(0), Condition(0), \
Condition(mode)); \
vmx(dst, src2, kDoubleRegZero, Condition(0), Condition(0), Condition(mode));
-void TurboAssembler::I16x8UConvertI32x4(Simd128Register dst,
+void MacroAssembler::I16x8UConvertI32x4(Simd128Register dst,
Simd128Register src1,
Simd128Register src2,
Simd128Register scratch) {
@@ -5958,7 +5890,7 @@ void TurboAssembler::I16x8UConvertI32x4(Simd128Register dst,
vpkls(dst, dst, scratch, Condition(0), Condition(2));
}
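// Unsigned narrowing: the macro above clamps negative lanes to zero via
// a signed maximum against kDoubleRegZero (the vmx calls), after which
// vpkls packs with unsigned saturation.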
-void TurboAssembler::I8x16UConvertI16x8(Simd128Register dst,
+void MacroAssembler::I8x16UConvertI16x8(Simd128Register dst,
Simd128Register src1,
Simd128Register src2,
Simd128Register scratch) {
@@ -5980,7 +5912,7 @@ void TurboAssembler::I8x16UConvertI16x8(Simd128Register dst,
extract_low(scratch2, src2, Condition(0), Condition(0), Condition(mode)); \
op(scratch1, scratch1, scratch2, Condition(0), Condition(0), \
Condition(mode + 1));
-void TurboAssembler::I16x8AddSatS(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I16x8AddSatS(Simd128Register dst, Simd128Register src1,
Simd128Register src2,
Simd128Register scratch1,
Simd128Register scratch2) {
@@ -5988,7 +5920,7 @@ void TurboAssembler::I16x8AddSatS(Simd128Register dst, Simd128Register src1,
vpks(dst, dst, scratch1, Condition(0), Condition(2));
}
-void TurboAssembler::I16x8SubSatS(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I16x8SubSatS(Simd128Register dst, Simd128Register src1,
Simd128Register src2,
Simd128Register scratch1,
Simd128Register scratch2) {
@@ -5996,7 +5928,7 @@ void TurboAssembler::I16x8SubSatS(Simd128Register dst, Simd128Register src1,
vpks(dst, dst, scratch1, Condition(0), Condition(2));
}
-void TurboAssembler::I16x8AddSatU(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I16x8AddSatU(Simd128Register dst, Simd128Register src1,
Simd128Register src2,
Simd128Register scratch1,
Simd128Register scratch2) {
@@ -6004,7 +5936,7 @@ void TurboAssembler::I16x8AddSatU(Simd128Register dst, Simd128Register src1,
vpkls(dst, dst, scratch1, Condition(0), Condition(2));
}
-void TurboAssembler::I16x8SubSatU(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I16x8SubSatU(Simd128Register dst, Simd128Register src1,
Simd128Register src2,
Simd128Register scratch1,
Simd128Register scratch2) {
@@ -6018,7 +5950,7 @@ void TurboAssembler::I16x8SubSatU(Simd128Register dst, Simd128Register src1,
vpkls(dst, dst, scratch1, Condition(0), Condition(2));
}
-void TurboAssembler::I8x16AddSatS(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I8x16AddSatS(Simd128Register dst, Simd128Register src1,
Simd128Register src2,
Simd128Register scratch1,
Simd128Register scratch2) {
@@ -6026,7 +5958,7 @@ void TurboAssembler::I8x16AddSatS(Simd128Register dst, Simd128Register src1,
vpks(dst, dst, scratch1, Condition(0), Condition(1));
}
-void TurboAssembler::I8x16SubSatS(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I8x16SubSatS(Simd128Register dst, Simd128Register src1,
Simd128Register src2,
Simd128Register scratch1,
Simd128Register scratch2) {
@@ -6034,7 +5966,7 @@ void TurboAssembler::I8x16SubSatS(Simd128Register dst, Simd128Register src1,
vpks(dst, dst, scratch1, Condition(0), Condition(1));
}
-void TurboAssembler::I8x16AddSatU(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I8x16AddSatU(Simd128Register dst, Simd128Register src1,
Simd128Register src2,
Simd128Register scratch1,
Simd128Register scratch2) {
@@ -6042,7 +5974,7 @@ void TurboAssembler::I8x16AddSatU(Simd128Register dst, Simd128Register src1,
vpkls(dst, dst, scratch1, Condition(0), Condition(1));
}
-void TurboAssembler::I8x16SubSatU(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I8x16SubSatU(Simd128Register dst, Simd128Register src1,
Simd128Register src2,
Simd128Register scratch1,
Simd128Register scratch2) {
@@ -6057,7 +5989,7 @@ void TurboAssembler::I8x16SubSatU(Simd128Register dst, Simd128Register src1,
}
#undef BINOP_EXTRACT
-void TurboAssembler::F64x2PromoteLowF32x4(Simd128Register dst,
+void MacroAssembler::F64x2PromoteLowF32x4(Simd128Register dst,
Simd128Register src,
Simd128Register scratch1,
Register scratch2, Register scratch3,
@@ -6073,7 +6005,7 @@ void TurboAssembler::F64x2PromoteLowF32x4(Simd128Register dst,
vlvgp(dst, scratch3, scratch4);
}
-void TurboAssembler::F32x4DemoteF64x2Zero(Simd128Register dst,
+void MacroAssembler::F32x4DemoteF64x2Zero(Simd128Register dst,
Simd128Register src,
Simd128Register scratch1,
Register scratch2, Register scratch3,
@@ -6101,14 +6033,14 @@ void TurboAssembler::F32x4DemoteF64x2Zero(Simd128Register dst,
Condition(lane_size)); \
va(dst, scratch1, scratch2, Condition(0), Condition(0), \
Condition(lane_size + 1));
-void TurboAssembler::I32x4ExtAddPairwiseI16x8S(Simd128Register dst,
+void MacroAssembler::I32x4ExtAddPairwiseI16x8S(Simd128Register dst,
Simd128Register src,
Simd128Register scratch1,
Simd128Register scratch2) {
EXT_ADD_PAIRWISE(dst, src, scratch1, scratch2, 1, vme, vmo)
}
-void TurboAssembler::I32x4ExtAddPairwiseI16x8U(Simd128Register dst,
+void MacroAssembler::I32x4ExtAddPairwiseI16x8U(Simd128Register dst,
Simd128Register src,
Simd128Register scratch,
Simd128Register scratch2) {
@@ -6116,14 +6048,14 @@ void TurboAssembler::I32x4ExtAddPairwiseI16x8U(Simd128Register dst,
vsum(dst, src, scratch, Condition(0), Condition(0), Condition(1));
}
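// vsum adds each pair of adjacent halfword elements into a word sum,
// plus the matching word of scratch (presumably zeroed in the elided
// hunk), which implements the unsigned pairwise widening add directly.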
-void TurboAssembler::I16x8ExtAddPairwiseI8x16S(Simd128Register dst,
+void MacroAssembler::I16x8ExtAddPairwiseI8x16S(Simd128Register dst,
Simd128Register src,
Simd128Register scratch1,
Simd128Register scratch2) {
EXT_ADD_PAIRWISE(dst, src, scratch1, scratch2, 0, vme, vmo)
}
-void TurboAssembler::I16x8ExtAddPairwiseI8x16U(Simd128Register dst,
+void MacroAssembler::I16x8ExtAddPairwiseI8x16U(Simd128Register dst,
Simd128Register src,
Simd128Register scratch1,
Simd128Register scratch2) {
@@ -6131,7 +6063,7 @@ void TurboAssembler::I16x8ExtAddPairwiseI8x16U(Simd128Register dst,
}
#undef EXT_ADD_PAIRWISE
-void TurboAssembler::I32x4TruncSatF64x2SZero(Simd128Register dst,
+void MacroAssembler::I32x4TruncSatF64x2SZero(Simd128Register dst,
Simd128Register src,
Simd128Register scratch) {
// NaN to 0.
@@ -6143,7 +6075,7 @@ void TurboAssembler::I32x4TruncSatF64x2SZero(Simd128Register dst,
vpks(dst, dst, scratch, Condition(0), Condition(3));
}
-void TurboAssembler::I32x4TruncSatF64x2UZero(Simd128Register dst,
+void MacroAssembler::I32x4TruncSatF64x2UZero(Simd128Register dst,
Simd128Register src,
Simd128Register scratch) {
vclgd(scratch, src, Condition(5), Condition(0), Condition(3));
@@ -6151,14 +6083,14 @@ void TurboAssembler::I32x4TruncSatF64x2UZero(Simd128Register dst,
vpkls(dst, dst, scratch, Condition(0), Condition(3));
}
-void TurboAssembler::S128Const(Simd128Register dst, uint64_t high, uint64_t low,
+void MacroAssembler::S128Const(Simd128Register dst, uint64_t high, uint64_t low,
Register scratch1, Register scratch2) {
mov(scratch1, Operand(low));
mov(scratch2, Operand(high));
vlvgp(dst, scratch2, scratch1);
}
-void TurboAssembler::I8x16Swizzle(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I8x16Swizzle(Simd128Register dst, Simd128Register src1,
Simd128Register src2, Register scratch1,
Register scratch2, Simd128Register scratch3,
Simd128Register scratch4) {
@@ -6178,7 +6110,7 @@ void TurboAssembler::I8x16Swizzle(Simd128Register dst, Simd128Register src1,
vperm(dst, dst, scratch3, scratch4, Condition(0), Condition(0));
}
-void TurboAssembler::I8x16Shuffle(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I8x16Shuffle(Simd128Register dst, Simd128Register src1,
Simd128Register src2, uint64_t high,
uint64_t low, Register scratch1,
Register scratch2, Simd128Register scratch3) {
@@ -6188,7 +6120,7 @@ void TurboAssembler::I8x16Shuffle(Simd128Register dst, Simd128Register src1,
vperm(dst, src1, src2, scratch3, Condition(0), Condition(0));
}
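// vperm treats src1:src2 as a single 32-byte table and selects each
// destination byte by the corresponding index byte of scratch3, which
// holds the shuffle pattern materialized from the high/low immediates.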
-void TurboAssembler::I32x4DotI16x8S(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I32x4DotI16x8S(Simd128Register dst, Simd128Register src1,
Simd128Register src2,
Simd128Register scratch) {
vme(scratch, src1, src2, Condition(0), Condition(0), Condition(1));
@@ -6196,6 +6128,30 @@ void TurboAssembler::I32x4DotI16x8S(Simd128Register dst, Simd128Register src1,
va(dst, scratch, dst, Condition(0), Condition(0), Condition(2));
}
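// Dot-product pattern: vme and vmo (the latter in the elided hunk)
// compute widening products of the even and odd i16 lanes, and va adds
// the two i32 halves, yielding a0*b0 + a1*b1 per result lane.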
+void MacroAssembler::I32x4DotI8x16AddS(
+ Simd128Register dst, Simd128Register src1, Simd128Register src2,
+ Simd128Register src3, Simd128Register scratch1, Simd128Register scratch2) {
+ // I8 -> I16.
+ vme(scratch1, src1, src2, Condition(0), Condition(0), Condition(0));
+ vmo(dst, src1, src2, Condition(0), Condition(0), Condition(0));
+ va(dst, scratch1, dst, Condition(0), Condition(0), Condition(1));
+ // I16 -> I32.
+ vrepi(scratch2, Operand(1), Condition(1));
+ vme(scratch1, dst, scratch2, Condition(0), Condition(0), Condition(1));
+ vmo(dst, dst, scratch2, Condition(0), Condition(0), Condition(1));
+ va(dst, scratch1, dst, Condition(0), Condition(0), Condition(2));
+ // Add src3.
+ va(dst, dst, src3, Condition(0), Condition(0), Condition(2));
+}
+
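// The "I16 -> I32" step above multiplies by a splat of 1 (vrepi) with
// even/odd widening multiplies and then adds the halves: a compact way
// to pairwise widen-and-add the i16 partial sums into i32 lanes before
// src3 is accumulated.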
+void MacroAssembler::I16x8DotI8x16S(Simd128Register dst, Simd128Register src1,
+ Simd128Register src2,
+ Simd128Register scratch) {
+ vme(scratch, src1, src2, Condition(0), Condition(0), Condition(0));
+ vmo(dst, src1, src2, Condition(0), Condition(0), Condition(0));
+ va(dst, scratch, dst, Condition(0), Condition(0), Condition(1));
+}
+
#define Q15_MUL_ROAUND(accumulator, src1, src2, const_val, scratch, unpack) \
unpack(scratch, src1, Condition(0), Condition(0), Condition(1)); \
unpack(accumulator, src2, Condition(0), Condition(0), Condition(1)); \
@@ -6206,7 +6162,7 @@ void TurboAssembler::I32x4DotI16x8S(Simd128Register dst, Simd128Register src1,
vrepi(scratch, Operand(15), Condition(2)); \
vesrav(accumulator, accumulator, scratch, Condition(0), Condition(0), \
Condition(2));
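// Q15 fixed-point rounding multiply: per lane this computes
// (a * b + 0x4000) >> 15 on widened i32 values; const_val presumably
// carries the 1 << 14 rounding term added in the elided hunk, and the
// vrepi/vesrav pair implements the final arithmetic shift by 15.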
-void TurboAssembler::I16x8Q15MulRSatS(Simd128Register dst, Simd128Register src1,
+void MacroAssembler::I16x8Q15MulRSatS(Simd128Register dst, Simd128Register src1,
Simd128Register src2,
Simd128Register scratch1,
Simd128Register scratch2,
@@ -6236,7 +6192,7 @@ void TurboAssembler::I16x8Q15MulRSatS(Simd128Register dst, Simd128Register src1,
V(8x16, vlrep, LoadU8, 0)
#define LOAD_SPLAT(name, vector_instr, scalar_instr, condition) \
- void TurboAssembler::LoadAndSplat##name##LE( \
+ void MacroAssembler::LoadAndSplat##name##LE( \
Simd128Register dst, const MemOperand& mem, Register scratch) { \
if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \
vector_instr(dst, mem, Condition(condition)); \
@@ -6259,7 +6215,7 @@ LOAD_SPLAT_LIST(LOAD_SPLAT)
V(8x8S, vuph, 0)
#define LOAD_EXTEND(name, unpack_instr, condition) \
- void TurboAssembler::LoadAndExtend##name##LE( \
+ void MacroAssembler::LoadAndExtend##name##LE( \
Simd128Register dst, const MemOperand& mem, Register scratch) { \
if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \
vlebrg(dst, mem, Condition(0)); \
@@ -6273,7 +6229,7 @@ LOAD_EXTEND_LIST(LOAD_EXTEND)
#undef LOAD_EXTEND
#undef LOAD_EXTEND_LIST
-void TurboAssembler::LoadV32ZeroLE(Simd128Register dst, const MemOperand& mem,
+void MacroAssembler::LoadV32ZeroLE(Simd128Register dst, const MemOperand& mem,
Register scratch) {
vx(dst, dst, dst, Condition(0), Condition(0), Condition(0));
if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) {
@@ -6284,7 +6240,7 @@ void TurboAssembler::LoadV32ZeroLE(Simd128Register dst, const MemOperand& mem,
vlvg(dst, scratch, MemOperand(r0, 3), Condition(2));
}
-void TurboAssembler::LoadV64ZeroLE(Simd128Register dst, const MemOperand& mem,
+void MacroAssembler::LoadV64ZeroLE(Simd128Register dst, const MemOperand& mem,
Register scratch) {
vx(dst, dst, dst, Condition(0), Condition(0), Condition(0));
if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) {
@@ -6302,7 +6258,7 @@ void TurboAssembler::LoadV64ZeroLE(Simd128Register dst, const MemOperand& mem,
V(8, vleb, LoadU8, 0)
#define LOAD_LANE(name, vector_instr, scalar_instr, condition) \
- void TurboAssembler::LoadLane##name##LE(Simd128Register dst, \
+ void MacroAssembler::LoadLane##name##LE(Simd128Register dst, \
const MemOperand& mem, int lane, \
Register scratch) { \
if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \
@@ -6323,7 +6279,7 @@ LOAD_LANE_LIST(LOAD_LANE)
V(8, vsteb, StoreU8, 0)
#define STORE_LANE(name, vector_instr, scalar_instr, condition) \
- void TurboAssembler::StoreLane##name##LE(Simd128Register src, \
+ void MacroAssembler::StoreLane##name##LE(Simd128Register src, \
const MemOperand& mem, int lane, \
Register scratch) { \
if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \
@@ -6347,10 +6303,10 @@ void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
kind == StackLimitKind::kRealStackLimit
? ExternalReference::address_of_real_jslimit(isolate)
: ExternalReference::address_of_jslimit(isolate);
- DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
+ DCHECK(MacroAssembler::IsAddressableThroughRootRegister(isolate, limit));
intptr_t offset =
- TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+ MacroAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
CHECK(is_int32(offset));
LoadU64(destination, MemOperand(kRootRegister, offset));
}