Diffstat (limited to 'deps/v8/src/codegen/riscv/macro-assembler-riscv.cc')
-rw-r--r--  deps/v8/src/codegen/riscv/macro-assembler-riscv.cc | 1116
1 file changed, 508 insertions(+), 608 deletions(-)
diff --git a/deps/v8/src/codegen/riscv/macro-assembler-riscv.cc b/deps/v8/src/codegen/riscv/macro-assembler-riscv.cc
index 00a89a40c1..3fa69f10e5 100644
--- a/deps/v8/src/codegen/riscv/macro-assembler-riscv.cc
+++ b/deps/v8/src/codegen/riscv/macro-assembler-riscv.cc
@@ -41,7 +41,7 @@ static inline bool IsZero(const Operand& rt) {
}
}
-int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
+int MacroAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion1,
Register exclusion2,
Register exclusion3) const {
@@ -58,7 +58,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
return bytes;
}
-int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
+int MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
int bytes = 0;
@@ -75,7 +75,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
return bytes;
}
-int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
+int MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
int bytes = 0;
if (fp_mode == SaveFPRegsMode::kSave) {
@@ -114,8 +114,8 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
- __ JumpIfCodeTIsMarkedForDeoptimization(optimized_code_entry, scratch1,
- &heal_optimized_code_slot);
+ __ JumpIfCodeIsMarkedForDeoptimization(optimized_code_entry, scratch1,
+ &heal_optimized_code_slot);
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
@@ -124,7 +124,7 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
__ ReplaceClosureCodeWithOptimizedCode(optimized_code_entry, closure);
static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
- __ LoadCodeObjectEntry(a2, optimized_code_entry);
+ __ LoadCodeEntry(a2, optimized_code_entry);
__ Jump(a2);
// Optimized code slot contains deoptimized code or code is cleared and
@@ -175,7 +175,7 @@ void MacroAssembler::GenerateTailCallToReturnedCode(
CallRuntime(function_id, 1);
// Use the return value before restoring a0
- AddWord(a2, a0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ LoadCodeEntry(a2, a0);
// Restore target function, new target and actual argument count.
Pop(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
kJavaScriptCallArgCountRegister);
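
Note on this hunk: the old code computed the entry point by hand as the code object plus Code::kHeaderSize minus the heap-object tag; after the Code/CodeT merge the entry point is read from a field of the Code object instead, which is what LoadCodeEntry does. A minimal sketch of the idea (the field name is an assumption, not taken from this diff):

  // Hypothetical sketch: load the entry-point field instead of computing a
  // header-relative address. Code::kCodeEntryPointOffset is assumed here.
  void LoadCodeEntrySketch(MacroAssembler* masm, Register dst, Register code) {
    masm->LoadWord(dst, FieldMemOperand(code, Code::kCodeEntryPointOffset));
  }
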
@@ -238,20 +238,19 @@ void MacroAssembler::OptimizeCodeOrTailCallOptimizedCodeSlot(
bind(&maybe_has_optimized_code);
Register optimized_code_entry = flags;
- LoadAnyTaggedField(
- optimized_code_entry,
- FieldMemOperand(feedback_vector,
- FeedbackVector::kMaybeOptimizedCodeOffset));
+ LoadTaggedField(optimized_code_entry,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kMaybeOptimizedCodeOffset));
TailCallOptimizedCodeSlot(this, optimized_code_entry, temps.Acquire(),
temps.Acquire());
}
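
The LoadAnyTaggedField/LoadTaggedPointerField pair collapses into a single LoadTaggedField here: with pointer compression every tagged field is a 32-bit slot, and the load site no longer distinguishes Smi from heap pointer. A loose sketch of what such a load amounts to on RV64 (names illustrative, decompression details simplified):

  // Hedged sketch of a compressed tagged-field load: read 32 bits, then
  // decompress against the pointer-compression cage base.
  void LoadTaggedFieldSketch(MacroAssembler* masm, Register dst,
                             const MemOperand& field) {
    masm->Lwu(dst, field);  // 32-bit compressed value
    masm->AddWord(dst, kPtrComprCageBaseRegister, Operand(dst));  // decompress
  }
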
-void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
+void MacroAssembler::LoadRoot(Register destination, RootIndex index) {
LoadWord(destination,
MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
}
-void TurboAssembler::LoadRoot(Register destination, RootIndex index,
+void MacroAssembler::LoadRoot(Register destination, RootIndex index,
Condition cond, Register src1,
const Operand& src2) {
Label skip;
@@ -261,7 +260,7 @@ void TurboAssembler::LoadRoot(Register destination, RootIndex index,
bind(&skip);
}
-void TurboAssembler::PushCommonFrame(Register marker_reg) {
+void MacroAssembler::PushCommonFrame(Register marker_reg) {
if (marker_reg.is_valid()) {
Push(ra, fp, marker_reg);
AddWord(fp, sp, Operand(kSystemPointerSize));
@@ -271,7 +270,7 @@ void TurboAssembler::PushCommonFrame(Register marker_reg) {
}
}
-void TurboAssembler::PushStandardFrame(Register function_reg) {
+void MacroAssembler::PushStandardFrame(Register function_reg) {
int offset = -StandardFrameConstants::kContextOffset;
if (function_reg.is_valid()) {
Push(ra, fp, cp, function_reg, kJavaScriptCallArgCountRegister);
@@ -328,17 +327,17 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
bind(&done);
}
-void TurboAssembler::MaybeSaveRegisters(RegList registers) {
+void MacroAssembler::MaybeSaveRegisters(RegList registers) {
if (registers.is_empty()) return;
MultiPush(registers);
}
-void TurboAssembler::MaybeRestoreRegisters(RegList registers) {
+void MacroAssembler::MaybeRestoreRegisters(RegList registers) {
if (registers.is_empty()) return;
MultiPop(registers);
}
-void TurboAssembler::CallEphemeronKeyBarrier(Register object,
+void MacroAssembler::CallEphemeronKeyBarrier(Register object,
Register slot_address,
SaveFPRegsMode fp_mode) {
DCHECK(!AreAliased(object, slot_address));
@@ -361,7 +360,7 @@ void TurboAssembler::CallEphemeronKeyBarrier(Register object,
MaybeRestoreRegisters(registers);
}
-void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object,
+void MacroAssembler::CallRecordWriteStubSaveRegisters(Register object,
Register slot_address,
SaveFPRegsMode fp_mode,
StubCallMode mode) {
@@ -384,7 +383,7 @@ void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object,
MaybeRestoreRegisters(registers);
}
-void TurboAssembler::CallRecordWriteStub(Register object, Register slot_address,
+void MacroAssembler::CallRecordWriteStub(Register object, Register slot_address,
SaveFPRegsMode fp_mode,
StubCallMode mode) {
// Use CallRecordWriteStubSaveRegisters if the object and slot registers
@@ -413,7 +412,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
Register temp = temps.Acquire();
DCHECK(!AreAliased(object, value, temp));
AddWord(temp, object, offset);
- LoadTaggedPointerField(temp, MemOperand(temp));
+ LoadTaggedField(temp, MemOperand(temp));
Assert(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite, temp,
Operand(value));
}
@@ -436,7 +435,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
Register temp = temps.Acquire();
CheckPageFlag(value,
temp, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask,
+ MemoryChunk::kPointersToHereAreInterestingMask,
eq, // In RISC-V, it uses cc for a comparison with 0, so if
// no bits are set, and cc is eq, it will branch to done
&done);
@@ -469,7 +468,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
// ---------------------------------------------------------------------------
// Instruction macros.
#if V8_TARGET_ARCH_RISCV64
-void TurboAssembler::Add32(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Add32(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
((rd.code() & 0b11000) == 0b01000) &&
@@ -499,7 +498,7 @@ void TurboAssembler::Add32(Register rd, Register rs, const Operand& rt) {
}
}
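
The recurring test `(rd.code() & 0b11000) == 0b01000` in these macros checks whether a register can be named by the 3-bit register fields of RVC compressed instructions (c.addw, c.sub, c.and, ...), which reach only x8 through x15. In isolation:

  // Sketch of the predicate: x8..x15 have 5-bit codes 0b01000..0b01111,
  // i.e. the top two bits of the five-bit code are 01.
  bool IsRvcEncodable(const Register& r) {
    return (r.code() & 0b11000) == 0b01000;
  }
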
-void TurboAssembler::Sub32(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Sub32(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
((rd.code() & 0b11000) == 0b01000) &&
@@ -541,15 +540,15 @@ void TurboAssembler::Sub32(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::AddWord(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::AddWord(Register rd, Register rs, const Operand& rt) {
Add64(rd, rs, rt);
}
-void TurboAssembler::SubWord(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::SubWord(Register rd, Register rs, const Operand& rt) {
Sub64(rd, rs, rt);
}
-void TurboAssembler::Sub64(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Sub64(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
((rd.code() & 0b11000) == 0b01000) &&
@@ -598,7 +597,7 @@ void TurboAssembler::Sub64(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Add64(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Add64(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
(rt.rm() != zero_reg) && (rs != zero_reg)) {
@@ -638,7 +637,7 @@ void TurboAssembler::Add64(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Mul32(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Mul32(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
mulw(rd, rs, rt.rm());
} else {
@@ -650,7 +649,7 @@ void TurboAssembler::Mul32(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Mulh32(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Mulh32(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
mul(rd, rs, rt.rm());
} else {
@@ -663,7 +662,7 @@ void TurboAssembler::Mulh32(Register rd, Register rs, const Operand& rt) {
srai(rd, rd, 32);
}
-void TurboAssembler::Mulhu32(Register rd, Register rs, const Operand& rt,
+void MacroAssembler::Mulhu32(Register rd, Register rs, const Operand& rt,
Register rsz, Register rtz) {
slli(rsz, rs, 32);
if (rt.is_reg()) {
@@ -675,7 +674,7 @@ void TurboAssembler::Mulhu32(Register rd, Register rs, const Operand& rt,
srai(rd, rd, 32);
}
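
Mulh32 and Mulhu32 have no single RV64 instruction: mulw returns only the low 32 bits of a product. The macros therefore form the full 64-bit product and arithmetic-shift it right by 32; Mulhu32 first shifts both operands left by 32 (into rsz/rtz) so that mulhu of the shifted values leaves the zero-extended 64-bit product in the upper half. The arithmetic being implemented, in C terms:

  // High 32 bits of a 32x32 multiply, as these macros compute it:
  int32_t Mulh32Sketch(int32_t a, int32_t b) {
    return (int32_t)(((int64_t)a * (int64_t)b) >> 32);
  }
  uint32_t Mulhu32Sketch(uint32_t a, uint32_t b) {
    return (uint32_t)(((uint64_t)a * (uint64_t)b) >> 32);
  }
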
-void TurboAssembler::Mul64(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Mul64(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
mul(rd, rs, rt.rm());
} else {
@@ -687,7 +686,7 @@ void TurboAssembler::Mul64(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Mulh64(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Mulh64(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
mulh(rd, rs, rt.rm());
} else {
@@ -699,7 +698,7 @@ void TurboAssembler::Mulh64(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Mulhu64(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Mulhu64(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
mulhu(rd, rs, rt.rm());
} else {
@@ -711,7 +710,7 @@ void TurboAssembler::Mulhu64(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Div32(Register res, Register rs, const Operand& rt) {
+void MacroAssembler::Div32(Register res, Register rs, const Operand& rt) {
if (rt.is_reg()) {
divw(res, rs, rt.rm());
} else {
@@ -723,7 +722,7 @@ void TurboAssembler::Div32(Register res, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Mod32(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Mod32(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
remw(rd, rs, rt.rm());
} else {
@@ -735,7 +734,7 @@ void TurboAssembler::Mod32(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Modu32(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Modu32(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
remuw(rd, rs, rt.rm());
} else {
@@ -747,7 +746,7 @@ void TurboAssembler::Modu32(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Div64(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Div64(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
div(rd, rs, rt.rm());
} else {
@@ -759,7 +758,7 @@ void TurboAssembler::Div64(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Divu32(Register res, Register rs, const Operand& rt) {
+void MacroAssembler::Divu32(Register res, Register rs, const Operand& rt) {
if (rt.is_reg()) {
divuw(res, rs, rt.rm());
} else {
@@ -771,7 +770,7 @@ void TurboAssembler::Divu32(Register res, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Divu64(Register res, Register rs, const Operand& rt) {
+void MacroAssembler::Divu64(Register res, Register rs, const Operand& rt) {
if (rt.is_reg()) {
divu(res, rs, rt.rm());
} else {
@@ -783,7 +782,7 @@ void TurboAssembler::Divu64(Register res, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Mod64(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Mod64(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
rem(rd, rs, rt.rm());
} else {
@@ -795,7 +794,7 @@ void TurboAssembler::Mod64(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Modu64(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Modu64(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
remu(rd, rs, rt.rm());
} else {
@@ -807,11 +806,11 @@ void TurboAssembler::Modu64(Register rd, Register rs, const Operand& rt) {
}
}
#elif V8_TARGET_ARCH_RISCV32
-void TurboAssembler::AddWord(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::AddWord(Register rd, Register rs, const Operand& rt) {
Add32(rd, rs, rt);
}
-void TurboAssembler::Add32(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Add32(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
(rt.rm() != zero_reg) && (rs != zero_reg)) {
@@ -851,11 +850,11 @@ void TurboAssembler::Add32(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::SubWord(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::SubWord(Register rd, Register rs, const Operand& rt) {
Sub32(rd, rs, rt);
}
-void TurboAssembler::Sub32(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Sub32(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
((rd.code() & 0b11000) == 0b01000) &&
@@ -905,11 +904,11 @@ void TurboAssembler::Sub32(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Mul32(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Mul32(Register rd, Register rs, const Operand& rt) {
Mul(rd, rs, rt);
}
-void TurboAssembler::Mul(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
mul(rd, rs, rt.rm());
} else {
@@ -921,7 +920,7 @@ void TurboAssembler::Mul(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
mulh(rd, rs, rt.rm());
} else {
@@ -933,7 +932,7 @@ void TurboAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Mulhu(Register rd, Register rs, const Operand& rt,
+void MacroAssembler::Mulhu(Register rd, Register rs, const Operand& rt,
Register rsz, Register rtz) {
if (rt.is_reg()) {
mulhu(rd, rs, rt.rm());
@@ -946,7 +945,7 @@ void TurboAssembler::Mulhu(Register rd, Register rs, const Operand& rt,
}
}
-void TurboAssembler::Div(Register res, Register rs, const Operand& rt) {
+void MacroAssembler::Div(Register res, Register rs, const Operand& rt) {
if (rt.is_reg()) {
div(res, rs, rt.rm());
} else {
@@ -958,7 +957,7 @@ void TurboAssembler::Div(Register res, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Mod(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
rem(rd, rs, rt.rm());
} else {
@@ -970,7 +969,7 @@ void TurboAssembler::Mod(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Modu(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Modu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
remu(rd, rs, rt.rm());
} else {
@@ -982,7 +981,7 @@ void TurboAssembler::Modu(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Divu(Register res, Register rs, const Operand& rt) {
+void MacroAssembler::Divu(Register res, Register rs, const Operand& rt) {
if (rt.is_reg()) {
divu(res, rs, rt.rm());
} else {
@@ -996,7 +995,7 @@ void TurboAssembler::Divu(Register res, Register rs, const Operand& rt) {
#endif
-void TurboAssembler::And(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
((rd.code() & 0b11000) == 0b01000) &&
@@ -1022,7 +1021,7 @@ void TurboAssembler::And(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Or(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
((rd.code() & 0b11000) == 0b01000) &&
@@ -1044,7 +1043,7 @@ void TurboAssembler::Or(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Xor(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
((rd.code() & 0b11000) == 0b01000) &&
@@ -1066,7 +1065,7 @@ void TurboAssembler::Xor(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Nor(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
or_(rd, rs, rt.rm());
not_(rd, rd);
@@ -1076,12 +1075,12 @@ void TurboAssembler::Nor(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Neg(Register rs, const Operand& rt) {
+void MacroAssembler::Neg(Register rs, const Operand& rt) {
DCHECK(rt.is_reg());
neg(rs, rt.rm());
}
-void TurboAssembler::Seqz(Register rd, const Operand& rt) {
+void MacroAssembler::Seqz(Register rd, const Operand& rt) {
if (rt.is_reg()) {
seqz(rd, rt.rm());
} else {
@@ -1089,7 +1088,7 @@ void TurboAssembler::Seqz(Register rd, const Operand& rt) {
}
}
-void TurboAssembler::Snez(Register rd, const Operand& rt) {
+void MacroAssembler::Snez(Register rd, const Operand& rt) {
if (rt.is_reg()) {
snez(rd, rt.rm());
} else {
@@ -1097,7 +1096,7 @@ void TurboAssembler::Snez(Register rd, const Operand& rt) {
}
}
-void TurboAssembler::Seq(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Seq(Register rd, Register rs, const Operand& rt) {
if (rs == zero_reg) {
Seqz(rd, rt);
} else if (IsZero(rt)) {
@@ -1108,7 +1107,7 @@ void TurboAssembler::Seq(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Sne(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Sne(Register rd, Register rs, const Operand& rt) {
if (rs == zero_reg) {
Snez(rd, rt);
} else if (IsZero(rt)) {
@@ -1119,7 +1118,7 @@ void TurboAssembler::Sne(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Slt(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
slt(rd, rs, rt.rm());
} else {
@@ -1136,7 +1135,7 @@ void TurboAssembler::Slt(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
sltu(rd, rs, rt.rm());
} else {
@@ -1153,7 +1152,7 @@ void TurboAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Sle(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Sle(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
slt(rd, rt.rm(), rs);
} else {
@@ -1167,7 +1166,7 @@ void TurboAssembler::Sle(Register rd, Register rs, const Operand& rt) {
xori(rd, rd, 1);
}
-void TurboAssembler::Sleu(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Sleu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
sltu(rd, rt.rm(), rs);
} else {
@@ -1181,17 +1180,17 @@ void TurboAssembler::Sleu(Register rd, Register rs, const Operand& rt) {
xori(rd, rd, 1);
}
-void TurboAssembler::Sge(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Sge(Register rd, Register rs, const Operand& rt) {
Slt(rd, rs, rt);
xori(rd, rd, 1);
}
-void TurboAssembler::Sgeu(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Sgeu(Register rd, Register rs, const Operand& rt) {
Sltu(rd, rs, rt);
xori(rd, rd, 1);
}
-void TurboAssembler::Sgt(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Sgt(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
slt(rd, rt.rm(), rs);
} else {
@@ -1204,7 +1203,7 @@ void TurboAssembler::Sgt(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Sgtu(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Sgtu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
sltu(rd, rt.rm(), rs);
} else {
@@ -1218,7 +1217,7 @@ void TurboAssembler::Sgtu(Register rd, Register rs, const Operand& rt) {
}
#if V8_TARGET_ARCH_RISCV64
-void TurboAssembler::Sll32(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Sll32(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
sllw(rd, rs, rt.rm());
} else {
@@ -1227,7 +1226,7 @@ void TurboAssembler::Sll32(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Sra32(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Sra32(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
sraw(rd, rs, rt.rm());
} else {
@@ -1236,7 +1235,7 @@ void TurboAssembler::Sra32(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Srl32(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Srl32(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
srlw(rd, rs, rt.rm());
} else {
@@ -1245,11 +1244,11 @@ void TurboAssembler::Srl32(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::SraWord(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::SraWord(Register rd, Register rs, const Operand& rt) {
Sra64(rd, rs, rt);
}
-void TurboAssembler::Sra64(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Sra64(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
sra(rd, rs, rt.rm());
} else if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
@@ -1262,11 +1261,11 @@ void TurboAssembler::Sra64(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::SrlWord(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::SrlWord(Register rd, Register rs, const Operand& rt) {
Srl64(rd, rs, rt);
}
-void TurboAssembler::Srl64(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Srl64(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
srl(rd, rs, rt.rm());
} else if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
@@ -1279,11 +1278,11 @@ void TurboAssembler::Srl64(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::SllWord(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::SllWord(Register rd, Register rs, const Operand& rt) {
Sll64(rd, rs, rt);
}
-void TurboAssembler::Sll64(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Sll64(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
sll(rd, rs, rt.rm());
} else {
@@ -1297,7 +1296,7 @@ void TurboAssembler::Sll64(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -1322,7 +1321,7 @@ void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Dror(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Dror(Register rd, Register rs, const Operand& rt) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -1345,11 +1344,11 @@ void TurboAssembler::Dror(Register rd, Register rs, const Operand& rt) {
}
}
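
Ror and Dror synthesize rotates, since base RV64 without the Zbb extension has no rotate instruction: the value is shifted right by the rotate amount, shifted left by width minus amount into a scratch register, and the two halves are ORed together. Equivalent C for the 64-bit case:

  // Rotate right without a hardware rotate (amount taken mod 64):
  uint64_t RotateRight64(uint64_t x, unsigned n) {
    n &= 63;
    if (n == 0) return x;  // avoid the undefined shift by 64
    return (x >> n) | (x << (64 - n));
  }
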
#elif V8_TARGET_ARCH_RISCV32
-void TurboAssembler::SllWord(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::SllWord(Register rd, Register rs, const Operand& rt) {
Sll32(rd, rs, rt);
}
-void TurboAssembler::Sll32(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Sll32(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
sll(rd, rs, rt.rm());
} else {
@@ -1358,11 +1357,11 @@ void TurboAssembler::Sll32(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::SraWord(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::SraWord(Register rd, Register rs, const Operand& rt) {
Sra32(rd, rs, rt);
}
-void TurboAssembler::Sra32(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Sra32(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
sra(rd, rs, rt.rm());
} else {
@@ -1371,11 +1370,11 @@ void TurboAssembler::Sra32(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::SrlWord(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::SrlWord(Register rd, Register rs, const Operand& rt) {
Srl32(rd, rs, rt);
}
-void TurboAssembler::Srl32(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Srl32(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
srl(rd, rs, rt.rm());
} else {
@@ -1384,7 +1383,7 @@ void TurboAssembler::Srl32(Register rd, Register rs, const Operand& rt) {
}
}
-void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -1408,7 +1407,7 @@ void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) {
}
#endif
-void TurboAssembler::Li(Register rd, intptr_t imm) {
+void MacroAssembler::Li(Register rd, intptr_t imm) {
if (v8_flags.riscv_c_extension && (rd != zero_reg) && is_int6(imm)) {
c_li(rd, imm);
} else {
@@ -1416,7 +1415,7 @@ void TurboAssembler::Li(Register rd, intptr_t imm) {
}
}
-void TurboAssembler::Mv(Register rd, const Operand& rt) {
+void MacroAssembler::Mv(Register rd, const Operand& rt) {
if (v8_flags.riscv_c_extension && (rd != zero_reg) && (rt.rm() != zero_reg)) {
c_mv(rd, rt.rm());
} else {
@@ -1424,7 +1423,7 @@ void TurboAssembler::Mv(Register rd, const Operand& rt) {
}
}
-void TurboAssembler::CalcScaledAddress(Register rd, Register rt, Register rs,
+void MacroAssembler::CalcScaledAddress(Register rd, Register rt, Register rs,
uint8_t sa) {
DCHECK(sa >= 1 && sa <= 31);
UseScratchRegisterScope temps(this);
@@ -1437,7 +1436,7 @@ void TurboAssembler::CalcScaledAddress(Register rd, Register rt, Register rs,
// ------------Pseudo-instructions-------------
// Change endianness
#if V8_TARGET_ARCH_RISCV64
-void TurboAssembler::ByteSwap(Register rd, Register rs, int operand_size,
+void MacroAssembler::ByteSwap(Register rd, Register rs, int operand_size,
Register scratch) {
DCHECK_NE(scratch, rs);
DCHECK_NE(scratch, rd);
@@ -1495,7 +1494,7 @@ void TurboAssembler::ByteSwap(Register rd, Register rs, int operand_size,
}
#elif V8_TARGET_ARCH_RISCV32
-void TurboAssembler::ByteSwap(Register rd, Register rs, int operand_size,
+void MacroAssembler::ByteSwap(Register rd, Register rs, int operand_size,
Register scratch) {
DCHECK_NE(scratch, rs);
DCHECK_NE(scratch, rd);
@@ -1522,7 +1521,7 @@ void TurboAssembler::ByteSwap(Register rd, Register rs, int operand_size,
#endif
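
ByteSwap likewise has no single instruction to lean on without Zbb's rev8, so the helper builds the swap from shifts, masks, and ORs. The 32-bit case is the standard pattern (conceptual; the emitted sequence differs in register use):

  // Conceptual 32-bit byte swap of the kind ByteSwap expands to:
  uint32_t ByteSwap32Sketch(uint32_t x) {
    return ((x & 0x000000FFu) << 24) | ((x & 0x0000FF00u) << 8) |
           ((x & 0x00FF0000u) >> 8)  | ((x & 0xFF000000u) >> 24);
  }
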
template <int NBYTES, bool LOAD_SIGNED>
-void TurboAssembler::LoadNBytes(Register rd, const MemOperand& rs,
+void MacroAssembler::LoadNBytes(Register rd, const MemOperand& rs,
Register scratch) {
DCHECK(rd != rs.rm() && rd != scratch);
DCHECK_LE(NBYTES, 8);
@@ -1544,7 +1543,7 @@ void TurboAssembler::LoadNBytes(Register rd, const MemOperand& rs,
}
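
The LoadNBytes helpers assemble an unaligned little-endian value byte by byte: the most significant byte is loaded with lb or lbu, so sign- or zero-extension falls out of that first load, then each lower byte is loaded with lbu, the accumulator is shifted left by 8, and the byte is ORed in. In C terms:

  // Conceptual unaligned little-endian load, matching the lb/lbu+slli+or
  // sequence these templates emit:
  int64_t LoadNBytesSketch(const uint8_t* p, int nbytes, bool load_signed) {
    int64_t v = load_signed ? (int64_t)(int8_t)p[nbytes - 1]  // lb
                            : (int64_t)p[nbytes - 1];         // lbu
    for (int i = nbytes - 2; i >= 0; --i) {
      v = (v << 8) | p[i];  // lbu + slli + or
    }
    return v;
  }
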
template <int NBYTES, bool LOAD_SIGNED>
-void TurboAssembler::LoadNBytesOverwritingBaseReg(const MemOperand& rs,
+void MacroAssembler::LoadNBytesOverwritingBaseReg(const MemOperand& rs,
Register scratch0,
Register scratch1) {
// This function loads nbytes from memory specified by rs and into rs.rm()
@@ -1573,7 +1572,7 @@ void TurboAssembler::LoadNBytesOverwritingBaseReg(const MemOperand& rs,
}
template <int NBYTES, bool IS_SIGNED>
-void TurboAssembler::UnalignedLoadHelper(Register rd, const MemOperand& rs) {
+void MacroAssembler::UnalignedLoadHelper(Register rd, const MemOperand& rs) {
BlockTrampolinePoolScope block_trampoline_pool(this);
UseScratchRegisterScope temps(this);
@@ -1604,7 +1603,7 @@ void TurboAssembler::UnalignedLoadHelper(Register rd, const MemOperand& rs) {
#if V8_TARGET_ARCH_RISCV64
template <int NBYTES>
-void TurboAssembler::UnalignedFLoadHelper(FPURegister frd, const MemOperand& rs,
+void MacroAssembler::UnalignedFLoadHelper(FPURegister frd, const MemOperand& rs,
Register scratch_base) {
DCHECK(NBYTES == 4 || NBYTES == 8);
DCHECK_NE(scratch_base, rs.rm());
@@ -1629,7 +1628,7 @@ void TurboAssembler::UnalignedFLoadHelper(FPURegister frd, const MemOperand& rs,
}
#elif V8_TARGET_ARCH_RISCV32
template <int NBYTES>
-void TurboAssembler::UnalignedFLoadHelper(FPURegister frd, const MemOperand& rs,
+void MacroAssembler::UnalignedFLoadHelper(FPURegister frd, const MemOperand& rs,
Register scratch_base) {
DCHECK_EQ(NBYTES, 4);
DCHECK_NE(scratch_base, rs.rm());
@@ -1650,7 +1649,7 @@ void TurboAssembler::UnalignedFLoadHelper(FPURegister frd, const MemOperand& rs,
fmv_w_x(frd, scratch);
}
-void TurboAssembler::UnalignedDoubleHelper(FPURegister frd,
+void MacroAssembler::UnalignedDoubleHelper(FPURegister frd,
const MemOperand& rs,
Register scratch_base) {
DCHECK_NE(scratch_base, rs.rm());
@@ -1679,7 +1678,7 @@ void TurboAssembler::UnalignedDoubleHelper(FPURegister frd,
#endif
template <int NBYTES>
-void TurboAssembler::UnalignedStoreHelper(Register rd, const MemOperand& rs,
+void MacroAssembler::UnalignedStoreHelper(Register rd, const MemOperand& rs,
Register scratch_other) {
DCHECK(scratch_other != rs.rm());
DCHECK_LE(NBYTES, 8);
@@ -1718,7 +1717,7 @@ void TurboAssembler::UnalignedStoreHelper(Register rd, const MemOperand& rs,
#if V8_TARGET_ARCH_RISCV64
template <int NBYTES>
-void TurboAssembler::UnalignedFStoreHelper(FPURegister frd,
+void MacroAssembler::UnalignedFStoreHelper(FPURegister frd,
const MemOperand& rs,
Register scratch) {
DCHECK(NBYTES == 8 || NBYTES == 4);
@@ -1732,7 +1731,7 @@ void TurboAssembler::UnalignedFStoreHelper(FPURegister frd,
}
#elif V8_TARGET_ARCH_RISCV32
template <int NBYTES>
-void TurboAssembler::UnalignedFStoreHelper(FPURegister frd,
+void MacroAssembler::UnalignedFStoreHelper(FPURegister frd,
const MemOperand& rs,
Register scratch) {
DCHECK_EQ(NBYTES, 4);
@@ -1740,7 +1739,7 @@ void TurboAssembler::UnalignedFStoreHelper(FPURegister frd,
fmv_x_w(scratch, frd);
UnalignedStoreHelper<NBYTES>(scratch, rs);
}
-void TurboAssembler::UnalignedDStoreHelper(FPURegister frd,
+void MacroAssembler::UnalignedDStoreHelper(FPURegister frd,
const MemOperand& rs,
Register scratch) {
DCHECK_NE(scratch, rs.rm());
@@ -1757,7 +1756,7 @@ void TurboAssembler::UnalignedDStoreHelper(FPURegister frd,
#endif
template <typename Reg_T, typename Func>
-void TurboAssembler::AlignedLoadHelper(Reg_T target, const MemOperand& rs,
+void MacroAssembler::AlignedLoadHelper(Reg_T target, const MemOperand& rs,
Func generator) {
MemOperand source = rs;
UseScratchRegisterScope temps(this);
@@ -1771,7 +1770,7 @@ void TurboAssembler::AlignedLoadHelper(Reg_T target, const MemOperand& rs,
}
template <typename Reg_T, typename Func>
-void TurboAssembler::AlignedStoreHelper(Reg_T value, const MemOperand& rs,
+void MacroAssembler::AlignedStoreHelper(Reg_T value, const MemOperand& rs,
Func generator) {
MemOperand source = rs;
UseScratchRegisterScope temps(this);
@@ -1787,32 +1786,32 @@ void TurboAssembler::AlignedStoreHelper(Reg_T value, const MemOperand& rs,
generator(value, source);
}
-void TurboAssembler::Ulw(Register rd, const MemOperand& rs) {
+void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
UnalignedLoadHelper<4, true>(rd, rs);
}
#if V8_TARGET_ARCH_RISCV64
-void TurboAssembler::Ulwu(Register rd, const MemOperand& rs) {
+void MacroAssembler::Ulwu(Register rd, const MemOperand& rs) {
UnalignedLoadHelper<4, false>(rd, rs);
}
#endif
-void TurboAssembler::Usw(Register rd, const MemOperand& rs) {
+void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
UnalignedStoreHelper<4>(rd, rs);
}
-void TurboAssembler::Ulh(Register rd, const MemOperand& rs) {
+void MacroAssembler::Ulh(Register rd, const MemOperand& rs) {
UnalignedLoadHelper<2, true>(rd, rs);
}
-void TurboAssembler::Ulhu(Register rd, const MemOperand& rs) {
+void MacroAssembler::Ulhu(Register rd, const MemOperand& rs) {
UnalignedLoadHelper<2, false>(rd, rs);
}
-void TurboAssembler::Ush(Register rd, const MemOperand& rs) {
+void MacroAssembler::Ush(Register rd, const MemOperand& rs) {
UnalignedStoreHelper<2>(rd, rs);
}
-void TurboAssembler::Uld(Register rd, const MemOperand& rs) {
+void MacroAssembler::Uld(Register rd, const MemOperand& rs) {
UnalignedLoadHelper<8, true>(rd, rs);
}
#if V8_TARGET_ARCH_RISCV64
@@ -1838,23 +1837,23 @@ void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs) {
}
#endif
-void TurboAssembler::Usd(Register rd, const MemOperand& rs) {
+void MacroAssembler::Usd(Register rd, const MemOperand& rs) {
UnalignedStoreHelper<8>(rd, rs);
}
-void TurboAssembler::ULoadFloat(FPURegister fd, const MemOperand& rs,
+void MacroAssembler::ULoadFloat(FPURegister fd, const MemOperand& rs,
Register scratch) {
DCHECK_NE(scratch, rs.rm());
UnalignedFLoadHelper<4>(fd, rs, scratch);
}
-void TurboAssembler::UStoreFloat(FPURegister fd, const MemOperand& rs,
+void MacroAssembler::UStoreFloat(FPURegister fd, const MemOperand& rs,
Register scratch) {
DCHECK_NE(scratch, rs.rm());
UnalignedFStoreHelper<4>(fd, rs, scratch);
}
-void TurboAssembler::ULoadDouble(FPURegister fd, const MemOperand& rs,
+void MacroAssembler::ULoadDouble(FPURegister fd, const MemOperand& rs,
Register scratch) {
DCHECK_NE(scratch, rs.rm());
#if V8_TARGET_ARCH_RISCV64
@@ -1864,7 +1863,7 @@ void TurboAssembler::ULoadDouble(FPURegister fd, const MemOperand& rs,
#endif
}
-void TurboAssembler::UStoreDouble(FPURegister fd, const MemOperand& rs,
+void MacroAssembler::UStoreDouble(FPURegister fd, const MemOperand& rs,
Register scratch) {
DCHECK_NE(scratch, rs.rm());
#if V8_TARGET_ARCH_RISCV64
@@ -1874,49 +1873,49 @@ void TurboAssembler::UStoreDouble(FPURegister fd, const MemOperand& rs,
#endif
}
-void TurboAssembler::Lb(Register rd, const MemOperand& rs) {
+void MacroAssembler::Lb(Register rd, const MemOperand& rs) {
auto fn = [this](Register target, const MemOperand& source) {
this->lb(target, source.rm(), source.offset());
};
AlignedLoadHelper(rd, rs, fn);
}
-void TurboAssembler::Lbu(Register rd, const MemOperand& rs) {
+void MacroAssembler::Lbu(Register rd, const MemOperand& rs) {
auto fn = [this](Register target, const MemOperand& source) {
this->lbu(target, source.rm(), source.offset());
};
AlignedLoadHelper(rd, rs, fn);
}
-void TurboAssembler::Sb(Register rd, const MemOperand& rs) {
+void MacroAssembler::Sb(Register rd, const MemOperand& rs) {
auto fn = [this](Register value, const MemOperand& source) {
this->sb(value, source.rm(), source.offset());
};
AlignedStoreHelper(rd, rs, fn);
}
-void TurboAssembler::Lh(Register rd, const MemOperand& rs) {
+void MacroAssembler::Lh(Register rd, const MemOperand& rs) {
auto fn = [this](Register target, const MemOperand& source) {
this->lh(target, source.rm(), source.offset());
};
AlignedLoadHelper(rd, rs, fn);
}
-void TurboAssembler::Lhu(Register rd, const MemOperand& rs) {
+void MacroAssembler::Lhu(Register rd, const MemOperand& rs) {
auto fn = [this](Register target, const MemOperand& source) {
this->lhu(target, source.rm(), source.offset());
};
AlignedLoadHelper(rd, rs, fn);
}
-void TurboAssembler::Sh(Register rd, const MemOperand& rs) {
+void MacroAssembler::Sh(Register rd, const MemOperand& rs) {
auto fn = [this](Register value, const MemOperand& source) {
this->sh(value, source.rm(), source.offset());
};
AlignedStoreHelper(rd, rs, fn);
}
-void TurboAssembler::Lw(Register rd, const MemOperand& rs) {
+void MacroAssembler::Lw(Register rd, const MemOperand& rs) {
auto fn = [this](Register target, const MemOperand& source) {
if (v8_flags.riscv_c_extension && ((target.code() & 0b11000) == 0b01000) &&
((source.rm().code() & 0b11000) == 0b01000) &&
@@ -1934,14 +1933,14 @@ void TurboAssembler::Lw(Register rd, const MemOperand& rs) {
}
#if V8_TARGET_ARCH_RISCV64
-void TurboAssembler::Lwu(Register rd, const MemOperand& rs) {
+void MacroAssembler::Lwu(Register rd, const MemOperand& rs) {
auto fn = [this](Register target, const MemOperand& source) {
this->lwu(target, source.rm(), source.offset());
};
AlignedLoadHelper(rd, rs, fn);
}
#endif
-void TurboAssembler::Sw(Register rd, const MemOperand& rs) {
+void MacroAssembler::Sw(Register rd, const MemOperand& rs) {
auto fn = [this](Register value, const MemOperand& source) {
if (v8_flags.riscv_c_extension && ((value.code() & 0b11000) == 0b01000) &&
((source.rm().code() & 0b11000) == 0b01000) &&
@@ -1958,7 +1957,7 @@ void TurboAssembler::Sw(Register rd, const MemOperand& rs) {
}
#if V8_TARGET_ARCH_RISCV64
-void TurboAssembler::Ld(Register rd, const MemOperand& rs) {
+void MacroAssembler::Ld(Register rd, const MemOperand& rs) {
auto fn = [this](Register target, const MemOperand& source) {
if (v8_flags.riscv_c_extension && ((target.code() & 0b11000) == 0b01000) &&
((source.rm().code() & 0b11000) == 0b01000) &&
@@ -1975,7 +1974,7 @@ void TurboAssembler::Ld(Register rd, const MemOperand& rs) {
AlignedLoadHelper(rd, rs, fn);
}
-void TurboAssembler::Sd(Register rd, const MemOperand& rs) {
+void MacroAssembler::Sd(Register rd, const MemOperand& rs) {
auto fn = [this](Register value, const MemOperand& source) {
if (v8_flags.riscv_c_extension && ((value.code() & 0b11000) == 0b01000) &&
((source.rm().code() & 0b11000) == 0b01000) &&
@@ -1991,21 +1990,21 @@ void TurboAssembler::Sd(Register rd, const MemOperand& rs) {
AlignedStoreHelper(rd, rs, fn);
}
#endif
-void TurboAssembler::LoadFloat(FPURegister fd, const MemOperand& src) {
+void MacroAssembler::LoadFloat(FPURegister fd, const MemOperand& src) {
auto fn = [this](FPURegister target, const MemOperand& source) {
this->flw(target, source.rm(), source.offset());
};
AlignedLoadHelper(fd, src, fn);
}
-void TurboAssembler::StoreFloat(FPURegister fs, const MemOperand& src) {
+void MacroAssembler::StoreFloat(FPURegister fs, const MemOperand& src) {
auto fn = [this](FPURegister value, const MemOperand& source) {
this->fsw(value, source.rm(), source.offset());
};
AlignedStoreHelper(fs, src, fn);
}
-void TurboAssembler::LoadDouble(FPURegister fd, const MemOperand& src) {
+void MacroAssembler::LoadDouble(FPURegister fd, const MemOperand& src) {
auto fn = [this](FPURegister target, const MemOperand& source) {
if (v8_flags.riscv_c_extension && ((target.code() & 0b11000) == 0b01000) &&
((source.rm().code() & 0b11000) == 0b01000) &&
@@ -2021,7 +2020,7 @@ void TurboAssembler::LoadDouble(FPURegister fd, const MemOperand& src) {
AlignedLoadHelper(fd, src, fn);
}
-void TurboAssembler::StoreDouble(FPURegister fs, const MemOperand& src) {
+void MacroAssembler::StoreDouble(FPURegister fs, const MemOperand& src) {
auto fn = [this](FPURegister value, const MemOperand& source) {
if (v8_flags.riscv_c_extension && ((value.code() & 0b11000) == 0b01000) &&
((source.rm().code() & 0b11000) == 0b01000) &&
@@ -2037,7 +2036,7 @@ void TurboAssembler::StoreDouble(FPURegister fs, const MemOperand& src) {
AlignedStoreHelper(fs, src, fn);
}
-void TurboAssembler::Ll(Register rd, const MemOperand& rs) {
+void MacroAssembler::Ll(Register rd, const MemOperand& rs) {
bool is_one_instruction = rs.offset() == 0;
if (is_one_instruction) {
lr_w(false, false, rd, rs.rm());
@@ -2050,7 +2049,7 @@ void TurboAssembler::Ll(Register rd, const MemOperand& rs) {
}
#if V8_TARGET_ARCH_RISCV64
-void TurboAssembler::Lld(Register rd, const MemOperand& rs) {
+void MacroAssembler::Lld(Register rd, const MemOperand& rs) {
bool is_one_instruction = rs.offset() == 0;
if (is_one_instruction) {
lr_d(false, false, rd, rs.rm());
@@ -2062,7 +2061,7 @@ void TurboAssembler::Lld(Register rd, const MemOperand& rs) {
}
}
#endif
-void TurboAssembler::Sc(Register rd, const MemOperand& rs) {
+void MacroAssembler::Sc(Register rd, const MemOperand& rs) {
bool is_one_instruction = rs.offset() == 0;
if (is_one_instruction) {
sc_w(false, false, rd, rs.rm(), rd);
@@ -2074,7 +2073,7 @@ void TurboAssembler::Sc(Register rd, const MemOperand& rs) {
}
}
#if V8_TARGET_ARCH_RISCV64
-void TurboAssembler::Scd(Register rd, const MemOperand& rs) {
+void MacroAssembler::Scd(Register rd, const MemOperand& rs) {
bool is_one_instruction = rs.offset() == 0;
if (is_one_instruction) {
sc_d(false, false, rd, rs.rm(), rd);
@@ -2086,7 +2085,7 @@ void TurboAssembler::Scd(Register rd, const MemOperand& rs) {
}
}
#endif
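
Ll/Sc and Lld/Scd wrap lr.w/sc.w and lr.d/sc.d; since those instructions take no immediate offset, a nonzero rs.offset() forces the address to be materialized into a scratch register first (the multi-instruction path above). Note that Sc stores rd and then overwrites rd with the status flag, zero on success. A hedged sketch of the usual consumer, a compare-and-swap loop written against these macros (illustrative only):

  // CAS via Ll/Sc: addr in a5 (offset 0), expected in a1, new value in a2;
  // a3/a4 are clobbered.
  Label retry, done;
  masm->bind(&retry);
  masm->Ll(a3, MemOperand(a5, 0));                  // lr.w
  masm->Branch(&done, ne, a3, Operand(a1));         // value changed: bail out
  masm->Mv(a4, Operand(a2));                        // Sc clobbers its rd
  masm->Sc(a4, MemOperand(a5, 0));                  // sc.w: a4 = 0 on success
  masm->Branch(&retry, ne, a4, Operand(zero_reg));  // reservation lost, retry
  masm->bind(&done);
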
-void TurboAssembler::li(Register dst, Handle<HeapObject> value,
+void MacroAssembler::li(Register dst, Handle<HeapObject> value,
RelocInfo::Mode rmode) {
// TODO(jgruber,v8:8887): Also consider a root-relative load when generating
// non-isolate-independent code. In many cases it might be cheaper than
@@ -2104,7 +2103,7 @@ void TurboAssembler::li(Register dst, Handle<HeapObject> value,
}
}
-void TurboAssembler::li(Register dst, ExternalReference value, LiFlags mode) {
+void MacroAssembler::li(Register dst, ExternalReference value, LiFlags mode) {
// TODO(jgruber,v8:8887): Also consider a root-relative load when generating
// non-isolate-independent code. In many cases it might be cheaper than
// embedding the relocatable value.
@@ -2124,30 +2123,30 @@ static inline int InstrCountForLiLower32Bit(int64_t value) {
return 2;
}
-int TurboAssembler::InstrCountForLi64Bit(int64_t value) {
+int MacroAssembler::InstrCountForLi64Bit(int64_t value) {
if (is_int32(value + 0x800)) {
return InstrCountForLiLower32Bit(value);
} else {
- return li_estimate(value);
+ return RV_li_count(value);
}
UNREACHABLE();
return INT_MAX;
}
-void TurboAssembler::li_optimized(Register rd, Operand j, LiFlags mode) {
+void MacroAssembler::li_optimized(Register rd, Operand j, LiFlags mode) {
DCHECK(!j.is_reg());
DCHECK(!MustUseReg(j.rmode()));
DCHECK(mode == OPTIMIZE_SIZE);
Li(rd, j.immediate());
}
-void TurboAssembler::li(Register rd, Operand j, LiFlags mode) {
+void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
DCHECK(!j.is_reg());
BlockTrampolinePoolScope block_trampoline_pool(this);
if (!MustUseReg(j.rmode()) && mode == OPTIMIZE_SIZE) {
UseScratchRegisterScope temps(this);
- int count = li_estimate(j.immediate(), temps.hasAvailable());
- int reverse_count = li_estimate(~j.immediate(), temps.hasAvailable());
+ int count = RV_li_count(j.immediate(), temps.hasAvailable());
+ int reverse_count = RV_li_count(~j.immediate(), temps.hasAvailable());
if (v8_flags.riscv_constant_pool && count >= 4 && reverse_count >= 4) {
// Ld/Lw a Address from a constant pool.
RecordEntry((uintptr_t)j.immediate(), j.rmode());
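
The renamed RV_li_count (previously li_estimate) prices an immediate in instructions; li computes that price for both the value and its complement, because materializing ~imm plus one extra not can be shorter, and falls back to a constant-pool load when both sequences would run long. Roughly (the branches past this hunk are not shown, so treat this as an outline):

  // Outline of the selection logic around this hunk:
  int count = RV_li_count(imm, has_scratch);
  int reverse_count = RV_li_count(~imm, has_scratch);
  if (v8_flags.riscv_constant_pool && count >= 4 && reverse_count >= 4) {
    // Record imm in the constant pool; emit a pc-relative load instead.
  } else if (reverse_count < count) {
    // li rd, ~imm; not rd, rd  -- shorter via the complement. (Assumed
    // comparison; the diff ends before this branch.)
  } else {
    // Emit the direct li expansion.
  }
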
@@ -2188,7 +2187,7 @@ static RegList t_regs = {t0, t1, t2, t3, t4, t5, t6};
static RegList a_regs = {a0, a1, a2, a3, a4, a5, a6, a7};
static RegList s_regs = {s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11};
-void TurboAssembler::MultiPush(RegList regs) {
+void MacroAssembler::MultiPush(RegList regs) {
int16_t num_to_push = regs.Count();
int16_t stack_offset = num_to_push * kSystemPointerSize;
@@ -2232,7 +2231,7 @@ void TurboAssembler::MultiPush(RegList regs) {
#undef S_REGS
}
-void TurboAssembler::MultiPop(RegList regs) {
+void MacroAssembler::MultiPop(RegList regs) {
int16_t stack_offset = 0;
#define TEST_AND_POP_REG(reg) \
@@ -2273,7 +2272,7 @@ void TurboAssembler::MultiPop(RegList regs) {
#undef A_REGS
}
-void TurboAssembler::MultiPushFPU(DoubleRegList regs) {
+void MacroAssembler::MultiPushFPU(DoubleRegList regs) {
int16_t num_to_push = regs.Count();
int16_t stack_offset = num_to_push * kDoubleSize;
@@ -2286,7 +2285,7 @@ void TurboAssembler::MultiPushFPU(DoubleRegList regs) {
}
}
-void TurboAssembler::MultiPopFPU(DoubleRegList regs) {
+void MacroAssembler::MultiPopFPU(DoubleRegList regs) {
int16_t stack_offset = 0;
for (int16_t i = 0; i < kNumRegisters; i++) {
@@ -2299,7 +2298,7 @@ void TurboAssembler::MultiPopFPU(DoubleRegList regs) {
}
#if V8_TARGET_ARCH_RISCV32
-void TurboAssembler::AddPair(Register dst_low, Register dst_high,
+void MacroAssembler::AddPair(Register dst_low, Register dst_high,
Register left_low, Register left_high,
Register right_low, Register right_high,
Register scratch1, Register scratch2) {
@@ -2317,7 +2316,7 @@ void TurboAssembler::AddPair(Register dst_low, Register dst_high,
Move(dst_low, scratch1);
}
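
AddPair (and SubPair, symmetrically with a borrow) widens 32-bit arithmetic to a 64-bit register pair: add the low halves, recover the carry as the unsigned comparison of the low-half sum against an addend, and fold the carry into the high-half sum. The C equivalent:

  // 64-bit add from 32-bit halves with an explicit carry (what the
  // Add32/Sltu/Add32 sequence computes):
  void AddPairSketch(uint32_t ll, uint32_t lh, uint32_t rl, uint32_t rh,
                     uint32_t* dst_lo, uint32_t* dst_hi) {
    uint32_t lo = ll + rl;
    uint32_t carry = lo < ll;  // Sltu: unsigned wraparound means carry-out
    *dst_lo = lo;
    *dst_hi = lh + rh + carry;
  }
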
-void TurboAssembler::SubPair(Register dst_low, Register dst_high,
+void MacroAssembler::SubPair(Register dst_low, Register dst_high,
Register left_low, Register left_high,
Register right_low, Register right_high,
Register scratch1, Register scratch2) {
@@ -2335,27 +2334,27 @@ void TurboAssembler::SubPair(Register dst_low, Register dst_high,
Move(dst_low, scratch1);
}
-void TurboAssembler::AndPair(Register dst_low, Register dst_high,
+void MacroAssembler::AndPair(Register dst_low, Register dst_high,
Register left_low, Register left_high,
Register right_low, Register right_high) {
And(dst_low, left_low, right_low);
And(dst_high, left_high, right_high);
}
-void TurboAssembler::OrPair(Register dst_low, Register dst_high,
+void MacroAssembler::OrPair(Register dst_low, Register dst_high,
Register left_low, Register left_high,
Register right_low, Register right_high) {
Or(dst_low, left_low, right_low);
Or(dst_high, left_high, right_high);
}
-void TurboAssembler::XorPair(Register dst_low, Register dst_high,
+void MacroAssembler::XorPair(Register dst_low, Register dst_high,
Register left_low, Register left_high,
Register right_low, Register right_high) {
Xor(dst_low, left_low, right_low);
Xor(dst_high, left_high, right_high);
}
-void TurboAssembler::MulPair(Register dst_low, Register dst_high,
+void MacroAssembler::MulPair(Register dst_low, Register dst_high,
Register left_low, Register left_high,
Register right_low, Register right_high,
Register scratch1, Register scratch2) {
@@ -2381,7 +2380,7 @@ void TurboAssembler::MulPair(Register dst_low, Register dst_high,
Add32(dst_high, scratch2, scratch3);
}
-void TurboAssembler::ShlPair(Register dst_low, Register dst_high,
+void MacroAssembler::ShlPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register shift, Register scratch1,
Register scratch2) {
@@ -2426,7 +2425,7 @@ void TurboAssembler::ShlPair(Register dst_low, Register dst_high,
bind(&done);
}
-void TurboAssembler::ShlPair(Register dst_low, Register dst_high,
+void MacroAssembler::ShlPair(Register dst_low, Register dst_high,
Register src_low, Register src_high, int32_t shift,
Register scratch1, Register scratch2) {
DCHECK_GE(63, shift);
@@ -2451,7 +2450,7 @@ void TurboAssembler::ShlPair(Register dst_low, Register dst_high,
}
}
-void TurboAssembler::ShrPair(Register dst_low, Register dst_high,
+void MacroAssembler::ShrPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register shift, Register scratch1,
Register scratch2) {
@@ -2496,7 +2495,7 @@ void TurboAssembler::ShrPair(Register dst_low, Register dst_high,
bind(&done);
}
-void TurboAssembler::ShrPair(Register dst_low, Register dst_high,
+void MacroAssembler::ShrPair(Register dst_low, Register dst_high,
Register src_low, Register src_high, int32_t shift,
Register scratch1, Register scratch2) {
DCHECK_GE(63, shift);
@@ -2521,7 +2520,7 @@ void TurboAssembler::ShrPair(Register dst_low, Register dst_high,
}
}
-void TurboAssembler::SarPair(Register dst_low, Register dst_high,
+void MacroAssembler::SarPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register shift, Register scratch1,
Register scratch2) {
@@ -2564,7 +2563,7 @@ void TurboAssembler::SarPair(Register dst_low, Register dst_high,
bind(&done);
}
-void TurboAssembler::SarPair(Register dst_low, Register dst_high,
+void MacroAssembler::SarPair(Register dst_low, Register dst_high,
Register src_low, Register src_high, int32_t shift,
Register scratch1, Register scratch2) {
DCHECK_GE(63, shift);
@@ -2589,7 +2588,7 @@ void TurboAssembler::SarPair(Register dst_low, Register dst_high,
}
#endif
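
All the pair shifts split on whether the amount crosses the 32-bit halfword boundary: below 32, the result mixes bits from both halves; at 32 and above, one half is the other half shifted by amount minus 32 and the vacated half is zero (or the sign fill, for SarPair). The register-amount variants make that split with a runtime branch on bit 5 of the shift. For ShlPair the immediate-amount logic is, conceptually:

  // 64-bit left shift over a 32-bit register pair (0 < s < 64):
  void ShlPairSketch(uint32_t lo, uint32_t hi, int s,
                     uint32_t* out_lo, uint32_t* out_hi) {
    if (s < 32) {
      *out_hi = (hi << s) | (lo >> (32 - s));
      *out_lo = lo << s;
    } else {
      *out_hi = lo << (s - 32);
      *out_lo = 0;
    }
  }
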
-void TurboAssembler::ExtractBits(Register rt, Register rs, uint16_t pos,
+void MacroAssembler::ExtractBits(Register rt, Register rs, uint16_t pos,
uint16_t size, bool sign_extend) {
#if V8_TARGET_ARCH_RISCV64
DCHECK(pos < 64 && 0 < size && size <= 64 && 0 < pos + size &&
@@ -2615,7 +2614,7 @@ void TurboAssembler::ExtractBits(Register rt, Register rs, uint16_t pos,
#endif
}
-void TurboAssembler::InsertBits(Register dest, Register source, Register pos,
+void MacroAssembler::InsertBits(Register dest, Register source, Register pos,
int size) {
#if V8_TARGET_ARCH_RISCV64
DCHECK_LT(size, 64);
@@ -2641,42 +2640,42 @@ void TurboAssembler::InsertBits(Register dest, Register source, Register pos,
or_(dest, dest, source_);
}
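
ExtractBits isolates a size-wide field at bit pos by shifting it to the top of the register and back down, so choosing srai versus srli is all it takes to sign- or zero-extend the result; InsertBits masks the field out of dest and ORs in the shifted source. The extraction in C:

  // Field extraction via shift-to-top/shift-down (XLEN = 64 shown):
  int64_t ExtractBitsSketch(uint64_t rs, unsigned pos, unsigned size,
                            bool sign_extend) {
    uint64_t up = rs << (64 - pos - size);  // field now at the MSBs
    return sign_extend ? (int64_t)up >> (64 - size)     // srai: sign-extend
                       : (int64_t)(up >> (64 - size));  // srli: zero-extend
  }
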
-void TurboAssembler::Neg_s(FPURegister fd, FPURegister fs) { fneg_s(fd, fs); }
+void MacroAssembler::Neg_s(FPURegister fd, FPURegister fs) { fneg_s(fd, fs); }
-void TurboAssembler::Neg_d(FPURegister fd, FPURegister fs) { fneg_d(fd, fs); }
+void MacroAssembler::Neg_d(FPURegister fd, FPURegister fs) { fneg_d(fd, fs); }
-void TurboAssembler::Cvt_d_uw(FPURegister fd, Register rs) {
+void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs) {
// Convert rs to a FP value in fd.
fcvt_d_wu(fd, rs);
}
-void TurboAssembler::Cvt_d_w(FPURegister fd, Register rs) {
+void MacroAssembler::Cvt_d_w(FPURegister fd, Register rs) {
// Convert rs to a FP value in fd.
fcvt_d_w(fd, rs);
}
#if V8_TARGET_ARCH_RISCV64
-void TurboAssembler::Cvt_d_ul(FPURegister fd, Register rs) {
+void MacroAssembler::Cvt_d_ul(FPURegister fd, Register rs) {
// Convert rs to a FP value in fd.
fcvt_d_lu(fd, rs);
}
#endif
-void TurboAssembler::Cvt_s_uw(FPURegister fd, Register rs) {
+void MacroAssembler::Cvt_s_uw(FPURegister fd, Register rs) {
// Convert rs to a FP value in fd.
fcvt_s_wu(fd, rs);
}
-void TurboAssembler::Cvt_s_w(FPURegister fd, Register rs) {
+void MacroAssembler::Cvt_s_w(FPURegister fd, Register rs) {
// Convert rs to a FP value in fd.
fcvt_s_w(fd, rs);
}
#if V8_TARGET_ARCH_RISCV64
-void TurboAssembler::Cvt_s_ul(FPURegister fd, Register rs) {
+void MacroAssembler::Cvt_s_ul(FPURegister fd, Register rs) {
// Convert rs to a FP value in fd.
fcvt_s_lu(fd, rs);
}
#endif
template <typename CvtFunc>
-void TurboAssembler::RoundFloatingPointToInteger(Register rd, FPURegister fs,
+void MacroAssembler::RoundFloatingPointToInteger(Register rd, FPURegister fs,
Register result,
CvtFunc fcvt_generator) {
// Save csr_fflags to scratch & clear exception flags
@@ -2705,7 +2704,7 @@ void TurboAssembler::RoundFloatingPointToInteger(Register rd, FPURegister fs,
}
}
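
RoundFloatingPointToInteger brackets a single fcvt with fflags bookkeeping: it saves and clears csr_fflags, invokes the conversion through the fcvt_generator callback (whose parameter type is what changes in this diff, TurboAssembler* to MacroAssembler*), then reads the flags back so the optional result register reports whether the conversion raised invalid-operation, i.e. whether the input was NaN or out of range. As a commented outline (simplified; the exact register juggling is omitted):

  // scratch = csrr fflags; csrw fflags, 0    -- save and clear exception flags
  // fcvt_?_?(rd, fs, <RTZ|RNE|RUP|RDN>)      -- via fcvt_generator
  // flags = csrr fflags; csrw fflags, scratch
  // result = (flags & kInvalidOperation) ? 0 : 1
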
-void TurboAssembler::Clear_if_nan_d(Register rd, FPURegister fs) {
+void MacroAssembler::Clear_if_nan_d(Register rd, FPURegister fs) {
Label no_nan;
feq_d(kScratchReg, fs, fs);
bnez(kScratchReg, &no_nan);
@@ -2713,7 +2712,7 @@ void TurboAssembler::Clear_if_nan_d(Register rd, FPURegister fs) {
bind(&no_nan);
}
-void TurboAssembler::Clear_if_nan_s(Register rd, FPURegister fs) {
+void MacroAssembler::Clear_if_nan_s(Register rd, FPURegister fs) {
Label no_nan;
feq_s(kScratchReg, fs, fs);
bnez(kScratchReg, &no_nan);
@@ -2721,101 +2720,101 @@ void TurboAssembler::Clear_if_nan_s(Register rd, FPURegister fs) {
bind(&no_nan);
}
-void TurboAssembler::Trunc_uw_d(Register rd, FPURegister fs, Register result) {
+void MacroAssembler::Trunc_uw_d(Register rd, FPURegister fs, Register result) {
RoundFloatingPointToInteger(
- rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) {
- tasm->fcvt_wu_d(dst, src, RTZ);
+ rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
+ masm->fcvt_wu_d(dst, src, RTZ);
});
}
-void TurboAssembler::Trunc_w_d(Register rd, FPURegister fs, Register result) {
+void MacroAssembler::Trunc_w_d(Register rd, FPURegister fs, Register result) {
RoundFloatingPointToInteger(
- rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) {
- tasm->fcvt_w_d(dst, src, RTZ);
+ rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
+ masm->fcvt_w_d(dst, src, RTZ);
});
}
-void TurboAssembler::Trunc_uw_s(Register rd, FPURegister fs, Register result) {
+void MacroAssembler::Trunc_uw_s(Register rd, FPURegister fs, Register result) {
RoundFloatingPointToInteger(
- rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) {
- tasm->fcvt_wu_s(dst, src, RTZ);
+ rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
+ masm->fcvt_wu_s(dst, src, RTZ);
});
}
-void TurboAssembler::Trunc_w_s(Register rd, FPURegister fs, Register result) {
+void MacroAssembler::Trunc_w_s(Register rd, FPURegister fs, Register result) {
RoundFloatingPointToInteger(
- rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) {
- tasm->fcvt_w_s(dst, src, RTZ);
+ rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
+ masm->fcvt_w_s(dst, src, RTZ);
});
}
#if V8_TARGET_ARCH_RISCV64
-void TurboAssembler::Trunc_ul_d(Register rd, FPURegister fs, Register result) {
+void MacroAssembler::Trunc_ul_d(Register rd, FPURegister fs, Register result) {
RoundFloatingPointToInteger(
- rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) {
- tasm->fcvt_lu_d(dst, src, RTZ);
+ rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
+ masm->fcvt_lu_d(dst, src, RTZ);
});
}
-void TurboAssembler::Trunc_l_d(Register rd, FPURegister fs, Register result) {
+void MacroAssembler::Trunc_l_d(Register rd, FPURegister fs, Register result) {
RoundFloatingPointToInteger(
- rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) {
- tasm->fcvt_l_d(dst, src, RTZ);
+ rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
+ masm->fcvt_l_d(dst, src, RTZ);
});
}
-void TurboAssembler::Trunc_ul_s(Register rd, FPURegister fs, Register result) {
+void MacroAssembler::Trunc_ul_s(Register rd, FPURegister fs, Register result) {
RoundFloatingPointToInteger(
- rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) {
- tasm->fcvt_lu_s(dst, src, RTZ);
+ rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
+ masm->fcvt_lu_s(dst, src, RTZ);
});
}
-void TurboAssembler::Trunc_l_s(Register rd, FPURegister fs, Register result) {
+void MacroAssembler::Trunc_l_s(Register rd, FPURegister fs, Register result) {
RoundFloatingPointToInteger(
- rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) {
- tasm->fcvt_l_s(dst, src, RTZ);
+ rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
+ masm->fcvt_l_s(dst, src, RTZ);
});
}
#endif
-void TurboAssembler::Round_w_s(Register rd, FPURegister fs, Register result) {
+void MacroAssembler::Round_w_s(Register rd, FPURegister fs, Register result) {
RoundFloatingPointToInteger(
- rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) {
- tasm->fcvt_w_s(dst, src, RNE);
+ rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
+ masm->fcvt_w_s(dst, src, RNE);
});
}
-void TurboAssembler::Round_w_d(Register rd, FPURegister fs, Register result) {
+void MacroAssembler::Round_w_d(Register rd, FPURegister fs, Register result) {
RoundFloatingPointToInteger(
- rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) {
- tasm->fcvt_w_d(dst, src, RNE);
+ rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
+ masm->fcvt_w_d(dst, src, RNE);
});
}
-void TurboAssembler::Ceil_w_s(Register rd, FPURegister fs, Register result) {
+void MacroAssembler::Ceil_w_s(Register rd, FPURegister fs, Register result) {
RoundFloatingPointToInteger(
- rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) {
- tasm->fcvt_w_s(dst, src, RUP);
+ rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
+ masm->fcvt_w_s(dst, src, RUP);
});
}
-void TurboAssembler::Ceil_w_d(Register rd, FPURegister fs, Register result) {
+void MacroAssembler::Ceil_w_d(Register rd, FPURegister fs, Register result) {
RoundFloatingPointToInteger(
- rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) {
- tasm->fcvt_w_d(dst, src, RUP);
+ rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
+ masm->fcvt_w_d(dst, src, RUP);
});
}
-void TurboAssembler::Floor_w_s(Register rd, FPURegister fs, Register result) {
+void MacroAssembler::Floor_w_s(Register rd, FPURegister fs, Register result) {
RoundFloatingPointToInteger(
- rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) {
- tasm->fcvt_w_s(dst, src, RDN);
+ rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
+ masm->fcvt_w_s(dst, src, RDN);
});
}
-void TurboAssembler::Floor_w_d(Register rd, FPURegister fs, Register result) {
+void MacroAssembler::Floor_w_d(Register rd, FPURegister fs, Register result) {
RoundFloatingPointToInteger(
- rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) {
- tasm->fcvt_w_d(dst, src, RDN);
+ rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
+ masm->fcvt_w_d(dst, src, RDN);
});
}
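The scalar helpers above differ only in the static rounding mode handed to fcvt: RTZ truncates toward zero, RDN rounds toward -infinity, RUP toward +infinity, and RNE rounds to nearest with ties to even. A minimal host-side model of those modes (an illustrative sketch, not part of this patch; it ignores the out-of-range/NaN handling done via the result register):

  #include <cmath>
  #include <cstdint>

  // Assumes the default FE_TONEAREST environment, which matches RNE.
  int32_t trunc_w_s(float f) { return static_cast<int32_t>(std::trunc(f)); }  // RTZ
  int32_t floor_w_s(float f) { return static_cast<int32_t>(std::floor(f)); }  // RDN
  int32_t ceil_w_s(float f)  { return static_cast<int32_t>(std::ceil(f)); }   // RUP
  // RNE: ties go to even, e.g. 2.5 -> 2 and 3.5 -> 4, unlike std::round.
  int32_t round_w_s(float f) { return static_cast<int32_t>(std::nearbyint(f)); }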
@@ -2826,7 +2825,7 @@ void TurboAssembler::Floor_w_d(Register rd, FPURegister fs, Register result) {
// handling is needed for NaN, +/-Infinity, +/-0
#if V8_TARGET_ARCH_RISCV64
template <typename F>
-void TurboAssembler::RoundHelper(FPURegister dst, FPURegister src,
+void MacroAssembler::RoundHelper(FPURegister dst, FPURegister src,
FPURegister fpu_scratch, FPURoundingMode frm) {
BlockTrampolinePoolScope block_trampoline_pool(this);
UseScratchRegisterScope temps(this);
@@ -2945,7 +2944,7 @@ void TurboAssembler::RoundHelper(FPURegister dst, FPURegister src,
// rounded result; this differs from the behavior of RISCV fcvt instructions
// (which round out-of-range values to the nearest max or min value), therefore
// special handling is needed for NaN, +/-Infinity, +/-0
-void TurboAssembler::RoundFloat(FPURegister dst, FPURegister src,
+void MacroAssembler::RoundFloat(FPURegister dst, FPURegister src,
FPURegister fpu_scratch, FPURoundingMode frm) {
BlockTrampolinePoolScope block_trampoline_pool(this);
UseScratchRegisterScope temps(this);
@@ -3038,8 +3037,9 @@ void TurboAssembler::RoundFloat(FPURegister dst, FPURegister src,
// round out-of-range values to the nearest max or min value), therefore special
// handling is needed for NaN, +/-Infinity, +/-0
template <typename F>
-void TurboAssembler::RoundHelper(VRegister dst, VRegister src, Register scratch,
- VRegister v_scratch, FPURoundingMode frm) {
+void MacroAssembler::RoundHelper(VRegister dst, VRegister src, Register scratch,
+ VRegister v_scratch, FPURoundingMode frm,
+ bool keep_nan_same) {
VU.set(scratch, std::is_same<F, float>::value ? E32 : E64, m1);
// if src is NaN/+-Infinity/+-Zero or if the exponent is larger than # of bits
// in mantissa, the result is the same as src, so move src to dest (to avoid
@@ -3065,14 +3065,13 @@ void TurboAssembler::RoundHelper(VRegister dst, VRegister src, Register scratch,
// } else {
// srli(rt, rt, 64 - size);
// }
-
+ vmv_vx(v_scratch, zero_reg);
li(scratch, 64 - kFloatMantissaBits - kFloatExponentBits);
vsll_vx(v_scratch, src, scratch);
li(scratch, 64 - kFloatExponentBits);
vsrl_vx(v_scratch, v_scratch, scratch);
li(scratch, kFloatExponentBias + kFloatMantissaBits);
vmslt_vx(v0, v_scratch, scratch);
-
VU.set(frm);
vmv_vv(dst, src);
if (dst == src) {
@@ -3090,71 +3089,85 @@ void TurboAssembler::RoundHelper(VRegister dst, VRegister src, Register scratch,
} else {
vfsgnj_vv(dst, dst, src);
}
+ if (!keep_nan_same) {
+ vmfeq_vv(v0, src, src);
+ vnot_vv(v0, v0);
+ if (std::is_same<F, float>::value) {
+ fmv_w_x(kScratchDoubleReg, zero_reg);
+ } else {
+#ifdef V8_TARGET_ARCH_RISCV64
+ fmv_d_x(kScratchDoubleReg, zero_reg);
+#else
+ UNIMPLEMENTED();
+#endif
+ }
+ vfadd_vf(dst, src, kScratchDoubleReg, MaskType::Mask);
+ }
}
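The new keep_nan_same tail canonicalizes NaN lanes: vmfeq_vv(v0, src, src) is true exactly on non-NaN lanes, vnot_vv flips that into a NaN mask, and the masked vfadd_vf of +0.0 rewrites only those lanes, since IEEE arithmetic on a signaling NaN produces the quiet canonical NaN. A scalar sketch of the per-lane effect (my reading of the masked ops, not code from this patch):

  #include <cmath>

  double canonicalize_lane(double src, double rounded) {
    // Masked path: NaN lanes get src + 0.0, which quiets a signaling NaN.
    return std::isnan(src) ? src + 0.0 : rounded;
  }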
-void TurboAssembler::Ceil_f(VRegister vdst, VRegister vsrc, Register scratch,
+void MacroAssembler::Ceil_f(VRegister vdst, VRegister vsrc, Register scratch,
VRegister v_scratch) {
- RoundHelper<float>(vdst, vsrc, scratch, v_scratch, RUP);
+ RoundHelper<float>(vdst, vsrc, scratch, v_scratch, RUP, false);
}
-void TurboAssembler::Ceil_d(VRegister vdst, VRegister vsrc, Register scratch,
+void MacroAssembler::Ceil_d(VRegister vdst, VRegister vsrc, Register scratch,
VRegister v_scratch) {
- RoundHelper<double>(vdst, vsrc, scratch, v_scratch, RUP);
+ RoundHelper<double>(vdst, vsrc, scratch, v_scratch, RUP, false);
}
-void TurboAssembler::Floor_f(VRegister vdst, VRegister vsrc, Register scratch,
+void MacroAssembler::Floor_f(VRegister vdst, VRegister vsrc, Register scratch,
VRegister v_scratch) {
- RoundHelper<float>(vdst, vsrc, scratch, v_scratch, RDN);
+ RoundHelper<float>(vdst, vsrc, scratch, v_scratch, RDN, false);
}
-void TurboAssembler::Floor_d(VRegister vdst, VRegister vsrc, Register scratch,
+void MacroAssembler::Floor_d(VRegister vdst, VRegister vsrc, Register scratch,
VRegister v_scratch) {
- RoundHelper<double>(vdst, vsrc, scratch, v_scratch, RDN);
+ RoundHelper<double>(vdst, vsrc, scratch, v_scratch, RDN, false);
}
-void TurboAssembler::Trunc_d(VRegister vdst, VRegister vsrc, Register scratch,
+void MacroAssembler::Trunc_d(VRegister vdst, VRegister vsrc, Register scratch,
VRegister v_scratch) {
- RoundHelper<double>(vdst, vsrc, scratch, v_scratch, RTZ);
+ RoundHelper<double>(vdst, vsrc, scratch, v_scratch, RTZ, false);
}
-void TurboAssembler::Trunc_f(VRegister vdst, VRegister vsrc, Register scratch,
+void MacroAssembler::Trunc_f(VRegister vdst, VRegister vsrc, Register scratch,
VRegister v_scratch) {
- RoundHelper<float>(vdst, vsrc, scratch, v_scratch, RTZ);
+ RoundHelper<float>(vdst, vsrc, scratch, v_scratch, RTZ, false);
}
-void TurboAssembler::Round_f(VRegister vdst, VRegister vsrc, Register scratch,
+void MacroAssembler::Round_f(VRegister vdst, VRegister vsrc, Register scratch,
VRegister v_scratch) {
- RoundHelper<float>(vdst, vsrc, scratch, v_scratch, RNE);
+ RoundHelper<float>(vdst, vsrc, scratch, v_scratch, RNE, false);
}
-void TurboAssembler::Round_d(VRegister vdst, VRegister vsrc, Register scratch,
+void MacroAssembler::Round_d(VRegister vdst, VRegister vsrc, Register scratch,
VRegister v_scratch) {
- RoundHelper<double>(vdst, vsrc, scratch, v_scratch, RNE);
+ RoundHelper<double>(vdst, vsrc, scratch, v_scratch, RNE, false);
}
#if V8_TARGET_ARCH_RISCV64
-void TurboAssembler::Floor_d_d(FPURegister dst, FPURegister src,
+void MacroAssembler::Floor_d_d(FPURegister dst, FPURegister src,
FPURegister fpu_scratch) {
RoundHelper<double>(dst, src, fpu_scratch, RDN);
}
-void TurboAssembler::Ceil_d_d(FPURegister dst, FPURegister src,
+void MacroAssembler::Ceil_d_d(FPURegister dst, FPURegister src,
FPURegister fpu_scratch) {
RoundHelper<double>(dst, src, fpu_scratch, RUP);
}
-void TurboAssembler::Trunc_d_d(FPURegister dst, FPURegister src,
+void MacroAssembler::Trunc_d_d(FPURegister dst, FPURegister src,
FPURegister fpu_scratch) {
RoundHelper<double>(dst, src, fpu_scratch, RTZ);
}
-void TurboAssembler::Round_d_d(FPURegister dst, FPURegister src,
+void MacroAssembler::Round_d_d(FPURegister dst, FPURegister src,
FPURegister fpu_scratch) {
RoundHelper<double>(dst, src, fpu_scratch, RNE);
}
#endif
-void TurboAssembler::Floor_s_s(FPURegister dst, FPURegister src,
+void MacroAssembler::Floor_s_s(FPURegister dst, FPURegister src,
FPURegister fpu_scratch) {
#if V8_TARGET_ARCH_RISCV64
RoundHelper<float>(dst, src, fpu_scratch, RDN);
@@ -3163,7 +3176,7 @@ void TurboAssembler::Floor_s_s(FPURegister dst, FPURegister src,
#endif
}
-void TurboAssembler::Ceil_s_s(FPURegister dst, FPURegister src,
+void MacroAssembler::Ceil_s_s(FPURegister dst, FPURegister src,
FPURegister fpu_scratch) {
#if V8_TARGET_ARCH_RISCV64
RoundHelper<float>(dst, src, fpu_scratch, RUP);
@@ -3172,7 +3185,7 @@ void TurboAssembler::Ceil_s_s(FPURegister dst, FPURegister src,
#endif
}
-void TurboAssembler::Trunc_s_s(FPURegister dst, FPURegister src,
+void MacroAssembler::Trunc_s_s(FPURegister dst, FPURegister src,
FPURegister fpu_scratch) {
#if V8_TARGET_ARCH_RISCV64
RoundHelper<float>(dst, src, fpu_scratch, RTZ);
@@ -3181,7 +3194,7 @@ void TurboAssembler::Trunc_s_s(FPURegister dst, FPURegister src,
#endif
}
-void TurboAssembler::Round_s_s(FPURegister dst, FPURegister src,
+void MacroAssembler::Round_s_s(FPURegister dst, FPURegister src,
FPURegister fpu_scratch) {
#if V8_TARGET_ARCH_RISCV64
RoundHelper<float>(dst, src, fpu_scratch, RNE);
@@ -3210,7 +3223,7 @@ void MacroAssembler::Msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
fmsub_d(fd, fs, ft, fr);
}
-void TurboAssembler::CompareF32(Register rd, FPUCondition cc, FPURegister cmp1,
+void MacroAssembler::CompareF32(Register rd, FPUCondition cc, FPURegister cmp1,
FPURegister cmp2) {
switch (cc) {
case EQ:
@@ -3237,7 +3250,7 @@ void TurboAssembler::CompareF32(Register rd, FPUCondition cc, FPURegister cmp1,
}
}
-void TurboAssembler::CompareF64(Register rd, FPUCondition cc, FPURegister cmp1,
+void MacroAssembler::CompareF64(Register rd, FPUCondition cc, FPURegister cmp1,
FPURegister cmp2) {
switch (cc) {
case EQ:
@@ -3264,7 +3277,7 @@ void TurboAssembler::CompareF64(Register rd, FPUCondition cc, FPURegister cmp1,
}
}
-void TurboAssembler::CompareIsNotNanF32(Register rd, FPURegister cmp1,
+void MacroAssembler::CompareIsNotNanF32(Register rd, FPURegister cmp1,
FPURegister cmp2) {
UseScratchRegisterScope temps(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -3275,7 +3288,7 @@ void TurboAssembler::CompareIsNotNanF32(Register rd, FPURegister cmp1,
And(rd, rd, scratch); // rd <- !isNan(cmp1) && !isNan(cmp2)
}
-void TurboAssembler::CompareIsNotNanF64(Register rd, FPURegister cmp1,
+void MacroAssembler::CompareIsNotNanF64(Register rd, FPURegister cmp1,
FPURegister cmp2) {
UseScratchRegisterScope temps(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -3286,27 +3299,27 @@ void TurboAssembler::CompareIsNotNanF64(Register rd, FPURegister cmp1,
And(rd, rd, scratch); // rd <- !isNan(cmp1) && !isNan(cmp2)
}
-void TurboAssembler::CompareIsNanF32(Register rd, FPURegister cmp1,
+void MacroAssembler::CompareIsNanF32(Register rd, FPURegister cmp1,
FPURegister cmp2) {
CompareIsNotNanF32(rd, cmp1, cmp2); // rd <- !isNan(cmp1) && !isNan(cmp2)
Xor(rd, rd, 1); // rd <- isNan(cmp1) || isNan(cmp2)
}
-void TurboAssembler::CompareIsNanF64(Register rd, FPURegister cmp1,
+void MacroAssembler::CompareIsNanF64(Register rd, FPURegister cmp1,
FPURegister cmp2) {
CompareIsNotNanF64(rd, cmp1, cmp2); // rd <- !isNan(cmp1) && !isNan(cmp2)
Xor(rd, rd, 1); // rd <- isNan(cmp1) || isNan(cmp2)
}
-void TurboAssembler::BranchTrueShortF(Register rs, Label* target) {
+void MacroAssembler::BranchTrueShortF(Register rs, Label* target) {
Branch(target, not_equal, rs, Operand(zero_reg));
}
-void TurboAssembler::BranchFalseShortF(Register rs, Label* target) {
+void MacroAssembler::BranchFalseShortF(Register rs, Label* target) {
Branch(target, equal, rs, Operand(zero_reg));
}
-void TurboAssembler::BranchTrueF(Register rs, Label* target) {
+void MacroAssembler::BranchTrueF(Register rs, Label* target) {
bool long_branch =
target->is_bound() ? !is_near(target) : is_trampoline_emitted();
if (long_branch) {
@@ -3319,7 +3332,7 @@ void TurboAssembler::BranchTrueF(Register rs, Label* target) {
}
}
-void TurboAssembler::BranchFalseF(Register rs, Label* target) {
+void MacroAssembler::BranchFalseF(Register rs, Label* target) {
bool long_branch =
target->is_bound() ? !is_near(target) : is_trampoline_emitted();
if (long_branch) {
@@ -3332,7 +3345,7 @@ void TurboAssembler::BranchFalseF(Register rs, Label* target) {
}
}
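Both float-branch helpers share one pattern: if the target is bound but out of short-branch range, or a trampoline pool has already been emitted, they invert the test and branch over a long jump. Assumed shape of the elided long_branch body for BranchTrueF (sketch):

  // beqz  rs, skip      ; inverted short test skips the far jump
  // <BranchLong target> ; PC-relative auipc + jalr pair
  // skip: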
-void TurboAssembler::InsertHighWordF64(FPURegister dst, Register src_high) {
+void MacroAssembler::InsertHighWordF64(FPURegister dst, Register src_high) {
#if V8_TARGET_ARCH_RISCV64
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -3357,7 +3370,7 @@ void TurboAssembler::InsertHighWordF64(FPURegister dst, Register src_high) {
#endif
}
-void TurboAssembler::InsertLowWordF64(FPURegister dst, Register src_low) {
+void MacroAssembler::InsertLowWordF64(FPURegister dst, Register src_low) {
#if V8_TARGET_ARCH_RISCV64
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -3382,7 +3395,7 @@ void TurboAssembler::InsertLowWordF64(FPURegister dst, Register src_low) {
#endif
}
-void TurboAssembler::LoadFPRImmediate(FPURegister dst, uint32_t src) {
+void MacroAssembler::LoadFPRImmediate(FPURegister dst, uint32_t src) {
ASM_CODE_COMMENT(this);
// Handle special values first.
if (src == base::bit_cast<uint32_t>(0.0f) && has_single_zero_reg_set_) {
@@ -3408,7 +3421,7 @@ void TurboAssembler::LoadFPRImmediate(FPURegister dst, uint32_t src) {
}
}
-void TurboAssembler::LoadFPRImmediate(FPURegister dst, uint64_t src) {
+void MacroAssembler::LoadFPRImmediate(FPURegister dst, uint64_t src) {
ASM_CODE_COMMENT(this);
// Handle special values first.
if (src == base::bit_cast<uint64_t>(0.0) && has_double_zero_reg_set_) {
@@ -3459,7 +3472,7 @@ void TurboAssembler::LoadFPRImmediate(FPURegister dst, uint64_t src) {
}
}
-void TurboAssembler::CompareI(Register rd, Register rs, const Operand& rt,
+void MacroAssembler::CompareI(Register rd, Register rs, const Operand& rt,
Condition cond) {
switch (cond) {
case eq:
@@ -3504,7 +3517,7 @@ void TurboAssembler::CompareI(Register rd, Register rs, const Operand& rt,
}
// dest <- (condition != 0 ? 0 : dest)
-void TurboAssembler::LoadZeroIfConditionNotZero(Register dest,
+void MacroAssembler::LoadZeroIfConditionNotZero(Register dest,
Register condition) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -3515,7 +3528,7 @@ void TurboAssembler::LoadZeroIfConditionNotZero(Register dest,
}
// dest <- (condition == 0 ? 0 : dest)
-void TurboAssembler::LoadZeroIfConditionZero(Register dest,
+void MacroAssembler::LoadZeroIfConditionZero(Register dest,
Register condition) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -3525,7 +3538,7 @@ void TurboAssembler::LoadZeroIfConditionZero(Register dest,
and_(dest, dest, scratch);
}
-void TurboAssembler::Clz32(Register rd, Register xx) {
+void MacroAssembler::Clz32(Register rd, Register xx) {
// 32 bit unsigned in lower word: count number of leading zeros.
// int n = 32;
// unsigned y;
@@ -3602,7 +3615,7 @@ void TurboAssembler::Clz32(Register rd, Register xx) {
}
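The elided Clz32 body follows the comment's binary-search scheme from Hacker's Delight: halve the search window, shifting the live bits down whenever the upper half is non-zero. Full C reference (illustrative; the emitted code keeps n and x in scratch registers):

  #include <cstdint>

  int nlz32(uint32_t x) {
    int n = 32;
    uint32_t y;
    y = x >> 16; if (y != 0) { n -= 16; x = y; }
    y = x >> 8;  if (y != 0) { n -= 8;  x = y; }
    y = x >> 4;  if (y != 0) { n -= 4;  x = y; }
    y = x >> 2;  if (y != 0) { n -= 2;  x = y; }
    y = x >> 1;  if (y != 0) return n - 2;
    return n - static_cast<int>(x);
  }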
#if V8_TARGET_ARCH_RISCV64
-void TurboAssembler::Clz64(Register rd, Register xx) {
+void MacroAssembler::Clz64(Register rd, Register xx) {
// 64 bit: count number of leading zeros.
// int n = 64;
// unsigned y;
@@ -3656,7 +3669,7 @@ void TurboAssembler::Clz64(Register rd, Register xx) {
bind(&L5);
}
#endif
-void TurboAssembler::Ctz32(Register rd, Register rs) {
+void MacroAssembler::Ctz32(Register rd, Register rs) {
// Convert trailing zeroes to trailing ones, and bits to their left
// to zeroes.
@@ -3680,7 +3693,7 @@ void TurboAssembler::Ctz32(Register rd, Register rs) {
}
}
#if V8_TARGET_ARCH_RISCV64
-void TurboAssembler::Ctz64(Register rd, Register rs) {
+void MacroAssembler::Ctz64(Register rd, Register rs) {
// Convert trailing zeroes to trailing ones, and bits to their left
// to zeroes.
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -3703,7 +3716,7 @@ void TurboAssembler::Ctz64(Register rd, Register rs) {
}
}
#endif
-void TurboAssembler::Popcnt32(Register rd, Register rs, Register scratch) {
+void MacroAssembler::Popcnt32(Register rd, Register rs, Register scratch) {
DCHECK_NE(scratch, rs);
DCHECK_NE(scratch, rd);
// https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
@@ -3754,7 +3767,7 @@ void TurboAssembler::Popcnt32(Register rd, Register rs, Register scratch) {
}
#if V8_TARGET_ARCH_RISCV64
-void TurboAssembler::Popcnt64(Register rd, Register rs, Register scratch) {
+void MacroAssembler::Popcnt64(Register rd, Register rs, Register scratch) {
DCHECK_NE(scratch, rs);
DCHECK_NE(scratch, rd);
// uint64_t B0 = 0x5555555555555555l; // (T)~(T)0/3
@@ -3790,7 +3803,7 @@ void TurboAssembler::Popcnt64(Register rd, Register rs, Register scratch) {
srli(rd, rd, 32 + shift);
}
#endif
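Popcnt32/Popcnt64 implement the SWAR bit count from the linked bithacks page: fold pair sums, then nibble sums, then byte sums, and finally add all bytes with one multiply. C reference for the 32-bit variant (the 64-bit one widens the masks, as the B0 comment above indicates):

  #include <cstdint>

  uint32_t popcount32(uint32_t v) {
    v = v - ((v >> 1) & 0x55555555u);                  // 2-bit pair sums
    v = (v & 0x33333333u) + ((v >> 2) & 0x33333333u);  // 4-bit sums
    v = (v + (v >> 4)) & 0x0F0F0F0Fu;                  // byte sums
    return (v * 0x01010101u) >> 24;                    // add the four bytes
  }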
-void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
+void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
DoubleRegister double_input,
Label* done) {
UseScratchRegisterScope temps(this);
@@ -3801,7 +3814,7 @@ void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
Branch(done, eq, scratch, Operand(1));
}
-void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
+void MacroAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
Register result,
DoubleRegister double_input,
StubCallMode stub_mode) {
@@ -3837,19 +3850,19 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
DCHECK((cond == cc_always && rs == zero_reg && rt.rm() == zero_reg) || \
(cond != cc_always && (rs != zero_reg || rt.rm() != zero_reg)))
-void TurboAssembler::Branch(int32_t offset) {
+void MacroAssembler::Branch(int32_t offset) {
DCHECK(is_int21(offset));
BranchShort(offset);
}
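The is_int21 guard mirrors the JAL encoding: a 21-bit signed, 2-byte-aligned immediate gives a PC-relative reach of roughly +/-1 MiB; farther targets must take the trampoline or long-branch path. Sketch of the predicate, assuming the usual is_intn definition:

  #include <cstdint>

  static inline bool fits_int21(int64_t v) {
    return v >= -(int64_t{1} << 20) && v < (int64_t{1} << 20);
  }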
-void TurboAssembler::Branch(int32_t offset, Condition cond, Register rs,
+void MacroAssembler::Branch(int32_t offset, Condition cond, Register rs,
const Operand& rt, Label::Distance near_jump) {
bool is_near = BranchShortCheck(offset, nullptr, cond, rs, rt);
DCHECK(is_near);
USE(is_near);
}
-void TurboAssembler::Branch(Label* L) {
+void MacroAssembler::Branch(Label* L) {
if (L->is_bound()) {
if (is_near(L)) {
BranchShort(L);
@@ -3865,7 +3878,7 @@ void TurboAssembler::Branch(Label* L) {
}
}
-void TurboAssembler::Branch(Label* L, Condition cond, Register rs,
+void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
const Operand& rt, Label::Distance near_jump) {
if (L->is_bound()) {
if (!BranchShortCheck(0, L, cond, rs, rt)) {
@@ -3898,7 +3911,7 @@ void TurboAssembler::Branch(Label* L, Condition cond, Register rs,
}
}
-void TurboAssembler::Branch(Label* L, Condition cond, Register rs,
+void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
RootIndex index) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -3906,20 +3919,20 @@ void TurboAssembler::Branch(Label* L, Condition cond, Register rs,
Branch(L, cond, rs, Operand(scratch));
}
-void TurboAssembler::BranchShortHelper(int32_t offset, Label* L) {
+void MacroAssembler::BranchShortHelper(int32_t offset, Label* L) {
DCHECK(L == nullptr || offset == 0);
offset = GetOffset(offset, L, OffsetSize::kOffset21);
j(offset);
}
-void TurboAssembler::BranchShort(int32_t offset) {
+void MacroAssembler::BranchShort(int32_t offset) {
DCHECK(is_int21(offset));
BranchShortHelper(offset, nullptr);
}
-void TurboAssembler::BranchShort(Label* L) { BranchShortHelper(0, L); }
+void MacroAssembler::BranchShort(Label* L) { BranchShortHelper(0, L); }
-int32_t TurboAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
+int32_t MacroAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
if (L) {
offset = branch_offset_helper(L, bits);
} else {
@@ -3928,7 +3941,7 @@ int32_t TurboAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
return offset;
}
-Register TurboAssembler::GetRtAsRegisterHelper(const Operand& rt,
+Register MacroAssembler::GetRtAsRegisterHelper(const Operand& rt,
Register scratch) {
Register r2 = no_reg;
if (rt.is_reg()) {
@@ -3941,14 +3954,14 @@ Register TurboAssembler::GetRtAsRegisterHelper(const Operand& rt,
return r2;
}
-bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset,
+bool MacroAssembler::CalculateOffset(Label* L, int32_t* offset,
OffsetSize bits) {
if (!is_near(L, bits)) return false;
*offset = GetOffset(*offset, L, bits);
return true;
}
-bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
+bool MacroAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
Register* scratch, const Operand& rt) {
if (!is_near(L, bits)) return false;
*scratch = GetRtAsRegisterHelper(rt, *scratch);
@@ -3956,7 +3969,7 @@ bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
return true;
}
-bool TurboAssembler::BranchShortHelper(int32_t offset, Label* L, Condition cond,
+bool MacroAssembler::BranchShortHelper(int32_t offset, Label* L, Condition cond,
Register rs, const Operand& rt) {
DCHECK(L == nullptr || offset == 0);
UseScratchRegisterScope temps(this);
@@ -4084,7 +4097,7 @@ bool TurboAssembler::BranchShortHelper(int32_t offset, Label* L, Condition cond,
return true;
}
-bool TurboAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
+bool MacroAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
Register rs, const Operand& rt) {
BRANCH_ARGS_CHECK(cond, rs, rt);
@@ -4097,28 +4110,28 @@ bool TurboAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
}
}
-void TurboAssembler::BranchShort(int32_t offset, Condition cond, Register rs,
+void MacroAssembler::BranchShort(int32_t offset, Condition cond, Register rs,
const Operand& rt) {
BranchShortCheck(offset, nullptr, cond, rs, rt);
}
-void TurboAssembler::BranchShort(Label* L, Condition cond, Register rs,
+void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
const Operand& rt) {
BranchShortCheck(0, L, cond, rs, rt);
}
-void TurboAssembler::BranchAndLink(int32_t offset) {
+void MacroAssembler::BranchAndLink(int32_t offset) {
BranchAndLinkShort(offset);
}
-void TurboAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs,
+void MacroAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs,
const Operand& rt) {
bool is_near = BranchAndLinkShortCheck(offset, nullptr, cond, rs, rt);
DCHECK(is_near);
USE(is_near);
}
-void TurboAssembler::BranchAndLink(Label* L) {
+void MacroAssembler::BranchAndLink(Label* L) {
if (L->is_bound()) {
if (is_near(L)) {
BranchAndLinkShort(L);
@@ -4134,7 +4147,7 @@ void TurboAssembler::BranchAndLink(Label* L) {
}
}
-void TurboAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
+void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
const Operand& rt) {
if (L->is_bound()) {
if (!BranchAndLinkShortCheck(0, L, cond, rs, rt)) {
@@ -4157,25 +4170,25 @@ void TurboAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
}
}
-void TurboAssembler::BranchAndLinkShortHelper(int32_t offset, Label* L) {
+void MacroAssembler::BranchAndLinkShortHelper(int32_t offset, Label* L) {
DCHECK(L == nullptr || offset == 0);
offset = GetOffset(offset, L, OffsetSize::kOffset21);
jal(offset);
}
-void TurboAssembler::BranchAndLinkShort(int32_t offset) {
+void MacroAssembler::BranchAndLinkShort(int32_t offset) {
DCHECK(is_int21(offset));
BranchAndLinkShortHelper(offset, nullptr);
}
-void TurboAssembler::BranchAndLinkShort(Label* L) {
+void MacroAssembler::BranchAndLinkShort(Label* L) {
BranchAndLinkShortHelper(0, L);
}
// RISC-V has no conditional jump-and-link instruction, so a conditional
// BranchAndLink is synthesized below: branch over an unconditional jal with
// the negated condition, materializing compare results via slt where needed.
-bool TurboAssembler::BranchAndLinkShortHelper(int32_t offset, Label* L,
+bool MacroAssembler::BranchAndLinkShortHelper(int32_t offset, Label* L,
Condition cond, Register rs,
const Operand& rt) {
DCHECK(L == nullptr || offset == 0);
@@ -4198,7 +4211,7 @@ bool TurboAssembler::BranchAndLinkShortHelper(int32_t offset, Label* L,
return true;
}
-bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
+bool MacroAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
Condition cond, Register rs,
const Operand& rt) {
BRANCH_ARGS_CHECK(cond, rs, rt);
@@ -4212,20 +4225,20 @@ bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
}
}
-void TurboAssembler::LoadFromConstantsTable(Register destination,
+void MacroAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
- LoadTaggedPointerField(
- destination, FieldMemOperand(destination, FixedArray::OffsetOfElementAt(
- constant_index)));
+ LoadTaggedField(destination,
+ FieldMemOperand(destination, FixedArray::OffsetOfElementAt(
+ constant_index)));
}
-void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
+void MacroAssembler::LoadRootRelative(Register destination, int32_t offset) {
LoadWord(destination, MemOperand(kRootRegister, offset));
}
-void TurboAssembler::LoadRootRegisterOffset(Register destination,
+void MacroAssembler::LoadRootRegisterOffset(Register destination,
intptr_t offset) {
if (offset == 0) {
Move(destination, kRootRegister);
@@ -4234,7 +4247,7 @@ void TurboAssembler::LoadRootRegisterOffset(Register destination,
}
}
-void TurboAssembler::Jump(Register target, Condition cond, Register rs,
+void MacroAssembler::Jump(Register target, Condition cond, Register rs,
const Operand& rt) {
BlockTrampolinePoolScope block_trampoline_pool(this);
if (cond == cc_always) {
@@ -4247,7 +4260,7 @@ void TurboAssembler::Jump(Register target, Condition cond, Register rs,
}
}
-void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
+void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
Condition cond, Register rs, const Operand& rt) {
Label skip;
if (cond != cc_always) {
@@ -4262,13 +4275,13 @@ void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
}
}
-void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
+void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
Register rs, const Operand& rt) {
DCHECK(!RelocInfo::IsCodeTarget(rmode));
Jump(static_cast<intptr_t>(target), rmode, cond, rs, rt);
}
-void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
+void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond, Register rs, const Operand& rt) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
DCHECK_IMPLIES(options().isolate_independent_code,
@@ -4296,18 +4309,15 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
} else {
Jump(code.address(), rmode, cond);
}
-
- int32_t target_index = AddCodeTarget(code);
- Jump(static_cast<intptr_t>(target_index), rmode, cond, rs, rt);
}
-void TurboAssembler::Jump(const ExternalReference& reference) {
+void MacroAssembler::Jump(const ExternalReference& reference) {
li(t6, reference);
Jump(t6);
}
// Note: To call gcc-compiled C code on riscv64, you must call through t6.
-void TurboAssembler::Call(Register target, Condition cond, Register rs,
+void MacroAssembler::Call(Register target, Condition cond, Register rs,
const Operand& rt) {
BlockTrampolinePoolScope block_trampoline_pool(this);
if (cond == cc_always) {
@@ -4334,13 +4344,13 @@ void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
}
}
-void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
+void MacroAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
Register rs, const Operand& rt) {
li(t6, Operand(static_cast<intptr_t>(target), rmode), ADDRESS_LOAD);
Call(t6, cond, rs, rt);
}
-void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
+void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond, Register rs, const Operand& rt) {
BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(RelocInfo::IsCodeTarget(rmode));
@@ -4356,7 +4366,6 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
}
DCHECK(RelocInfo::IsCodeTarget(rmode));
- DCHECK(code->IsExecutable());
if (CanUseNearCallOrJump(rmode)) {
EmbeddedObjectIndex index = AddEmbeddedObject(code);
@@ -4370,12 +4379,9 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
} else {
Call(code.address(), rmode);
}
-
- // int32_t target_index = AddCodeTarget(code);
- // Call(static_cast<Address>(target_index), rmode, cond, rs, rt);
}
-void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin) {
+void MacroAssembler::LoadEntryFromBuiltinIndex(Register builtin) {
#if V8_TARGET_ARCH_RISCV64
static_assert(kSystemPointerSize == 8);
#elif V8_TARGET_ARCH_RISCV32
@@ -4391,12 +4397,12 @@ void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin) {
MemOperand(builtin, IsolateData::builtin_entry_table_offset()));
}
-void TurboAssembler::CallBuiltinByIndex(Register builtin) {
+void MacroAssembler::CallBuiltinByIndex(Register builtin) {
LoadEntryFromBuiltinIndex(builtin);
Call(builtin);
}
-void TurboAssembler::CallBuiltin(Builtin builtin) {
+void MacroAssembler::CallBuiltin(Builtin builtin) {
ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin));
switch (options().builtin_call_jump_mode) {
case BuiltinCallJumpMode::kAbsolute: {
@@ -4405,7 +4411,7 @@ void TurboAssembler::CallBuiltin(Builtin builtin) {
break;
}
case BuiltinCallJumpMode::kPCRelative:
- Call(BuiltinEntry(builtin), RelocInfo::NEAR_BUILTIN_ENTRY);
+ near_call(static_cast<int>(builtin), RelocInfo::NEAR_BUILTIN_ENTRY);
break;
case BuiltinCallJumpMode::kIndirect: {
LoadEntryFromBuiltin(builtin, t6);
@@ -4429,7 +4435,7 @@ void TurboAssembler::CallBuiltin(Builtin builtin) {
}
}
-void TurboAssembler::TailCallBuiltin(Builtin builtin) {
+void MacroAssembler::TailCallBuiltin(Builtin builtin) {
ASM_CODE_COMMENT_STRING(this,
CommentForOffHeapTrampoline("tail call", builtin));
switch (options().builtin_call_jump_mode) {
@@ -4439,7 +4445,7 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin) {
break;
}
case BuiltinCallJumpMode::kPCRelative:
- Jump(BuiltinEntry(builtin), RelocInfo::NEAR_BUILTIN_ENTRY);
+ near_jump(static_cast<int>(builtin), RelocInfo::NEAR_BUILTIN_ENTRY);
break;
case BuiltinCallJumpMode::kIndirect: {
LoadEntryFromBuiltin(builtin, t6);
@@ -4448,7 +4454,7 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin) {
}
case BuiltinCallJumpMode::kForMksnapshot: {
if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
- Handle<CodeT> code = isolate()->builtins()->code_handle(builtin);
+ Handle<Code> code = isolate()->builtins()->code_handle(builtin);
EmbeddedObjectIndex index = AddEmbeddedObject(code);
DCHECK(is_int32(index));
RecordRelocInfo(RelocInfo::RELATIVE_CODE_TARGET,
@@ -4463,18 +4469,18 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin) {
}
}
-void TurboAssembler::LoadEntryFromBuiltin(Builtin builtin,
+void MacroAssembler::LoadEntryFromBuiltin(Builtin builtin,
Register destination) {
LoadWord(destination, EntryFromBuiltinAsOperand(builtin));
}
-MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
+MemOperand MacroAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
DCHECK(root_array_available());
return MemOperand(kRootRegister,
IsolateData::BuiltinEntrySlotOffset(builtin));
}
-void TurboAssembler::PatchAndJump(Address target) {
+void MacroAssembler::PatchAndJump(Address target) {
BlockTrampolinePoolScope block_trampoline_pool(this);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -4491,13 +4497,13 @@ void TurboAssembler::PatchAndJump(Address target) {
pc_ += sizeof(uintptr_t);
}
-void TurboAssembler::StoreReturnAddressAndCall(Register target) {
+void MacroAssembler::StoreReturnAddressAndCall(Register target) {
// This generates the final instruction sequence for calls to C functions
// once an exit frame has been constructed.
//
- // Note that this assumes the caller code (i.e. the Code object currently
- // being generated) is immovable or that the callee function cannot trigger
- // GC, since the callee function will return to it.
+ // Note that this assumes the caller code (i.e. the InstructionStream object
+ // currently being generated) is immovable or that the callee function cannot
+ // trigger GC, since the callee function will return to it.
//
// Compute the return address in ra to return to after the jump below. The
// pc is already at '+ 8' from the current instruction; but return is after
@@ -4529,14 +4535,14 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
DCHECK_EQ(kNumInstructionsToJump, InstructionsGeneratedSince(&find_ra));
}
-void TurboAssembler::Ret(Condition cond, Register rs, const Operand& rt) {
+void MacroAssembler::Ret(Condition cond, Register rs, const Operand& rt) {
Jump(ra, cond, rs, rt);
if (cond == al) {
ForceConstantPoolEmissionWithoutJump();
}
}
-void TurboAssembler::BranchLong(Label* L) {
+void MacroAssembler::BranchLong(Label* L) {
// Generate position independent long branch.
BlockTrampolinePoolScope block_trampoline_pool(this);
int32_t imm;
@@ -4545,7 +4551,7 @@ void TurboAssembler::BranchLong(Label* L) {
EmitConstPoolWithJumpIfNeeded();
}
-void TurboAssembler::BranchAndLinkLong(Label* L) {
+void MacroAssembler::BranchAndLinkLong(Label* L) {
// Generate position independent long branch and link.
BlockTrampolinePoolScope block_trampoline_pool(this);
int32_t imm;
@@ -4553,12 +4559,12 @@ void TurboAssembler::BranchAndLinkLong(Label* L) {
GenPCRelativeJumpAndLink(t6, imm);
}
-void TurboAssembler::DropAndRet(int drop) {
+void MacroAssembler::DropAndRet(int drop) {
AddWord(sp, sp, drop * kSystemPointerSize);
Ret();
}
-void TurboAssembler::DropAndRet(int drop, Condition cond, Register r1,
+void MacroAssembler::DropAndRet(int drop, Condition cond, Register r1,
const Operand& r2) {
// Both Drop and Ret need to be conditional.
Label skip;
@@ -4574,7 +4580,7 @@ void TurboAssembler::DropAndRet(int drop, Condition cond, Register r1,
}
}
-void TurboAssembler::Drop(int count, Condition cond, Register reg,
+void MacroAssembler::Drop(int count, Condition cond, Register reg,
const Operand& op) {
if (count <= 0) {
return;
@@ -4605,9 +4611,9 @@ void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) {
}
}
-void TurboAssembler::Call(Label* target) { BranchAndLink(target); }
+void MacroAssembler::Call(Label* target) { BranchAndLink(target); }
-void TurboAssembler::LoadAddress(Register dst, Label* target,
+void MacroAssembler::LoadAddress(Register dst, Label* target,
RelocInfo::Mode rmode) {
int32_t offset;
if (CalculateOffset(target, &offset, OffsetSize::kOffset32)) {
@@ -4623,14 +4629,14 @@ void TurboAssembler::LoadAddress(Register dst, Label* target,
}
}
-void TurboAssembler::Push(Smi smi) {
+void MacroAssembler::Push(Smi smi) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
li(scratch, Operand(smi));
push(scratch);
}
-void TurboAssembler::PushArray(Register array, Register size,
+void MacroAssembler::PushArray(Register array, Register size,
PushArrayOrder order) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -4659,7 +4665,7 @@ void TurboAssembler::PushArray(Register array, Register size,
}
}
-void TurboAssembler::Push(Handle<HeapObject> handle) {
+void MacroAssembler::Push(Handle<HeapObject> handle) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
li(scratch, Operand(handle));
@@ -4702,7 +4708,7 @@ void MacroAssembler::PopStackHandler() {
StoreWord(a1, MemOperand(scratch));
}
-void TurboAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
+void MacroAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
const DoubleRegister src) {
// Subtracting 0.0 preserves all inputs except for signalling NaNs, which
// become quiet NaNs. We use fsub rather than fadd because fsub preserves -0.0
@@ -4713,19 +4719,19 @@ void TurboAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
fsub_d(dst, src, kDoubleRegZero);
}
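The fsub trick rests on two IEEE-754 facts: any arithmetic on a signaling NaN yields a quiet NaN, and x - (+0.0) returns x unchanged for every other input, including -0.0, whereas x + (+0.0) would turn -0.0 into +0.0 under round-to-nearest. A host-side check (sketch):

  #include <cmath>
  #include <cstdio>

  int main() {
    double nz = -0.0;
    std::printf("%d\n", std::signbit(nz - 0.0));  // 1: fsub keeps the sign
    std::printf("%d\n", std::signbit(nz + 0.0));  // 0: fadd would lose it
  }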
-void TurboAssembler::MovFromFloatResult(const DoubleRegister dst) {
+void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
Move(dst, fa0); // Reg fa0 is FP return value.
}
-void TurboAssembler::MovFromFloatParameter(const DoubleRegister dst) {
+void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
Move(dst, fa0); // Reg fa0 is FP first argument value.
}
-void TurboAssembler::MovToFloatParameter(DoubleRegister src) { Move(fa0, src); }
+void MacroAssembler::MovToFloatParameter(DoubleRegister src) { Move(fa0, src); }
-void TurboAssembler::MovToFloatResult(DoubleRegister src) { Move(fa0, src); }
+void MacroAssembler::MovToFloatResult(DoubleRegister src) { Move(fa0, src); }
-void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
+void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
DoubleRegister src2) {
const DoubleRegister fparg2 = fa1;
if (src2 == fa0) {
@@ -4748,10 +4754,10 @@ void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
kind == StackLimitKind::kRealStackLimit
? ExternalReference::address_of_real_jslimit(isolate)
: ExternalReference::address_of_jslimit(isolate);
- DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
+ DCHECK(MacroAssembler::IsAddressableThroughRootRegister(isolate, limit));
intptr_t offset =
- TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+ MacroAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
CHECK(is_int32(offset));
LoadWord(destination,
MemOperand(kRootRegister, static_cast<int32_t>(offset)));
@@ -4917,8 +4923,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// allow recompilation to take effect without changing any of the
// call sites.
Register code = kJavaScriptCallCodeStartRegister;
- LoadTaggedPointerField(code,
- FieldMemOperand(function, JSFunction::kCodeOffset));
+ LoadTaggedField(code, FieldMemOperand(function, JSFunction::kCodeOffset));
switch (type) {
case InvokeType::kCall:
CallCodeObject(code);
@@ -4945,11 +4950,10 @@ void MacroAssembler::InvokeFunctionWithNewTarget(
{
UseScratchRegisterScope temps(this);
Register temp_reg = temps.Acquire();
- LoadTaggedPointerField(
+ LoadTaggedField(
temp_reg,
FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- LoadTaggedPointerField(
- cp, FieldMemOperand(function, JSFunction::kContextOffset));
+ LoadTaggedField(cp, FieldMemOperand(function, JSFunction::kContextOffset));
// The argument count is stored as uint16_t
Lhu(expected_parameter_count,
FieldMemOperand(temp_reg,
@@ -4970,7 +4974,7 @@ void MacroAssembler::InvokeFunction(Register function,
DCHECK_EQ(function, a1);
// Get the function and setup the context.
- LoadTaggedPointerField(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ LoadTaggedField(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
InvokeFunctionCode(a1, no_reg, expected_parameter_count,
actual_parameter_count, type);
@@ -4993,7 +4997,7 @@ void MacroAssembler::GetInstanceTypeRange(Register map, Register type_reg,
}
//------------------------------------------------------------------------------
// Wasm
-void TurboAssembler::WasmRvvEq(VRegister dst, VRegister lhs, VRegister rhs,
+void MacroAssembler::WasmRvvEq(VRegister dst, VRegister lhs, VRegister rhs,
VSew sew, Vlmul lmul) {
VU.set(kScratchReg, sew, lmul);
vmseq_vv(v0, lhs, rhs);
@@ -5002,7 +5006,7 @@ void TurboAssembler::WasmRvvEq(VRegister dst, VRegister lhs, VRegister rhs,
vmerge_vx(dst, kScratchReg, dst);
}
-void TurboAssembler::WasmRvvNe(VRegister dst, VRegister lhs, VRegister rhs,
+void MacroAssembler::WasmRvvNe(VRegister dst, VRegister lhs, VRegister rhs,
VSew sew, Vlmul lmul) {
VU.set(kScratchReg, sew, lmul);
vmsne_vv(v0, lhs, rhs);
@@ -5011,7 +5015,7 @@ void TurboAssembler::WasmRvvNe(VRegister dst, VRegister lhs, VRegister rhs,
vmerge_vx(dst, kScratchReg, dst);
}
-void TurboAssembler::WasmRvvGeS(VRegister dst, VRegister lhs, VRegister rhs,
+void MacroAssembler::WasmRvvGeS(VRegister dst, VRegister lhs, VRegister rhs,
VSew sew, Vlmul lmul) {
VU.set(kScratchReg, sew, lmul);
vmsle_vv(v0, rhs, lhs);
@@ -5020,7 +5024,7 @@ void TurboAssembler::WasmRvvGeS(VRegister dst, VRegister lhs, VRegister rhs,
vmerge_vx(dst, kScratchReg, dst);
}
-void TurboAssembler::WasmRvvGeU(VRegister dst, VRegister lhs, VRegister rhs,
+void MacroAssembler::WasmRvvGeU(VRegister dst, VRegister lhs, VRegister rhs,
VSew sew, Vlmul lmul) {
VU.set(kScratchReg, sew, lmul);
vmsleu_vv(v0, rhs, lhs);
@@ -5029,7 +5033,7 @@ void TurboAssembler::WasmRvvGeU(VRegister dst, VRegister lhs, VRegister rhs,
vmerge_vx(dst, kScratchReg, dst);
}
-void TurboAssembler::WasmRvvGtS(VRegister dst, VRegister lhs, VRegister rhs,
+void MacroAssembler::WasmRvvGtS(VRegister dst, VRegister lhs, VRegister rhs,
VSew sew, Vlmul lmul) {
VU.set(kScratchReg, sew, lmul);
vmslt_vv(v0, rhs, lhs);
@@ -5038,7 +5042,7 @@ void TurboAssembler::WasmRvvGtS(VRegister dst, VRegister lhs, VRegister rhs,
vmerge_vx(dst, kScratchReg, dst);
}
-void TurboAssembler::WasmRvvGtU(VRegister dst, VRegister lhs, VRegister rhs,
+void MacroAssembler::WasmRvvGtU(VRegister dst, VRegister lhs, VRegister rhs,
VSew sew, Vlmul lmul) {
VU.set(kScratchReg, sew, lmul);
vmsltu_vv(v0, rhs, lhs);
@@ -5047,20 +5051,18 @@ void TurboAssembler::WasmRvvGtU(VRegister dst, VRegister lhs, VRegister rhs,
vmerge_vx(dst, kScratchReg, dst);
}
-void TurboAssembler::WasmRvvS128const(VRegister dst, const uint8_t imms[16]) {
- uint64_t imm1 = *(reinterpret_cast<const uint64_t*>(imms));
- uint64_t imm2 = *((reinterpret_cast<const uint64_t*>(imms)) + 1);
- VU.set(kScratchReg, VSew::E64, Vlmul::m1);
- li(kScratchReg, 1);
- vmv_vx(v0, kScratchReg);
- li(kScratchReg, imm1);
- vmerge_vx(dst, kScratchReg, dst);
- li(kScratchReg, imm2);
- vsll_vi(v0, v0, 1);
- vmerge_vx(dst, kScratchReg, dst);
+void MacroAssembler::WasmRvvS128const(VRegister dst, const uint8_t imms[16]) {
+ uint64_t vals[2];
+ memcpy(vals, imms, sizeof(vals));
+ VU.set(kScratchReg, E64, m1);
+ li(kScratchReg, vals[1]);
+ vmv_sx(kSimd128ScratchReg, kScratchReg);
+ vslideup_vi(dst, kSimd128ScratchReg, 1);
+ li(kScratchReg, vals[0]);
+ vmv_sx(dst, kScratchReg);
}
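Two fixes in one here: dereferencing imms through reinterpret_cast<const uint64_t*> was undefined behavior (strict aliasing plus possible misalignment), and the old mask/vmerge dance becomes a cheaper insert, where vmv_sx writes element 0 and vslideup_vi places the upper 64-bit half at index 1. The punning fix in isolation (sketch):

  #include <cstdint>
  #include <cstring>

  // memcpy is defined for any alignment and aliasing; compilers lower it
  // to a plain load.
  static inline uint64_t load_u64(const uint8_t* p) {
    uint64_t v;
    std::memcpy(&v, p, sizeof(v));
    return v;
  }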
-void TurboAssembler::LoadLane(int ts, VRegister dst, uint8_t laneidx,
+void MacroAssembler::LoadLane(int ts, VRegister dst, uint8_t laneidx,
MemOperand src) {
if (ts == 8) {
Lbu(kScratchReg2, src);
@@ -5092,7 +5094,7 @@ void TurboAssembler::LoadLane(int ts, VRegister dst, uint8_t laneidx,
}
}
-void TurboAssembler::StoreLane(int sz, VRegister src, uint8_t laneidx,
+void MacroAssembler::StoreLane(int sz, VRegister src, uint8_t laneidx,
MemOperand dst) {
if (sz == 8) {
VU.set(kScratchReg, E8, m1);
@@ -5120,7 +5122,7 @@ void TurboAssembler::StoreLane(int sz, VRegister src, uint8_t laneidx,
// -----------------------------------------------------------------------------
// Runtime calls.
#if V8_TARGET_ARCH_RISCV64
-void TurboAssembler::AddOverflow64(Register dst, Register left,
+void MacroAssembler::AddOverflow64(Register dst, Register left,
const Operand& right, Register overflow) {
UseScratchRegisterScope temps(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -5150,7 +5152,7 @@ void TurboAssembler::AddOverflow64(Register dst, Register left,
}
}
-void TurboAssembler::SubOverflow64(Register dst, Register left,
+void MacroAssembler::SubOverflow64(Register dst, Register left,
const Operand& right, Register overflow) {
UseScratchRegisterScope temps(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -5182,7 +5184,7 @@ void TurboAssembler::SubOverflow64(Register dst, Register left,
}
}
-void TurboAssembler::MulOverflow32(Register dst, Register left,
+void MacroAssembler::MulOverflow32(Register dst, Register left,
const Operand& right, Register overflow) {
ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
@@ -5208,7 +5210,7 @@ void TurboAssembler::MulOverflow32(Register dst, Register left,
xor_(overflow, overflow, dst);
}
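The visible tail of MulOverflow32 shows the standard trick: form the full product, sign-extend its low 32 bits into dst, and XOR the two; overflow is non-zero exactly when the product does not fit in 32 bits. Equivalent C (sketch):

  #include <cstdint>

  bool mul32_overflows(int32_t a, int32_t b, int32_t* out) {
    int64_t full = static_cast<int64_t>(a) * b;  // mul on the wide regs
    int32_t low = static_cast<int32_t>(full);    // sext.w of the product
    *out = low;
    return full != static_cast<int64_t>(low);    // non-zero xor <=> overflow
  }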
-void TurboAssembler::MulOverflow64(Register dst, Register left,
+void MacroAssembler::MulOverflow64(Register dst, Register left,
const Operand& right, Register overflow) {
ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
@@ -5239,7 +5241,7 @@ void TurboAssembler::MulOverflow64(Register dst, Register left,
}
#elif V8_TARGET_ARCH_RISCV32
-void TurboAssembler::AddOverflow(Register dst, Register left,
+void MacroAssembler::AddOverflow(Register dst, Register left,
const Operand& right, Register overflow) {
UseScratchRegisterScope temps(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -5269,7 +5271,7 @@ void TurboAssembler::AddOverflow(Register dst, Register left,
}
}
-void TurboAssembler::SubOverflow(Register dst, Register left,
+void MacroAssembler::SubOverflow(Register dst, Register left,
const Operand& right, Register overflow) {
UseScratchRegisterScope temps(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -5301,7 +5303,7 @@ void TurboAssembler::SubOverflow(Register dst, Register left,
}
}
-void TurboAssembler::MulOverflow32(Register dst, Register left,
+void MacroAssembler::MulOverflow32(Register dst, Register left,
const Operand& right, Register overflow) {
ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
@@ -5326,8 +5328,8 @@ void TurboAssembler::MulOverflow32(Register dst, Register left,
}
#endif
-void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
- SaveFPRegsMode save_doubles) {
+void MacroAssembler::CallRuntime(const Runtime::Function* f,
+ int num_arguments) {
ASM_CODE_COMMENT(this);
// All parameters are on the stack. a0 has the return value after call.
@@ -5342,8 +5344,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
// smarter.
PrepareCEntryArgs(num_arguments);
PrepareCEntryFunction(ExternalReference::Create(f));
- Handle<Code> code =
- CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
+ Handle<Code> code = CodeFactory::CEntry(isolate(), f->result_size);
Call(code, RelocInfo::CODE_TARGET);
}
@@ -5361,27 +5362,11 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame) {
ASM_CODE_COMMENT(this);
PrepareCEntryFunction(builtin);
- Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
- ArgvMode::kStack, builtin_exit_frame);
+ Handle<Code> code =
+ CodeFactory::CEntry(isolate(), 1, ArgvMode::kStack, builtin_exit_frame);
Jump(code, RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg));
}
-void MacroAssembler::JumpToOffHeapInstructionStream(Address entry) {
- // Ld a Address from a constant pool.
- // Record a value into constant pool.
- ASM_CODE_COMMENT(this);
- if (!v8_flags.riscv_constant_pool) {
- li(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
- } else {
- RecordEntry(entry, RelocInfo::OFF_HEAP_TARGET);
- RecordRelocInfo(RelocInfo::OFF_HEAP_TARGET, entry);
- auipc(kOffHeapTrampolineRegister, 0);
- LoadWord(kOffHeapTrampolineRegister,
- MemOperand(kOffHeapTrampolineRegister, 0));
- }
- Jump(kOffHeapTrampolineRegister);
-}
-
void MacroAssembler::LoadWeakValue(Register out, Register in,
Label* target_if_cleared) {
ASM_CODE_COMMENT(this);
@@ -5424,15 +5409,15 @@ void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value,
// -----------------------------------------------------------------------------
// Debugging.
-void TurboAssembler::Trap() { stop(); }
-void TurboAssembler::DebugBreak() { stop(); }
+void MacroAssembler::Trap() { stop(); }
+void MacroAssembler::DebugBreak() { stop(); }
-void TurboAssembler::Assert(Condition cc, AbortReason reason, Register rs,
+void MacroAssembler::Assert(Condition cc, AbortReason reason, Register rs,
Operand rt) {
if (v8_flags.debug_code) Check(cc, reason, rs, rt);
}
-void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs,
+void MacroAssembler::Check(Condition cc, AbortReason reason, Register rs,
Operand rt) {
Label L;
BranchShort(&L, cc, rs, rt);
@@ -5441,7 +5426,7 @@ void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs,
bind(&L);
}
-void TurboAssembler::Abort(AbortReason reason) {
+void MacroAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
if (v8_flags.code_comments) {
@@ -5498,22 +5483,21 @@ void TurboAssembler::Abort(AbortReason reason) {
}
}
-void TurboAssembler::LoadMap(Register destination, Register object) {
+void MacroAssembler::LoadMap(Register destination, Register object) {
ASM_CODE_COMMENT(this);
- LoadTaggedPointerField(destination,
- FieldMemOperand(object, HeapObject::kMapOffset));
+ LoadTaggedField(destination, FieldMemOperand(object, HeapObject::kMapOffset));
}
void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
ASM_CODE_COMMENT(this);
LoadMap(dst, cp);
- LoadTaggedPointerField(
+ LoadTaggedField(
dst, FieldMemOperand(
dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
- LoadTaggedPointerField(dst, MemOperand(dst, Context::SlotOffset(index)));
+ LoadTaggedField(dst, MemOperand(dst, Context::SlotOffset(index)));
}
-void TurboAssembler::StubPrologue(StackFrame::Type type) {
+void MacroAssembler::StubPrologue(StackFrame::Type type) {
ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -5521,9 +5505,9 @@ void TurboAssembler::StubPrologue(StackFrame::Type type) {
PushCommonFrame(scratch);
}
-void TurboAssembler::Prologue() { PushStandardFrame(a1); }
+void MacroAssembler::Prologue() { PushStandardFrame(a1); }
-void TurboAssembler::EnterFrame(StackFrame::Type type) {
+void MacroAssembler::EnterFrame(StackFrame::Type type) {
ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -5540,14 +5524,14 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
#endif // V8_ENABLE_WEBASSEMBLY
}
-void TurboAssembler::LeaveFrame(StackFrame::Type type) {
+void MacroAssembler::LeaveFrame(StackFrame::Type type) {
ASM_CODE_COMMENT(this);
addi(sp, fp, 2 * kSystemPointerSize);
LoadWord(ra, MemOperand(fp, 1 * kSystemPointerSize));
LoadWord(fp, MemOperand(fp, 0 * kSystemPointerSize));
}
-void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
+void MacroAssembler::EnterExitFrame(int stack_space,
StackFrame::Type frame_type) {
ASM_CODE_COMMENT(this);
DCHECK(frame_type == StackFrame::EXIT ||
@@ -5600,19 +5584,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
}
const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
- if (save_doubles) {
- // The stack is already aligned to 0 modulo 8 for stores with sdc1.
- int space = kNumCallerSavedFPU * kDoubleSize;
- SubWord(sp, sp, Operand(space));
- int count = 0;
- for (int i = 0; i < kNumFPURegisters; i++) {
- if (kCallerSavedFPU.bits() & (1 << i)) {
- FPURegister reg = FPURegister::from_code(i);
- StoreDouble(reg, MemOperand(sp, count * kDoubleSize));
- count++;
- }
- }
- }
// Reserve space for the return address, stack space, and an optional slot
// (used by DirectCEntry to hold the return value if a struct is
@@ -5632,28 +5603,12 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
StoreWord(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
-void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
- bool do_return,
+void MacroAssembler::LeaveExitFrame(Register argument_count, bool do_return,
bool argument_count_is_length) {
ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
BlockTrampolinePoolScope block_trampoline_pool(this);
- // Optionally restore all double registers.
- if (save_doubles) {
- // Remember: we only need to restore kCallerSavedFPU.
- SubWord(scratch, fp,
- Operand(ExitFrameConstants::kFixedFrameSizeFromFp +
- kNumCallerSavedFPU * kDoubleSize));
- int cout = 0;
- for (int i = 0; i < kNumFPURegisters; i++) {
- if (kCalleeSavedFPU.bits() & (1 << i)) {
- FPURegister reg = FPURegister::from_code(i);
- LoadDouble(reg, MemOperand(scratch, cout * kDoubleSize));
- cout++;
- }
- }
- }
// Clear top frame.
li(scratch,
@@ -5692,7 +5647,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
}
}
-int TurboAssembler::ActivationFrameAlignment() {
+int MacroAssembler::ActivationFrameAlignment() {
#if V8_HOST_ARCH_RISCV32 || V8_HOST_ARCH_RISCV64
// Running on the real platform. Use the alignment as mandated by the local
// environment.
@@ -5730,7 +5685,7 @@ void MacroAssembler::AssertStackIsAligned() {
}
}
-void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
+void MacroAssembler::SmiUntag(Register dst, const MemOperand& src) {
ASM_CODE_COMMENT(this);
if (SmiValuesAre32Bits()) {
Lw(dst, MemOperand(src.rm(), SmiWordOffset(src.offset())));
@@ -5745,7 +5700,7 @@ void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
}
}
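When SmiValuesAre32Bits() (64-bit targets without pointer compression) the smi payload occupies the upper 32 bits of the tagged word, so untagging straight from memory is a single Lw of that half, with SmiWordOffset picking the correct word by endianness. Layout sketch (assumes little-endian, as on RISC-V):

  #include <cstdint>

  // Tagged smi: value << 32; the low 32 bits hold the (zero) tag.
  static inline int32_t smi_untag(uint64_t tagged) {
    return static_cast<int32_t>(tagged >> 32);
  }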
-void TurboAssembler::SmiToInt32(Register smi) {
+void MacroAssembler::SmiToInt32(Register smi) {
ASM_CODE_COMMENT(this);
if (v8_flags.enable_slow_asserts) {
AssertSmi(smi);
@@ -5754,7 +5709,7 @@ void TurboAssembler::SmiToInt32(Register smi) {
SmiUntag(smi);
}
-void TurboAssembler::JumpIfSmi(Register value, Label* smi_label) {
+void MacroAssembler::JumpIfSmi(Register value, Label* smi_label) {
ASM_CODE_COMMENT(this);
DCHECK_EQ(0, kSmiTag);
UseScratchRegisterScope temps(this);
@@ -5763,12 +5718,9 @@ void TurboAssembler::JumpIfSmi(Register value, Label* smi_label) {
Branch(smi_label, eq, scratch, Operand(zero_reg));
}
-void MacroAssembler::JumpIfCodeTIsMarkedForDeoptimization(
- Register codet, Register scratch, Label* if_marked_for_deoptimization) {
- LoadTaggedPointerField(
- scratch, FieldMemOperand(codet, Code::kCodeDataContainerOffset));
- Lw(scratch,
- FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
+void MacroAssembler::JumpIfCodeIsMarkedForDeoptimization(
+ Register code, Register scratch, Label* if_marked_for_deoptimization) {
+ Load32U(scratch, FieldMemOperand(code, Code::kKindSpecificFlagsOffset));
And(scratch, scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
Branch(if_marked_for_deoptimization, ne, scratch, Operand(zero_reg));
}
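With CodeDataContainer folded back into Code, the deopt check is now one 32-bit load of Code::kKindSpecificFlagsOffset plus a single-bit test, replacing the old two-load chase through the container. The And/Branch pair reduces to (sketch):

  #include <cstdint>

  bool marked_for_deopt(uint32_t kind_specific_flags, int bit) {
    return (kind_specific_flags & (uint32_t{1} << bit)) != 0;  // ne => deopt
  }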
@@ -5787,7 +5739,7 @@ void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
Branch(not_smi_label, ne, scratch, Operand(zero_reg));
}
-void TurboAssembler::AssertNotSmi(Register object, AbortReason reason) {
+void MacroAssembler::AssertNotSmi(Register object, AbortReason reason) {
if (v8_flags.debug_code) {
ASM_CODE_COMMENT(this);
static_assert(kSmiTag == 0);
@@ -5797,7 +5749,7 @@ void TurboAssembler::AssertNotSmi(Register object, AbortReason reason) {
}
}
-void TurboAssembler::AssertSmi(Register object, AbortReason reason) {
+void MacroAssembler::AssertSmi(Register object, AbortReason reason) {
if (v8_flags.debug_code) {
ASM_CODE_COMMENT(this);
static_assert(kSmiTag == 0);
@@ -5910,7 +5862,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
}
template <typename F_TYPE>
-void TurboAssembler::FloatMinMaxHelper(FPURegister dst, FPURegister src1,
+void MacroAssembler::FloatMinMaxHelper(FPURegister dst, FPURegister src1,
FPURegister src2, MaxMinKind kind) {
DCHECK((std::is_same<F_TYPE, float>::value) ||
(std::is_same<F_TYPE, double>::value));
@@ -5965,25 +5917,25 @@ void TurboAssembler::FloatMinMaxHelper(FPURegister dst, FPURegister src1,
bind(&done);
}
-void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1,
+void MacroAssembler::Float32Max(FPURegister dst, FPURegister src1,
FPURegister src2) {
ASM_CODE_COMMENT(this);
FloatMinMaxHelper<float>(dst, src1, src2, MaxMinKind::kMax);
}
-void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1,
+void MacroAssembler::Float32Min(FPURegister dst, FPURegister src1,
FPURegister src2) {
ASM_CODE_COMMENT(this);
FloatMinMaxHelper<float>(dst, src1, src2, MaxMinKind::kMin);
}
-void TurboAssembler::Float64Max(FPURegister dst, FPURegister src1,
+void MacroAssembler::Float64Max(FPURegister dst, FPURegister src1,
FPURegister src2) {
ASM_CODE_COMMENT(this);
FloatMinMaxHelper<double>(dst, src1, src2, MaxMinKind::kMax);
}
-void TurboAssembler::Float64Min(FPURegister dst, FPURegister src1,
+void MacroAssembler::Float64Min(FPURegister dst, FPURegister src1,
FPURegister src2) {
ASM_CODE_COMMENT(this);
FloatMinMaxHelper<double>(dst, src1, src2, MaxMinKind::kMin);
@@ -5991,7 +5943,7 @@ void TurboAssembler::Float64Min(FPURegister dst, FPURegister src1,
static const int kRegisterPassedArguments = 8;
-int TurboAssembler::CalculateStackPassedDWords(int num_gp_arguments,
+int MacroAssembler::CalculateStackPassedDWords(int num_gp_arguments,
int num_fp_arguments) {
int stack_passed_dwords = 0;
@@ -6007,7 +5959,7 @@ int TurboAssembler::CalculateStackPassedDWords(int num_gp_arguments,
return stack_passed_dwords;
}
-void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
int num_double_arguments,
Register scratch) {
ASM_CODE_COMMENT(this);
@@ -6032,36 +5984,42 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
}
}
-void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
Register scratch) {
PrepareCallCFunction(num_reg_arguments, 0, scratch);
}
-void TurboAssembler::CallCFunction(ExternalReference function,
+void MacroAssembler::CallCFunction(ExternalReference function,
int num_reg_arguments,
- int num_double_arguments) {
+ int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots) {
BlockTrampolinePoolScope block_trampoline_pool(this);
li(t6, function);
- CallCFunctionHelper(t6, num_reg_arguments, num_double_arguments);
+ CallCFunctionHelper(t6, num_reg_arguments, num_double_arguments,
+ set_isolate_data_slots);
}
-void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
- int num_double_arguments) {
- CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
+void MacroAssembler::CallCFunction(Register function, int num_reg_arguments,
+ int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots) {
+ CallCFunctionHelper(function, num_reg_arguments, num_double_arguments,
+ set_isolate_data_slots);
}
-void TurboAssembler::CallCFunction(ExternalReference function,
- int num_arguments) {
- CallCFunction(function, num_arguments, 0);
+void MacroAssembler::CallCFunction(ExternalReference function,
+ int num_arguments,
+ SetIsolateDataSlots set_isolate_data_slots) {
+ CallCFunction(function, num_arguments, 0, set_isolate_data_slots);
}
-void TurboAssembler::CallCFunction(Register function, int num_arguments) {
- CallCFunction(function, num_arguments, 0);
+void MacroAssembler::CallCFunction(Register function, int num_arguments,
+ SetIsolateDataSlots set_isolate_data_slots) {
+ CallCFunction(function, num_arguments, 0, set_isolate_data_slots);
}
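
All four overloads now thread the new SetIsolateDataSlots flag through to CallCFunctionHelper, with the two-argument forms defaulting num_double_arguments to 0. A hypothetical call site (ieee754_pow_function is used here only as a familiar external reference; any C function taking two doubles would look the same):

    // Hypothetical: call pow(x, y) with both doubles already in fa0/fa1.
    __ PrepareCallCFunction(0, 2, scratch);
    __ CallCFunction(ExternalReference::ieee754_pow_function(), 0, 2,
                     SetIsolateDataSlots::kYes);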
-void TurboAssembler::CallCFunctionHelper(Register function,
- int num_reg_arguments,
- int num_double_arguments) {
+void MacroAssembler::CallCFunctionHelper(
+ Register function, int num_reg_arguments, int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots) {
DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
DCHECK(has_frame());
ASM_CODE_COMMENT(this);
@@ -6096,42 +6054,49 @@ void TurboAssembler::CallCFunctionHelper(Register function,
// allow preemption, so the return address in the link register
// stays correct.
{
- if (function != t6) {
- Mv(t6, function);
- function = t6;
- }
-
- // Save the frame pointer and PC so that the stack layout remains
- // iterable, even without an ExitFrame which normally exists between JS
- // and C frames.
- // 't' registers are caller-saved so this is safe as a scratch register.
- Register pc_scratch = t1;
- Register scratch = t2;
+ if (set_isolate_data_slots == SetIsolateDataSlots::kYes) {
+ if (function != t6) {
+ Mv(t6, function);
+ function = t6;
+ }
- auipc(pc_scratch, 0);
- // See x64 code for reasoning about how to address the isolate data fields.
- if (root_array_available()) {
- StoreWord(pc_scratch,
- MemOperand(kRootRegister,
- IsolateData::fast_c_call_caller_pc_offset()));
- StoreWord(fp, MemOperand(kRootRegister,
- IsolateData::fast_c_call_caller_fp_offset()));
- } else {
- DCHECK_NOT_NULL(isolate());
- li(scratch, ExternalReference::fast_c_call_caller_pc_address(isolate()));
- StoreWord(pc_scratch, MemOperand(scratch));
- li(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate()));
- StoreWord(fp, MemOperand(scratch));
+ // Save the frame pointer and PC so that the stack layout remains
+ // iterable, even without an ExitFrame which normally exists between JS
+ // and C frames.
+ // 't' registers are caller-saved, so they are safe to use as scratch registers.
+ Register pc_scratch = t1;
+ Register scratch = t2;
+
+ auipc(pc_scratch, 0);
+ // See x64 code for reasoning about how to address the isolate data
+ // fields.
+ if (root_array_available()) {
+ StoreWord(pc_scratch,
+ MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_pc_offset()));
+ StoreWord(fp, MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_fp_offset()));
+ } else {
+ DCHECK_NOT_NULL(isolate());
+ li(scratch,
+ ExternalReference::fast_c_call_caller_pc_address(isolate()));
+ StoreWord(pc_scratch, MemOperand(scratch));
+ li(scratch,
+ ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ StoreWord(fp, MemOperand(scratch));
+ }
}
Call(function);
-
- if (isolate() != nullptr) {
- // We don't unset the PC; the FP is the source of truth.
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- li(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate()));
- StoreWord(zero_reg, MemOperand(scratch));
+ if (set_isolate_data_slots == SetIsolateDataSlots::kYes) {
+ if (isolate() != nullptr) {
+ // We don't unset the PC; the FP is the source of truth.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch,
+ ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ StoreWord(zero_reg, MemOperand(scratch));
+ }
}
}
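
In pseudo-C++, the protocol the guarded code above implements: with SetIsolateDataSlots::kYes, the caller's pc and fp are published to IsolateData so the profiler and stack iterator can walk across this frame-less C call, and only the fp slot is cleared afterwards. A minimal sketch, assuming plain struct fields in place of the real isolate slots:

    #include <cstdint>

    struct IsolateDataSketch {
      uintptr_t fast_c_call_caller_pc = 0;
      uintptr_t fast_c_call_caller_fp = 0;
    };

    template <typename Fn>
    void FastCCall(IsolateDataSketch& d, uintptr_t pc, uintptr_t fp, Fn target) {
      d.fast_c_call_caller_pc = pc;  // published before the call (auipc above)
      d.fast_c_call_caller_fp = fp;
      target();
      d.fast_c_call_caller_fp = 0;   // FP is the source of truth; PC left stale
    }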
@@ -6147,7 +6112,7 @@ void TurboAssembler::CallCFunctionHelper(Register function,
#undef BRANCH_ARGS_CHECK
-void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
+void MacroAssembler::CheckPageFlag(Register object, Register scratch, int mask,
Condition cc, Label* condition_met) {
And(scratch, object, Operand(~kPageAlignmentMask));
LoadWord(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
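
The two visible instructions mask the object address down to its page start and load the memory chunk's flag word; the branch against mask follows in the elided lines. Roughly, in plain C++ (the two constant parameters stand in for the V8 values named above):

    #include <cstdint>

    bool PageFlagMet(uintptr_t object, uintptr_t page_alignment_mask,
                     uintptr_t flags_offset, uintptr_t mask) {
      uintptr_t page = object & ~page_alignment_mask;  // BasicMemoryChunk start
      uintptr_t flags = *reinterpret_cast<uintptr_t*>(page + flags_offset);
      return (flags & mask) != 0;  // cc chooses eq/ne against zero_reg
    }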
@@ -6170,21 +6135,15 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
UNREACHABLE();
}
-void TurboAssembler::ComputeCodeStartAddress(Register dst) {
- // This push on ra and the pop below together ensure that we restore the
- // register ra, which is needed while computing the code start address.
- push(ra);
-
- auipc(ra, 0);
- addi(ra, ra, kInstrSize * 2); // ra = address of li
- int pc = pc_offset();
- li(dst, Operand(pc));
- SubWord(dst, ra, dst);
-
- pop(ra); // Restore ra
+void MacroAssembler::ComputeCodeStartAddress(Register dst) {
+ auto pc = -pc_offset();
+ auipc(dst, 0);
+ if (pc != 0) {
+ SubWord(dst, dst, -pc);
+ }
}
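
The rewrite drops the ra push/pop: auipc materializes the address of the instruction itself, and the assembler already knows that instruction's offset from the code start (pc_offset()), so one subtraction recovers the code start. Note the negated immediate: pc was captured as -pc_offset(), so the amount to subtract is -pc. A self-checking example of the arithmetic (illustrative):

    #include <cassert>
    #include <cstdint>

    int main() {
      const intptr_t code_start = 0x10000;
      const intptr_t offset = 0x24;        // pc_offset() when auipc is emitted
      intptr_t pc = -offset;               // as captured above
      intptr_t dst = code_start + offset;  // what auipc(dst, 0) produces
      dst -= -pc;                          // SubWord(dst, dst, -pc)
      assert(dst == code_start);
    }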
-void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
+void MacroAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DeoptimizeKind kind, Label* ret,
Label*) {
ASM_CODE_COMMENT(this);
@@ -6197,87 +6156,35 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
: Deoptimizer::kEagerDeoptExitSize);
}
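
The dangling ": Deoptimizer::kEagerDeoptExitSize);" tail of this hunk suggests the elided body ends with the usual size assertion on the emitted deopt exit; the common idiom on other ports looks like this (illustrative, not necessarily the elided code):

    DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
              (kind == DeoptimizeKind::kLazy)
                  ? Deoptimizer::kLazyDeoptExitSize
                  : Deoptimizer::kEagerDeoptExitSize);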
-void TurboAssembler::LoadCodeObjectEntry(Register destination,
- Register code_object) {
- // Code objects are called differently depending on whether we are generating
- // builtin code (which will later be embedded into the binary) or compiling
- // user JS code at runtime.
- // * Builtin code runs in --jitless mode and thus must not call into on-heap
- // Code targets. Instead, we dispatch through the builtins entry table.
- // * Codegen at runtime does not have this restriction and we can use the
- // shorter, branchless instruction sequence. The assumption here is that
- // targets are usually generated code and not builtin Code objects.
+void MacroAssembler::LoadCodeEntry(Register destination, Register code) {
ASM_CODE_COMMENT(this);
- if (options().isolate_independent_code) {
- DCHECK(root_array_available());
- Label if_code_is_off_heap, out;
-
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
-
- DCHECK(!AreAliased(destination, scratch));
- DCHECK(!AreAliased(code_object, scratch));
-
- // Check whether the Code object is an off-heap trampoline. If so, call its
- // (off-heap) entry point directly without going through the (on-heap)
- // trampoline. Otherwise, just call the Code object as always.
-
- Lw(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
- And(scratch, scratch, Operand(Code::IsOffHeapTrampoline::kMask));
- Branch(&if_code_is_off_heap, ne, scratch, Operand(zero_reg));
- // Not an off-heap trampoline object, the entry point is at
- // Code::raw_instruction_start().
- AddWord(destination, code_object, Code::kHeaderSize - kHeapObjectTag);
- Branch(&out);
-
- // An off-heap trampoline, the entry point is loaded from the builtin entry
- // table.
- bind(&if_code_is_off_heap);
- Lw(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
- slli(destination, scratch, kSystemPointerSizeLog2);
- AddWord(destination, destination, kRootRegister);
- LoadWord(
- destination,
- MemOperand(destination, IsolateData::builtin_entry_table_offset()));
-
- bind(&out);
- } else {
- AddWord(destination, code_object, Code::kHeaderSize - kHeapObjectTag);
- }
+ LoadWord(destination, FieldMemOperand(code, Code::kCodeEntryPointOffset));
}
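
The whole branchy off-heap-trampoline dispatch disappears because, at this revision, a Code object carries its entry point as an untagged field (kCodeEntryPointOffset), so one load serves builtins and runtime-generated code alike. Conceptually (kHeapObjectTag is the usual V8 value 1; the function is illustrative):

    #include <cstdint>
    using Address = uintptr_t;

    Address LoadCodeEntrySketch(Address tagged_code, int entry_point_offset) {
      const int kHeapObjectTag = 1;  // FieldMemOperand subtracts the tag
      return *reinterpret_cast<Address*>(tagged_code - kHeapObjectTag +
                                         entry_point_offset);
    }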
-void TurboAssembler::CallCodeObject(Register code_object) {
+void MacroAssembler::CallCodeObject(Register code) {
ASM_CODE_COMMENT(this);
- LoadCodeObjectEntry(code_object, code_object);
- Call(code_object);
+ LoadCodeEntry(code, code);
+ Call(code);
}
-void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
+void MacroAssembler::JumpCodeObject(Register code, JumpMode jump_mode) {
ASM_CODE_COMMENT(this);
DCHECK_EQ(JumpMode::kJump, jump_mode);
- LoadCodeObjectEntry(code_object, code_object);
- Jump(code_object);
-}
-#if V8_TARGET_ARCH_RISCV64
-void TurboAssembler::LoadTaggedPointerField(const Register& destination,
- const MemOperand& field_operand) {
- if (COMPRESS_POINTERS_BOOL) {
- DecompressTaggedPointer(destination, field_operand);
- } else {
- Ld(destination, field_operand);
- }
+ LoadCodeEntry(code, code);
+ Jump(code);
}
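
Both wrappers clobber the incoming register with the entry point before transferring control, and JumpCodeObject additionally DCHECKs that only plain jumps are requested. A hypothetical call site (the register choice and closure field are for illustration only):

    __ LoadTaggedField(a2, FieldMemOperand(closure, JSFunction::kCodeOffset));
    __ CallCodeObject(a2);  // a2 now holds the entry point, not the Code object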
-void TurboAssembler::LoadAnyTaggedField(const Register& destination,
- const MemOperand& field_operand) {
+#if V8_TARGET_ARCH_RISCV64
+void MacroAssembler::LoadTaggedField(const Register& destination,
+ const MemOperand& field_operand) {
if (COMPRESS_POINTERS_BOOL) {
- DecompressAnyTagged(destination, field_operand);
+ DecompressTagged(destination, field_operand);
} else {
Ld(destination, field_operand);
}
}
-void TurboAssembler::LoadTaggedSignedField(const Register& destination,
+void MacroAssembler::LoadTaggedSignedField(const Register& destination,
const MemOperand& field_operand) {
if (COMPRESS_POINTERS_BOOL) {
DecompressTaggedSigned(destination, field_operand);
@@ -6286,11 +6193,11 @@ void TurboAssembler::LoadTaggedSignedField(const Register& destination,
}
}
-void TurboAssembler::SmiUntagField(Register dst, const MemOperand& src) {
+void MacroAssembler::SmiUntagField(Register dst, const MemOperand& src) {
SmiUntag(dst, src);
}
-void TurboAssembler::StoreTaggedField(const Register& value,
+void MacroAssembler::StoreTaggedField(const Register& value,
const MemOperand& dst_field_operand) {
if (COMPRESS_POINTERS_BOOL) {
Sw(value, dst_field_operand);
@@ -6299,7 +6206,7 @@ void TurboAssembler::StoreTaggedField(const Register& value,
}
}
-void TurboAssembler::DecompressTaggedSigned(const Register& destination,
+void MacroAssembler::DecompressTaggedSigned(const Register& destination,
const MemOperand& field_operand) {
ASM_CODE_COMMENT(this);
Lwu(destination, field_operand);
@@ -6310,26 +6217,19 @@ void TurboAssembler::DecompressTaggedSigned(const Register& destination,
}
}
-void TurboAssembler::DecompressTaggedPointer(const Register& destination,
- const MemOperand& field_operand) {
+void MacroAssembler::DecompressTagged(const Register& destination,
+ const MemOperand& field_operand) {
ASM_CODE_COMMENT(this);
Lwu(destination, field_operand);
AddWord(destination, kPtrComprCageBaseRegister, destination);
}
-void TurboAssembler::DecompressTaggedPointer(const Register& destination,
- const Register& source) {
+void MacroAssembler::DecompressTagged(const Register& destination,
+ const Register& source) {
ASM_CODE_COMMENT(this);
And(destination, source, Operand(0xFFFFFFFF));
AddWord(destination, kPtrComprCageBaseRegister, Operand(destination));
}
-
-void TurboAssembler::DecompressAnyTagged(const Register& destination,
- const MemOperand& field_operand) {
- ASM_CODE_COMMENT(this);
- Lwu(destination, field_operand);
- AddWord(destination, kPtrComprCageBaseRegister, destination);
-}
#endif
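
The two DecompressTagged variants absorb what DecompressTaggedPointer and DecompressAnyTagged did separately: zero-extend the 32-bit compressed value (Lwu for the memory form, the 0xFFFFFFFF mask for the register form) and rebase it against the pointer-compression cage. The round trip in plain C++ (illustrative):

    #include <cstdint>

    uint32_t Compress(uintptr_t full) {
      return static_cast<uint32_t>(full);  // low 32 bits are the cage offset
    }

    uintptr_t Decompress(uintptr_t cage_base, uint32_t compressed) {
      return cage_base + compressed;       // Lwu zero-extends, AddWord rebases
    }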
void MacroAssembler::DropArguments(Register count, ArgumentsCountType type,
ArgumentsCountMode mode, Register scratch) {