Diffstat (limited to 'deps/v8/src/codegen/riscv/macro-assembler-riscv.h')
-rw-r--r--  deps/v8/src/codegen/riscv/macro-assembler-riscv.h  261
1 file changed, 140 insertions(+), 121 deletions(-)
diff --git a/deps/v8/src/codegen/riscv/macro-assembler-riscv.h b/deps/v8/src/codegen/riscv/macro-assembler-riscv.h
index 76c6436991..193b10658f 100644
--- a/deps/v8/src/codegen/riscv/macro-assembler-riscv.h
+++ b/deps/v8/src/codegen/riscv/macro-assembler-riscv.h
@@ -90,9 +90,9 @@ inline MemOperand CFunctionArgumentOperand(int index) {
return MemOperand(sp, offset);
}
-class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
+class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
public:
- using TurboAssemblerBase::TurboAssemblerBase;
+ using MacroAssemblerBase::MacroAssemblerBase;
// Activation support.
void EnterFrame(StackFrame::Type type);
@@ -109,7 +109,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void InitializeRootRegister() {
ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
li(kRootRegister, Operand(isolate_root));
-#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+#ifdef V8_COMPRESS_POINTERS
LoadRootRelative(kPtrComprCageBaseRegister,
IsolateData::cage_base_offset());
#endif
@@ -123,7 +123,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Trap();
void DebugBreak();
-
+#ifdef USE_SIMULATOR
+ // See src/codegen/riscv/base-constants-riscv.h DebugParameters.
+ void Debug(uint32_t parameters) { break_(parameters, false); }
+#endif
// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
void Assert(Condition cc, AbortReason reason, Register rs, Operand rt);
@@ -228,6 +231,30 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
auipc(rd, Hi20); // Read PC + Hi20 into scratch.
jalr(rd, Lo12); // jump PC + Hi20 + Lo12
}
+
+ // Generate a B immediate instruction with the corresponding relocation info.
+ // 'offset' is the immediate to encode in the B instruction (so it is the
+ // difference between the target and the PC of the instruction, divided by
+ // the instruction size).
+ void near_jump(int offset, RelocInfo::Mode rmode) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.Acquire();
+ if (!RelocInfo::IsNoInfo(rmode)) RecordRelocInfo(rmode, offset);
+ GenPCRelativeJump(temp, offset);
+ }
+ // Generate a BL immediate instruction with the corresponding relocation info.
+ // As for near_jump, 'offset' is the immediate to encode in the BL
+ // instruction.
+ void near_call(int offset, RelocInfo::Mode rmode) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.Acquire();
+ if (!RelocInfo::IsNoInfo(rmode)) RecordRelocInfo(rmode, offset);
+ GenPCRelativeJumpAndLink(temp, offset);
+ }
+ // Generate a BL immediate instruction with the corresponding relocation info
+ // for the input HeapNumberRequest.
+ void near_call(HeapNumberRequest request) { UNIMPLEMENTED(); }
+
// Jump, Call, and Ret pseudo instructions implementing inter-working.
#define COND_ARGS \
Condition cond = al, Register rs = zero_reg, \
@@ -257,6 +284,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Register dst, Label* target,
RelocInfo::Mode rmode = RelocInfo::INTERNAL_REFERENCE_ENCODED);
+ // Load the code entry point from the Code object.
+ void LoadCodeEntry(Register destination, Register code_object);
+ void CallCodeObject(Register code_object);
+ void JumpCodeObject(Register code_object,
+ JumpMode jump_mode = JumpMode::kJump);
+
// Load the builtin given by the Smi in |builtin| into the same
// register.
void LoadEntryFromBuiltinIndex(Register builtin);
@@ -266,11 +299,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CallBuiltin(Builtin builtin);
void TailCallBuiltin(Builtin builtin);
- void LoadCodeObjectEntry(Register destination, Register code_object);
- void CallCodeObject(Register code_object);
- void JumpCodeObject(Register code_object,
- JumpMode jump_mode = JumpMode::kJump);
-
// Generates an instruction sequence s.t. the return address points to the
// instruction following the call.
// The return address on the stack is used by frame iteration.
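
For reference, a minimal call-site sketch for the LoadCodeObjectEntry -> LoadCodeEntry rename above; the helper name and register choice are illustrative only, not part of the patch, and the usual V8 headers are assumed:

#include "src/codegen/macro-assembler.h"
namespace v8::internal {
// Hypothetical helper: calls through a Code object held in `code_object`.
void CallThroughCodeObject(MacroAssembler* masm, Register code_object) {
  // Before this patch: masm->LoadCodeObjectEntry(t6, code_object);
  masm->LoadCodeEntry(t6, code_object);  // load the entry point from the Code object
  masm->Call(t6);                        // call the loaded entry point
  // Equivalent convenience wrapper declared in the same hunk:
  //   masm->CallCodeObject(code_object);
}
}  // namespace v8::internal
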
@@ -556,12 +584,23 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// garbage collection, since that might move the code and invalidate the
// return address (unless this is somehow accounted for by the called
// function).
- void CallCFunction(ExternalReference function, int num_arguments);
- void CallCFunction(Register function, int num_arguments);
- void CallCFunction(ExternalReference function, int num_reg_arguments,
- int num_double_arguments);
- void CallCFunction(Register function, int num_reg_arguments,
- int num_double_arguments);
+ enum class SetIsolateDataSlots {
+ kNo,
+ kYes,
+ };
+ void CallCFunction(
+ ExternalReference function, int num_arguments,
+ SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes);
+ void CallCFunction(
+ Register function, int num_arguments,
+ SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes);
+ void CallCFunction(
+ ExternalReference function, int num_reg_arguments,
+ int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes);
+ void CallCFunction(
+ Register function, int num_reg_arguments, int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes);
void MovFromFloatResult(DoubleRegister dst);
void MovFromFloatParameter(DoubleRegister dst);
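
A minimal sketch of the new SetIsolateDataSlots parameter on CallCFunction: by default the call records the caller PC/FP in the isolate data slots, and kNo opts out. The external reference and argument counts below are illustrative; argument setup (PrepareCallCFunction etc.) is elided:

#include "src/codegen/external-reference.h"
#include "src/codegen/macro-assembler.h"
namespace v8::internal {
void CallPowSketch(MacroAssembler* masm) {
  // Default behaviour: caller PC/FP are stored into the isolate data slots.
  masm->CallCFunction(ExternalReference::power_double_double_function(),
                      /*num_reg_arguments=*/0, /*num_double_arguments=*/2);
  // Opting out of updating the isolate data slots:
  masm->CallCFunction(ExternalReference::power_double_double_function(),
                      /*num_reg_arguments=*/0, /*num_double_arguments=*/2,
                      MacroAssembler::SetIsolateDataSlots::kNo);
}
}  // namespace v8::internal
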
@@ -1038,14 +1077,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// ---------------------------------------------------------------------------
// Pointer compression Support
- // Loads a field containing a HeapObject and decompresses it if pointer
- // compression is enabled.
- void LoadTaggedPointerField(const Register& destination,
- const MemOperand& field_operand);
-
// Loads a field containing any tagged value and decompresses it if necessary.
- void LoadAnyTaggedField(const Register& destination,
- const MemOperand& field_operand);
+ void LoadTaggedField(const Register& destination,
+ const MemOperand& field_operand);
// Loads a field containing a tagged signed value and decompresses it if
// necessary.
@@ -1061,12 +1095,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void DecompressTaggedSigned(const Register& destination,
const MemOperand& field_operand);
- void DecompressTaggedPointer(const Register& destination,
- const MemOperand& field_operand);
- void DecompressTaggedPointer(const Register& destination,
- const Register& source);
- void DecompressAnyTagged(const Register& destination,
- const MemOperand& field_operand);
+ void DecompressTagged(const Register& destination,
+ const MemOperand& field_operand);
+ void DecompressTagged(const Register& destination, const Register& source);
void CmpTagged(const Register& rd, const Register& rs1, const Register& rs2) {
if (COMPRESS_POINTERS_BOOL) {
Sub32(rd, rs1, rs2);
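
A minimal migration sketch for the consolidation of LoadTaggedPointerField/LoadAnyTaggedField into LoadTaggedField; the field offsets and registers are illustrative only:

#include "src/codegen/macro-assembler.h"
#include "src/objects/js-objects.h"
namespace v8::internal {
void LoadMapAndElementsSketch(MacroAssembler* masm, Register object) {
  // Before: LoadTaggedPointerField for fields statically known to hold a
  // HeapObject, LoadAnyTaggedField for everything else.
  // After: a single LoadTaggedField covers both cases and decompresses when
  // pointer compression is enabled.
  masm->LoadTaggedField(t0, FieldMemOperand(object, HeapObject::kMapOffset));
  masm->LoadTaggedField(t1, FieldMemOperand(object, JSObject::kElementsOffset));
}
}  // namespace v8::internal
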
@@ -1079,12 +1110,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Pointer compression Support
// rv32 doesn't support pointer compression. These functions are defined to
// simplify the builtins.
- inline void LoadTaggedPointerField(const Register& destination,
- const MemOperand& field_operand) {
- Lw(destination, field_operand);
- }
- inline void LoadAnyTaggedField(const Register& destination,
- const MemOperand& field_operand) {
+ inline void LoadTaggedField(const Register& destination,
+ const MemOperand& field_operand) {
Lw(destination, field_operand);
}
inline void LoadTaggedSignedField(const Register& destination,
@@ -1140,71 +1167,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadLane(int sz, VRegister dst, uint8_t laneidx, MemOperand src);
void StoreLane(int sz, VRegister src, uint8_t laneidx, MemOperand dst);
- protected:
- inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
- inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
-
- private:
- bool has_double_zero_reg_set_ = false;
- bool has_single_zero_reg_set_ = false;
-
- // Performs a truncating conversion of a floating point number as used by
- // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
- // succeeds, otherwise falls through if result is saturated. On return
- // 'result' either holds answer, or is clobbered on fall through.
- void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
- Label* done);
-
- void CallCFunctionHelper(Register function, int num_reg_arguments,
- int num_double_arguments);
-
- // TODO(RISCV) Reorder parameters so out parameters come last.
- bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits);
- bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
- Register* scratch, const Operand& rt);
-
- void BranchShortHelper(int32_t offset, Label* L);
- bool BranchShortHelper(int32_t offset, Label* L, Condition cond, Register rs,
- const Operand& rt);
- bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs,
- const Operand& rt);
-
- void BranchAndLinkShortHelper(int32_t offset, Label* L);
- void BranchAndLinkShort(int32_t offset);
- void BranchAndLinkShort(Label* L);
- bool BranchAndLinkShortHelper(int32_t offset, Label* L, Condition cond,
- Register rs, const Operand& rt);
- bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
- Register rs, const Operand& rt);
- void BranchAndLinkLong(Label* L);
-#if V8_TARGET_ARCH_RISCV64
- template <typename F_TYPE>
- void RoundHelper(FPURegister dst, FPURegister src, FPURegister fpu_scratch,
- FPURoundingMode mode);
-#elif V8_TARGET_ARCH_RISCV32
- void RoundDouble(FPURegister dst, FPURegister src, FPURegister fpu_scratch,
- FPURoundingMode mode);
-
- void RoundFloat(FPURegister dst, FPURegister src, FPURegister fpu_scratch,
- FPURoundingMode mode);
-#endif
- template <typename F>
- void RoundHelper(VRegister dst, VRegister src, Register scratch,
- VRegister v_scratch, FPURoundingMode frm);
-
- template <typename TruncFunc>
- void RoundFloatingPointToInteger(Register rd, FPURegister fs, Register result,
- TruncFunc trunc);
-
- // Push a fixed frame, consisting of ra, fp.
- void PushCommonFrame(Register marker_reg = no_reg);
-};
-
-// MacroAssembler implements a collection of frequently used macros.
-class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
- public:
- using TurboAssembler::TurboAssembler;
-
// It assumes that the arguments are located below the stack pointer.
// argc is the number of arguments not including the receiver.
// TODO(victorgomes): Remove this function once we stick with the reversed
@@ -1287,12 +1249,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// argc - argument count to be dropped by LeaveExitFrame.
// save_doubles - saves FPU registers on stack.
// stack_space - extra stack space.
- void EnterExitFrame(bool save_doubles, int stack_space = 0,
+ void EnterExitFrame(int stack_space = 0,
StackFrame::Type frame_type = StackFrame::EXIT);
// Leave the current exit frame.
- void LeaveExitFrame(bool save_doubles, Register arg_count,
- bool do_return = NO_EMIT_RETURN,
+ void LeaveExitFrame(Register arg_count, bool do_return = NO_EMIT_RETURN,
bool argument_count_is_length = false);
// Make sure the stack is aligned. Only emits code in debug mode.
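
A minimal sketch of an exit-frame sequence after the save_doubles parameter removal; the register, stack_space, and the elided C call are illustrative only:

#include "src/codegen/macro-assembler.h"
namespace v8::internal {
void ExitFrameSketch(MacroAssembler* masm, Register argc) {
  // Before: masm->EnterExitFrame(/*save_doubles=*/false, 0, StackFrame::EXIT);
  masm->EnterExitFrame(/*stack_space=*/0, StackFrame::EXIT);
  // ... call out to C++ here ...
  masm->LeaveExitFrame(argc, /*do_return=*/true);
}
}  // namespace v8::internal
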
@@ -1342,7 +1303,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Tiering support.
void AssertFeedbackVector(Register object,
- Register scratch) NOOP_UNLESS_DEBUG_CODE
+ Register scratch) NOOP_UNLESS_DEBUG_CODE;
void ReplaceClosureCodeWithOptimizedCode(Register optimized_code,
Register closure);
void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id);
@@ -1364,20 +1325,17 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Runtime calls.
// Call a runtime routine.
- void CallRuntime(const Runtime::Function* f, int num_arguments,
- SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore);
+ void CallRuntime(const Runtime::Function* f, int num_arguments);
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
+ void CallRuntime(Runtime::FunctionId fid) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
- CallRuntime(function, function->nargs, save_doubles);
+ CallRuntime(function, function->nargs);
}
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId fid, int num_arguments,
- SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
- CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
+ void CallRuntime(Runtime::FunctionId fid, int num_arguments) {
+ CallRuntime(Runtime::FunctionForId(fid), num_arguments);
}
// Convenience function: tail call a runtime routine (jump).
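
A one-line migration sketch for the CallRuntime overloads, which no longer take a SaveFPRegsMode; the runtime function id is only an example:

#include "src/codegen/macro-assembler.h"
#include "src/runtime/runtime.h"
namespace v8::internal {
void ThrowStackOverflowSketch(MacroAssembler* masm) {
  // Before: masm->CallRuntime(Runtime::kThrowStackOverflow, SaveFPRegsMode::kIgnore);
  masm->CallRuntime(Runtime::kThrowStackOverflow);  // nargs taken from the Runtime::Function
}
}  // namespace v8::internal
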
@@ -1386,10 +1344,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Jump to the builtin routine.
void JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame = false);
-
- // Generates a trampoline to jump to the off-heap instruction stream.
- void JumpToOffHeapInstructionStream(Address entry);
-
// ---------------------------------------------------------------------------
// In-place weak references.
void LoadWeakValue(Register out, Register in, Label* target_if_cleared);
@@ -1452,8 +1406,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
ArgumentsCountType type,
ArgumentsCountMode mode,
Register scratch = no_reg);
- void JumpIfCodeTIsMarkedForDeoptimization(
- Register codet, Register scratch, Label* if_marked_for_deoptimization);
+ void JumpIfCodeIsMarkedForDeoptimization(Register code, Register scratch,
+ Label* if_marked_for_deoptimization);
Operand ClearedValue() const;
// Jump if the register contains a non-smi.
@@ -1491,7 +1445,67 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
DecodeField<Field>(reg, reg);
}
+ protected:
+ inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
+ inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
+
private:
+ bool has_double_zero_reg_set_ = false;
+ bool has_single_zero_reg_set_ = false;
+
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
+ // succeeds, otherwise falls through if result is saturated. On return
+ // 'result' either holds answer, or is clobbered on fall through.
+ void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
+ Label* done);
+
+ void CallCFunctionHelper(
+ Register function, int num_reg_arguments, int num_double_arguments,
+ SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes);
+
+ // TODO(RISCV) Reorder parameters so out parameters come last.
+ bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits);
+ bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
+ Register* scratch, const Operand& rt);
+
+ void BranchShortHelper(int32_t offset, Label* L);
+ bool BranchShortHelper(int32_t offset, Label* L, Condition cond, Register rs,
+ const Operand& rt);
+ bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs,
+ const Operand& rt);
+
+ void BranchAndLinkShortHelper(int32_t offset, Label* L);
+ void BranchAndLinkShort(int32_t offset);
+ void BranchAndLinkShort(Label* L);
+ bool BranchAndLinkShortHelper(int32_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt);
+ bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt);
+ void BranchAndLinkLong(Label* L);
+#if V8_TARGET_ARCH_RISCV64
+ template <typename F_TYPE>
+ void RoundHelper(FPURegister dst, FPURegister src, FPURegister fpu_scratch,
+ FPURoundingMode mode);
+#elif V8_TARGET_ARCH_RISCV32
+ void RoundDouble(FPURegister dst, FPURegister src, FPURegister fpu_scratch,
+ FPURoundingMode mode);
+
+ void RoundFloat(FPURegister dst, FPURegister src, FPURegister fpu_scratch,
+ FPURoundingMode mode);
+#endif
+ template <typename F>
+ void RoundHelper(VRegister dst, VRegister src, Register scratch,
+ VRegister v_scratch, FPURoundingMode frm,
+ bool keep_nan_same = true);
+
+ template <typename TruncFunc>
+ void RoundFloatingPointToInteger(Register rd, FPURegister fs, Register result,
+ TruncFunc trunc);
+
+ // Push a fixed frame, consisting of ra, fp.
+ void PushCommonFrame(Register marker_reg = no_reg);
+
// Helper functions for generating invokes.
void InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count, Label* done,
@@ -1508,7 +1522,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
};
template <typename Func>
-void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
+void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count,
Func GetLabelFunction) {
// Ensure that dd-ed labels following this instruction use 8 bytes aligned
// addresses.
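
A minimal usage sketch for the (renamed) GenerateSwitchTable; the label-array setup is illustrative and the per-case bodies are elided:

#include "src/codegen/macro-assembler.h"
namespace v8::internal {
void SwitchOnIndexSketch(MacroAssembler* masm, Register index) {
  constexpr size_t kCaseCount = 3;
  Label cases[kCaseCount], done;
  // The callable maps a case number to the Label* that is dd()-ed into the table.
  masm->GenerateSwitchTable(index, kCaseCount,
                            [&cases](size_t i) { return &cases[i]; });
  for (size_t i = 0; i < kCaseCount; ++i) {
    masm->bind(&cases[i]);
    // ... per-case code ...
    masm->Branch(&done);
  }
  masm->bind(&done);
}
}  // namespace v8::internal
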
@@ -1537,9 +1551,14 @@ void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
}
struct MoveCycleState {
- // Whether a move in the cycle needs the scratch or double scratch register.
- bool pending_scratch_register_use = false;
- bool pending_double_scratch_register_use = false;
+ // List of scratch registers reserved for pending moves in a move cycle, and
+ // which should therefore not be used as a temporary location by
+ // {MoveToTempLocation}.
+ RegList scratch_regs;
+ // Available scratch registers during the move cycle resolution scope.
+ base::Optional<UseScratchRegisterScope> temps;
+ // Scratch register picked by {MoveToTempLocation}.
+ base::Optional<Register> scratch_reg;
};
#define ACCESS_MASM(masm) masm->
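
Finally, a minimal sketch of how the reworked MoveCycleState fields might be populated when a move cycle is entered. This is an assumption about the resolver in macro-assembler-riscv.cc (which calls MoveToTempLocation and is not part of this header); the helper below is hypothetical:

#include "src/codegen/macro-assembler.h"
namespace v8::internal {
// Hypothetical helper; the real logic lives in the move-cycle resolver.
void ReserveCycleScratchSketch(MacroAssembler* masm, MoveCycleState* cycle) {
  cycle->temps.emplace(masm);              // scratch scope spanning the cycle
  Register tmp = cycle->temps->Acquire();  // temp later used by MoveToTempLocation
  cycle->scratch_reg.emplace(tmp);
  cycle->scratch_regs.set(tmp);            // pending moves must avoid this register
}
}  // namespace v8::internal
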