Diffstat (limited to 'deps/v8/src/codegen/riscv64')
-rw-r--r--  deps/v8/src/codegen/riscv64/assembler-riscv64-inl.h             |  32
-rw-r--r--  deps/v8/src/codegen/riscv64/assembler-riscv64.cc                | 146
-rw-r--r--  deps/v8/src/codegen/riscv64/assembler-riscv64.h                 |  80
-rw-r--r--  deps/v8/src/codegen/riscv64/constants-riscv64.cc                |   5
-rw-r--r--  deps/v8/src/codegen/riscv64/constants-riscv64.h                 |  15
-rw-r--r--  deps/v8/src/codegen/riscv64/interface-descriptors-riscv64-inl.h | 265
-rw-r--r--  deps/v8/src/codegen/riscv64/interface-descriptors-riscv64.cc    | 313
-rw-r--r--  deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc          | 367
-rw-r--r--  deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h           |  59
-rw-r--r--  deps/v8/src/codegen/riscv64/register-riscv64.h                  |   2
10 files changed, 740 insertions(+), 544 deletions(-)
diff --git a/deps/v8/src/codegen/riscv64/assembler-riscv64-inl.h b/deps/v8/src/codegen/riscv64/assembler-riscv64-inl.h
index 40bd56d15b..d301a00bf4 100644
--- a/deps/v8/src/codegen/riscv64/assembler-riscv64-inl.h
+++ b/deps/v8/src/codegen/riscv64/assembler-riscv64-inl.h
@@ -63,11 +63,15 @@ void RelocInfo::apply(intptr_t delta) {
if (IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_)) {
// Absolute code pointer inside code object moves with the code object.
Assembler::RelocateInternalReference(rmode_, pc_, delta);
+ } else {
+ DCHECK(IsRelativeCodeTarget(rmode_));
+ Assembler::RelocateRelativeReference(rmode_, pc_, delta);
}
}
Address RelocInfo::target_address() {
- DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
+ DCHECK(IsCodeTargetMode(rmode_) || IsRuntimeEntry(rmode_) ||
+ IsWasmCall(rmode_));
return Assembler::target_address_at(pc_, constant_pool_);
}
@@ -133,9 +137,13 @@ HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
- DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
- return Handle<HeapObject>(reinterpret_cast<Address*>(
- Assembler::target_address_at(pc_, constant_pool_)));
+ if (IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_)) {
+ return Handle<HeapObject>(reinterpret_cast<Address*>(
+ Assembler::target_address_at(pc_, constant_pool_)));
+ } else {
+ DCHECK(IsRelativeCodeTarget(rmode_));
+ return origin->relative_code_target_object_handle_at(pc_);
+ }
}
void RelocInfo::set_target_object(Heap* heap, HeapObject target,
@@ -163,11 +171,11 @@ void RelocInfo::set_target_external_reference(
}
Address RelocInfo::target_internal_reference() {
- if (rmode_ == INTERNAL_REFERENCE) {
+ if (IsInternalReference(rmode_)) {
return Memory<Address>(pc_);
} else {
// Encoded internal references are j/jal instructions.
- DCHECK(rmode_ == INTERNAL_REFERENCE_ENCODED);
+ DCHECK(IsInternalReferenceEncoded(rmode_));
DCHECK(Assembler::IsLui(Assembler::instr_at(pc_ + 0 * kInstrSize)));
Address address = Assembler::target_address_at(pc_);
return address;
@@ -175,10 +183,20 @@ Address RelocInfo::target_internal_reference() {
}
Address RelocInfo::target_internal_reference_address() {
- DCHECK(rmode_ == INTERNAL_REFERENCE || rmode_ == INTERNAL_REFERENCE_ENCODED);
+ DCHECK(IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_));
return pc_;
}
+Handle<Code> Assembler::relative_code_target_object_handle_at(
+ Address pc) const {
+ Instr instr1 = Assembler::instr_at(pc);
+ Instr instr2 = Assembler::instr_at(pc + kInstrSize);
+ DCHECK(IsAuipc(instr1));
+ DCHECK(IsJalr(instr2));
+ int32_t code_target_index = BrachlongOffset(instr1, instr2);
+ return GetCodeTarget(code_target_index);
+}
+
Address RelocInfo::target_runtime_entry(Assembler* origin) {
DCHECK(IsRuntimeEntry(rmode_));
return target_address();
diff --git a/deps/v8/src/codegen/riscv64/assembler-riscv64.cc b/deps/v8/src/codegen/riscv64/assembler-riscv64.cc
index 914ea26f9f..35c56ccdf5 100644
--- a/deps/v8/src/codegen/riscv64/assembler-riscv64.cc
+++ b/deps/v8/src/codegen/riscv64/assembler-riscv64.cc
@@ -128,7 +128,8 @@ Register ToRegister(int num) {
const int RelocInfo::kApplyMask =
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
+ RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET);
bool RelocInfo::IsCodedSpecially() {
// The deserializer needs to know whether a pointer is specially coded. Being
@@ -454,6 +455,16 @@ static inline Instr SetAuipcOffset(int32_t offset, Instr instr) {
return instr;
}
+static inline Instr SetJalrOffset(int32_t offset, Instr instr) {
+ DCHECK(Assembler::IsJalr(instr));
+ DCHECK(is_int12(offset));
+ instr &= ~kImm12Mask;
+ int32_t imm12 = offset << kImm12Shift;
+ DCHECK(Assembler::IsJalr(instr | (imm12 & kImm12Mask)));
+ DCHECK(Assembler::JalrOffset(instr | (imm12 & kImm12Mask)) == offset);
+ return instr | (imm12 & kImm12Mask);
+}
+
static inline Instr SetJalOffset(int32_t pos, int32_t target_pos, Instr instr) {
DCHECK(Assembler::IsJal(instr));
int32_t imm = target_pos - pos;
@@ -689,17 +700,36 @@ int Assembler::CJumpOffset(Instr instr) {
int Assembler::BrachlongOffset(Instr auipc, Instr instr_I) {
DCHECK(reinterpret_cast<Instruction*>(&instr_I)->InstructionType() ==
InstructionBase::kIType);
- const int kImm19_0Mask = ((1 << 20) - 1);
- int32_t imm_auipc = auipc & (kImm19_0Mask << 12);
- int32_t imm_12 = instr_I >> 20;
- int32_t offset = imm_12 + imm_auipc;
+ DCHECK(IsAuipc(auipc));
+ int32_t imm_auipc = AuipcOffset(auipc);
+ int32_t imm12 = (instr_I & kImm12Mask) >> 20;
+ int32_t offset = imm12 + imm_auipc;
return offset;
}
+int Assembler::PatchBranchlongOffset(Address pc, Instr instr_auipc,
+ Instr instr_jalr, int32_t offset) {
+ DCHECK(IsAuipc(instr_auipc));
+ DCHECK(IsJalr(instr_jalr));
+ int32_t Hi20 = (((int32_t)offset + 0x800) >> 12);
+ int32_t Lo12 = (int32_t)offset << 20 >> 20;
+ CHECK(is_int32(offset));
+ instr_at_put(pc, SetAuipcOffset(Hi20, instr_auipc));
+ instr_at_put(pc + 4, SetJalrOffset(Lo12, instr_jalr));
+ DCHECK(offset ==
+ BrachlongOffset(Assembler::instr_at(pc), Assembler::instr_at(pc + 4)));
+ return 2;
+}
+
int Assembler::LdOffset(Instr instr) {
DCHECK(IsLd(instr));
int32_t imm12 = (instr & kImm12Mask) >> 20;
- imm12 = imm12 << 12 >> 12;
+ return imm12;
+}
+
+int Assembler::JalrOffset(Instr instr) {
+ DCHECK(IsJalr(instr));
+ int32_t imm12 = (instr & kImm12Mask) >> 20;
return imm12;
}
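Note: the Hi20/Lo12 split used by SetJalrOffset and PatchBranchlongOffset above (and by GenPCRelativeJump in macro-assembler-riscv64.cc below) hinges on the +0x800 rounding, which compensates for the sign extension of the low 12 bits so the two immediates recombine exactly. A minimal standalone sketch of that arithmetic in plain C++ (SplitOffset is a hypothetical name, not a V8 helper):

    #include <cassert>
    #include <cstdint>

    void SplitOffset(int32_t offset, int32_t* hi20, int32_t* lo12) {
      *hi20 = (offset + 0x800) >> 12;  // round so the signed low part fits
      // Sign-extend the low 12 bits (unsigned shift first to avoid UB).
      *lo12 = static_cast<int32_t>(static_cast<uint32_t>(offset) << 20) >> 20;
      // auipc contributes hi20 << 12; the jalr/ld immediate adds signed lo12.
      assert(static_cast<int64_t>(*hi20) * 4096 + *lo12 == offset);
    }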
@@ -717,7 +747,7 @@ bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
}
void Assembler::disassembleInstr(Instr instr) {
- if (!FLAG_debug_riscv) return;
+ if (!FLAG_riscv_debug) return;
disasm::NameConverter converter;
disasm::Disassembler disasm(converter);
EmbeddedVector<char, 128> disasm_buffer;
@@ -2567,9 +2597,25 @@ void Assembler::AdjustBaseAndOffset(MemOperand* src, Register scratch,
// Must not overwrite the register 'base' while loading 'offset'.
DCHECK(src->rm() != scratch);
- RV_li(scratch, src->offset());
- add(scratch, scratch, src->rm());
- src->offset_ = 0;
+ constexpr int32_t kMinOffsetForSimpleAdjustment = 0x7F8;
+ constexpr int32_t kMaxOffsetForSimpleAdjustment =
+ 2 * kMinOffsetForSimpleAdjustment;
+ if (0 <= src->offset() && src->offset() <= kMaxOffsetForSimpleAdjustment) {
+ addi(scratch, src->rm(), kMinOffsetForSimpleAdjustment);
+ src->offset_ -= kMinOffsetForSimpleAdjustment;
+ } else if (-kMaxOffsetForSimpleAdjustment <= src->offset() &&
+ src->offset() < 0) {
+ addi(scratch, src->rm(), -kMinOffsetForSimpleAdjustment);
+ src->offset_ += kMinOffsetForSimpleAdjustment;
+ } else if (access_type == OffsetAccessType::SINGLE_ACCESS) {
+ RV_li(scratch, (static_cast<int64_t>(src->offset()) + 0x800) >> 12 << 12);
+ add(scratch, scratch, src->rm());
+ src->offset_ = src->offset() << 20 >> 20;
+ } else {
+ RV_li(scratch, src->offset());
+ add(scratch, scratch, src->rm());
+ src->offset_ = 0;
+ }
src->rm_ = scratch;
}
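Note: the simple-adjustment branches above move the base register by +/-0x7F8 (the largest int12 value that is a multiple of 8) so the residual offset still fits the signed 12-bit immediate of a single load or store. A quick check of that invariant, as a hedged sketch in plain C++ with hypothetical helper names:

    #include <cassert>
    #include <cstdint>

    bool IsInt12(int64_t v) { return v >= -2048 && v <= 2047; }

    void CheckSimpleAdjustment(int32_t offset) {
      constexpr int32_t kMin = 0x7F8;  // kMinOffsetForSimpleAdjustment
      if (0 <= offset && offset <= 2 * kMin) {
        assert(IsInt12(offset - kMin));   // after addi scratch, rm, kMin
      } else if (-2 * kMin <= offset && offset < 0) {
        assert(IsInt12(offset + kMin));   // after addi scratch, rm, -kMin
      }
    }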
@@ -2596,6 +2642,22 @@ int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
}
}
+void Assembler::RelocateRelativeReference(RelocInfo::Mode rmode, Address pc,
+ intptr_t pc_delta) {
+ Instr instr = instr_at(pc);
+ Instr instr1 = instr_at(pc + 1 * kInstrSize);
+ DCHECK(RelocInfo::IsRelativeCodeTarget(rmode));
+ if (IsAuipc(instr) && IsJalr(instr1)) {
+ int32_t imm;
+ imm = BrachlongOffset(instr, instr1);
+ imm -= pc_delta;
+ PatchBranchlongOffset(pc, instr, instr1, imm);
+ return;
+ } else {
+ UNREACHABLE();
+ }
+}
+
void Assembler::GrowBuffer() {
DEBUG_PRINTF("GrowBuffer: %p -> ", buffer_start_);
// Compute new buffer size.
@@ -2766,12 +2828,23 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
ICacheFlushMode icache_flush_mode) {
Instr* instr = reinterpret_cast<Instr*>(pc);
if (IsAuipc(*instr)) {
- DCHECK(IsLd(*reinterpret_cast<Instr*>(pc + 4)));
- int32_t Hi20 = AuipcOffset(*instr);
- int32_t Lo12 = LdOffset(*reinterpret_cast<Instr*>(pc + 4));
- Memory<Address>(pc + Hi20 + Lo12) = target;
- if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- FlushInstructionCache(pc + Hi20 + Lo12, 2 * kInstrSize);
+ if (IsLd(*reinterpret_cast<Instr*>(pc + 4))) {
+ int32_t Hi20 = AuipcOffset(*instr);
+ int32_t Lo12 = LdOffset(*reinterpret_cast<Instr*>(pc + 4));
+ Memory<Address>(pc + Hi20 + Lo12) = target;
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ FlushInstructionCache(pc + Hi20 + Lo12, 2 * kInstrSize);
+ }
+ } else {
+ DCHECK(IsJalr(*reinterpret_cast<Instr*>(pc + 4)));
+ int64_t imm = (int64_t)target - (int64_t)pc;
+ Instr instr = instr_at(pc);
+ Instr instr1 = instr_at(pc + 1 * kInstrSize);
+ DCHECK(is_int32(imm));
+ int num = PatchBranchlongOffset(pc, instr, instr1, (int32_t)imm);
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ FlushInstructionCache(pc, num * kInstrSize);
+ }
}
} else {
set_target_address_at(pc, target, icache_flush_mode);
@@ -2781,10 +2854,17 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
Address Assembler::target_address_at(Address pc, Address constant_pool) {
Instr* instr = reinterpret_cast<Instr*>(pc);
if (IsAuipc(*instr)) {
- DCHECK(IsLd(*reinterpret_cast<Instr*>(pc + 4)));
- int32_t Hi20 = AuipcOffset(*instr);
- int32_t Lo12 = LdOffset(*reinterpret_cast<Instr*>(pc + 4));
- return Memory<Address>(pc + Hi20 + Lo12);
+ if (IsLd(*reinterpret_cast<Instr*>(pc + 4))) {
+ int32_t Hi20 = AuipcOffset(*instr);
+ int32_t Lo12 = LdOffset(*reinterpret_cast<Instr*>(pc + 4));
+ return Memory<Address>(pc + Hi20 + Lo12);
+ } else {
+ DCHECK(IsJalr(*reinterpret_cast<Instr*>(pc + 4)));
+ int32_t Hi20 = AuipcOffset(*instr);
+ int32_t Lo12 = JalrOffset(*reinterpret_cast<Instr*>(pc + 4));
+ return pc + Hi20 + Lo12;
+ }
+
} else {
return target_address_at(pc);
}
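Note: both branches above compute the same pc + Hi20 + Lo12 address; the difference is whether that address holds the target (auipc+ld reading a constant-pool slot) or is the target (auipc+jalr). A hedged decoding sketch over raw instruction words, using the RISC-V U-type/I-type field layouts directly rather than V8's helpers:

    #include <cstdint>

    uintptr_t ResolveTarget(uintptr_t pc, uint32_t auipc, uint32_t next) {
      int32_t hi20 = static_cast<int32_t>(auipc & 0xFFFFF000);  // U-type imm
      int32_t lo12 = static_cast<int32_t>(next) >> 20;          // I-type imm
      uintptr_t addr = pc + hi20 + lo12;
      bool is_ld = (next & 0x7F) == 0x03 &&      // LOAD opcode
                   ((next >> 12) & 0x7) == 0x3;  // funct3 = 011 (ld)
      // ld: the pool slot at addr holds the target; jalr: addr is the target.
      return is_ld ? *reinterpret_cast<uintptr_t*>(addr) : addr;
    }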
@@ -2890,17 +2970,17 @@ bool UseScratchRegisterScope::hasAvailable() const { return *available_ != 0; }
bool Assembler::IsConstantPoolAt(Instruction* instr) {
// The constant pool marker is made of two instructions. These instructions
// will never be emitted by the JIT, so checking for the first one is enough:
- // 0: ld x0, t3, #offset
+ // 0: ld x0, x0, #offset
Instr instr_value = *reinterpret_cast<Instr*>(instr);
-
- bool result = IsLd(instr_value) && (instr->RdValue() == kRegCode_zero_reg);
- // It is still worth asserting the marker is complete.
- // 4: j 0
+ bool result = IsLd(instr_value) && (instr->Rs1Value() == kRegCode_zero_reg) &&
+ (instr->RdValue() == kRegCode_zero_reg);
#ifdef DEBUG
- Instruction* instr_fllowing = instr + kInstrSize;
- DCHECK(!result || (IsJal(*reinterpret_cast<Instr*>(instr_fllowing)) &&
- instr_fllowing->Imm20JValue() == 0 &&
- instr_fllowing->RdValue() == kRegCode_zero_reg));
+ // It is still worth asserting the marker is complete.
+ // 1: j 0x0
+ Instruction* instr_following = instr + kInstrSize;
+ DCHECK(!result || (IsJal(*reinterpret_cast<Instr*>(instr_following)) &&
+ instr_following->Imm20JValue() == 0 &&
+ instr_following->RdValue() == kRegCode_zero_reg));
#endif
return result;
}
@@ -2941,9 +3021,9 @@ void ConstantPool::EmitPrologue(Alignment require_alignment) {
int ConstantPool::PrologueSize(Jump require_jump) const {
// Prologue is:
- // j over ;; if require_jump
- // ld x0, t3, #pool_size
- // j xzr
+ // j over ;; if require_jump
+ // ld x0, x0, #pool_size
+ // j 0x0
int prologue_size = require_jump == Jump::kRequired ? kInstrSize : 0;
prologue_size += 2 * kInstrSize;
return prologue_size;
@@ -2954,7 +3034,7 @@ void ConstantPool::SetLoadOffsetToConstPoolEntry(int load_offset,
const ConstantPoolKey& key) {
Instr instr_auipc = assm_->instr_at(load_offset);
Instr instr_ld = assm_->instr_at(load_offset + 4);
- // Instruction to patch must be 'ld t3, t3, offset' with offset == kInstrSize.
+ // Instruction to patch must be 'ld rd, offset(rd)' with 'offset == 0'.
DCHECK(assm_->IsAuipc(instr_auipc));
DCHECK(assm_->IsLd(instr_ld));
DCHECK_EQ(assm_->LdOffset(instr_ld), 0);
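Note: the pool marker works because 'ld x0, 0(x0)' is never emitted by the JIT, so recognizing it only takes the opcode, funct3, rd, and rs1 fields. A standalone sketch of that check (raw encodings per the RISC-V spec; IsConstantPoolMarker is a hypothetical name):

    #include <cstdint>

    bool IsConstantPoolMarker(uint32_t instr) {
      bool is_ld = (instr & 0x7F) == 0x03 &&      // LOAD opcode
                   ((instr >> 12) & 0x7) == 0x3;  // funct3 = 011 (ld)
      uint32_t rd  = (instr >> 7) & 0x1F;
      uint32_t rs1 = (instr >> 15) & 0x1F;
      return is_ld && rd == 0 && rs1 == 0;        // ld x0, #offset(x0)
    }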
diff --git a/deps/v8/src/codegen/riscv64/assembler-riscv64.h b/deps/v8/src/codegen/riscv64/assembler-riscv64.h
index 1dcf4e0aae..ff66351d6a 100644
--- a/deps/v8/src/codegen/riscv64/assembler-riscv64.h
+++ b/deps/v8/src/codegen/riscv64/assembler-riscv64.h
@@ -53,7 +53,7 @@ namespace v8 {
namespace internal {
#define DEBUG_PRINTF(...) \
- if (FLAG_debug_riscv) { \
+ if (FLAG_riscv_debug) { \
printf(__VA_ARGS__); \
}
@@ -160,6 +160,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
virtual ~Assembler() { CHECK(constpool_.IsEmpty()); }
+ void AbortedCodeGeneration() { constpool_.Clear(); }
// GetCode emits any pending (non-emitted) code and fills the descriptor desc.
static constexpr int kNoHandlerTable = 0;
static constexpr SafepointTableBuilder* kNoSafepointTable = nullptr;
@@ -208,11 +209,14 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Get offset from instr.
int BranchOffset(Instr instr);
- int BrachlongOffset(Instr auipc, Instr jalr);
+ static int BrachlongOffset(Instr auipc, Instr jalr);
+ static int PatchBranchlongOffset(Address pc, Instr auipc, Instr instr_I,
+ int32_t offset);
int JumpOffset(Instr instr);
int CJumpOffset(Instr instr);
static int LdOffset(Instr instr);
static int AuipcOffset(Instr instr);
+ static int JalrOffset(Instr instr);
// Returns the branch offset to the given label from the current code
// position. Links the label to the current position if it is still unbound.
@@ -800,6 +804,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static int RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
intptr_t pc_delta);
+ static void RelocateRelativeReference(RelocInfo::Mode rmode, Address pc,
+ intptr_t pc_delta);
// Writes a single byte or word of data in the code stream. Used for
// inline tables, e.g., jump-tables.
@@ -862,8 +868,40 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static bool IsLd(Instr instr);
void CheckTrampolinePool();
+ // Get the code target object for a pc-relative call or jump.
+ V8_INLINE Handle<Code> relative_code_target_object_handle_at(
+ Address pc_) const;
+
inline int UnboundLabelsCount() { return unbound_labels_count_; }
+ using BlockPoolsScope = BlockTrampolinePoolScope;
+
+ void RecordConstPool(int size);
+
+ void ForceConstantPoolEmissionWithoutJump() {
+ constpool_.Check(Emission::kForced, Jump::kOmitted);
+ }
+ void ForceConstantPoolEmissionWithJump() {
+ constpool_.Check(Emission::kForced, Jump::kRequired);
+ }
+ // Check if the const pool needs to be emitted while pretending that {margin}
+ // more bytes of instructions have already been emitted.
+ void EmitConstPoolWithJumpIfNeeded(size_t margin = 0) {
+ constpool_.Check(Emission::kIfNeeded, Jump::kRequired, margin);
+ }
+
+ void EmitConstPoolWithoutJumpIfNeeded(size_t margin = 0) {
+ constpool_.Check(Emission::kIfNeeded, Jump::kOmitted, margin);
+ }
+
+ void RecordEntry(uint32_t data, RelocInfo::Mode rmode) {
+ constpool_.RecordEntry(data, rmode);
+ }
+
+ void RecordEntry(uint64_t data, RelocInfo::Mode rmode) {
+ constpool_.RecordEntry(data, rmode);
+ }
+
protected:
// Readable constants for base and offset adjustment helper, these indicate if
// aside from offset, another value like offset + 4 should fit into int16.
@@ -949,34 +987,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
}
- using BlockPoolsScope = BlockTrampolinePoolScope;
-
- void RecordConstPool(int size);
-
- void ForceConstantPoolEmissionWithoutJump() {
- constpool_.Check(Emission::kForced, Jump::kOmitted);
- }
- void ForceConstantPoolEmissionWithJump() {
- constpool_.Check(Emission::kForced, Jump::kRequired);
- }
- // Check if the const pool needs to be emitted while pretending that {margin}
- // more bytes of instructions have already been emitted.
- void EmitConstPoolWithJumpIfNeeded(size_t margin = 0) {
- constpool_.Check(Emission::kIfNeeded, Jump::kRequired, margin);
- }
-
- void EmitConstPoolWithoutJumpIfNeeded(size_t margin = 0) {
- constpool_.Check(Emission::kIfNeeded, Jump::kOmitted, margin);
- }
-
- void RecordEntry(uint32_t data, RelocInfo::Mode rmode) {
- constpool_.RecordEntry(data, rmode);
- }
-
- void RecordEntry(uint64_t data, RelocInfo::Mode rmode) {
- constpool_.RecordEntry(data, rmode);
- }
-
private:
// Avoid overflows for displacements etc.
static const int kMaximalBufferSize = 512 * MB;
@@ -1231,6 +1241,16 @@ class V8_EXPORT_PRIVATE UseScratchRegisterScope {
Register Acquire();
bool hasAvailable() const;
+ void Include(const RegList& list) { *available_ |= list; }
+ void Exclude(const RegList& list) { *available_ &= ~list; }
+ void Include(const Register& reg1, const Register& reg2 = no_reg) {
+ RegList list(reg1.bit() | reg2.bit());
+ Include(list);
+ }
+ void Exclude(const Register& reg1, const Register& reg2 = no_reg) {
+ RegList list(reg1.bit() | reg2.bit());
+ Exclude(list);
+ }
private:
RegList* available_;
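Note: the new Include/Exclude members let a caller temporarily widen or narrow the scratch set. A hedged usage sketch (tasm is an assumed TurboAssembler*; this assumes the scope saves and restores the availability mask on entry and exit, as the ctor/dtor pair suggests):

    {
      UseScratchRegisterScope temps(tasm);
      temps.Include(t1, t2);            // t1/t2 become acquirable in this scope
      Register scratch = temps.Acquire();
      // ... emit code using scratch ...
    }  // scope ends; the previous availability mask is restored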
diff --git a/deps/v8/src/codegen/riscv64/constants-riscv64.cc b/deps/v8/src/codegen/riscv64/constants-riscv64.cc
index 045488bf7f..d2709dc2c7 100644
--- a/deps/v8/src/codegen/riscv64/constants-riscv64.cc
+++ b/deps/v8/src/codegen/riscv64/constants-riscv64.cc
@@ -106,8 +106,11 @@ int FPURegisters::Number(const char* name) {
}
InstructionBase::Type InstructionBase::InstructionType() const {
+ if (IsIllegalInstruction()) {
+ return kUnsupported;
+ }
// RV64C Instruction
- if (IsShortInstruction()) {
+ if (FLAG_riscv_c_extension && IsShortInstruction()) {
switch (InstructionBits() & kRvcOpcodeMask) {
case RO_C_ADDI4SPN:
return kCIWType;
diff --git a/deps/v8/src/codegen/riscv64/constants-riscv64.h b/deps/v8/src/codegen/riscv64/constants-riscv64.h
index 3b5ffff6da..c8f54d8f7f 100644
--- a/deps/v8/src/codegen/riscv64/constants-riscv64.h
+++ b/deps/v8/src/codegen/riscv64/constants-riscv64.h
@@ -8,6 +8,7 @@
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/common/globals.h"
+#include "src/flags/flags.h"
// UNIMPLEMENTED_ macro for RISCV.
#ifdef DEBUG
@@ -55,8 +56,7 @@ const uint32_t kLessSignificantWordInDoublewordOffset = 4;
namespace v8 {
namespace internal {
-// TODO(sigurds): Change this value once we use relative jumps.
-constexpr size_t kMaxPCRelativeCodeRangeInMB = 0;
+constexpr size_t kMaxPCRelativeCodeRangeInMB = 4096;
// -----------------------------------------------------------------------------
// Registers and FPURegisters.
@@ -727,18 +727,25 @@ class InstructionBase {
kUnsupported = -1
};
+ inline bool IsIllegalInstruction() const {
+  uint16_t FirstHalfWord = *reinterpret_cast<const uint16_t*>(this);
+ return FirstHalfWord == 0;
+ }
+
inline bool IsShortInstruction() const {
uint8_t FirstByte = *reinterpret_cast<const uint8_t*>(this);
return (FirstByte & 0x03) <= C2;
}
inline uint8_t InstructionSize() const {
- return this->IsShortInstruction() ? kShortInstrSize : kInstrSize;
+ return (FLAG_riscv_c_extension && this->IsShortInstruction())
+ ? kShortInstrSize
+ : kInstrSize;
}
// Get the raw instruction bits.
inline Instr InstructionBits() const {
- if (this->IsShortInstruction()) {
+ if (FLAG_riscv_c_extension && this->IsShortInstruction()) {
return 0x0000FFFF & (*reinterpret_cast<const ShortInstr*>(this));
}
return *reinterpret_cast<const Instr*>(this);
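Note on the kMaxPCRelativeCodeRangeInMB change above: an auipc+jalr pair reaches any signed 32-bit displacement, i.e. +/-2048 MB, a 4096 MB window. A one-liner confirming the arithmetic (plain C++):

    #include <cstdint>
    #include <cstdio>

    int main() {
      int64_t radius_mb = (int64_t{1} << 31) / (1024 * 1024);  // +/- reach
      printf("%lld MB window\n", static_cast<long long>(2 * radius_mb));  // 4096
    }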
diff --git a/deps/v8/src/codegen/riscv64/interface-descriptors-riscv64-inl.h b/deps/v8/src/codegen/riscv64/interface-descriptors-riscv64-inl.h
new file mode 100644
index 0000000000..4a8bb0d9ee
--- /dev/null
+++ b/deps/v8/src/codegen/riscv64/interface-descriptors-riscv64-inl.h
@@ -0,0 +1,265 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_RISCV64_INTERFACE_DESCRIPTORS_RISCV64_INL_H_
+#define V8_CODEGEN_RISCV64_INTERFACE_DESCRIPTORS_RISCV64_INL_H_
+
+#if V8_TARGET_ARCH_RISCV64
+
+#include "src/base/template-utils.h"
+#include "src/codegen/interface-descriptors.h"
+#include "src/execution/frames.h"
+
+namespace v8 {
+namespace internal {
+
+constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() {
+ auto registers = RegisterArray(a0, a1, a2, a3, a4);
+ STATIC_ASSERT(registers.size() == kMaxBuiltinRegisterParams);
+ return registers;
+}
+
+// static
+constexpr auto RecordWriteDescriptor::registers() {
+ return RegisterArray(a0, a1, a2, a3, kReturnRegister0);
+}
+
+// static
+constexpr auto DynamicCheckMapsDescriptor::registers() {
+ return RegisterArray(kReturnRegister0, a1, a2, a3, cp);
+}
+
+// static
+constexpr auto EphemeronKeyBarrierDescriptor::registers() {
+ return RegisterArray(a0, a1, a2, a3, kReturnRegister0);
+}
+
+// static
+constexpr Register LoadDescriptor::ReceiverRegister() { return a1; }
+// static
+constexpr Register LoadDescriptor::NameRegister() { return a2; }
+// static
+constexpr Register LoadDescriptor::SlotRegister() { return a0; }
+
+// static
+constexpr Register LoadWithVectorDescriptor::VectorRegister() { return a3; }
+
+// static
+constexpr Register
+LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
+ return a4;
+}
+
+// static
+constexpr Register StoreDescriptor::ReceiverRegister() { return a1; }
+// static
+constexpr Register StoreDescriptor::NameRegister() { return a2; }
+// static
+constexpr Register StoreDescriptor::ValueRegister() { return a0; }
+// static
+constexpr Register StoreDescriptor::SlotRegister() { return a4; }
+
+// static
+constexpr Register StoreWithVectorDescriptor::VectorRegister() { return a3; }
+
+// static
+constexpr Register StoreTransitionDescriptor::MapRegister() { return a5; }
+
+// static
+constexpr Register ApiGetterDescriptor::HolderRegister() { return a0; }
+// static
+constexpr Register ApiGetterDescriptor::CallbackRegister() { return a3; }
+
+// static
+constexpr Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
+// static
+constexpr Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
+
+// static
+constexpr Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
+ return a2;
+}
+// static
+constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() { return a3; }
+
+// static
+constexpr Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
+
+// static
+constexpr auto TypeofDescriptor::registers() { return RegisterArray(a3); }
+
+// static
+constexpr auto CallTrampolineDescriptor::registers() {
+ // a1: target
+ // a0: number of arguments
+ return RegisterArray(a1, a0);
+}
+
+// static
+constexpr auto CallVarargsDescriptor::registers() {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a4 : arguments list length (untagged)
+ // a2 : arguments list (FixedArray)
+ return RegisterArray(a1, a0, a4, a2);
+}
+
+// static
+constexpr auto CallForwardVarargsDescriptor::registers() {
+ // a1: target
+ // a0: number of arguments
+ // a2: start index (to support rest parameters)
+ return RegisterArray(a1, a0, a2);
+}
+
+// static
+constexpr auto CallFunctionTemplateDescriptor::registers() {
+ // a1 : function template info
+ // a0 : number of arguments (on the stack, not including receiver)
+ return RegisterArray(a1, a0);
+}
+
+// static
+constexpr auto CallWithSpreadDescriptor::registers() {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a2 : the object to spread
+ return RegisterArray(a1, a0, a2);
+}
+
+// static
+constexpr auto CallWithArrayLikeDescriptor::registers() {
+ // a1 : the target to call
+ // a2 : the arguments list
+ return RegisterArray(a1, a2);
+}
+
+// static
+constexpr auto ConstructVarargsDescriptor::registers() {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a3 : the new target
+ // a4 : arguments list length (untagged)
+ // a2 : arguments list (FixedArray)
+ return RegisterArray(a1, a3, a0, a4, a2);
+}
+
+// static
+constexpr auto ConstructForwardVarargsDescriptor::registers() {
+ // a3: new target
+ // a1: target
+ // a0: number of arguments
+ // a2: start index (to support rest parameters)
+ return RegisterArray(a1, a3, a0, a2);
+}
+
+// static
+constexpr auto ConstructWithSpreadDescriptor::registers() {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a3 : the new target
+ // a2 : the object to spread
+ return RegisterArray(a1, a3, a0, a2);
+}
+
+// static
+constexpr auto ConstructWithArrayLikeDescriptor::registers() {
+ // a1 : the target to call
+ // a3 : the new target
+ // a2 : the arguments list
+ return RegisterArray(a1, a3, a2);
+}
+
+// static
+constexpr auto ConstructStubDescriptor::registers() {
+ // a3: new target
+ // a1: target
+ // a0: number of arguments
+ // a2: allocation site or undefined
+ return RegisterArray(a1, a3, a0, a2);
+}
+
+// static
+constexpr auto AbortDescriptor::registers() { return RegisterArray(a0); }
+
+// static
+constexpr auto CompareDescriptor::registers() {
+ // a1: left operand
+ // a0: right operand
+ return RegisterArray(a1, a0);
+}
+
+// static
+constexpr auto Compare_BaselineDescriptor::registers() {
+ // a1: left operand
+ // a0: right operand
+ // a2: feedback slot
+ return RegisterArray(a1, a0, a2);
+}
+
+// static
+constexpr auto BinaryOpDescriptor::registers() {
+ // a1: left operand
+ // a0: right operand
+ return RegisterArray(a1, a0);
+}
+
+// static
+constexpr auto BinaryOp_BaselineDescriptor::registers() {
+ // a1: left operand
+ // a0: right operand
+ // a2: feedback slot
+ return RegisterArray(a1, a0, a2);
+}
+
+// static
+constexpr auto ApiCallbackDescriptor::registers() {
+ return RegisterArray(a1, // kApiFunctionAddress
+ a2, // kArgc
+ a3, // kCallData
+ a0); // kHolder
+}
+
+// static
+constexpr auto InterpreterDispatchDescriptor::registers() {
+ return RegisterArray(
+ kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister);
+}
+
+// static
+constexpr auto InterpreterPushArgsThenCallDescriptor::registers() {
+ return RegisterArray(a0, // argument count (not including receiver)
+ a2, // address of first argument
+ a1); // the target callable to be called
+}
+
+// static
+constexpr auto InterpreterPushArgsThenConstructDescriptor::registers() {
+ return RegisterArray(
+ a0, // argument count (not including receiver)
+ a4, // address of the first argument
+ a1, // constructor to call
+ a3, // new target
+ a2); // allocation site feedback if available, undefined otherwise
+}
+
+// static
+constexpr auto ResumeGeneratorDescriptor::registers() {
+ return RegisterArray(a0, // the value to pass to the generator
+ a1); // the JSGeneratorObject to resume
+}
+
+// static
+constexpr auto RunMicrotasksEntryDescriptor::registers() {
+ return RegisterArray(a0, a1);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_RISCV64
+
+#endif // V8_CODEGEN_RISCV64_INTERFACE_DESCRIPTORS_RISCV64_INL_H_
diff --git a/deps/v8/src/codegen/riscv64/interface-descriptors-riscv64.cc b/deps/v8/src/codegen/riscv64/interface-descriptors-riscv64.cc
deleted file mode 100644
index 23953097cd..0000000000
--- a/deps/v8/src/codegen/riscv64/interface-descriptors-riscv64.cc
+++ /dev/null
@@ -1,313 +0,0 @@
-// Copyright 2021 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_RISCV64
-
-#include "src/codegen/interface-descriptors.h"
-#include "src/execution/frames.h"
-
-namespace v8 {
-namespace internal {
-
-const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
-
-void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
- CallInterfaceDescriptorData* data, int register_parameter_count) {
- const Register default_stub_registers[] = {a0, a1, a2, a3, a4};
- CHECK_LE(static_cast<size_t>(register_parameter_count),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(register_parameter_count,
- default_stub_registers);
-}
-
-void WasmI32AtomicWait32Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {a0, a1, a2, a3};
- CHECK_EQ(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void WasmI64AtomicWait32Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {a0, a1, a2, a3, a4};
- CHECK_EQ(static_cast<size_t>(kParameterCount - kStackArgumentsCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount - kStackArgumentsCount,
- default_stub_registers);
-}
-
-void RecordWriteDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {a0, a1, a2, a3, kReturnRegister0};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void EphemeronKeyBarrierDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {a0, a1, a2, a3, kReturnRegister0};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void DynamicCheckMapsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register default_stub_registers[] = {kReturnRegister0, a1, a2, a3, cp};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-const Register LoadDescriptor::ReceiverRegister() { return a1; }
-const Register LoadDescriptor::NameRegister() { return a2; }
-const Register LoadDescriptor::SlotRegister() { return a0; }
-
-const Register LoadWithVectorDescriptor::VectorRegister() { return a3; }
-
-const Register
-LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
- return a4;
-}
-
-const Register StoreDescriptor::ReceiverRegister() { return a1; }
-const Register StoreDescriptor::NameRegister() { return a2; }
-const Register StoreDescriptor::ValueRegister() { return a0; }
-const Register StoreDescriptor::SlotRegister() { return a4; }
-
-const Register StoreWithVectorDescriptor::VectorRegister() { return a3; }
-
-const Register StoreTransitionDescriptor::SlotRegister() { return a4; }
-const Register StoreTransitionDescriptor::VectorRegister() { return a3; }
-const Register StoreTransitionDescriptor::MapRegister() { return a5; }
-
-const Register ApiGetterDescriptor::HolderRegister() { return a0; }
-const Register ApiGetterDescriptor::CallbackRegister() { return a3; }
-
-const Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
-const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
-
-// static
-const Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
-
-void TypeofDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1: target
- // a0: number of arguments
- Register registers[] = {a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a0 : number of arguments (on the stack, not including receiver)
- // a1 : the target to call
- // a4 : arguments list length (untagged)
- // a2 : arguments list (FixedArray)
- Register registers[] = {a1, a0, a4, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallForwardVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1: the target to call
- // a0: number of arguments
- // a2: start index (to support rest parameters)
- Register registers[] = {a1, a0, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallFunctionTemplateDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1 : function template info
- // a0 : number of arguments (on the stack, not including receiver)
- Register registers[] = {a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallWithSpreadDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a0 : number of arguments (on the stack, not including receiver)
- // a1 : the target to call
- // a2 : the object to spread
- Register registers[] = {a1, a0, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1 : the target to call
- // a2 : the arguments list
- Register registers[] = {a1, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a0 : number of arguments (on the stack, not including receiver)
- // a1 : the target to call
- // a3 : the new target
- // a4 : arguments list length (untagged)
- // a2 : arguments list (FixedArray)
- Register registers[] = {a1, a3, a0, a4, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1: the target to call
- // a3: new target
- // a0: number of arguments
- // a2: start index (to support rest parameters)
- Register registers[] = {a1, a3, a0, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructWithSpreadDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a0 : number of arguments (on the stack, not including receiver)
- // a1 : the target to call
- // a3 : the new target
- // a2 : the object to spread
- Register registers[] = {a1, a3, a0, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1 : the target to call
- // a3 : the new target
- // a2 : the arguments list
- Register registers[] = {a1, a3, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructStubDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1: target
- // a3: new target
- // a0: number of arguments
- // a2: allocation site or undefined
- Register registers[] = {a1, a3, a0, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void AbortDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CompareDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void BinaryOpDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ApiCallbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- a1, // kApiFunctionAddress
- a2, // kArgc
- a3, // kCallData
- a0, // kHolder
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterDispatchDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
- kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterPushArgsThenCallDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- a0, // argument count (not including receiver)
- a2, // address of first argument
- a1 // the target callable to be call
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- a0, // argument count (not including receiver)
- a4, // address of the first argument
- a1, // constructor to call
- a3, // new target
- a2, // allocation site feedback if available, undefined otherwise
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ResumeGeneratorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- a0, // the value to pass to the generator
- a1 // the JSGeneratorObject to resume
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void BinaryOp_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:11421): Implement on this platform.
- InitializePlatformUnimplemented(data, kParameterCount);
-}
-
-void Compare_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:11421): Implement on this platform.
- InitializePlatformUnimplemented(data, kParameterCount);
-}
-
-void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- a1, // loaded new FP
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a0, a1};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_RISCV64
diff --git a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
index ff798da0e9..801a74f569 100644
--- a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
+++ b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
@@ -12,6 +12,7 @@
#include "src/codegen/callable.h"
#include "src/codegen/code-factory.h"
#include "src/codegen/external-reference-table.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
@@ -61,7 +62,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
RegList list = kJSCallerSaved & ~exclusions;
bytes += NumRegs(list) * kPointerSize;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
}
@@ -86,7 +87,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
MultiPush(list);
bytes += NumRegs(list) * kPointerSize;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
MultiPushFPU(kCallerSavedFPU);
bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
}
@@ -97,7 +98,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
int bytes = 0;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
MultiPopFPU(kCallerSavedFPU);
bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
}
@@ -175,7 +176,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
Label done;
// Skip barrier if writing a smi.
- if (smi_check == INLINE_SMI_CHECK) {
+ if (smi_check == SmiCheck::kInline) {
JumpIfSmi(value, &done);
}
@@ -184,7 +185,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
DCHECK(IsAligned(offset, kPointerSize));
Add64(dst, object, Operand(offset - kHeapObjectTag));
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -197,13 +198,13 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
RecordWrite(object, dst, value, ra_status, save_fp, remembered_set_action,
- OMIT_SMI_CHECK);
+ SmiCheck::kOmit);
bind(&done);
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
li(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
li(dst, Operand(bit_cast<int64_t>(kZapValue + 8)));
}
@@ -336,14 +337,14 @@ void MacroAssembler::RecordWrite(Register object, Register address,
SaveFPRegsMode fp_mode,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
DCHECK(!AreAliased(object, address, value, kScratchReg));
Ld(kScratchReg, MemOperand(address));
Assert(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite,
kScratchReg, Operand(value));
}
- if ((remembered_set_action == OMIT_REMEMBERED_SET &&
+ if ((remembered_set_action == RememberedSetAction::kOmit &&
!FLAG_incremental_marking) ||
FLAG_disable_write_barriers) {
return;
@@ -353,7 +354,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
// catch stores of smis and stores into the young generation.
Label done;
- if (smi_check == INLINE_SMI_CHECK) {
+ if (smi_check == SmiCheck::kInline) {
DCHECK_EQ(0, kSmiTag);
JumpIfSmi(value, &done);
}
@@ -378,7 +379,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
li(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
li(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
}
@@ -393,6 +394,10 @@ void TurboAssembler::Add32(Register rd, Register rs, const Operand& rt) {
} else {
if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
addiw(rd, rs, static_cast<int32_t>(rt.immediate()));
+ } else if ((-4096 <= rt.immediate() && rt.immediate() <= -2049) ||
+ (2048 <= rt.immediate() && rt.immediate() <= 4094)) {
+ addiw(rd, rs, rt.immediate() / 2);
+ addiw(rd, rd, rt.immediate() - (rt.immediate() / 2));
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
@@ -409,6 +414,10 @@ void TurboAssembler::Add64(Register rd, Register rs, const Operand& rt) {
} else {
if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
addi(rd, rs, static_cast<int32_t>(rt.immediate()));
+ } else if ((-4096 <= rt.immediate() && rt.immediate() <= -2049) ||
+ (2048 <= rt.immediate() && rt.immediate() <= 4094)) {
+ addi(rd, rs, rt.immediate() / 2);
+ addi(rd, rd, rt.immediate() - (rt.immediate() / 2));
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
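Note: the new Add32/Add64 branches synthesize immediates just outside the int12 range with two addis instead of a full li sequence; imm/2 and imm - imm/2 both stay within [-2048, 2047] for every value the guard admits. A quick check (plain C++, hypothetical helper names):

    #include <cassert>
    #include <cstdint>

    bool IsInt12(int64_t v) { return v >= -2048 && v <= 2047; }

    void CheckTwoAddiSplit(int64_t imm) {
      // Guard from Add64 above: [-4096, -2049] or [2048, 4094].
      assert((imm >= -4096 && imm <= -2049) || (imm >= 2048 && imm <= 4094));
      int64_t first = imm / 2;        // first addi immediate
      int64_t second = imm - first;   // second addi immediate
      assert(IsInt12(first) && IsInt12(second));  // e.g. 4094 -> 2047 + 2047
    }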
@@ -429,6 +438,10 @@ void TurboAssembler::Sub32(Register rd, Register rs, const Operand& rt) {
addiw(rd, rs,
static_cast<int32_t>(
-rt.immediate())); // No subiw instr, use addiw(x, y, -imm).
+ } else if ((-4096 <= -rt.immediate() && -rt.immediate() <= -2049) ||
+ (2048 <= -rt.immediate() && -rt.immediate() <= 4094)) {
+ addiw(rd, rs, -rt.immediate() / 2);
+ addiw(rd, rd, -rt.immediate() - (-rt.immediate() / 2));
} else {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -452,6 +465,10 @@ void TurboAssembler::Sub64(Register rd, Register rs, const Operand& rt) {
addi(rd, rs,
static_cast<int32_t>(
-rt.immediate())); // No subi instr, use addi(x, y, -imm).
+ } else if ((-4096 <= -rt.immediate() && -rt.immediate() <= -2049) ||
+ (2048 <= -rt.immediate() && -rt.immediate() <= 4094)) {
+ addi(rd, rs, -rt.immediate() / 2);
+ addi(rd, rd, -rt.immediate() - (-rt.immediate() / 2));
} else {
int li_count = InstrCountForLi64Bit(rt.immediate());
int li_neg_count = InstrCountForLi64Bit(-rt.immediate());
@@ -884,6 +901,7 @@ void TurboAssembler::Sll64(Register rd, Register rs, const Operand& rt) {
void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
+ BlockTrampolinePoolScope block_trampoline_pool(this);
if (rt.is_reg()) {
negw(scratch, rt.rm());
sllw(scratch, rs, scratch);
@@ -908,6 +926,7 @@ void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) {
void TurboAssembler::Dror(Register rd, Register rs, const Operand& rt) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
+ BlockTrampolinePoolScope block_trampoline_pool(this);
if (rt.is_reg()) {
negw(scratch, rt.rm());
sll(scratch, rs, scratch);
@@ -928,9 +947,10 @@ void TurboAssembler::Dror(Register rd, Register rs, const Operand& rt) {
}
void TurboAssembler::CalcScaledAddress(Register rd, Register rt, Register rs,
- uint8_t sa, Register scratch) {
+ uint8_t sa) {
DCHECK(sa >= 1 && sa <= 31);
- Register tmp = rd == rt ? scratch : rd;
+ UseScratchRegisterScope temps(this);
+ Register tmp = rd == rt ? temps.Acquire() : rd;
DCHECK(tmp != rt);
slli(tmp, rs, sa);
Add64(rd, rt, tmp);
@@ -1215,8 +1235,9 @@ void TurboAssembler::Uld(Register rd, const MemOperand& rs) {
// Load consequent 32-bit word pair in 64-bit reg. and put first word in low
// bits,
// second word in high bits.
-void MacroAssembler::LoadWordPair(Register rd, const MemOperand& rs,
- Register scratch) {
+void MacroAssembler::LoadWordPair(Register rd, const MemOperand& rs) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
Lwu(rd, rs);
Lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
slli(scratch, scratch, 32);
@@ -1228,8 +1249,9 @@ void TurboAssembler::Usd(Register rd, const MemOperand& rs) {
}
// Do 64-bit store as two consequent 32-bit stores to unaligned address.
-void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs,
- Register scratch) {
+void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
Sw(rd, rs);
srai(scratch, rd, 32);
Sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
@@ -1464,7 +1486,7 @@ void TurboAssembler::li(Register rd, Operand j, LiFlags mode) {
UseScratchRegisterScope temps(this);
int count = li_estimate(j.immediate(), temps.hasAvailable());
int reverse_count = li_estimate(~j.immediate(), temps.hasAvailable());
- if (!FLAG_disable_riscv_constant_pool && count >= 4 && reverse_count >= 4) {
+ if (FLAG_riscv_constant_pool && count >= 4 && reverse_count >= 4) {
// Ld a Address from a constant pool.
RecordEntry((uint64_t)j.immediate(), j.rmode());
auipc(rd, 0);
@@ -1864,6 +1886,28 @@ void TurboAssembler::RoundHelper(FPURegister dst, FPURegister src,
fmv_s(dst, src);
}
}
+ {
+ Label not_NaN;
+ UseScratchRegisterScope temps2(this);
+ Register scratch = temps2.Acquire();
+ // According to the wasm spec
+ // (https://webassembly.github.io/spec/core/exec/numerics.html#aux-nans)
+ // if input is canonical NaN, then output is canonical NaN, and if input is
+ // any other NaN, then output is any NaN with the most significant bit of
+ // the payload set to 1. In RISC-V, feq_d sets scratch to 0 if src is a NaN.
+ // If src is not a NaN, branch to the label and do nothing; if it is,
+ // fmin_d will set dst to the canonical NaN.
+ if (std::is_same<F, double>::value) {
+ feq_d(scratch, src, src);
+ bnez(scratch, &not_NaN);
+ fmin_d(dst, src, src);
+ } else {
+ feq_s(scratch, src, src);
+ bnez(scratch, &not_NaN);
+ fmin_s(dst, src, src);
+ }
+ bind(&not_NaN);
+ }
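Note: the canonical NaN referenced in this comment has only the most significant payload bit set, 0x7ff8000000000000 for doubles. A host-side sketch of that bit pattern (standard C++, independent of the fmin_d trick above):

    #include <cstdint>
    #include <cstring>

    double CanonicalNaN() {
      uint64_t bits = 0x7ff8000000000000ULL;  // quiet bit set, payload zero
      double d;
      std::memcpy(&d, &bits, sizeof(d));
      return d;  // d != d holds: it is a NaN
    }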
// If real exponent (i.e., t6 - kFloatExponentBias) is greater than
// kFloat32MantissaBits, it means the floating-point value has no fractional
@@ -2030,8 +2074,8 @@ void TurboAssembler::CompareF64(Register rd, FPUCondition cc, FPURegister cmp1,
}
}
-void TurboAssembler::CompareIsNanF32(Register rd, FPURegister cmp1,
- FPURegister cmp2) {
+void TurboAssembler::CompareIsNotNanF32(Register rd, FPURegister cmp1,
+ FPURegister cmp2) {
UseScratchRegisterScope temps(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
Register scratch = temps.Acquire();
@@ -2039,11 +2083,10 @@ void TurboAssembler::CompareIsNanF32(Register rd, FPURegister cmp1,
feq_s(rd, cmp1, cmp1); // rd <- !isNan(cmp1)
feq_s(scratch, cmp2, cmp2); // scratch <- !isNaN(cmp2)
And(rd, rd, scratch); // rd <- !isNan(cmp1) && !isNan(cmp2)
- Xor(rd, rd, 1); // rd <- isNan(cmp1) || isNan(cmp2)
}
-void TurboAssembler::CompareIsNanF64(Register rd, FPURegister cmp1,
- FPURegister cmp2) {
+void TurboAssembler::CompareIsNotNanF64(Register rd, FPURegister cmp1,
+ FPURegister cmp2) {
UseScratchRegisterScope temps(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
Register scratch = temps.Acquire();
@@ -2051,7 +2094,18 @@ void TurboAssembler::CompareIsNanF64(Register rd, FPURegister cmp1,
feq_d(rd, cmp1, cmp1); // rd <- !isNan(cmp1)
feq_d(scratch, cmp2, cmp2); // scratch <- !isNaN(cmp2)
And(rd, rd, scratch); // rd <- !isNan(cmp1) && !isNan(cmp2)
- Xor(rd, rd, 1); // rd <- isNan(cmp1) || isNan(cmp2)
+}
+
+void TurboAssembler::CompareIsNanF32(Register rd, FPURegister cmp1,
+ FPURegister cmp2) {
+ CompareIsNotNanF32(rd, cmp1, cmp2); // rd <- !isNan(cmp1) && !isNan(cmp2)
+ Xor(rd, rd, 1); // rd <- isNan(cmp1) || isNan(cmp2)
+}
+
+void TurboAssembler::CompareIsNanF64(Register rd, FPURegister cmp1,
+ FPURegister cmp2) {
+ CompareIsNotNanF64(rd, cmp1, cmp2); // rd <- !isNan(cmp1) && !isNan(cmp2)
+ Xor(rd, rd, 1); // rd <- isNan(cmp1) || isNan(cmp2)
}
void TurboAssembler::BranchTrueShortF(Register rs, Label* target) {
@@ -2949,9 +3003,20 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
bool target_is_isolate_independent_builtin =
isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index);
-
- if (root_array_available_ && options().isolate_independent_code &&
- target_is_isolate_independent_builtin) {
+ if (target_is_isolate_independent_builtin &&
+ options().use_pc_relative_calls_and_jumps) {
+ int32_t code_target_index = AddCodeTarget(code);
+ Label skip;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (cond != al) {
+ Branch(&skip, NegateCondition(cond), rs, rt);
+ }
+ RecordRelocInfo(RelocInfo::RELATIVE_CODE_TARGET);
+ GenPCRelativeJump(t6, code_target_index);
+ bind(&skip);
+ return;
+ } else if (root_array_available_ && options().isolate_independent_code &&
+ target_is_isolate_independent_builtin) {
int offset = code->builtin_index() * kSystemPointerSize +
IsolateData::builtin_entry_table_offset();
Ld(t6, MemOperand(kRootRegister, offset));
@@ -3017,8 +3082,22 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
bool target_is_isolate_independent_builtin =
isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index);
- if (root_array_available_ && options().isolate_independent_code &&
- target_is_isolate_independent_builtin) {
+ if (target_is_isolate_independent_builtin &&
+ options().use_pc_relative_calls_and_jumps) {
+ int32_t code_target_index = AddCodeTarget(code);
+ Label skip;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ RecordCommentForOffHeapTrampoline(builtin_index);
+ if (cond != al) {
+ Branch(&skip, NegateCondition(cond), rs, rt);
+ }
+ RecordRelocInfo(RelocInfo::RELATIVE_CODE_TARGET);
+ GenPCRelativeJumpAndLink(t6, code_target_index);
+ bind(&skip);
+ RecordComment("]");
+ return;
+ } else if (root_array_available_ && options().isolate_independent_code &&
+ target_is_isolate_independent_builtin) {
int offset = code->builtin_index() * kSystemPointerSize +
IsolateData::builtin_entry_table_offset();
LoadRootRelative(t6, offset);
@@ -3059,6 +3138,46 @@ void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
Call(builtin_index);
}
+void TurboAssembler::CallBuiltin(int builtin_index) {
+ DCHECK(Builtins::IsBuiltinId(builtin_index));
+ RecordCommentForOffHeapTrampoline(builtin_index);
+ CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+ EmbeddedData d = EmbeddedData::FromBlob(isolate());
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ if (options().short_builtin_calls) {
+ Call(entry, RelocInfo::RUNTIME_ENTRY);
+ } else {
+ Call(entry, RelocInfo::OFF_HEAP_TARGET);
+ }
+ RecordComment("]");
+}
+
+void TurboAssembler::TailCallBuiltin(int builtin_index) {
+ DCHECK(Builtins::IsBuiltinId(builtin_index));
+ RecordCommentForOffHeapTrampoline(builtin_index);
+ CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+ EmbeddedData d = EmbeddedData::FromBlob(isolate());
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ if (options().short_builtin_calls) {
+ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+ } else {
+ Jump(entry, RelocInfo::OFF_HEAP_TARGET);
+ }
+ RecordComment("]");
+}
+
+void TurboAssembler::LoadEntryFromBuiltinIndex(Builtins::Name builtin_index,
+ Register destination) {
+ Ld(destination, EntryFromBuiltinIndexAsOperand(builtin_index));
+}
+
+MemOperand TurboAssembler::EntryFromBuiltinIndexAsOperand(
+ Builtins::Name builtin_index) {
+ DCHECK(root_array_available());
+ return MemOperand(kRootRegister,
+ IsolateData::builtin_entry_slot_offset(builtin_index));
+}
+
void TurboAssembler::PatchAndJump(Address target) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -3115,16 +3234,28 @@ void TurboAssembler::Ret(Condition cond, Register rs, const Operand& rt) {
}
}
+void TurboAssembler::GenPCRelativeJump(Register rd, int64_t imm32) {
+ DCHECK(is_int32(imm32));
+ int32_t Hi20 = (((int32_t)imm32 + 0x800) >> 12);
+ int32_t Lo12 = (int32_t)imm32 << 20 >> 20;
+ auipc(rd, Hi20); // Read PC + Hi20 into scratch.
+ jr(rd, Lo12); // jump PC + Hi20 + Lo12
+}
+
+void TurboAssembler::GenPCRelativeJumpAndLink(Register rd, int64_t imm32) {
+ DCHECK(is_int32(imm32));
+ int32_t Hi20 = (((int32_t)imm32 + 0x800) >> 12);
+ int32_t Lo12 = (int32_t)imm32 << 20 >> 20;
+ auipc(rd, Hi20); // Read PC + Hi20 into scratch.
+ jalr(rd, Lo12); // jump PC + Hi20 + Lo12
+}
+
void TurboAssembler::BranchLong(Label* L) {
// Generate position independent long branch.
BlockTrampolinePoolScope block_trampoline_pool(this);
int64_t imm64;
imm64 = branch_long_offset(L);
- DCHECK(is_int32(imm64));
- int32_t Hi20 = (((int32_t)imm64 + 0x800) >> 12);
- int32_t Lo12 = (int32_t)imm64 << 20 >> 20;
- auipc(t6, Hi20); // Read PC + Hi20 into scratch.
- jr(t6, Lo12); // jump PC + Hi20 + Lo12
+ GenPCRelativeJump(t6, imm64);
EmitConstPoolWithJumpIfNeeded();
}
@@ -3133,11 +3264,7 @@ void TurboAssembler::BranchAndLinkLong(Label* L) {
BlockTrampolinePoolScope block_trampoline_pool(this);
int64_t imm64;
imm64 = branch_long_offset(L);
- DCHECK(is_int32(imm64));
- int32_t Hi20 = (((int32_t)imm64 + 0x800) >> 12);
- int32_t Lo12 = (int32_t)imm64 << 20 >> 20;
- auipc(t6, Hi20); // Read PC + Hi20 into scratch.
- jalr(t6, Lo12); // jump PC + Hi20 + Lo12 and read PC + 4 to ra
+ GenPCRelativeJumpAndLink(t6, imm64);
}
void TurboAssembler::DropAndRet(int drop) {
@@ -3251,14 +3378,6 @@ void TurboAssembler::Push(Handle<HeapObject> handle) {
push(scratch);
}
-void MacroAssembler::MaybeDropFrames() {
- // Check whether we need to drop frames to restart a function on the stack.
- li(a1, ExternalReference::debug_restart_fp_address(isolate()));
- Ld(a1, MemOperand(a1));
- Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET,
- ne, a1, Operand(zero_reg));
-}
-
// ---------------------------------------------------------------------------
// Exception handling.
@@ -3294,16 +3413,10 @@ void MacroAssembler::PopStackHandler() {
void TurboAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
const DoubleRegister src) {
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- Label NotNaN;
-
- fmv_d(dst, src);
- feq_d(scratch, src, src);
- bne(scratch, zero_reg, &NotNaN);
- RV_li(scratch, 0x7ff8000000000000ULL); // This is the canonical NaN
- fmv_d_x(dst, scratch);
- bind(&NotNaN);
+ // Subtracting 0.0 preserves all inputs except for signalling NaNs, which
+ // become quiet NaNs. We use fsub rather than fadd because fsub preserves -0.0
+ // inputs: -0.0 + 0.0 = 0.0, but -0.0 - 0.0 = -0.0.
+ fsub_d(dst, src, kDoubleRegZero);
}
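The replacement leans on two IEEE 754 facts: any arithmetic operation quiets a signaling NaN, and under round-to-nearest (-0.0) - (+0.0) keeps the sign while (-0.0) + (+0.0) would flip it to +0.0. A standalone host-side check of both properties, plain C++ rather than V8 API:

  #include <cassert>
  #include <cmath>
  #include <cstdint>
  #include <cstring>
  #include <limits>

  uint64_t Bits(double d) {
    uint64_t b;
    std::memcpy(&b, &d, sizeof b);
    return b;
  }

  int main() {
    // fsub preserves the sign of zero; fadd would not: -0.0 + 0.0 == +0.0.
    assert(std::signbit(-0.0 - 0.0));
    assert(!std::signbit(-0.0 + 0.0));
    // An arithmetic op on a signaling NaN yields a quiet NaN (bit 51 set),
    // matching the 0x7ff8000000000000 canonical pattern the old code loaded.
    volatile double snan = std::numeric_limits<double>::signaling_NaN();
    assert((Bits(snan - 0.0) & (1ULL << 51)) != 0);
    return 0;
  }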
void TurboAssembler::MovFromFloatResult(const DoubleRegister dst) {
@@ -3414,7 +3527,7 @@ void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch1,
void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count,
- Label* done, InvokeFlag flag) {
+ Label* done, InvokeType type) {
Label regular_invoke;
// a0: actual arguments count
@@ -3524,9 +3637,9 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count,
Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
DCHECK_EQ(function, a1);
DCHECK_IMPLIES(new_target.is_valid(), new_target == a3);
@@ -3540,17 +3653,19 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
}
Label done;
- InvokePrologue(expected_parameter_count, actual_parameter_count, &done, flag);
+ InvokePrologue(expected_parameter_count, actual_parameter_count, &done, type);
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
Register code = kJavaScriptCallCodeStartRegister;
Ld(code, FieldMemOperand(function, JSFunction::kCodeOffset));
- if (flag == CALL_FUNCTION) {
- CallCodeObject(code);
- } else {
- DCHECK(flag == JUMP_FUNCTION);
- JumpCodeObject(code);
+ switch (type) {
+ case InvokeType::kCall:
+ CallCodeObject(code);
+ break;
+ case InvokeType::kJump:
+ JumpCodeObject(code);
+ break;
}
// Continue here if InvokePrologue does handle the invocation due to
@@ -3560,9 +3675,9 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
void MacroAssembler::InvokeFunctionWithNewTarget(
Register function, Register new_target, Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
// Contract with called JS functions requires that function is passed in a1.
DCHECK_EQ(function, a1);
@@ -3576,15 +3691,15 @@ void MacroAssembler::InvokeFunctionWithNewTarget(
SharedFunctionInfo::kFormalParameterCountOffset));
InvokeFunctionCode(a1, new_target, expected_parameter_count,
- actual_parameter_count, flag);
+ actual_parameter_count, type);
}
void MacroAssembler::InvokeFunction(Register function,
Register expected_parameter_count,
Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
// Contract with called JS functions requires that function is passed in a1.
DCHECK_EQ(function, a1);
@@ -3593,7 +3708,7 @@ void MacroAssembler::InvokeFunction(Register function,
Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
InvokeFunctionCode(a1, no_reg, expected_parameter_count,
- actual_parameter_count, flag);
+ actual_parameter_count, type);
}
// ---------------------------------------------------------------------------
@@ -3734,15 +3849,15 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame) {
PrepareCEntryFunction(builtin);
- Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
- kArgvOnStack, builtin_exit_frame);
+ Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
+ ArgvMode::kStack, builtin_exit_frame);
Jump(code, RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg));
}
void MacroAssembler::JumpToInstructionStream(Address entry) {
 // Load the address from the constant pool when the pool is enabled;
 // otherwise materialize it inline with li.
- if (FLAG_disable_riscv_constant_pool) {
+ if (!FLAG_riscv_constant_pool) {
li(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
} else {
RecordEntry(entry, RelocInfo::OFF_HEAP_TARGET);
@@ -3795,7 +3910,7 @@ void TurboAssembler::DebugBreak() { stop(); }
void TurboAssembler::Assert(Condition cc, AbortReason reason, Register rs,
Operand rt) {
- if (emit_debug_code()) Check(cc, reason, rs, rt);
+ if (FLAG_debug_code) Check(cc, reason, rs, rt);
}
void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs,
@@ -3810,11 +3925,11 @@ void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs,
void TurboAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
-#ifdef DEBUG
- const char* msg = GetAbortReason(reason);
- RecordComment("Abort message: ");
- RecordComment(msg);
-#endif
+ if (FLAG_code_comments) {
+ const char* msg = GetAbortReason(reason);
+ RecordComment("Abort message: ");
+ RecordComment(msg);
+ }
// Avoid emitting call to builtin if requested.
if (trap_on_abort()) {
@@ -3882,19 +3997,12 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
BlockTrampolinePoolScope block_trampoline_pool(this);
- int stack_offset = -3 * kPointerSize;
- const int fp_offset = 1 * kPointerSize;
- addi(sp, sp, stack_offset);
- stack_offset = -stack_offset - kPointerSize;
- Sd(ra, MemOperand(sp, stack_offset));
- stack_offset -= kPointerSize;
- Sd(fp, MemOperand(sp, stack_offset));
- stack_offset -= kPointerSize;
- li(scratch, Operand(StackFrame::TypeToMarker(type)));
- Sd(scratch, MemOperand(sp, stack_offset));
- // Adjust FP to point to saved FP.
- DCHECK_EQ(stack_offset, 0);
- Add64(fp, sp, Operand(fp_offset));
+ Push(ra, fp);
+ Move(fp, sp);
+ if (!StackFrame::IsJavaScript(type)) {
+ li(scratch, Operand(StackFrame::TypeToMarker(type)));
+ Push(scratch);
+ }
}
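The rewritten prologue produces the conventional layout: fp points at the saved fp, the saved ra sits one slot above it, and the type marker, pushed only for non-JavaScript frames, sits one slot below. Sketched as offsets from the new fp:

  After EnterFrame(type):
    fp + 8 : saved ra
    fp + 0 : saved fp                 <- fp
    fp - 8 : TypeToMarker(type)       <- sp (marker skipped for JavaScript
                                         frames, in which case sp == fp)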
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
@@ -3935,7 +4043,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
// Set up new frame pointer.
addi(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp);
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Sd(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
@@ -4011,11 +4119,12 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
Ld(cp, MemOperand(scratch));
-#ifdef DEBUG
- li(scratch,
- ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
- Sd(a3, MemOperand(scratch));
-#endif
+ if (FLAG_debug_code) {
+ UseScratchRegisterScope temp(this);
+ Register scratch2 = temp.Acquire();
+ li(scratch2, Operand(Context::kInvalidContext));
+ Sd(scratch2, MemOperand(scratch));
+ }
// Pop the arguments, restore registers, and return.
mv(sp, fp); // Respect ABI stack constraint.
@@ -4026,7 +4135,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
if (argument_count_is_length) {
add(sp, sp, argument_count);
} else {
- CalcScaledAddress(sp, sp, argument_count, kPointerSizeLog2, scratch);
+ CalcScaledAddress(sp, sp, argument_count, kPointerSizeLog2);
}
}
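Dropping the scratch parameter is safe because CalcScaledAddress now manages its own temporary, consistent with the JumpIfSmi change later in this patch; its effect is unchanged: rd = rs + (rt << sa). A host-side equivalent of the call above, which pops argument_count pointer-sized slots:

  #include <cstdint>

  // Mirrors CalcScaledAddress(sp, sp, argument_count, kPointerSizeLog2):
  // rd = rs + (rt << sa).
  constexpr intptr_t CalcScaledAddress(intptr_t rs, intptr_t rt, unsigned sa) {
    return rs + (rt << sa);
  }

  static_assert(CalcScaledAddress(1024, 3, 3) == 1048,
                "three 8-byte argument slots popped off the stack");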
@@ -4054,7 +4163,7 @@ int TurboAssembler::ActivationFrameAlignment() {
}
void MacroAssembler::AssertStackIsAligned() {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
const int frame_alignment = ActivationFrameAlignment();
const int frame_alignment_mask = frame_alignment - 1;
@@ -4084,22 +4193,24 @@ void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
}
}
-void TurboAssembler::JumpIfSmi(Register value, Label* smi_label,
- Register scratch) {
+void TurboAssembler::JumpIfSmi(Register value, Label* smi_label) {
DCHECK_EQ(0, kSmiTag);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
andi(scratch, value, kSmiTagMask);
Branch(smi_label, eq, scratch, Operand(zero_reg));
}
-void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label,
- Register scratch) {
+void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
DCHECK_EQ(0, kSmiTag);
andi(scratch, value, kSmiTagMask);
Branch(not_smi_label, ne, scratch, Operand(zero_reg));
}
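Both branches reduce to a single andi against kSmiTagMask, because smis leave the low tag bit clear. A standalone sketch of the predicate, assuming the usual kSmiTag == 0 and kSmiTagMask == 1 (the one-bit shift below is illustrative; V8's actual smi shift on 64-bit targets is wider):

  #include <cstdint>

  constexpr intptr_t kSmiTag = 0;
  constexpr intptr_t kSmiTagMask = 1;

  // Mirrors "andi scratch, value, kSmiTagMask" followed by a branch on zero.
  constexpr bool IsSmi(intptr_t tagged) {
    return (tagged & kSmiTagMask) == kSmiTag;
  }

  static_assert(IsSmi(42 << 1), "a smi keeps its payload above the clear tag bit");
  static_assert(!IsSmi((42 << 1) | 1), "heap object pointers carry a set low bit");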
void MacroAssembler::AssertNotSmi(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
DCHECK(object != kScratchReg);
andi(kScratchReg, object, kSmiTagMask);
@@ -4108,7 +4219,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
void MacroAssembler::AssertSmi(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
DCHECK(object != kScratchReg);
andi(kScratchReg, object, kSmiTagMask);
@@ -4117,7 +4228,7 @@ void MacroAssembler::AssertSmi(Register object) {
}
void MacroAssembler::AssertConstructor(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
DCHECK(object != kScratchReg);
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
@@ -4134,7 +4245,7 @@ void MacroAssembler::AssertConstructor(Register object) {
}
void MacroAssembler::AssertFunction(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
DCHECK(object != kScratchReg);
@@ -4151,7 +4262,7 @@ void MacroAssembler::AssertFunction(Register object) {
}
void MacroAssembler::AssertBoundFunction(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
DCHECK(object != kScratchReg);
@@ -4165,7 +4276,7 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
void MacroAssembler::AssertGeneratorObject(Register object) {
- if (!emit_debug_code()) return;
+ if (!FLAG_debug_code) return;
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
DCHECK(object != kScratchReg);
@@ -4193,7 +4304,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Label done_checking;
AssertNotSmi(object);
LoadRoot(scratch, RootIndex::kUndefinedValue);
@@ -4229,11 +4340,11 @@ void TurboAssembler::FloatMinMaxHelper(FPURegister dst, FPURegister src1,
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
if (std::is_same<float, F_TYPE>::value) {
- CompareIsNanF32(scratch, src1, src2);
+ CompareIsNotNanF32(scratch, src1, src2);
} else {
- CompareIsNanF64(scratch, src1, src2);
+ CompareIsNotNanF64(scratch, src1, src2);
}
- BranchTrueF(scratch, &nan);
+ BranchFalseF(scratch, &nan);
if (kind == MaxMinKind::kMax) {
if (std::is_same<float, F_TYPE>::value) {
@@ -4330,11 +4441,9 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
void TurboAssembler::CallCFunction(ExternalReference function,
int num_reg_arguments,
int num_double_arguments) {
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
BlockTrampolinePoolScope block_trampoline_pool(this);
- li(scratch, function);
- CallCFunctionHelper(scratch, num_reg_arguments, num_double_arguments);
+ li(t6, function);
+ CallCFunctionHelper(t6, num_reg_arguments, num_double_arguments);
}
void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
@@ -4363,7 +4472,7 @@ void TurboAssembler::CallCFunctionHelper(Register function,
// PrepareCallCFunction.
#if V8_HOST_ARCH_RISCV64
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
@@ -4387,12 +4496,9 @@ void TurboAssembler::CallCFunctionHelper(Register function,
// allow preemption, so the return address in the link register
// stays correct.
{
- UseScratchRegisterScope temps(this);
- Register func_scratch = temps.Acquire();
- BlockTrampolinePoolScope block_trampoline_pool(this);
- if (function != func_scratch) {
- mv(func_scratch, function);
- function = func_scratch;
+ if (function != t6) {
+ mv(t6, function);
+ function = t6;
}
// Save the frame pointer and PC so that the stack layout remains
@@ -4401,7 +4507,6 @@ void TurboAssembler::CallCFunctionHelper(Register function,
// 't' registers are caller-saved so this is safe as a scratch register.
Register pc_scratch = t1;
Register scratch = t2;
- DCHECK(!AreAliased(pc_scratch, scratch, function));
auipc(pc_scratch, 0);
// TODO(RISCV): Does this need an offset? It seems like this should be the
@@ -4494,12 +4599,10 @@ void TurboAssembler::ResetSpeculationPoisonRegister() {
void TurboAssembler::CallForDeoptimization(Builtins::Name target, int,
Label* exit, DeoptimizeKind kind,
Label* ret, Label*) {
- UseScratchRegisterScope temps(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
- Register scratch = temps.Acquire();
- Ld(scratch,
+ Ld(t6,
MemOperand(kRootRegister, IsolateData::builtin_entry_slot_offset(target)));
- Call(scratch);
+ Call(t6);
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
(kind == DeoptimizeKind::kLazy)
? Deoptimizer::kLazyDeoptExitSize
diff --git a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
index b260f1c200..81e5565606 100644
--- a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
+++ b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
@@ -12,6 +12,7 @@
#include "src/codegen/assembler.h"
#include "src/codegen/riscv64/assembler-riscv64.h"
#include "src/common/globals.h"
+#include "src/objects/tagged-index.h"
namespace v8 {
namespace internal {
@@ -53,8 +54,6 @@ enum LiFlags {
ADDRESS_LOAD = 2
};
-enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
-enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
@@ -166,6 +165,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Compare double, if any operand is NaN, result is false except for NE
void CompareF64(Register rd, FPUCondition cc, FPURegister cmp1,
FPURegister cmp2);
+ void CompareIsNotNanF32(Register rd, FPURegister cmp1, FPURegister cmp2);
+ void CompareIsNotNanF64(Register rd, FPURegister cmp1, FPURegister cmp2);
void CompareIsNanF32(Register rd, FPURegister cmp1, FPURegister cmp2);
void CompareIsNanF64(Register rd, FPURegister cmp1, FPURegister cmp2);
@@ -187,6 +188,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
li(rd, Operand(j), mode);
}
+ inline void Move(Register output, MemOperand operand) { Ld(output, operand); }
+
void li(Register dst, Handle<HeapObject> value, LiFlags mode = OPTIMIZE_SIZE);
void li(Register dst, ExternalReference value, LiFlags mode = OPTIMIZE_SIZE);
void li(Register dst, const StringConstantBase* string,
@@ -197,6 +200,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
void LoadRootRelative(Register destination, int32_t offset) override;
+ inline void GenPCRelativeJump(Register rd, int64_t imm32);
+ inline void GenPCRelativeJumpAndLink(Register rd, int64_t imm32);
// Jump, Call, and Ret pseudo instructions implementing inter-working.
#define COND_ARGS \
Condition cond = al, Register rs = zero_reg, \
@@ -223,7 +228,20 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Load the builtin given by the Smi in |builtin_index| into the same
// register.
void LoadEntryFromBuiltinIndex(Register builtin_index);
+ void LoadEntryFromBuiltinIndex(Builtins::Name builtin_index,
+ Register destination);
+ MemOperand EntryFromBuiltinIndexAsOperand(Builtins::Name builtin_index);
void CallBuiltinByIndex(Register builtin_index) override;
+ void CallBuiltin(Builtins::Name builtin) {
+ // TODO(11527): drop the int overload in favour of the Builtins::Name one.
+ return CallBuiltin(static_cast<int>(builtin));
+ }
+ void CallBuiltin(int builtin_index);
+ void TailCallBuiltin(Builtins::Name builtin) {
+ // TODO(11527): drop the int overload in favour of the Builtins::Name one.
+ return TailCallBuiltin(static_cast<int>(builtin));
+ }
+ void TailCallBuiltin(int builtin_index);
void LoadCodeObjectEntry(Register destination, Register code_object) override;
void CallCodeObject(Register code_object) override;
@@ -799,7 +817,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Ceil_s_s(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
 // Jump if the register contains a smi.
- void JumpIfSmi(Register value, Label* smi_label, Register scratch = t3);
+ void JumpIfSmi(Register value, Label* smi_label);
void JumpIfEqual(Register a, int32_t b, Label* dest) {
Branch(dest, eq, a, Operand(b));
@@ -816,8 +834,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
static int ActivationFrameAlignment();
 // Calculate the scaled address (rd) as rs + (rt << sa).
- void CalcScaledAddress(Register rd, Register rs, Register rt, uint8_t sa,
- Register scratch = t3);
+ void CalcScaledAddress(Register rd, Register rs, Register rt, uint8_t sa);
// Compute the start of the generated instruction stream from the current PC.
// This is an alternative to embedding the {CodeObject} handle as a reference.
@@ -953,8 +970,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void RecordWriteField(
Register object, int offset, Register value, Register scratch,
RAStatus ra_status, SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
// For a given |object| notify the garbage collector that the slot |address|
// has been written. |value| is the object being stored. The value and
@@ -962,16 +979,16 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void RecordWrite(
Register object, Register address, Register value, RAStatus ra_status,
SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
// void Pref(int32_t hint, const MemOperand& rs);
// ---------------------------------------------------------------------------
// Pseudo-instructions.
- void LoadWordPair(Register rd, const MemOperand& rs, Register scratch = t3);
- void StoreWordPair(Register rd, const MemOperand& rs, Register scratch = t3);
+ void LoadWordPair(Register rd, const MemOperand& rs);
+ void StoreWordPair(Register rd, const MemOperand& rs);
void Madd_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
void Madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
@@ -1011,7 +1028,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Invoke the JavaScript function code by either calling or jumping.
void InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count,
- Register actual_parameter_count, InvokeFlag flag);
+ Register actual_parameter_count, InvokeType type);
// On function call, call into the debugger if necessary.
void CheckDebugHook(Register fun, Register new_target,
@@ -1022,12 +1039,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// current context to the context in the function before invoking.
void InvokeFunctionWithNewTarget(Register function, Register new_target,
Register actual_parameter_count,
- InvokeFlag flag);
+ InvokeType type);
void InvokeFunction(Register function, Register expected_parameter_count,
- Register actual_parameter_count, InvokeFlag flag);
-
- // Frame restart support.
- void MaybeDropFrames();
+ Register actual_parameter_count, InvokeType type);
// Exception handling.
@@ -1051,18 +1065,18 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Call a runtime routine.
void CallRuntime(const Runtime::Function* f, int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore);
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, save_doubles);
}
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid, int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
}
@@ -1131,8 +1145,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
}
// Jump if the register contains a non-smi.
- void JumpIfNotSmi(Register value, Label* not_smi_label,
- Register scratch = t3);
+ void JumpIfNotSmi(Register value, Label* not_smi_label);
// Abort execution if argument is a smi, enabled via --debug-code.
void AssertNotSmi(Register object);
@@ -1170,7 +1183,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Helper functions for generating invokes.
void InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count, Label* done,
- InvokeFlag flag);
+ InvokeType type);
// Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code);
diff --git a/deps/v8/src/codegen/riscv64/register-riscv64.h b/deps/v8/src/codegen/riscv64/register-riscv64.h
index b97594becd..4aacad611d 100644
--- a/deps/v8/src/codegen/riscv64/register-riscv64.h
+++ b/deps/v8/src/codegen/riscv64/register-riscv64.h
@@ -337,7 +337,7 @@ constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
constexpr Register kJavaScriptCallNewTargetRegister = a3;
constexpr Register kJavaScriptCallExtraArg1Register = a2;
-constexpr Register kOffHeapTrampolineRegister = t3;
+constexpr Register kOffHeapTrampolineRegister = t6;
constexpr Register kRuntimeCallFunctionRegister = a1;
constexpr Register kRuntimeCallArgCountRegister = a0;
constexpr Register kRuntimeCallArgvRegister = a2;