Diffstat (limited to 'deps/v8/src/codegen')
-rw-r--r-- deps/v8/src/codegen/arm/assembler-arm-inl.h | 2
-rw-r--r-- deps/v8/src/codegen/arm/assembler-arm.cc | 148
-rw-r--r-- deps/v8/src/codegen/arm/assembler-arm.h | 77
-rw-r--r-- deps/v8/src/codegen/arm/cpu-arm.cc | 2
-rw-r--r-- deps/v8/src/codegen/arm/interface-descriptors-arm-inl.h | 256
-rw-r--r-- deps/v8/src/codegen/arm/interface-descriptors-arm.cc | 306
-rw-r--r-- deps/v8/src/codegen/arm/macro-assembler-arm.cc | 98
-rw-r--r-- deps/v8/src/codegen/arm/macro-assembler-arm.h | 27
-rw-r--r-- deps/v8/src/codegen/arm64/assembler-arm64-inl.h | 8
-rw-r--r-- deps/v8/src/codegen/arm64/assembler-arm64.h | 6
-rw-r--r-- deps/v8/src/codegen/arm64/cpu-arm64.cc | 9
-rw-r--r-- deps/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h | 265
-rw-r--r-- deps/v8/src/codegen/arm64/interface-descriptors-arm64.cc | 310
-rw-r--r-- deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h | 10
-rw-r--r-- deps/v8/src/codegen/arm64/macro-assembler-arm64.cc | 112
-rw-r--r-- deps/v8/src/codegen/arm64/macro-assembler-arm64.h | 27
-rw-r--r-- deps/v8/src/codegen/arm64/register-arm64.h | 10
-rw-r--r-- deps/v8/src/codegen/assembler.cc | 4
-rw-r--r-- deps/v8/src/codegen/assembler.h | 27
-rw-r--r-- deps/v8/src/codegen/bailout-reason.h | 1
-rw-r--r-- deps/v8/src/codegen/code-factory.cc | 48
-rw-r--r-- deps/v8/src/codegen/code-factory.h | 11
-rw-r--r-- deps/v8/src/codegen/code-stub-assembler.cc | 311
-rw-r--r-- deps/v8/src/codegen/code-stub-assembler.h | 69
-rw-r--r-- deps/v8/src/codegen/compilation-cache.cc | 68
-rw-r--r-- deps/v8/src/codegen/compilation-cache.h | 33
-rw-r--r-- deps/v8/src/codegen/compiler.cc | 325
-rw-r--r-- deps/v8/src/codegen/compiler.h | 9
-rw-r--r-- deps/v8/src/codegen/constants-arch.h | 18
-rw-r--r-- deps/v8/src/codegen/cpu-features.h | 3
-rw-r--r-- deps/v8/src/codegen/external-reference-table.cc | 131
-rw-r--r-- deps/v8/src/codegen/external-reference-table.h | 33
-rw-r--r-- deps/v8/src/codegen/external-reference.cc | 8
-rw-r--r-- deps/v8/src/codegen/external-reference.h | 10
-rw-r--r-- deps/v8/src/codegen/handler-table.h | 4
-rw-r--r-- deps/v8/src/codegen/ia32/assembler-ia32.cc | 77
-rw-r--r-- deps/v8/src/codegen/ia32/assembler-ia32.h | 17
-rw-r--r-- deps/v8/src/codegen/ia32/interface-descriptors-ia32-inl.h | 267
-rw-r--r-- deps/v8/src/codegen/ia32/interface-descriptors-ia32.cc | 318
-rw-r--r-- deps/v8/src/codegen/ia32/macro-assembler-ia32.cc | 144
-rw-r--r-- deps/v8/src/codegen/ia32/macro-assembler-ia32.h | 236
-rw-r--r-- deps/v8/src/codegen/interface-descriptors-inl.h | 484
-rw-r--r-- deps/v8/src/codegen/interface-descriptors.cc | 561
-rw-r--r-- deps/v8/src/codegen/interface-descriptors.h | 1384
-rw-r--r-- deps/v8/src/codegen/machine-type.cc | 2
-rw-r--r-- deps/v8/src/codegen/machine-type.h | 29
-rw-r--r-- deps/v8/src/codegen/macro-assembler.h | 6
-rw-r--r-- deps/v8/src/codegen/mips/assembler-mips.cc | 18
-rw-r--r-- deps/v8/src/codegen/mips/assembler-mips.h | 2
-rw-r--r-- deps/v8/src/codegen/mips/interface-descriptors-mips-inl.h | 258
-rw-r--r-- deps/v8/src/codegen/mips/interface-descriptors-mips.cc | 332
-rw-r--r-- deps/v8/src/codegen/mips/macro-assembler-mips.cc | 103
-rw-r--r-- deps/v8/src/codegen/mips/macro-assembler-mips.h | 27
-rw-r--r-- deps/v8/src/codegen/mips64/assembler-mips64.cc | 18
-rw-r--r-- deps/v8/src/codegen/mips64/interface-descriptors-mips64-inl.h | 258
-rw-r--r-- deps/v8/src/codegen/mips64/interface-descriptors-mips64.cc | 332
-rw-r--r-- deps/v8/src/codegen/mips64/macro-assembler-mips64.cc | 103
-rw-r--r-- deps/v8/src/codegen/mips64/macro-assembler-mips64.h | 27
-rw-r--r-- deps/v8/src/codegen/optimized-compilation-info.cc | 16
-rw-r--r-- deps/v8/src/codegen/optimized-compilation-info.h | 31
-rw-r--r-- deps/v8/src/codegen/ppc/assembler-ppc.cc | 22
-rw-r--r-- deps/v8/src/codegen/ppc/assembler-ppc.h | 7
-rw-r--r-- deps/v8/src/codegen/ppc/constants-ppc.h | 42
-rw-r--r-- deps/v8/src/codegen/ppc/interface-descriptors-ppc-inl.h | 256
-rw-r--r-- deps/v8/src/codegen/ppc/interface-descriptors-ppc.cc | 306
-rw-r--r-- deps/v8/src/codegen/ppc/macro-assembler-ppc.cc | 279
-rw-r--r-- deps/v8/src/codegen/ppc/macro-assembler-ppc.h | 72
-rw-r--r-- deps/v8/src/codegen/register-arch.h | 21
-rw-r--r-- deps/v8/src/codegen/register.h | 4
-rw-r--r-- deps/v8/src/codegen/reloc-info.cc | 39
-rw-r--r-- deps/v8/src/codegen/reloc-info.h | 4
-rw-r--r-- deps/v8/src/codegen/riscv64/assembler-riscv64-inl.h | 32
-rw-r--r-- deps/v8/src/codegen/riscv64/assembler-riscv64.cc | 146
-rw-r--r-- deps/v8/src/codegen/riscv64/assembler-riscv64.h | 80
-rw-r--r-- deps/v8/src/codegen/riscv64/constants-riscv64.cc | 5
-rw-r--r-- deps/v8/src/codegen/riscv64/constants-riscv64.h | 15
-rw-r--r-- deps/v8/src/codegen/riscv64/interface-descriptors-riscv64-inl.h | 265
-rw-r--r-- deps/v8/src/codegen/riscv64/interface-descriptors-riscv64.cc | 313
-rw-r--r-- deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc | 367
-rw-r--r-- deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h | 59
-rw-r--r-- deps/v8/src/codegen/riscv64/register-riscv64.h | 2
-rw-r--r-- deps/v8/src/codegen/s390/assembler-s390.cc | 4
-rw-r--r-- deps/v8/src/codegen/s390/interface-descriptors-s390-inl.h | 256
-rw-r--r-- deps/v8/src/codegen/s390/interface-descriptors-s390.cc | 306
-rw-r--r-- deps/v8/src/codegen/s390/macro-assembler-s390.cc | 155
-rw-r--r-- deps/v8/src/codegen/s390/macro-assembler-s390.h | 41
-rw-r--r-- deps/v8/src/codegen/safepoint-table.cc | 58
-rw-r--r-- deps/v8/src/codegen/safepoint-table.h | 43
-rw-r--r-- deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc | 195
-rw-r--r-- deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h | 221
-rw-r--r-- deps/v8/src/codegen/signature.h | 8
-rw-r--r-- deps/v8/src/codegen/source-position-table.cc | 4
-rw-r--r-- deps/v8/src/codegen/source-position-table.h | 4
-rw-r--r-- deps/v8/src/codegen/string-constants.cc | 3
-rw-r--r-- deps/v8/src/codegen/turbo-assembler.cc | 7
-rw-r--r-- deps/v8/src/codegen/turbo-assembler.h | 33
-rw-r--r-- deps/v8/src/codegen/x64/assembler-x64.cc | 66
-rw-r--r-- deps/v8/src/codegen/x64/assembler-x64.h | 11
-rw-r--r-- deps/v8/src/codegen/x64/interface-descriptors-x64-inl.h | 258
-rw-r--r-- deps/v8/src/codegen/x64/interface-descriptors-x64.cc | 309
-rw-r--r-- deps/v8/src/codegen/x64/macro-assembler-x64.cc | 363
-rw-r--r-- deps/v8/src/codegen/x64/macro-assembler-x64.h | 178
-rw-r--r-- deps/v8/src/codegen/x64/register-x64.h | 6
103 files changed, 6412 insertions, 6289 deletions
diff --git a/deps/v8/src/codegen/arm/assembler-arm-inl.h b/deps/v8/src/codegen/arm/assembler-arm-inl.h
index 7035fa2492..f72e27703e 100644
--- a/deps/v8/src/codegen/arm/assembler-arm-inl.h
+++ b/deps/v8/src/codegen/arm/assembler-arm-inl.h
@@ -206,7 +206,7 @@ Operand::Operand(Smi value) : rmode_(RelocInfo::NONE) {
Operand::Operand(Register rm) : rm_(rm), shift_op_(LSL), shift_imm_(0) {}
void Assembler::CheckBuffer() {
- if (buffer_space() <= kGap) {
+ if (V8_UNLIKELY(buffer_space() <= kGap)) {
GrowBuffer();
}
MaybeCheckConstPool();
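
The hunk above adds a branch-prediction hint to this hot-path check. V8_UNLIKELY is V8's portability macro over __builtin_expect; a sketch of its shape (the real definition lives in include/v8config.h behind compiler-detection macros, so treat this as an approximation):

    #if defined(__GNUC__) || defined(__clang__)
    #define V8_UNLIKELY(condition) (__builtin_expect(!!(condition), 0))
    #else
    #define V8_UNLIKELY(condition) (condition)
    #endif

With the hint, the compiler can move the rare GrowBuffer() call out of line and keep the common no-grow path as straight-line code.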
diff --git a/deps/v8/src/codegen/arm/assembler-arm.cc b/deps/v8/src/codegen/arm/assembler-arm.cc
index 17a20a6f97..09c57928ff 100644
--- a/deps/v8/src/codegen/arm/assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/assembler-arm.cc
@@ -534,9 +534,8 @@ Assembler::Assembler(const AssemblerOptions& options,
: AssemblerBase(options, std::move(buffer)),
pending_32_bit_constants_(),
scratch_register_list_(ip.bit()) {
- pending_32_bit_constants_.reserve(kMinNumPendingConstants);
reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
- next_buffer_check_ = 0;
+ constant_pool_deadline_ = kMaxInt;
const_pool_blocked_nesting_ = 0;
no_const_pool_before_ = 0;
first_const_pool_32_use_ = -1;
@@ -556,7 +555,10 @@ Assembler::Assembler(const AssemblerOptions& options,
}
}
-Assembler::~Assembler() { DCHECK_EQ(const_pool_blocked_nesting_, 0); }
+Assembler::~Assembler() {
+ DCHECK_EQ(const_pool_blocked_nesting_, 0);
+ DCHECK_EQ(first_const_pool_32_use_, -1);
+}
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
SafepointTableBuilder* safepoint_table_builder,
@@ -841,7 +843,7 @@ void Assembler::target_at_put(int pos, int target_pos) {
// orr dst, dst, #target8_2 << 16
uint32_t target24 = target_pos + (Code::kHeaderSize - kHeapObjectTag);
- DCHECK(is_uint24(target24));
+ CHECK(is_uint24(target24));
if (is_uint8(target24)) {
// If the target fits in a byte then only patch with a mov
// instruction.
@@ -897,7 +899,7 @@ void Assembler::target_at_put(int pos, int target_pos) {
instr &= ~kImm24Mask;
}
int imm24 = imm26 >> 2;
- DCHECK(is_int24(imm24));
+ CHECK(is_int24(imm24));
instr_at_put(pos, instr | (imm24 & kImm24Mask));
}
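
The two hunks above promote DCHECKs to CHECKs, so these range validations of patched branch offsets now also run in release builds. Roughly, per the semantics of src/base/logging.h (a simplified sketch, not the literal macros):

    // CHECK(cond) aborts the process on failure in every build mode.
    // DCHECK(cond) does the same in debug builds and compiles away
    // to nothing in release builds.
    #ifdef DEBUG
    #define DCHECK(condition) CHECK(condition)
    #else
    #define DCHECK(condition) ((void)0)
    #endif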
@@ -1030,10 +1032,53 @@ namespace {
bool FitsShifter(uint32_t imm32, uint32_t* rotate_imm, uint32_t* immed_8,
Instr* instr) {
// imm32 must be unsigned.
- for (int rot = 0; rot < 16; rot++) {
- uint32_t imm8 = base::bits::RotateLeft32(imm32, 2 * rot);
- if ((imm8 <= 0xFF)) {
- *rotate_imm = rot;
+ {
+ // 32-bit immediates can be encoded as:
+ // (8-bit value, 2*N bit left rotation)
+ // e.g. 0xab00 can be encoded as 0xab shifted left by 8 == 2*4, i.e.
+ // (0xab, 4)
+ //
+ // Check three categories which cover all possible shifter fits:
+ // 1. 0x000000FF: The value is already 8-bit (no shifting necessary),
+ // 2. 0x000FF000: The 8-bit value is somewhere in the middle of the 32-bit
+ // value, and
+ // 3. 0xF000000F: The 8-bit value is split over the beginning and end of
+ // the 32-bit value.
+
+ // For 0x000000FF.
+ if (imm32 <= 0xFF) {
+ *rotate_imm = 0;
+ *immed_8 = imm32;
+ return true;
+ }
+ // For 0x000FF000, count trailing zeros and shift down to 0x000000FF. Note
+ // that we have to round the trailing zeros down to the nearest multiple of
+ // two, since we can only encode shifts of 2*N. Note also that we know that
+ // imm32 isn't zero, since the 0x000000FF check above would have caught it.
+ int half_trailing_zeros = base::bits::CountTrailingZerosNonZero(imm32) / 2;
+ uint32_t imm8 = imm32 >> (half_trailing_zeros * 2);
+ if (imm8 <= 0xFF) {
+ DCHECK_GT(half_trailing_zeros, 0);
+ // Rotating right by trailing_zeros is equivalent to rotating left by
+ // 32 - trailing_zeros. We return rotate_right / 2, so calculate
+ // (32 - trailing_zeros)/2 == 16 - trailing_zeros/2.
+ *rotate_imm = (16 - half_trailing_zeros);
+ *immed_8 = imm8;
+ return true;
+ }
+ // For 0xF000000F, rotate by 16 to get 0x000FF000 and continue as if it
+ // were that case.
+ uint32_t imm32_rot16 = base::bits::RotateLeft32(imm32, 16);
+ half_trailing_zeros =
+ base::bits::CountTrailingZerosNonZero(imm32_rot16) / 2;
+ imm8 = imm32_rot16 >> (half_trailing_zeros * 2);
+ if (imm8 <= 0xFF) {
+ // We've rotated left by 2*8, so we can't have more than that many
+ // trailing zeroes.
+ DCHECK_LT(half_trailing_zeros, 8);
+ // We've already rotated by 2*8, before calculating trailing_zeros/2,
+ // so we need (32 - (16 + trailing_zeros))/2 == 8 - trailing_zeros/2.
+ *rotate_imm = 8 - half_trailing_zeros;
*immed_8 = imm8;
return true;
}
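
The rewritten FitsShifter replaces the old 16-iteration rotation probe with three constant-time category checks. A standalone paraphrase of the new logic, using the GCC/Clang __builtin_ctz intrinsic in place of V8's base::bits helpers (names here are illustrative, not V8's):

    #include <cstdint>

    static uint32_t RotateLeft32(uint32_t v, unsigned n) {
      return n == 0 ? v : (v << n) | (v >> (32 - n));
    }

    // True if imm32 is encodable as an ARM operand2 immediate, i.e. an
    // 8-bit value rotated right by 2*rotate_imm.
    bool FitsShifterSketch(uint32_t imm32, uint32_t* rotate_imm,
                           uint32_t* immed_8) {
      if (imm32 <= 0xFF) {  // Category 1: 0x000000FF.
        *rotate_imm = 0;
        *immed_8 = imm32;
        return true;
      }
      // Category 2: 0x000FF000. imm32 != 0 here, so ctz is well-defined.
      int half_tz = __builtin_ctz(imm32) / 2;
      if ((imm32 >> (2 * half_tz)) <= 0xFF) {
        // imm32 == imm8 rotated right by (32 - 2*half_tz).
        *rotate_imm = 16 - half_tz;
        *immed_8 = imm32 >> (2 * half_tz);
        return true;
      }
      // Category 3: 0xF000000F. Rotate by 16 and retry the middle case.
      uint32_t rot16 = RotateLeft32(imm32, 16);
      half_tz = __builtin_ctz(rot16) / 2;
      if ((rot16 >> (2 * half_tz)) <= 0xFF) {
        *rotate_imm = 8 - half_tz;
        *immed_8 = rot16 >> (2 * half_tz);
        return true;
      }
      return false;
    }

For example, 0xab00 has 8 trailing zeros, so half_tz is 4 and the result is (immed_8 = 0xab, rotate_imm = 12): 0xab rotated right by 24 bits, which equals rotating it left by 8.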
@@ -2258,7 +2303,7 @@ void Assembler::bkpt(uint32_t imm16) {
}
void Assembler::svc(uint32_t imm24, Condition cond) {
- DCHECK(is_uint24(imm24));
+ CHECK(is_uint24(imm24));
emit(cond | 15 * B24 | imm24);
}
@@ -5204,8 +5249,13 @@ void Assembler::ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
(rmode == RelocInfo::CODE_TARGET && value != 0) ||
(RelocInfo::IsEmbeddedObjectMode(rmode) && value != 0);
DCHECK_LT(pending_32_bit_constants_.size(), kMaxNumPending32Constants);
- if (pending_32_bit_constants_.empty()) {
+ if (first_const_pool_32_use_ < 0) {
+ DCHECK(pending_32_bit_constants_.empty());
+ DCHECK_EQ(constant_pool_deadline_, kMaxInt);
first_const_pool_32_use_ = position;
+ constant_pool_deadline_ = position + kCheckPoolDeadline;
+ } else {
+ DCHECK(!pending_32_bit_constants_.empty());
}
ConstantPoolEntry entry(position, value, sharing_ok, rmode);
@@ -5224,7 +5274,7 @@ void Assembler::ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
}
}
- pending_32_bit_constants_.push_back(entry);
+ pending_32_bit_constants_.emplace_back(entry);
// Make sure the constant pool is not emitted in place of the next
// instruction for which we just recorded relocation info.
@@ -5239,17 +5289,17 @@ void Assembler::ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
void Assembler::BlockConstPoolFor(int instructions) {
int pc_limit = pc_offset() + instructions * kInstrSize;
if (no_const_pool_before_ < pc_limit) {
- // Max pool start (if we need a jump and an alignment).
-#ifdef DEBUG
- int start = pc_limit + kInstrSize + 2 * kPointerSize;
- DCHECK(pending_32_bit_constants_.empty() ||
- (start < first_const_pool_32_use_ + kMaxDistToIntPool));
-#endif
no_const_pool_before_ = pc_limit;
}
- if (next_buffer_check_ < no_const_pool_before_) {
- next_buffer_check_ = no_const_pool_before_;
+ // If we're due a const pool check before the block finishes, move it to just
+ // after the block.
+ if (constant_pool_deadline_ < no_const_pool_before_) {
+ // Make sure that the new deadline isn't too late (including a jump and the
+ // constant pool marker).
+ DCHECK_LE(no_const_pool_before_,
+ first_const_pool_32_use_ + kMaxDistToIntPool);
+ constant_pool_deadline_ = no_const_pool_before_;
}
}
@@ -5265,49 +5315,44 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// There is nothing to do if there are no pending constant pool entries.
if (pending_32_bit_constants_.empty()) {
- // Calculate the offset of the next check.
- next_buffer_check_ = pc_offset() + kCheckPoolInterval;
+ // We should only fall into this case if we're either forcing emission
+ // or opportunistically checking after a jump.
+ DCHECK(force_emit || !require_jump);
return;
}
- // Check that the code buffer is large enough before emitting the constant
- // pool (include the jump over the pool and the constant pool marker and
- // the gap to the relocation information).
- int jump_instr = require_jump ? kInstrSize : 0;
- int size_up_to_marker = jump_instr + kInstrSize;
- int estimated_size_after_marker =
- pending_32_bit_constants_.size() * kPointerSize;
- int estimated_size = size_up_to_marker + estimated_size_after_marker;
-
// We emit a constant pool when:
// * requested to do so by parameter force_emit (e.g. after each function).
// * the distance from the first instruction accessing the constant pool to
- // any of the constant pool entries will exceed its limit the next
- // time the pool is checked. This is overly restrictive, but we don't emit
- // constant pool entries in-order so it's conservatively correct.
+ // the first constant pool entry will exceed its limit the next time the
+ // pool is checked.
// * the instruction doesn't require a jump after itself to jump over the
// constant pool, and we're getting close to running out of range.
if (!force_emit) {
- DCHECK(!pending_32_bit_constants_.empty());
- bool need_emit = false;
- int dist32 = pc_offset() + estimated_size - first_const_pool_32_use_;
- if ((dist32 >= kMaxDistToIntPool - kCheckPoolInterval) ||
- (!require_jump && (dist32 >= kMaxDistToIntPool / 2))) {
- need_emit = true;
+ DCHECK_NE(first_const_pool_32_use_, -1);
+ int dist32 = pc_offset() - first_const_pool_32_use_;
+ if (require_jump) {
+ // We should only be on this path if we've exceeded our deadline.
+ DCHECK_GE(dist32, kCheckPoolDeadline);
+ } else if (dist32 < kCheckPoolDeadline / 2) {
+ return;
}
- if (!need_emit) return;
}
- // Deduplicate constants.
- int size_after_marker = estimated_size_after_marker;
+ int size_after_marker = pending_32_bit_constants_.size() * kPointerSize;
+ // Deduplicate constants.
for (size_t i = 0; i < pending_32_bit_constants_.size(); i++) {
ConstantPoolEntry& entry = pending_32_bit_constants_[i];
if (entry.is_merged()) size_after_marker -= kPointerSize;
}
+ // Check that the code buffer is large enough before emitting the constant
+ // pool (include the jump over the pool and the constant pool marker and
+ // the gap to the relocation information).
+ int jump_instr = require_jump ? kInstrSize : 0;
+ int size_up_to_marker = jump_instr + kInstrSize;
int size = size_up_to_marker + size_after_marker;
-
int needed_space = size + kGap;
while (buffer_space() <= needed_space) GrowBuffer();
@@ -5331,6 +5376,14 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
emit(kConstantPoolMarker |
EncodeConstantPoolLength(size_after_marker / kPointerSize));
+ // The first entry in the constant pool should also be the first one recorded.
+ CHECK_EQ(first_const_pool_32_use_, pending_32_bit_constants_[0].position());
+ CHECK(!pending_32_bit_constants_[0].is_merged());
+
+ // Make sure we're not emitting the constant too late.
+ CHECK_LE(pc_offset(),
+ first_const_pool_32_use_ + kMaxDistToPcRelativeConstant);
+
// Emit 32-bit constant pool entries.
for (size_t i = 0; i < pending_32_bit_constants_.size(); i++) {
ConstantPoolEntry& entry = pending_32_bit_constants_[i];
@@ -5354,6 +5407,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
ConstantPoolEntry& merged =
pending_32_bit_constants_[entry.merged_index()];
DCHECK(entry.value() == merged.value());
+ DCHECK_LT(merged.position(), entry.position());
Instr merged_instr = instr_at(merged.position());
DCHECK(IsLdrPcImmediateOffset(merged_instr));
delta = GetLdrRegisterImmediateOffset(merged_instr);
@@ -5379,9 +5433,9 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
}
}
- // Since a constant pool was just emitted, move the check offset forward by
- // the standard interval.
- next_buffer_check_ = pc_offset() + kCheckPoolInterval;
+ // Since a constant pool was just emitted, we don't need another check until
+ // the next constant pool entry is added.
+ constant_pool_deadline_ = kMaxInt;
}
PatchingAssembler::PatchingAssembler(const AssemblerOptions& options,
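
Taken together, these hunks replace the old periodic re-check (next_buffer_check_ advanced by kCheckPoolInterval after every pool check) with a single deadline that is armed when the first constant becomes pending and disarmed when the pool is emitted. A minimal model of the bookkeeping, with V8's types and the actual emission logic elided:

    #include <climits>

    // Minimal model of the new deadline scheme; illustrative only.
    struct ConstPoolDeadline {
      int first_use = -1;      // pc offset of the first pending constant load
      int deadline = INT_MAX;  // pc offset at which CheckConstPool must run

      void OnEntryAdded(int pc_offset, int check_pool_deadline) {
        if (first_use < 0) {   // first pending constant arms the deadline
          first_use = pc_offset;
          deadline = pc_offset + check_pool_deadline;
        }
      }
      bool Due(int pc_offset) const { return pc_offset >= deadline; }
      void OnPoolEmitted() {   // nothing pending: disarm until the next entry
        first_use = -1;
        deadline = INT_MAX;
      }
    };

Because the deadline only moves when the pending set transitions between empty and non-empty, MaybeCheckConstPool stays a single compare-and-branch on the fast path.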
diff --git a/deps/v8/src/codegen/arm/assembler-arm.h b/deps/v8/src/codegen/arm/assembler-arm.h
index e0490a6853..04d5eef054 100644
--- a/deps/v8/src/codegen/arm/assembler-arm.h
+++ b/deps/v8/src/codegen/arm/assembler-arm.h
@@ -45,6 +45,7 @@
#include <memory>
#include <vector>
+#include "src/base/small-vector.h"
#include "src/codegen/arm/constants-arm.h"
#include "src/codegen/arm/register-arm.h"
#include "src/codegen/assembler.h"
@@ -310,7 +311,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
~Assembler() override;
- void AbortedCodeGeneration() override { pending_32_bit_constants_.clear(); }
+ void AbortedCodeGeneration() override {
+ pending_32_bit_constants_.clear();
+ first_const_pool_32_use_ = -1;
+ }
// GetCode emits any pending (non-emitted) code and fills the descriptor desc.
static constexpr int kNoHandlerTable = 0;
@@ -1148,13 +1152,24 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static int DecodeShiftImm(Instr instr);
static Instr PatchShiftImm(Instr instr, int immed);
- // Constants in pools are accessed via pc relative addressing, which can
- // reach +/-4KB for integer PC-relative loads and +/-1KB for floating-point
+ // Constants are accessed via pc relative addressing, which can reach -4095 to
+ // 4095 for integer PC-relative loads, and -1020 to 1020 for floating-point
// PC-relative loads, thereby defining a maximum distance between the
- // instruction and the accessed constant.
- static constexpr int kMaxDistToIntPool = 4 * KB;
- // All relocations could be integer, it therefore acts as the limit.
- static constexpr int kMinNumPendingConstants = 4;
+ // instruction and the accessed constant. Additionally, PC-relative loads
+ // start at a delta from the actual load instruction's PC, so we can add this
+ // on to the (positive) distance.
+ static constexpr int kMaxDistToPcRelativeConstant =
+ 4095 + Instruction::kPcLoadDelta;
+ // The constant pool needs to be jumped over, and has a marker, so the actual
+ // distance between the instruction and the start of the pool has to include
+ // space for these two instructions.
+ static constexpr int kMaxDistToIntPool =
+ kMaxDistToPcRelativeConstant - 2 * kInstrSize;
+ // Experimentally derived as sufficient for ~95% of compiles.
+ static constexpr int kTypicalNumPending32Constants = 32;
+ // The maximum number of pending constants is reached by a sequence of only
+ // constant loads, which limits it to the number of constant loads that can
+ // fit between the first constant load and the maximum distance to the pool.
static constexpr int kMaxNumPending32Constants =
kMaxDistToIntPool / kInstrSize;
@@ -1165,8 +1180,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Check if is time to emit a constant pool.
void CheckConstPool(bool force_emit, bool require_jump);
- void MaybeCheckConstPool() {
- if (pc_offset() >= next_buffer_check_) {
+ V8_INLINE void MaybeCheckConstPool() {
+ if (V8_UNLIKELY(pc_offset() >= constant_pool_deadline_)) {
CheckConstPool(false, true);
}
}
@@ -1192,9 +1207,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// number of call to EndBlockConstpool.
void StartBlockConstPool() {
if (const_pool_blocked_nesting_++ == 0) {
- // Prevent constant pool checks happening by setting the next check to
- // the biggest possible offset.
- next_buffer_check_ = kMaxInt;
+ // Prevent constant pool checks happening by resetting the deadline.
+ constant_pool_deadline_ = kMaxInt;
}
}
@@ -1202,19 +1216,14 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// StartBlockConstPool to have an effect.
void EndBlockConstPool() {
if (--const_pool_blocked_nesting_ == 0) {
+ if (first_const_pool_32_use_ >= 0) {
#ifdef DEBUG
- // Max pool start (if we need a jump and an alignment).
- int start = pc_offset() + kInstrSize + 2 * kPointerSize;
- // Check the constant pool hasn't been blocked for too long.
- DCHECK(pending_32_bit_constants_.empty() ||
- (start < first_const_pool_32_use_ + kMaxDistToIntPool));
+ // Check the constant pool hasn't been blocked for too long.
+ DCHECK_LE(pc_offset(), first_const_pool_32_use_ + kMaxDistToIntPool);
#endif
- // Two cases:
- // * no_const_pool_before_ >= next_buffer_check_ and the emission is
- // still blocked
- // * no_const_pool_before_ < next_buffer_check_ and the next emit will
- // trigger a check.
- next_buffer_check_ = no_const_pool_before_;
+ // Reset the constant pool check back to the deadline.
+ constant_pool_deadline_ = first_const_pool_32_use_ + kCheckPoolDeadline;
+ }
}
}
@@ -1258,7 +1267,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// pending relocation entry per instruction.
// The buffers of pending constant pool entries.
- std::vector<ConstantPoolEntry> pending_32_bit_constants_;
+ base::SmallVector<ConstantPoolEntry, kTypicalNumPending32Constants>
+ pending_32_bit_constants_;
// Scratch registers available for use by the Assembler.
RegList scratch_register_list_;
@@ -1268,8 +1278,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Avoid overflows for displacements etc.
static const int kMaximalBufferSize = 512 * MB;
- int next_buffer_check_; // pc offset of next buffer check
-
// Constant pool generation
// Pools are emitted in the instruction stream, preferably after unconditional
// jumps or after returns from functions (in dead code locations).
@@ -1281,11 +1289,16 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// if so, a relocation info entry is associated to the constant pool entry.
// Repeated checking whether the constant pool should be emitted is rather
- // expensive. By default we only check again once a number of instructions
- // has been generated. That also means that the sizing of the buffers is not
- // an exact science, and that we rely on some slop to not overrun buffers.
- static constexpr int kCheckPoolIntervalInst = 32;
- static constexpr int kCheckPoolInterval = kCheckPoolIntervalInst * kInstrSize;
+ // expensive. Instead, we check once a deadline is hit; the deadline being
+ // when there is a possibility that MaybeCheckConstPool won't be called before
+ // kMaxDistToIntPoolWithHeader is exceeded. Since MaybeCheckConstPool is
+ // called in CheckBuffer, this means that kGap is an upper bound on this
+ // check. Use 2 * kGap just to give it some slack around BlockConstPoolScopes.
+ static constexpr int kCheckPoolDeadline = kMaxDistToIntPool - 2 * kGap;
+
+ // pc offset of the upcoming constant pool deadline. Equivalent to
+ // first_const_pool_32_use_ + kCheckPoolDeadline.
+ int constant_pool_deadline_;
// Emission of the constant pool may be blocked in some code sequences.
int const_pool_blocked_nesting_; // Block emission if this is not zero.
@@ -1298,7 +1311,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
- inline void CheckBuffer();
+ V8_INLINE void CheckBuffer();
void GrowBuffer();
// Instruction generation
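
Two header-level changes here are worth calling out. First, pending_32_bit_constants_ becomes a base::SmallVector with inline capacity kTypicalNumPending32Constants (32), so the ~95% of compiles cited in the comment never heap-allocate for pending constants. A toy version of the small-size optimization, simplified from what base::SmallVector actually does (the real one moves its elements to the heap on overflow rather than splitting storage):

    #include <cstddef>
    #include <vector>

    template <typename T, size_t N>
    class ToySmallVector {
     public:
      void push_back(const T& v) {
        if (size_ < N) {
          inline_[size_++] = v;  // common case: no allocation
        } else {
          spill_.push_back(v);   // rare case: fall back to the heap
          ++size_;
        }
      }
      size_t size() const { return size_; }

     private:
      T inline_[N] = {};
      std::vector<T> spill_;
      size_t size_ = 0;
    };

Second, CheckBuffer and MaybeCheckConstPool are annotated V8_INLINE so the fast-path comparison stays inlined at every instruction-emission site.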
diff --git a/deps/v8/src/codegen/arm/cpu-arm.cc b/deps/v8/src/codegen/arm/cpu-arm.cc
index 47fe4bdb74..88491c5e51 100644
--- a/deps/v8/src/codegen/arm/cpu-arm.cc
+++ b/deps/v8/src/codegen/arm/cpu-arm.cc
@@ -6,7 +6,7 @@
#ifdef __arm__
#ifdef __QNXNTO__
#include <sys/mman.h> // for cache flushing.
-#undef MAP_TYPE // NOLINT
+#undef MAP_TYPE
#elif V8_OS_FREEBSD
#include <machine/sysarch.h> // for cache flushing
#include <sys/types.h>
diff --git a/deps/v8/src/codegen/arm/interface-descriptors-arm-inl.h b/deps/v8/src/codegen/arm/interface-descriptors-arm-inl.h
new file mode 100644
index 0000000000..296f72d157
--- /dev/null
+++ b/deps/v8/src/codegen/arm/interface-descriptors-arm-inl.h
@@ -0,0 +1,256 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_ARM_INTERFACE_DESCRIPTORS_ARM_INL_H_
+#define V8_CODEGEN_ARM_INTERFACE_DESCRIPTORS_ARM_INL_H_
+
+#if V8_TARGET_ARCH_ARM
+
+#include "src/codegen/interface-descriptors.h"
+#include "src/execution/frames.h"
+
+namespace v8 {
+namespace internal {
+
+constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() {
+ auto registers = RegisterArray(r0, r1, r2, r3, r4);
+ STATIC_ASSERT(registers.size() == kMaxBuiltinRegisterParams);
+ return registers;
+}
+
+// static
+constexpr auto RecordWriteDescriptor::registers() {
+ return RegisterArray(r0, r1, r2, r3, r4, kReturnRegister0);
+}
+
+// static
+constexpr auto DynamicCheckMapsDescriptor::registers() {
+ return RegisterArray(r0, r1, r2, r3, cp);
+}
+
+// static
+constexpr auto EphemeronKeyBarrierDescriptor::registers() {
+ return RegisterArray(r0, r1, r2, r3, r4, kReturnRegister0);
+}
+
+// static
+constexpr Register LoadDescriptor::ReceiverRegister() { return r1; }
+// static
+constexpr Register LoadDescriptor::NameRegister() { return r2; }
+// static
+constexpr Register LoadDescriptor::SlotRegister() { return r0; }
+
+// static
+constexpr Register LoadWithVectorDescriptor::VectorRegister() { return r3; }
+
+// static
+constexpr Register
+LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
+ return r4;
+}
+
+// static
+constexpr Register StoreDescriptor::ReceiverRegister() { return r1; }
+// static
+constexpr Register StoreDescriptor::NameRegister() { return r2; }
+// static
+constexpr Register StoreDescriptor::ValueRegister() { return r0; }
+// static
+constexpr Register StoreDescriptor::SlotRegister() { return r4; }
+
+// static
+constexpr Register StoreWithVectorDescriptor::VectorRegister() { return r3; }
+
+// static
+constexpr Register StoreTransitionDescriptor::MapRegister() { return r5; }
+
+// static
+constexpr Register ApiGetterDescriptor::HolderRegister() { return r0; }
+// static
+constexpr Register ApiGetterDescriptor::CallbackRegister() { return r3; }
+
+// static
+constexpr Register GrowArrayElementsDescriptor::ObjectRegister() { return r0; }
+// static
+constexpr Register GrowArrayElementsDescriptor::KeyRegister() { return r3; }
+
+// static
+constexpr Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
+ return r3;
+}
+// static
+constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() { return r4; }
+
+// static
+constexpr Register TypeConversionDescriptor::ArgumentRegister() { return r0; }
+
+// static
+constexpr auto TypeofDescriptor::registers() { return RegisterArray(r3); }
+
+// static
+constexpr auto CallTrampolineDescriptor::registers() {
+ // r0 : number of arguments
+ // r1 : the target to call
+ return RegisterArray(r1, r0);
+}
+
+// static
+constexpr auto CallVarargsDescriptor::registers() {
+ // r0 : number of arguments (on the stack, not including receiver)
+ // r1 : the target to call
+ // r4 : arguments list length (untagged)
+ // r2 : arguments list (FixedArray)
+ return RegisterArray(r1, r0, r4, r2);
+}
+
+// static
+constexpr auto CallForwardVarargsDescriptor::registers() {
+ // r0 : number of arguments
+ // r2 : start index (to support rest parameters)
+ // r1 : the target to call
+ return RegisterArray(r1, r0, r2);
+}
+
+// static
+constexpr auto CallFunctionTemplateDescriptor::registers() {
+ // r1 : function template info
+ // r2 : number of arguments (on the stack, not including receiver)
+ return RegisterArray(r1, r2);
+}
+
+// static
+constexpr auto CallWithSpreadDescriptor::registers() {
+ // r0 : number of arguments (on the stack, not including receiver)
+ // r1 : the target to call
+ // r2 : the object to spread
+ return RegisterArray(r1, r0, r2);
+}
+
+// static
+constexpr auto CallWithArrayLikeDescriptor::registers() {
+ // r1 : the target to call
+ // r2 : the arguments list
+ return RegisterArray(r1, r2);
+}
+
+// static
+constexpr auto ConstructVarargsDescriptor::registers() {
+ // r0 : number of arguments (on the stack, not including receiver)
+ // r1 : the target to call
+ // r3 : the new target
+ // r4 : arguments list length (untagged)
+ // r2 : arguments list (FixedArray)
+ return RegisterArray(r1, r3, r0, r4, r2);
+}
+
+// static
+constexpr auto ConstructForwardVarargsDescriptor::registers() {
+ // r0 : number of arguments
+ // r3 : the new target
+ // r2 : start index (to support rest parameters)
+ // r1 : the target to call
+ return RegisterArray(r1, r3, r0, r2);
+}
+
+// static
+constexpr auto ConstructWithSpreadDescriptor::registers() {
+ // r0 : number of arguments (on the stack, not including receiver)
+ // r1 : the target to call
+ // r3 : the new target
+ // r2 : the object to spread
+ return RegisterArray(r1, r3, r0, r2);
+}
+
+// static
+constexpr auto ConstructWithArrayLikeDescriptor::registers() {
+ // r1 : the target to call
+ // r3 : the new target
+ // r2 : the arguments list
+ return RegisterArray(r1, r3, r2);
+}
+
+// static
+constexpr auto ConstructStubDescriptor::registers() {
+ // r0 : number of arguments
+ // r1 : the target to call
+ // r3 : the new target
+ // r2 : allocation site or undefined
+ return RegisterArray(r1, r3, r0, r2);
+}
+
+// static
+constexpr auto AbortDescriptor::registers() { return RegisterArray(r1); }
+
+// static
+constexpr auto CompareDescriptor::registers() { return RegisterArray(r1, r0); }
+
+// static
+constexpr auto Compare_BaselineDescriptor::registers() {
+ // r1: left operand
+ // r0: right operand
+ // r2: feedback slot
+ return RegisterArray(r1, r0, r2);
+}
+
+// static
+constexpr auto BinaryOpDescriptor::registers() { return RegisterArray(r1, r0); }
+
+// static
+constexpr auto BinaryOp_BaselineDescriptor::registers() {
+ // r1: left operand
+ // r0: right operand
+ // r2: feedback slot
+ return RegisterArray(r1, r0, r2);
+}
+
+// static
+constexpr auto ApiCallbackDescriptor::registers() {
+ return RegisterArray(r1, // kApiFunctionAddress
+ r2, // kArgc
+ r3, // kCallData
+ r0); // kHolder
+}
+
+// static
+constexpr auto InterpreterDispatchDescriptor::registers() {
+ return RegisterArray(
+ kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister);
+}
+
+// static
+constexpr auto InterpreterPushArgsThenCallDescriptor::registers() {
+ return RegisterArray(r0, // argument count (not including receiver)
+ r2, // address of first argument
+ r1); // the target callable to be called
+}
+
+// static
+constexpr auto InterpreterPushArgsThenConstructDescriptor::registers() {
+ return RegisterArray(
+ r0, // argument count (not including receiver)
+ r4, // address of the first argument
+ r1, // constructor to call
+ r3, // new target
+ r2); // allocation site feedback if available, undefined otherwise
+}
+
+// static
+constexpr auto ResumeGeneratorDescriptor::registers() {
+ return RegisterArray(r0, // the value to pass to the generator
+ r1); // the JSGeneratorObject to resume
+}
+
+// static
+constexpr auto RunMicrotasksEntryDescriptor::registers() {
+ return RegisterArray(r0, r1);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_ARM
+
+#endif // V8_CODEGEN_ARM_INTERFACE_DESCRIPTORS_ARM_INL_H_
diff --git a/deps/v8/src/codegen/arm/interface-descriptors-arm.cc b/deps/v8/src/codegen/arm/interface-descriptors-arm.cc
deleted file mode 100644
index 53992227ab..0000000000
--- a/deps/v8/src/codegen/arm/interface-descriptors-arm.cc
+++ /dev/null
@@ -1,306 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_ARM
-
-#include "src/codegen/interface-descriptors.h"
-
-#include "src/execution/frames.h"
-
-namespace v8 {
-namespace internal {
-
-const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
-
-void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
- CallInterfaceDescriptorData* data, int register_parameter_count) {
- const Register default_stub_registers[] = {r0, r1, r2, r3, r4};
- CHECK_LE(static_cast<size_t>(register_parameter_count),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(register_parameter_count,
- default_stub_registers);
-}
-
-void RecordWriteDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {r0, r1, r2, r3, r4};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void DynamicCheckMapsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register default_stub_registers[] = {r0, r1, r2, r3, cp};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void EphemeronKeyBarrierDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {r0, r1, r2, r3, r4};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-const Register LoadDescriptor::ReceiverRegister() { return r1; }
-const Register LoadDescriptor::NameRegister() { return r2; }
-const Register LoadDescriptor::SlotRegister() { return r0; }
-
-const Register LoadWithVectorDescriptor::VectorRegister() { return r3; }
-
-const Register
-LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
- return r4;
-}
-
-const Register StoreDescriptor::ReceiverRegister() { return r1; }
-const Register StoreDescriptor::NameRegister() { return r2; }
-const Register StoreDescriptor::ValueRegister() { return r0; }
-const Register StoreDescriptor::SlotRegister() { return r4; }
-
-const Register StoreWithVectorDescriptor::VectorRegister() { return r3; }
-
-const Register StoreTransitionDescriptor::SlotRegister() { return r4; }
-const Register StoreTransitionDescriptor::VectorRegister() { return r3; }
-const Register StoreTransitionDescriptor::MapRegister() { return r5; }
-
-const Register ApiGetterDescriptor::HolderRegister() { return r0; }
-const Register ApiGetterDescriptor::CallbackRegister() { return r3; }
-
-const Register GrowArrayElementsDescriptor::ObjectRegister() { return r0; }
-const Register GrowArrayElementsDescriptor::KeyRegister() { return r3; }
-
-const Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() { return r3; }
-const Register BaselineLeaveFrameDescriptor::WeightRegister() { return r4; }
-
-// static
-const Register TypeConversionDescriptor::ArgumentRegister() { return r0; }
-
-void TypeofDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r0 : number of arguments
- // r1 : the target to call
- Register registers[] = {r1, r0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r0 : number of arguments (on the stack, not including receiver)
- // r1 : the target to call
- // r4 : arguments list length (untagged)
- // r2 : arguments list (FixedArray)
- Register registers[] = {r1, r0, r4, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallForwardVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r0 : number of arguments
- // r2 : start index (to support rest parameters)
- // r1 : the target to call
- Register registers[] = {r1, r0, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallFunctionTemplateDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r1 : function template info
- // r2 : number of arguments (on the stack, not including receiver)
- Register registers[] = {r1, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallWithSpreadDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r0 : number of arguments (on the stack, not including receiver)
- // r1 : the target to call
- // r2 : the object to spread
- Register registers[] = {r1, r0, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r1 : the target to call
- // r2 : the arguments list
- Register registers[] = {r1, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r0 : number of arguments (on the stack, not including receiver)
- // r1 : the target to call
- // r3 : the new target
- // r4 : arguments list length (untagged)
- // r2 : arguments list (FixedArray)
- Register registers[] = {r1, r3, r0, r4, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r0 : number of arguments
- // r3 : the new target
- // r2 : start index (to support rest parameters)
- // r1 : the target to call
- Register registers[] = {r1, r3, r0, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructWithSpreadDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r0 : number of arguments (on the stack, not including receiver)
- // r1 : the target to call
- // r3 : the new target
- // r2 : the object to spread
- Register registers[] = {r1, r3, r0, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r1 : the target to call
- // r3 : the new target
- // r2 : the arguments list
- Register registers[] = {r1, r3, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructStubDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r0 : number of arguments
- // r1 : the target to call
- // r3 : the new target
- // r2 : allocation site or undefined
- Register registers[] = {r1, r3, r0, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void AbortDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r1};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CompareDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r1, r0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void Compare_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r1: left operand
- // r0: right operand
- // r2: feedback slot
- Register registers[] = {r1, r0, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void BinaryOpDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r1, r0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void BinaryOp_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r1: left operand
- // r0: right operand
- // r2: feedback slot
- Register registers[] = {r1, r0, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ApiCallbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r1, // kApiFunctionAddress
- r2, // kArgc
- r3, // kCallData
- r0, // kHolder
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterDispatchDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
- kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterPushArgsThenCallDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r0, // argument count (not including receiver)
- r2, // address of first argument
- r1 // the target callable to be call
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r0, // argument count (not including receiver)
- r4, // address of the first argument
- r1, // constructor to call
- r3, // new target
- r2, // allocation site feedback if available, undefined otherwise
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ResumeGeneratorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r0, // the value to pass to the generator
- r1 // the JSGeneratorObject to resume
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r1, // loaded new FP
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r0, r1};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_ARM
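
The deletion above and the new -inl.h earlier in this diff are two halves of one refactor: per-descriptor runtime InitializePlatformSpecific functions become constexpr registers() accessors that can be evaluated at compile time. Side by side, using CallTrampolineDescriptor as it appears in the two hunks:

    // Before (interface-descriptors-arm.cc): runtime initialization.
    void CallTrampolineDescriptor::InitializePlatformSpecific(
        CallInterfaceDescriptorData* data) {
      Register registers[] = {r1, r0};  // r1: target, r0: argument count
      data->InitializePlatformSpecific(arraysize(registers), registers);
    }

    // After (interface-descriptors-arm-inl.h): compile-time constant.
    constexpr auto CallTrampolineDescriptor::registers() {
      return RegisterArray(r1, r0);
    }

Note that FrameDropperTrampolineDescriptor has no counterpart in the new file; it is removed outright, along with MaybeDropFrames in macro-assembler-arm.cc below.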
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.cc b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
index f83eee4a91..d4e12f3092 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
@@ -13,6 +13,7 @@
#include "src/codegen/callable.h"
#include "src/codegen/code-factory.h"
#include "src/codegen/external-reference-table.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
@@ -59,7 +60,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
bytes += NumRegs(list) * kPointerSize;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
bytes += DwVfpRegister::kNumRegisters * DwVfpRegister::kSizeInBytes;
}
@@ -85,7 +86,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
bytes += NumRegs(list) * kPointerSize;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
SaveFPRegs(sp, lr);
bytes += DwVfpRegister::kNumRegisters * DwVfpRegister::kSizeInBytes;
}
@@ -96,7 +97,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
int bytes = 0;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
RestoreFPRegs(sp, lr);
bytes += DwVfpRegister::kNumRegisters * DwVfpRegister::kSizeInBytes;
}
@@ -660,7 +661,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
Label done;
// Skip barrier if writing a smi.
- if (smi_check == INLINE_SMI_CHECK) {
+ if (smi_check == SmiCheck::kInline) {
JumpIfSmi(value, &done);
}
@@ -668,7 +669,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
// of the object, so offset must be a multiple of kPointerSize.
DCHECK(IsAligned(offset, kPointerSize));
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Label ok;
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -680,7 +681,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
RecordWrite(object, Operand(offset - kHeapObjectTag), value, lr_status,
- save_fp, remembered_set_action, OMIT_SMI_CHECK);
+ save_fp, remembered_set_action, SmiCheck::kOmit);
bind(&done);
}
@@ -826,7 +827,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
DCHECK_NE(object, value);
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
{
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -837,7 +838,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
}
- if ((remembered_set_action == OMIT_REMEMBERED_SET &&
+ if ((remembered_set_action == RememberedSetAction::kOmit &&
!FLAG_incremental_marking) ||
FLAG_disable_write_barriers) {
return;
@@ -847,7 +848,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
// catch stores of smis and stores into the young generation.
Label done;
- if (smi_check == INLINE_SMI_CHECK) {
+ if (smi_check == SmiCheck::kInline) {
JumpIfSmi(value, &done);
}
@@ -1435,7 +1436,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
PushCommonFrame(scratch);
// Reserve room for saved entry sp.
sub(sp, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp));
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
mov(scratch, Operand::Zero());
str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
@@ -1622,7 +1623,7 @@ void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch,
void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count,
- Label* done, InvokeFlag flag) {
+ Label* done, InvokeType type) {
Label regular_invoke;
// r0: actual arguments count
// r1: function (passed through to callee)
@@ -1722,9 +1723,9 @@ void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count,
Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
DCHECK_EQ(function, r1);
DCHECK_IMPLIES(new_target.is_valid(), new_target == r3);
@@ -1746,17 +1747,19 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
}
Label done;
- InvokePrologue(expected_parameter_count, actual_parameter_count, &done, flag);
+ InvokePrologue(expected_parameter_count, actual_parameter_count, &done, type);
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
Register code = kJavaScriptCallCodeStartRegister;
ldr(code, FieldMemOperand(function, JSFunction::kCodeOffset));
- if (flag == CALL_FUNCTION) {
- CallCodeObject(code);
- } else {
- DCHECK(flag == JUMP_FUNCTION);
- JumpCodeObject(code);
+ switch (type) {
+ case InvokeType::kCall:
+ CallCodeObject(code);
+ break;
+ case InvokeType::kJump:
+ JumpCodeObject(code);
+ break;
}
b(&done);
@@ -1773,9 +1776,9 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
void MacroAssembler::InvokeFunctionWithNewTarget(
Register fun, Register new_target, Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
// Contract with called JS functions requires that function is passed in r1.
DCHECK_EQ(fun, r1);
@@ -1790,15 +1793,15 @@ void MacroAssembler::InvokeFunctionWithNewTarget(
SharedFunctionInfo::kFormalParameterCountOffset));
InvokeFunctionCode(fun, new_target, expected_reg, actual_parameter_count,
- flag);
+ type);
}
void MacroAssembler::InvokeFunction(Register function,
Register expected_parameter_count,
Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
// Contract with called JS functions requires that function is passed in r1.
DCHECK_EQ(function, r1);
@@ -1807,18 +1810,7 @@ void MacroAssembler::InvokeFunction(Register function,
ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
InvokeFunctionCode(r1, no_reg, expected_parameter_count,
- actual_parameter_count, flag);
-}
-
-void MacroAssembler::MaybeDropFrames() {
- // Check whether we need to drop frames to restart a function on the stack.
- ExternalReference restart_fp =
- ExternalReference::debug_restart_fp_address(isolate());
- Move(r1, restart_fp);
- ldr(r1, MemOperand(r1));
- tst(r1, r1);
- Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET,
- ne);
+ actual_parameter_count, type);
}
void MacroAssembler::PushStackHandler() {
@@ -1993,8 +1985,8 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
DCHECK_EQ(builtin.address() & 1, 1);
#endif
Move(r1, builtin);
- Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
- kArgvOnStack, builtin_exit_frame);
+ Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
+ ArgvMode::kStack, builtin_exit_frame);
Jump(code, RelocInfo::CODE_TARGET);
}
@@ -2034,11 +2026,11 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
}
void TurboAssembler::Assert(Condition cond, AbortReason reason) {
- if (emit_debug_code()) Check(cond, reason);
+ if (FLAG_debug_code) Check(cond, reason);
}
void TurboAssembler::AssertUnreachable(AbortReason reason) {
- if (emit_debug_code()) Abort(reason);
+ if (FLAG_debug_code) Abort(reason);
}
void TurboAssembler::Check(Condition cond, AbortReason reason) {
@@ -2052,11 +2044,11 @@ void TurboAssembler::Check(Condition cond, AbortReason reason) {
void TurboAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
-#ifdef DEBUG
- const char* msg = GetAbortReason(reason);
- RecordComment("Abort message: ");
- RecordComment(msg);
-#endif
+ if (FLAG_code_comments) {
+ const char* msg = GetAbortReason(reason);
+ RecordComment("Abort message: ");
+ RecordComment(msg);
+ }
// Avoid emitting call to builtin if requested.
if (trap_on_abort()) {
@@ -2143,7 +2135,7 @@ void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
}
void MacroAssembler::AssertNotSmi(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
Check(ne, AbortReason::kOperandIsASmi);
@@ -2151,7 +2143,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
void MacroAssembler::AssertSmi(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
Check(eq, AbortReason::kOperandIsNotASmi);
@@ -2159,7 +2151,7 @@ void MacroAssembler::AssertSmi(Register object) {
}
void MacroAssembler::AssertConstructor(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor);
@@ -2173,7 +2165,7 @@ void MacroAssembler::AssertConstructor(Register object) {
}
void MacroAssembler::AssertFunction(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
Check(ne, AbortReason::kOperandIsASmiAndNotAFunction);
@@ -2187,7 +2179,7 @@ void MacroAssembler::AssertFunction(Register object) {
}
void MacroAssembler::AssertBoundFunction(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction);
@@ -2199,7 +2191,7 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
void MacroAssembler::AssertGeneratorObject(Register object) {
- if (!emit_debug_code()) return;
+ if (!FLAG_debug_code) return;
tst(object, Operand(kSmiTagMask));
Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject);
@@ -2229,7 +2221,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Label done_checking;
AssertNotSmi(object);
CompareRoot(object, RootIndex::kUndefinedValue);
@@ -2520,7 +2512,7 @@ void TurboAssembler::CallCFunctionHelper(Register function,
// running in the simulator. The simulator has its own alignment check which
// provides more information.
#if V8_HOST_ARCH_ARM
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.h b/deps/v8/src/codegen/arm/macro-assembler-arm.h
index e622d4aa17..3a54f6c45f 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.h
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.h
@@ -29,8 +29,6 @@ inline MemOperand FieldMemOperand(Register object, int offset) {
return MemOperand(object, offset - kHeapObjectTag);
}
-enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
-enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
@@ -656,16 +654,16 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void RecordWriteField(
Register object, int offset, Register value, LinkRegisterStatus lr_status,
SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
// For a given |object| notify the garbage collector that the slot at |offset|
// has been written. |value| is the object being stored.
void RecordWrite(
Register object, Operand offset, Register value,
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
// Enter exit frame.
// stack_space - extra stack space, used for alignment before call to C.
@@ -689,7 +687,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Invoke the JavaScript function code by either calling or jumping.
void InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count,
- Register actual_parameter_count, InvokeFlag flag);
+ Register actual_parameter_count, InvokeType type);
// On function call, call into the debugger.
void CallDebugOnFunctionCall(Register fun, Register new_target,
@@ -700,13 +698,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// current context to the context in the function before invoking.
void InvokeFunctionWithNewTarget(Register function, Register new_target,
Register actual_parameter_count,
- InvokeFlag flag);
+ InvokeType type);
void InvokeFunction(Register function, Register expected_parameter_count,
- Register actual_parameter_count, InvokeFlag flag);
-
- // Frame restart support
- void MaybeDropFrames();
+ Register actual_parameter_count, InvokeType type);
// Exception handling
@@ -784,18 +779,18 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Call a runtime routine.
void CallRuntime(const Runtime::Function* f, int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore);
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, save_doubles);
}
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid, int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
}
@@ -874,7 +869,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Helper functions for generating invokes.
void InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count, Label* done,
- InvokeFlag flag);
+ InvokeType type);
DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};
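Both ARM headers drop their local RememberedSetAction/SmiCheck enums in favour of shared scoped enums (the kEmit/kOmit and kInline/kOmit spellings in the hunk above). A sketch of what the enum-class form buys, with enumerator order assumed to match the defaults used in this diff:

    #include <cassert>

    // Old style: unscoped enumerators leak into the namespace and
    // convert implicitly to int.
    enum RememberedSetActionOld { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
    enum SmiCheckOld { INLINE_SMI_CHECK, OMIT_SMI_CHECK };

    // New style, as used in the replaced default arguments.
    enum class RememberedSetAction { kOmit, kEmit };
    enum class SmiCheck { kOmit, kInline };

    int main() {
      // Unscoped enumerators of different types still compare via int, so
      // this cross-enum comparison compiles and is even true (both are 0).
      assert(EMIT_REMEMBERED_SET == INLINE_SMI_CHECK);

      // The scoped equivalent is rejected at compile time:
      //   SmiCheck::kInline == RememberedSetAction::kEmit;  // error
      SmiCheck check = SmiCheck::kInline;
      assert(check != SmiCheck::kOmit);
      return 0;
    }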
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64-inl.h b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h
index ee64dbe1f2..2668502f81 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h
@@ -1072,12 +1072,12 @@ const Register& Assembler::AppropriateZeroRegFor(const CPURegister& reg) const {
inline void Assembler::CheckBufferSpace() {
DCHECK_LT(pc_, buffer_start_ + buffer_->size());
- if (buffer_space() < kGap) {
+ if (V8_UNLIKELY(buffer_space() < kGap)) {
GrowBuffer();
}
}
-inline void Assembler::CheckBuffer() {
+V8_INLINE void Assembler::CheckBuffer() {
CheckBufferSpace();
if (pc_offset() >= next_veneer_pool_check_) {
CheckVeneerPool(false, true);
@@ -1085,6 +1085,10 @@ inline void Assembler::CheckBuffer() {
constpool_.MaybeCheck();
}
+EnsureSpace::EnsureSpace(Assembler* assembler) : block_pools_scope_(assembler) {
+ assembler->CheckBufferSpace();
+}
+
} // namespace internal
} // namespace v8
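The buffer check runs on every emitted instruction, so this hunk marks the grow branch V8_UNLIKELY and moves the EnsureSpace constructor out of the class body so CheckBufferSpace can stay V8_INLINE. A sketch of the shape, assuming V8_UNLIKELY wraps __builtin_expect on GCC/Clang:

    #include <cstddef>
    #include <vector>

    #define UNLIKELY(x) __builtin_expect(!!(x), 0)  // stand-in for V8_UNLIKELY

    class Assembler {
     public:
      static constexpr size_t kGap = 128;

      // Hot path: one compare-and-branch, inlined at every emit site;
      // the cold grow path stays out of line.
      inline void CheckBufferSpace() {
        if (UNLIKELY(space_left() < kGap)) GrowBuffer();
      }

     private:
      size_t space_left() const { return buffer_.capacity() - buffer_.size(); }
      void GrowBuffer() { buffer_.reserve(2 * buffer_.capacity() + kGap); }
      std::vector<unsigned char> buffer_;
    };

    int main() {
      Assembler masm;
      masm.CheckBufferSpace();  // grows once, then the check stays cheap
      return 0;
    }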
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.h b/deps/v8/src/codegen/arm64/assembler-arm64.h
index aa2ffb26cd..9d8b135954 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64.h
+++ b/deps/v8/src/codegen/arm64/assembler-arm64.h
@@ -2634,7 +2634,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
void GrowBuffer();
- void CheckBufferSpace();
+ V8_INLINE void CheckBufferSpace();
void CheckBuffer();
// Emission of the veneer pools may be blocked in some code sequences.
@@ -2786,9 +2786,7 @@ class PatchingAssembler : public Assembler {
class EnsureSpace {
public:
- explicit EnsureSpace(Assembler* assembler) : block_pools_scope_(assembler) {
- assembler->CheckBufferSpace();
- }
+ explicit V8_INLINE EnsureSpace(Assembler* assembler);
private:
Assembler::BlockPoolsScope block_pools_scope_;
diff --git a/deps/v8/src/codegen/arm64/cpu-arm64.cc b/deps/v8/src/codegen/arm64/cpu-arm64.cc
index d7bd4834b0..4baf2e07ec 100644
--- a/deps/v8/src/codegen/arm64/cpu-arm64.cc
+++ b/deps/v8/src/codegen/arm64/cpu-arm64.cc
@@ -23,7 +23,7 @@ class CacheLineSizes {
cache_type_register_ = 0;
#else
// Copy the content of the cache type register to a core register.
- __asm__ __volatile__("mrs %x[ctr], ctr_el0" // NOLINT
+ __asm__ __volatile__("mrs %x[ctr], ctr_el0"
: [ctr] "=r"(cache_type_register_));
#endif
}
@@ -64,9 +64,8 @@ void CpuFeatures::FlushICache(void* address, size_t length) {
uintptr_t istart = start & ~(isize - 1);
uintptr_t end = start + length;
- __asm__ __volatile__( // NOLINT
- // Clean every line of the D cache containing the
- // target data.
+ __asm__ __volatile__(
+ // Clean every line of the D cache containing the target data.
"0: \n\t"
// dc : Data Cache maintenance
// c : Clean
@@ -111,7 +110,7 @@ void CpuFeatures::FlushICache(void* address, size_t length) {
: [dsize] "r"(dsize), [isize] "r"(isize), [end] "r"(end)
// This code does not write to memory but without the dependency gcc might
// move this code before the code is generated.
- : "cc", "memory"); // NOLINT
+ : "cc", "memory");
#endif // V8_OS_WIN
#endif // V8_HOST_ARCH_ARM64
}
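For reference, the CacheLineSizes helper retained above derives both line sizes from ctr_el0; on ARMv8, IminLine sits in bits [3:0] and DminLine in bits [19:16], each the log2 of the line size in 4-byte words. A sketch of that decoding with an illustrative register value:

    #include <cstdint>
    #include <cstdio>

    uint32_t ICacheLineSize(uint64_t ctr) { return 4u << (ctr & 0xf); }
    uint32_t DCacheLineSize(uint64_t ctr) { return 4u << ((ctr >> 16) & 0xf); }

    int main() {
      uint64_t ctr = 0x8444c004;  // illustrative ctr_el0 value
      std::printf("icache %u bytes, dcache %u bytes\n",
                  ICacheLineSize(ctr), DCacheLineSize(ctr));  // 64 and 64
    }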
diff --git a/deps/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h b/deps/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h
new file mode 100644
index 0000000000..90123dbdcb
--- /dev/null
+++ b/deps/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h
@@ -0,0 +1,265 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_ARM64_INTERFACE_DESCRIPTORS_ARM64_INL_H_
+#define V8_CODEGEN_ARM64_INTERFACE_DESCRIPTORS_ARM64_INL_H_
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "src/base/template-utils.h"
+#include "src/codegen/interface-descriptors.h"
+#include "src/execution/frames.h"
+
+namespace v8 {
+namespace internal {
+
+constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() {
+ auto registers = RegisterArray(x0, x1, x2, x3, x4);
+ STATIC_ASSERT(registers.size() == kMaxBuiltinRegisterParams);
+ return registers;
+}
+
+// static
+constexpr auto RecordWriteDescriptor::registers() {
+ return RegisterArray(x0, x1, x2, x3, x4, kReturnRegister0);
+}
+
+// static
+constexpr auto DynamicCheckMapsDescriptor::registers() {
+ return RegisterArray(x0, x1, x2, x3, cp);
+}
+
+// static
+constexpr auto EphemeronKeyBarrierDescriptor::registers() {
+ return RegisterArray(x0, x1, x2, x3, x4, kReturnRegister0);
+}
+
+// static
+constexpr Register LoadDescriptor::ReceiverRegister() { return x1; }
+// static
+constexpr Register LoadDescriptor::NameRegister() { return x2; }
+// static
+constexpr Register LoadDescriptor::SlotRegister() { return x0; }
+
+// static
+constexpr Register LoadWithVectorDescriptor::VectorRegister() { return x3; }
+
+// static
+constexpr Register
+LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
+ return x4;
+}
+
+// static
+constexpr Register StoreDescriptor::ReceiverRegister() { return x1; }
+// static
+constexpr Register StoreDescriptor::NameRegister() { return x2; }
+// static
+constexpr Register StoreDescriptor::ValueRegister() { return x0; }
+// static
+constexpr Register StoreDescriptor::SlotRegister() { return x4; }
+
+// static
+constexpr Register StoreWithVectorDescriptor::VectorRegister() { return x3; }
+
+// static
+constexpr Register StoreTransitionDescriptor::MapRegister() { return x5; }
+
+// static
+constexpr Register ApiGetterDescriptor::HolderRegister() { return x0; }
+// static
+constexpr Register ApiGetterDescriptor::CallbackRegister() { return x3; }
+
+// static
+constexpr Register GrowArrayElementsDescriptor::ObjectRegister() { return x0; }
+// static
+constexpr Register GrowArrayElementsDescriptor::KeyRegister() { return x3; }
+
+// static
+constexpr Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
+ return x3;
+}
+// static
+constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() { return x4; }
+
+// static
+constexpr Register TypeConversionDescriptor::ArgumentRegister() { return x0; }
+
+// static
+constexpr auto TypeofDescriptor::registers() { return RegisterArray(x3); }
+
+// static
+constexpr auto CallTrampolineDescriptor::registers() {
+ // x1: target
+ // x0: number of arguments
+ return RegisterArray(x1, x0);
+}
+
+// static
+constexpr auto CallVarargsDescriptor::registers() {
+ // x0 : number of arguments (on the stack, not including receiver)
+ // x1 : the target to call
+ // x4 : arguments list length (untagged)
+ // x2 : arguments list (FixedArray)
+ return RegisterArray(x1, x0, x4, x2);
+}
+
+// static
+constexpr auto CallForwardVarargsDescriptor::registers() {
+ // x1: target
+ // x0: number of arguments
+ // x2: start index (to support rest parameters)
+ return RegisterArray(x1, x0, x2);
+}
+
+// static
+constexpr auto CallFunctionTemplateDescriptor::registers() {
+ // x1 : function template info
+ // x2 : number of arguments (on the stack, not including receiver)
+ return RegisterArray(x1, x2);
+}
+
+// static
+constexpr auto CallWithSpreadDescriptor::registers() {
+ // x0 : number of arguments (on the stack, not including receiver)
+ // x1 : the target to call
+ // x2 : the object to spread
+ return RegisterArray(x1, x0, x2);
+}
+
+// static
+constexpr auto CallWithArrayLikeDescriptor::registers() {
+ // x1 : the target to call
+ // x2 : the arguments list
+ return RegisterArray(x1, x2);
+}
+
+// static
+constexpr auto ConstructVarargsDescriptor::registers() {
+ // x0 : number of arguments (on the stack, not including receiver)
+ // x1 : the target to call
+ // x3 : the new target
+ // x4 : arguments list length (untagged)
+ // x2 : arguments list (FixedArray)
+ return RegisterArray(x1, x3, x0, x4, x2);
+}
+
+// static
+constexpr auto ConstructForwardVarargsDescriptor::registers() {
+ // x3: new target
+ // x1: target
+ // x0: number of arguments
+ // x2: start index (to support rest parameters)
+ return RegisterArray(x1, x3, x0, x2);
+}
+
+// static
+constexpr auto ConstructWithSpreadDescriptor::registers() {
+ // x0 : number of arguments (on the stack, not including receiver)
+ // x1 : the target to call
+ // x3 : the new target
+ // x2 : the object to spread
+ return RegisterArray(x1, x3, x0, x2);
+}
+
+// static
+constexpr auto ConstructWithArrayLikeDescriptor::registers() {
+ // x1 : the target to call
+ // x3 : the new target
+ // x2 : the arguments list
+ return RegisterArray(x1, x3, x2);
+}
+
+// static
+constexpr auto ConstructStubDescriptor::registers() {
+ // x3: new target
+ // x1: target
+ // x0: number of arguments
+ // x2: allocation site or undefined
+ return RegisterArray(x1, x3, x0, x2);
+}
+
+// static
+constexpr auto AbortDescriptor::registers() { return RegisterArray(x1); }
+
+// static
+constexpr auto CompareDescriptor::registers() {
+ // x1: left operand
+ // x0: right operand
+ return RegisterArray(x1, x0);
+}
+
+// static
+constexpr auto Compare_BaselineDescriptor::registers() {
+ // x1: left operand
+ // x0: right operand
+ // x2: feedback slot
+ return RegisterArray(x1, x0, x2);
+}
+
+// static
+constexpr auto BinaryOpDescriptor::registers() {
+ // x1: left operand
+ // x0: right operand
+ return RegisterArray(x1, x0);
+}
+
+// static
+constexpr auto BinaryOp_BaselineDescriptor::registers() {
+ // x1: left operand
+ // x0: right operand
+ // x2: feedback slot
+ return RegisterArray(x1, x0, x2);
+}
+
+// static
+constexpr auto ApiCallbackDescriptor::registers() {
+ return RegisterArray(x1, // kApiFunctionAddress
+ x2, // kArgc
+ x3, // kCallData
+ x0); // kHolder
+}
+
+// static
+constexpr auto InterpreterDispatchDescriptor::registers() {
+ return RegisterArray(
+ kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister);
+}
+
+// static
+constexpr auto InterpreterPushArgsThenCallDescriptor::registers() {
+ return RegisterArray(x0, // argument count (not including receiver)
+ x2, // address of first argument
+ x1); // the target callable to be called
+}
+
+// static
+constexpr auto InterpreterPushArgsThenConstructDescriptor::registers() {
+ return RegisterArray(
+ x0, // argument count (not including receiver)
+ x4, // address of the first argument
+ x1, // constructor to call
+ x3, // new target
+ x2); // allocation site feedback if available, undefined otherwise
+}
+
+// static
+constexpr auto ResumeGeneratorDescriptor::registers() {
+ return RegisterArray(x0, // the value to pass to the generator
+ x1); // the JSGeneratorObject to resume
+}
+
+// static
+constexpr auto RunMicrotasksEntryDescriptor::registers() {
+ return RegisterArray(x0, x1);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_ARM64
+
+#endif // V8_CODEGEN_ARM64_INTERFACE_DESCRIPTORS_ARM64_INL_H_
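The new header expresses each descriptor's registers as a constexpr function, so the old runtime InitializePlatformSpecific tables and their CHECK_LEs become compile-time checks. A sketch of the mechanism with stand-in Register and RegisterArray definitions:

    #include <array>
    #include <cstdio>

    struct Register { int code; };  // stand-in; V8's Register is richer
    constexpr Register x0{0}, x1{1}, x2{2}, x3{3}, x4{4};

    // Assumption: RegisterArray is essentially a std::array factory.
    template <typename... Regs>
    constexpr std::array<Register, sizeof...(Regs)> RegisterArray(Regs... regs) {
      return {regs...};
    }

    constexpr int kMaxBuiltinRegisterParams = 5;

    constexpr auto DefaultRegisterArray() {
      return RegisterArray(x0, x1, x2, x3, x4);
    }

    // What used to be CHECK_LE at isolate startup is now rejected by the
    // compiler if a descriptor grows past the limit.
    static_assert(DefaultRegisterArray().size() <= kMaxBuiltinRegisterParams,
                  "too many register parameters");

    int main() {
      constexpr auto regs = DefaultRegisterArray();
      std::printf("first parameter register: x%d\n", regs[0].code);
    }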
diff --git a/deps/v8/src/codegen/arm64/interface-descriptors-arm64.cc b/deps/v8/src/codegen/arm64/interface-descriptors-arm64.cc
deleted file mode 100644
index 246d6fc961..0000000000
--- a/deps/v8/src/codegen/arm64/interface-descriptors-arm64.cc
+++ /dev/null
@@ -1,310 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_ARM64
-
-#include "src/codegen/interface-descriptors.h"
-
-#include "src/execution/frames.h"
-
-namespace v8 {
-namespace internal {
-
-const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
-
-void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
- CallInterfaceDescriptorData* data, int register_parameter_count) {
- const Register default_stub_registers[] = {x0, x1, x2, x3, x4};
- CHECK_LE(static_cast<size_t>(register_parameter_count),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(register_parameter_count,
- default_stub_registers);
-}
-
-void RecordWriteDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {x0, x1, x2, x3, x4};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void DynamicCheckMapsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register default_stub_registers[] = {x0, x1, x2, x3, cp};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void EphemeronKeyBarrierDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {x0, x1, x2, x3, x4};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-const Register LoadDescriptor::ReceiverRegister() { return x1; }
-const Register LoadDescriptor::NameRegister() { return x2; }
-const Register LoadDescriptor::SlotRegister() { return x0; }
-
-const Register LoadWithVectorDescriptor::VectorRegister() { return x3; }
-
-const Register
-LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
- return x4;
-}
-
-const Register StoreDescriptor::ReceiverRegister() { return x1; }
-const Register StoreDescriptor::NameRegister() { return x2; }
-const Register StoreDescriptor::ValueRegister() { return x0; }
-const Register StoreDescriptor::SlotRegister() { return x4; }
-
-const Register StoreWithVectorDescriptor::VectorRegister() { return x3; }
-
-const Register StoreTransitionDescriptor::SlotRegister() { return x4; }
-const Register StoreTransitionDescriptor::VectorRegister() { return x3; }
-const Register StoreTransitionDescriptor::MapRegister() { return x5; }
-
-const Register ApiGetterDescriptor::HolderRegister() { return x0; }
-const Register ApiGetterDescriptor::CallbackRegister() { return x3; }
-
-const Register GrowArrayElementsDescriptor::ObjectRegister() { return x0; }
-const Register GrowArrayElementsDescriptor::KeyRegister() { return x3; }
-
-const Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() { return x3; }
-const Register BaselineLeaveFrameDescriptor::WeightRegister() { return x4; }
-
-// static
-const Register TypeConversionDescriptor::ArgumentRegister() { return x0; }
-
-void TypeofDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {x3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x1: target
- // x0: number of arguments
- Register registers[] = {x1, x0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x0 : number of arguments (on the stack, not including receiver)
- // x1 : the target to call
- // x4 : arguments list length (untagged)
- // x2 : arguments list (FixedArray)
- Register registers[] = {x1, x0, x4, x2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallForwardVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x1: target
- // x0: number of arguments
- // x2: start index (to support rest parameters)
- Register registers[] = {x1, x0, x2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallFunctionTemplateDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x1 : function template info
- // x2 : number of arguments (on the stack, not including receiver)
- Register registers[] = {x1, x2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallWithSpreadDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x0 : number of arguments (on the stack, not including receiver)
- // x1 : the target to call
- // x2 : the object to spread
- Register registers[] = {x1, x0, x2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x1 : the target to call
- // x2 : the arguments list
- Register registers[] = {x1, x2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x0 : number of arguments (on the stack, not including receiver)
- // x1 : the target to call
- // x3 : the new target
- // x4 : arguments list length (untagged)
- // x2 : arguments list (FixedArray)
- Register registers[] = {x1, x3, x0, x4, x2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x3: new target
- // x1: target
- // x0: number of arguments
- // x2: start index (to support rest parameters)
- Register registers[] = {x1, x3, x0, x2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructWithSpreadDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x0 : number of arguments (on the stack, not including receiver)
- // x1 : the target to call
- // x3 : the new target
- // x2 : the object to spread
- Register registers[] = {x1, x3, x0, x2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x1 : the target to call
- // x3 : the new target
- // x2 : the arguments list
- Register registers[] = {x1, x3, x2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructStubDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x3: new target
- // x1: target
- // x0: number of arguments
- // x2: allocation site or undefined
- Register registers[] = {x1, x3, x0, x2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void AbortDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {x1};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CompareDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x1: left operand
- // x0: right operand
- Register registers[] = {x1, x0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void Compare_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x1: left operand
- // x0: right operand
- // x2: feedback slot
- Register registers[] = {x1, x0, x2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void BinaryOpDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x1: left operand
- // x0: right operand
- Register registers[] = {x1, x0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void BinaryOp_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x1: left operand
- // x0: right operand
- // x2: feedback slot
- Register registers[] = {x1, x0, x2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ApiCallbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- x1, // kApiFunctionAddress
- x2, // kArgc
- x3, // kCallData
- x0, // kHolder
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterDispatchDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
- kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterPushArgsThenCallDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- x0, // argument count (not including receiver)
- x2, // address of first argument
- x1 // the target callable to be called
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- x0, // argument count (not including receiver)
- x4, // address of the first argument
- x1, // constructor to call
- x3, // new target
- x2, // allocation site feedback if available, undefined otherwise
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ResumeGeneratorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- x0, // the value to pass to the generator
- x1 // the JSGeneratorObject to resume
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- x1, // loaded new FP
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {x0, x1};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
index 6a33f864ab..8fb9bbfd7b 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
@@ -7,12 +7,12 @@
#include <ctype.h>
-#include "src/common/globals.h"
-
#include "src/base/bits.h"
#include "src/codegen/arm64/assembler-arm64-inl.h"
#include "src/codegen/arm64/assembler-arm64.h"
#include "src/codegen/macro-assembler.h"
+#include "src/common/globals.h"
+#include "src/execution/isolate-data.h"
namespace v8 {
namespace internal {
@@ -1037,7 +1037,7 @@ void TurboAssembler::InitializeRootRegister() {
ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
Mov(kRootRegister, Operand(isolate_root));
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
- Mov(kPointerCageBaseRegister, Operand(isolate_root));
+ LoadRootRelative(kPtrComprCageBaseRegister, IsolateData::cage_base_offset());
#endif
}
@@ -1200,7 +1200,7 @@ void TurboAssembler::Poke(const CPURegister& src, const Operand& offset) {
if (offset.IsImmediate()) {
DCHECK_GE(offset.ImmediateValue(), 0);
- } else if (emit_debug_code()) {
+ } else if (FLAG_debug_code) {
Cmp(xzr, offset);
Check(le, AbortReason::kStackAccessBelowStackPointer);
}
@@ -1212,7 +1212,7 @@ template <TurboAssembler::LoadLRMode lr_mode>
void TurboAssembler::Peek(const CPURegister& dst, const Operand& offset) {
if (offset.IsImmediate()) {
DCHECK_GE(offset.ImmediateValue(), 0);
- } else if (emit_debug_code()) {
+ } else if (FLAG_debug_code) {
Cmp(xzr, offset);
Check(le, AbortReason::kStackAccessBelowStackPointer);
}
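InitializeRootRegister now reads the pointer-compression cage base out of IsolateData instead of assuming it equals the isolate root, which matters once the cage is shared between isolates. A sketch of the root-relative load, with an invented field layout (not V8's actual IsolateData):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Invented layout: only illustrates that the cage base sits at a
    // fixed, statically known offset from the value in kRootRegister.
    struct IsolateData {
      uintptr_t cage_base;
    };

    uintptr_t LoadRootRelative(const IsolateData* root, size_t offset) {
      // Morally Ldr(dst, MemOperand(kRootRegister, offset)).
      return *reinterpret_cast<const uintptr_t*>(
          reinterpret_cast<const char*>(root) + offset);
    }

    int main() {
      IsolateData isolate_data{0x100000000};
      uintptr_t cage_base =
          LoadRootRelative(&isolate_data, offsetof(IsolateData, cage_base));
      std::printf("cage base: %#llx\n", (unsigned long long)cage_base);
    }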
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
index a3570b8035..b18ff55455 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
@@ -10,6 +10,7 @@
#include "src/codegen/callable.h"
#include "src/codegen/code-factory.h"
#include "src/codegen/external-reference-table.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/macro-assembler-inl.h"
#include "src/codegen/register-configuration.h"
#include "src/codegen/reloc-info.h"
@@ -52,7 +53,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
int bytes = list.Count() * kXRegSizeInBits / 8;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
DCHECK_EQ(kCallerSavedV.Count() % 2, 0);
bytes += kCallerSavedV.Count() * kDRegSizeInBits / 8;
}
@@ -69,7 +70,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
int bytes = list.Count() * kXRegSizeInBits / 8;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
DCHECK_EQ(kCallerSavedV.Count() % 2, 0);
PushCPURegList(kCallerSavedV);
bytes += kCallerSavedV.Count() * kDRegSizeInBits / 8;
@@ -79,7 +80,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) {
int bytes = 0;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
DCHECK_EQ(kCallerSavedV.Count() % 2, 0);
PopCPURegList(kCallerSavedV);
bytes += kCallerSavedV.Count() * kDRegSizeInBits / 8;
@@ -1266,7 +1267,7 @@ void MacroAssembler::PopCalleeSavedRegisters() {
}
void TurboAssembler::AssertSpAligned() {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
HardAbortScope hard_abort(this); // Avoid calls to Abort.
// Arm64 requires the stack pointer to be 16-byte aligned prior to address
// calculation.
@@ -1299,7 +1300,7 @@ void TurboAssembler::CopyDoubleWords(Register dst, Register src, Register count,
CopyDoubleWordsMode mode) {
DCHECK(!AreAliased(dst, src, count));
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Register pointer1 = dst;
Register pointer2 = src;
if (mode == kSrcLessThanDst) {
@@ -1374,7 +1375,7 @@ void TurboAssembler::SlotAddress(Register dst, Register slot_offset) {
}
void TurboAssembler::AssertFPCRState(Register fpcr) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Label unexpected_mode, done;
UseScratchRegisterScope temps(this);
if (fpcr.IsNone()) {
@@ -1473,7 +1474,7 @@ void TurboAssembler::Swap(VRegister lhs, VRegister rhs) {
}
void TurboAssembler::AssertSmi(Register object, AbortReason reason) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
Tst(object, kSmiTagMask);
Check(eq, reason);
@@ -1481,7 +1482,7 @@ void TurboAssembler::AssertSmi(Register object, AbortReason reason) {
}
void MacroAssembler::AssertNotSmi(Register object, AbortReason reason) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
Tst(object, kSmiTagMask);
Check(ne, reason);
@@ -1489,7 +1490,7 @@ void MacroAssembler::AssertNotSmi(Register object, AbortReason reason) {
}
void MacroAssembler::AssertConstructor(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotAConstructor);
UseScratchRegisterScope temps(this);
@@ -1504,7 +1505,7 @@ void MacroAssembler::AssertConstructor(Register object) {
}
void MacroAssembler::AssertFunction(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotAFunction);
UseScratchRegisterScope temps(this);
@@ -1517,7 +1518,7 @@ void MacroAssembler::AssertFunction(Register object) {
}
void MacroAssembler::AssertBoundFunction(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotABoundFunction);
UseScratchRegisterScope temps(this);
@@ -1529,7 +1530,7 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
void MacroAssembler::AssertGeneratorObject(Register object) {
- if (!emit_debug_code()) return;
+ if (!FLAG_debug_code) return;
AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotAGeneratorObject);
// Load map
@@ -1555,7 +1556,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
}
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
Label done_checking;
@@ -1569,7 +1570,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
}
void TurboAssembler::AssertPositiveOrZero(Register value) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Label done;
int sign_bit = value.Is64Bits() ? kXSignBit : kWSignBit;
Tbz(value, sign_bit, &done);
@@ -1599,8 +1600,8 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame) {
Mov(x1, builtin);
- Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
- kArgvOnStack, builtin_exit_frame);
+ Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
+ ArgvMode::kStack, builtin_exit_frame);
Jump(code, RelocInfo::CODE_TARGET);
}
@@ -1942,7 +1943,7 @@ void TurboAssembler::CallBuiltin(int builtin_index) {
Ldr(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Call(scratch);
}
- if (FLAG_code_comments) RecordComment("]");
+ RecordComment("]");
}
void TurboAssembler::TailCallBuiltin(int builtin_index) {
@@ -1971,7 +1972,7 @@ void TurboAssembler::TailCallBuiltin(int builtin_index) {
Ldr(temp, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Jump(temp);
}
- if (FLAG_code_comments) RecordComment("]");
+ RecordComment("]");
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
@@ -2059,7 +2060,7 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
#endif
Poke(x17, 0);
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
// Verify that the slot below fp[kSPOffset]-8 points to the signed return
// location.
Ldr(x16, MemOperand(fp, ExitFrameConstants::kSPOffset));
@@ -2189,7 +2190,7 @@ void MacroAssembler::StackOverflowCheck(Register num_args,
void MacroAssembler::InvokePrologue(Register formal_parameter_count,
Register actual_argument_count, Label* done,
- InvokeFlag flag) {
+ InvokeType type) {
// x0: actual arguments count.
// x1: function (passed through to callee).
// x2: expected arguments count.
@@ -2320,9 +2321,9 @@ void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count,
Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
DCHECK_EQ(function, x1);
DCHECK_IMPLIES(new_target.is_valid(), new_target == x3);
@@ -2341,7 +2342,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
}
Label done;
- InvokePrologue(expected_parameter_count, actual_parameter_count, &done, flag);
+ InvokePrologue(expected_parameter_count, actual_parameter_count, &done, type);
// If actual != expected, InvokePrologue will have handled the call through
// the argument adaptor mechanism.
@@ -2352,11 +2353,13 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
Register code = kJavaScriptCallCodeStartRegister;
LoadTaggedPointerField(code,
FieldMemOperand(function, JSFunction::kCodeOffset));
- if (flag == CALL_FUNCTION) {
- CallCodeObject(code);
- } else {
- DCHECK(flag == JUMP_FUNCTION);
- JumpCodeObject(code);
+ switch (type) {
+ case InvokeType::kCall:
+ CallCodeObject(code);
+ break;
+ case InvokeType::kJump:
+ JumpCodeObject(code);
+ break;
}
B(&done);
@@ -2377,9 +2380,9 @@ Operand MacroAssembler::ReceiverOperand(Register arg_count) {
void MacroAssembler::InvokeFunctionWithNewTarget(
Register function, Register new_target, Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(type == InvokeType::kJump || has_frame());
// Contract with called JS functions requires that function is passed in x1.
// (See FullCodeGenerator::Generate().)
@@ -2400,15 +2403,15 @@ void MacroAssembler::InvokeFunctionWithNewTarget(
SharedFunctionInfo::kFormalParameterCountOffset));
InvokeFunctionCode(function, new_target, expected_parameter_count,
- actual_parameter_count, flag);
+ actual_parameter_count, type);
}
void MacroAssembler::InvokeFunction(Register function,
Register expected_parameter_count,
Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(type == InvokeType::kJump || has_frame());
// Contract with called JS functions requires that function is passed in x1.
// (See FullCodeGenerator::Generate().)
@@ -2419,7 +2422,7 @@ void MacroAssembler::InvokeFunction(Register function,
FieldMemOperand(function, JSFunction::kContextOffset));
InvokeFunctionCode(function, no_reg, expected_parameter_count,
- actual_parameter_count, flag);
+ actual_parameter_count, type);
}
void TurboAssembler::TryConvertDoubleToInt64(Register result,
@@ -2664,7 +2667,7 @@ void MacroAssembler::LeaveExitFrame(bool restore_doubles,
ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
Ldr(cp, MemOperand(scratch));
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
// Also emit debug code to clear the cp in the top frame.
Mov(scratch2, Operand(Context::kInvalidContext));
Mov(scratch, ExternalReference::Create(IsolateAddressId::kContextAddress,
@@ -2715,15 +2718,6 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
IncrementCounter(counter, -value, scratch1, scratch2);
}
-void MacroAssembler::MaybeDropFrames() {
- // Check whether we need to drop frames to restart a function on the stack.
- Mov(x1, ExternalReference::debug_restart_fp_address(isolate()));
- Ldr(x1, MemOperand(x1));
- Tst(x1, x1);
- Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET,
- ne);
-}
-
void MacroAssembler::JumpIfObjectType(Register object, Register map,
Register type_reg, InstanceType type,
Label* if_cond_pass, Condition cond) {
@@ -2860,14 +2854,14 @@ void TurboAssembler::DecompressTaggedPointer(const Register& destination,
const MemOperand& field_operand) {
RecordComment("[ DecompressTaggedPointer");
Ldr(destination.W(), field_operand);
- Add(destination, kPointerCageBaseRegister, destination);
+ Add(destination, kPtrComprCageBaseRegister, destination);
RecordComment("]");
}
void TurboAssembler::DecompressTaggedPointer(const Register& destination,
const Register& source) {
RecordComment("[ DecompressTaggedPointer");
- Add(destination, kPointerCageBaseRegister, Operand(source, UXTW));
+ Add(destination, kPtrComprCageBaseRegister, Operand(source, UXTW));
RecordComment("]");
}
@@ -2875,7 +2869,7 @@ void TurboAssembler::DecompressAnyTagged(const Register& destination,
const MemOperand& field_operand) {
RecordComment("[ DecompressAnyTagged");
Ldr(destination.W(), field_operand);
- Add(destination, kPointerCageBaseRegister, destination);
+ Add(destination, kPtrComprCageBaseRegister, destination);
RecordComment("]");
}
@@ -2904,7 +2898,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
Label done;
// Skip the barrier if writing a smi.
- if (smi_check == INLINE_SMI_CHECK) {
+ if (smi_check == SmiCheck::kInline) {
JumpIfSmi(value, &done);
}
@@ -2912,7 +2906,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
// of the object, so offset must be a multiple of kTaggedSize.
DCHECK(IsAligned(offset, kTaggedSize));
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Label ok;
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
@@ -2924,7 +2918,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
RecordWrite(object, Operand(offset - kHeapObjectTag), value, lr_status,
- save_fp, remembered_set_action, OMIT_SMI_CHECK);
+ save_fp, remembered_set_action, SmiCheck::kOmit);
Bind(&done);
}
@@ -3069,7 +3063,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
ASM_LOCATION_IN_ASSEMBLER("MacroAssembler::RecordWrite");
DCHECK(!AreAliased(object, value));
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
@@ -3079,7 +3073,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
}
- if ((remembered_set_action == OMIT_REMEMBERED_SET &&
+ if ((remembered_set_action == RememberedSetAction::kOmit &&
!FLAG_incremental_marking) ||
FLAG_disable_write_barriers) {
return;
@@ -3089,7 +3083,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
// catch stores of smis and stores into the young generation.
Label done;
- if (smi_check == INLINE_SMI_CHECK) {
+ if (smi_check == SmiCheck::kInline) {
DCHECK_EQ(0, kSmiTag);
JumpIfSmi(value, &done);
}
@@ -3112,13 +3106,13 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
}
void TurboAssembler::Assert(Condition cond, AbortReason reason) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Check(cond, reason);
}
}
void TurboAssembler::AssertUnreachable(AbortReason reason) {
- if (emit_debug_code()) Abort(reason);
+ if (FLAG_debug_code) Abort(reason);
}
void TurboAssembler::Check(Condition cond, AbortReason reason) {
@@ -3133,10 +3127,10 @@ void TurboAssembler::Trap() { Brk(0); }
void TurboAssembler::DebugBreak() { Debug("DebugBreak", 0, BREAK); }
void TurboAssembler::Abort(AbortReason reason) {
-#ifdef DEBUG
- RecordComment("Abort message: ");
- RecordComment(GetAbortReason(reason));
-#endif
+ if (FLAG_code_comments) {
+ RecordComment("Abort message: ");
+ RecordComment(GetAbortReason(reason));
+ }
// Avoid emitting call to builtin if requested.
if (trap_on_abort()) {
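Alongside the kPtrComprCageBaseRegister rename, the DecompressTagged* hunks above keep the same arithmetic: a compressed tagged value is the low 32 bits of the full pointer, and decompression zero-extends and adds the cage base back (the UXTW operand). A worked sketch of that round trip:

    #include <cassert>
    #include <cstdint>

    uint64_t Decompress(uint64_t cage_base, uint32_t compressed) {
      return cage_base + compressed;  // Add(dst, cage_base, Operand(src, UXTW))
    }

    int main() {
      const uint64_t cage_base = 0x0000400000000000;  // illustrative cage start
      const uint64_t full = cage_base + 0x12345670;   // some tagged pointer
      const uint32_t compressed = static_cast<uint32_t>(full);  // Ldr(dst.W(), ...)
      assert(Decompress(cage_base, compressed) == full);
      return 0;
    }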
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
index a749676ccc..7bc6432c36 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
@@ -126,8 +126,6 @@ inline BranchType InvertBranchType(BranchType type) {
}
}
-enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
-enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };
@@ -1849,17 +1847,17 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// ---- Calling / Jumping helpers ----
void CallRuntime(const Runtime::Function* f, int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore);
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid, int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
}
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, save_doubles);
}
@@ -1882,7 +1880,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// 'call_kind' must be x5.
void InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count, Label* done,
- InvokeFlag flag);
+ InvokeType type);
// On function call, call into the debugger.
void CallDebugOnFunctionCall(Register fun, Register new_target,
@@ -1890,20 +1888,17 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
Register actual_parameter_count);
void InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count,
- Register actual_parameter_count, InvokeFlag flag);
+ Register actual_parameter_count, InvokeType type);
// Invoke the JavaScript function in the given register.
// Changes the current context to the context in the function before invoking.
void InvokeFunctionWithNewTarget(Register function, Register new_target,
Register actual_parameter_count,
- InvokeFlag flag);
+ InvokeType type);
void InvokeFunction(Register function, Register expected_parameter_count,
- Register actual_parameter_count, InvokeFlag flag);
+ Register actual_parameter_count, InvokeType type);
// ---- Code generation helpers ----
- // Frame restart support
- void MaybeDropFrames();
-
// ---------------------------------------------------------------------------
// Support functions.
@@ -2032,16 +2027,16 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void RecordWriteField(
Register object, int offset, Register value, LinkRegisterStatus lr_status,
SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
// For a given |object| notify the garbage collector that the slot at |offset|
// has been written. |value| is the object being stored.
void RecordWrite(
Register object, Operand offset, Register value,
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
// ---------------------------------------------------------------------------
// Debugging.
diff --git a/deps/v8/src/codegen/arm64/register-arm64.h b/deps/v8/src/codegen/arm64/register-arm64.h
index 819806319a..1150daf4c6 100644
--- a/deps/v8/src/codegen/arm64/register-arm64.h
+++ b/deps/v8/src/codegen/arm64/register-arm64.h
@@ -102,7 +102,7 @@ class CPURegister : public RegisterBase<CPURegister, kRegAfterLast> {
}
static constexpr CPURegister Create(int code, int size, RegisterType type) {
- CONSTEXPR_DCHECK(IsValid(code, size, type));
+ DCHECK(IsValid(code, size, type));
return CPURegister{code, size, type};
}
@@ -320,7 +320,7 @@ class VRegister : public CPURegister {
}
static constexpr VRegister Create(int code, int size, int lane_count = 1) {
- CONSTEXPR_DCHECK(IsValidLaneCount(lane_count));
+ DCHECK(IsValidLaneCount(lane_count));
return VRegister(CPURegister::Create(code, size, CPURegister::kVRegister),
lane_count);
}
@@ -477,9 +477,9 @@ ALIAS_REGISTER(Register, kRootRegister, x26);
ALIAS_REGISTER(Register, rr, x26);
// Pointer cage base register.
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
-ALIAS_REGISTER(Register, kPointerCageBaseRegister, x28);
+ALIAS_REGISTER(Register, kPtrComprCageBaseRegister, x28);
#else
-ALIAS_REGISTER(Register, kPointerCageBaseRegister, kRootRegister);
+ALIAS_REGISTER(Register, kPtrComprCageBaseRegister, kRootRegister);
#endif
// Context pointer register.
ALIAS_REGISTER(Register, cp, x27);
@@ -547,8 +547,6 @@ using Simd128Register = VRegister;
// Lists of registers.
class V8_EXPORT_PRIVATE CPURegList {
public:
- CPURegList() = default;
-
template <typename... CPURegisters>
explicit CPURegList(CPURegister reg0, CPURegisters... regs)
: list_(CPURegister::ListOf(reg0, regs...)),
diff --git a/deps/v8/src/codegen/assembler.cc b/deps/v8/src/codegen/assembler.cc
index 95983705ab..bb80d366de 100644
--- a/deps/v8/src/codegen/assembler.cc
+++ b/deps/v8/src/codegen/assembler.cc
@@ -69,7 +69,7 @@ AssemblerOptions AssemblerOptions::Default(Isolate* isolate) {
#endif
options.inline_offheap_trampolines &= !generating_embedded_builtin;
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
- const base::AddressRegion& code_range = isolate->heap()->code_range();
+ const base::AddressRegion& code_range = isolate->heap()->code_region();
DCHECK_IMPLIES(code_range.begin() != kNullAddress, !code_range.is_empty());
options.code_range_start = code_range.begin();
#endif
@@ -180,7 +180,6 @@ AssemblerBase::AssemblerBase(const AssemblerOptions& options,
: buffer_(std::move(buffer)),
options_(options),
enabled_cpu_features_(0),
- emit_debug_code_(FLAG_debug_code),
predictable_code_size_(false),
constant_pool_available_(false),
jump_optimization_info_(nullptr) {
@@ -298,6 +297,7 @@ Handle<HeapObject> AssemblerBase::GetEmbeddedObject(
int Assembler::WriteCodeComments() {
+ if (!FLAG_code_comments) return 0;
CHECK_IMPLIES(code_comments_writer_.entry_count() > 0,
options().emit_code_comments);
if (code_comments_writer_.entry_count() == 0) return 0;
diff --git a/deps/v8/src/codegen/assembler.h b/deps/v8/src/codegen/assembler.h
index 7066905966..98cca61a7c 100644
--- a/deps/v8/src/codegen/assembler.h
+++ b/deps/v8/src/codegen/assembler.h
@@ -222,9 +222,6 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
const AssemblerOptions& options() const { return options_; }
- bool emit_debug_code() const { return emit_debug_code_; }
- void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
-
bool predictable_code_size() const { return predictable_code_size_; }
void set_predictable_code_size(bool value) { predictable_code_size_ = value; }
@@ -291,7 +288,10 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
// Record an inline code comment that can be used by a disassembler.
// Use --code-comments to enable.
- void RecordComment(const char* msg) {
+ V8_INLINE void RecordComment(const char* msg) {
+ // Set explicit dependency on --code-comments for dead-code elimination in
+ // release builds.
+ if (!FLAG_code_comments) return;
if (options().emit_code_comments) {
code_comments_writer_.Add(pc_offset(), std::string(msg));
}
@@ -346,7 +346,7 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
DCHECK(!RelocInfo::IsNone(rmode));
if (options().disable_reloc_info_for_patching) return false;
if (RelocInfo::IsOnlyForSerializer(rmode) &&
- !options().record_reloc_info_for_serialization && !emit_debug_code()) {
+ !options().record_reloc_info_for_serialization && !FLAG_debug_code) {
return false;
}
return true;
@@ -378,7 +378,6 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
const AssemblerOptions options_;
uint64_t enabled_cpu_features_;
- bool emit_debug_code_;
bool predictable_code_size_;
// Indicates whether the constant pool can be accessed, which is only possible
@@ -392,20 +391,6 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
friend class ConstantPoolUnavailableScope;
};
-// Avoids emitting debug code during the lifetime of this scope object.
-class V8_NODISCARD DontEmitDebugCodeScope {
- public:
- explicit DontEmitDebugCodeScope(AssemblerBase* assembler)
- : assembler_(assembler), old_value_(assembler->emit_debug_code()) {
- assembler_->set_emit_debug_code(false);
- }
- ~DontEmitDebugCodeScope() { assembler_->set_emit_debug_code(old_value_); }
-
- private:
- AssemblerBase* assembler_;
- bool old_value_;
-};
-
// Enable a specified feature within a scope.
class V8_EXPORT_PRIVATE V8_NODISCARD CpuFeatureScope {
public:
@@ -425,7 +410,7 @@ class V8_EXPORT_PRIVATE V8_NODISCARD CpuFeatureScope {
#else
CpuFeatureScope(AssemblerBase* assembler, CpuFeature f,
CheckPolicy check = kCheckSupported) {}
- ~CpuFeatureScope() { // NOLINT (modernize-use-equals-default)
+ ~CpuFeatureScope() {
// Define a destructor to avoid unused variable warnings.
}
#endif
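RecordComment is now V8_INLINE with an explicit early return on --code-comments, so in builds where the flag is a compile-time constant every call site folds away; the guard added to Assembler::WriteCodeComments earlier in this diff serves the same purpose. A sketch of the dead-code-elimination pattern, assuming a constant readonly flag:

    #include <cstdio>
    #include <string>
    #include <vector>

    // Assumption: in release builds the flag can be a constexpr false.
    constexpr bool FLAG_code_comments = false;

    class AssemblerBase {
     public:
      inline void RecordComment(const char* msg) {
        if (!FLAG_code_comments) return;  // folds to nothing when constant
        comments_.emplace_back(msg);
      }
      std::size_t comment_count() const { return comments_.size(); }

     private:
      std::vector<std::string> comments_;
    };

    int main() {
      AssemblerBase masm;
      masm.RecordComment("[ DecompressTaggedPointer");  // compiled away here
      std::printf("%zu comments recorded\n", masm.comment_count());
    }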
diff --git a/deps/v8/src/codegen/bailout-reason.h b/deps/v8/src/codegen/bailout-reason.h
index 57bbbca723..e8afa74e16 100644
--- a/deps/v8/src/codegen/bailout-reason.h
+++ b/deps/v8/src/codegen/bailout-reason.h
@@ -24,6 +24,7 @@ namespace internal {
V(kExpectedFeedbackVector, "Expected feedback vector") \
V(kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, \
"The function_data field should be a BytecodeArray on interpreter entry") \
+ V(kFpuTopIsNotZeroInDeoptimizer, "FPU TOP is not zero in deoptimizer") \
V(kInputStringTooLong, "Input string too long") \
V(kInvalidBytecode, "Invalid bytecode") \
V(kInvalidBytecodeAdvance, "Cannot advance current bytecode, ") \
diff --git a/deps/v8/src/codegen/code-factory.cc b/deps/v8/src/codegen/code-factory.cc
index ece8200023..854969f8cb 100644
--- a/deps/v8/src/codegen/code-factory.cc
+++ b/deps/v8/src/codegen/code-factory.cc
@@ -31,25 +31,35 @@ Handle<Code> CodeFactory::CEntry(Isolate* isolate, int result_size,
const ArgvMode am = argv_mode;
const bool be = builtin_exit_frame;
- if (rs == 1 && sd == kDontSaveFPRegs && am == kArgvOnStack && !be) {
+ if (rs == 1 && sd == SaveFPRegsMode::kIgnore && am == ArgvMode::kStack &&
+ !be) {
return CENTRY_CODE(Return1, DontSaveFPRegs, ArgvOnStack, NoBuiltinExit);
- } else if (rs == 1 && sd == kDontSaveFPRegs && am == kArgvOnStack && be) {
+ } else if (rs == 1 && sd == SaveFPRegsMode::kIgnore &&
+ am == ArgvMode::kStack && be) {
return CENTRY_CODE(Return1, DontSaveFPRegs, ArgvOnStack, BuiltinExit);
- } else if (rs == 1 && sd == kDontSaveFPRegs && am == kArgvInRegister && !be) {
+ } else if (rs == 1 && sd == SaveFPRegsMode::kIgnore &&
+ am == ArgvMode::kRegister && !be) {
return CENTRY_CODE(Return1, DontSaveFPRegs, ArgvInRegister, NoBuiltinExit);
- } else if (rs == 1 && sd == kSaveFPRegs && am == kArgvOnStack && !be) {
+ } else if (rs == 1 && sd == SaveFPRegsMode::kSave && am == ArgvMode::kStack &&
+ !be) {
return CENTRY_CODE(Return1, SaveFPRegs, ArgvOnStack, NoBuiltinExit);
- } else if (rs == 1 && sd == kSaveFPRegs && am == kArgvOnStack && be) {
+ } else if (rs == 1 && sd == SaveFPRegsMode::kSave && am == ArgvMode::kStack &&
+ be) {
return CENTRY_CODE(Return1, SaveFPRegs, ArgvOnStack, BuiltinExit);
- } else if (rs == 2 && sd == kDontSaveFPRegs && am == kArgvOnStack && !be) {
+ } else if (rs == 2 && sd == SaveFPRegsMode::kIgnore &&
+ am == ArgvMode::kStack && !be) {
return CENTRY_CODE(Return2, DontSaveFPRegs, ArgvOnStack, NoBuiltinExit);
- } else if (rs == 2 && sd == kDontSaveFPRegs && am == kArgvOnStack && be) {
+ } else if (rs == 2 && sd == SaveFPRegsMode::kIgnore &&
+ am == ArgvMode::kStack && be) {
return CENTRY_CODE(Return2, DontSaveFPRegs, ArgvOnStack, BuiltinExit);
- } else if (rs == 2 && sd == kDontSaveFPRegs && am == kArgvInRegister && !be) {
+ } else if (rs == 2 && sd == SaveFPRegsMode::kIgnore &&
+ am == ArgvMode::kRegister && !be) {
return CENTRY_CODE(Return2, DontSaveFPRegs, ArgvInRegister, NoBuiltinExit);
- } else if (rs == 2 && sd == kSaveFPRegs && am == kArgvOnStack && !be) {
+ } else if (rs == 2 && sd == SaveFPRegsMode::kSave && am == ArgvMode::kStack &&
+ !be) {
return CENTRY_CODE(Return2, SaveFPRegs, ArgvOnStack, NoBuiltinExit);
- } else if (rs == 2 && sd == kSaveFPRegs && am == kArgvOnStack && be) {
+ } else if (rs == 2 && sd == SaveFPRegsMode::kSave && am == ArgvMode::kStack &&
+ be) {
return CENTRY_CODE(Return2, SaveFPRegs, ArgvOnStack, BuiltinExit);
}
@@ -70,7 +80,7 @@ Callable CodeFactory::CallApiCallback(Isolate* isolate) {
// static
Callable CodeFactory::LoadGlobalIC(Isolate* isolate, TypeofMode typeof_mode) {
- return typeof_mode == NOT_INSIDE_TYPEOF
+ return typeof_mode == TypeofMode::kNotInside
? Builtins::CallableFor(isolate, Builtins::kLoadGlobalICTrampoline)
: Builtins::CallableFor(
isolate, Builtins::kLoadGlobalICInsideTypeofTrampoline);
@@ -79,7 +89,7 @@ Callable CodeFactory::LoadGlobalIC(Isolate* isolate, TypeofMode typeof_mode) {
// static
Callable CodeFactory::LoadGlobalICInOptimizedCode(Isolate* isolate,
TypeofMode typeof_mode) {
- return typeof_mode == NOT_INSIDE_TYPEOF
+ return typeof_mode == TypeofMode::kNotInside
? Builtins::CallableFor(isolate, Builtins::kLoadGlobalIC)
: Builtins::CallableFor(isolate,
Builtins::kLoadGlobalICInsideTypeof);
@@ -233,16 +243,6 @@ Callable CodeFactory::ResumeGenerator(Isolate* isolate) {
}
// static
-Callable CodeFactory::FrameDropperTrampoline(Isolate* isolate) {
- return Builtins::CallableFor(isolate, Builtins::kFrameDropperTrampoline);
-}
-
-// static
-Callable CodeFactory::HandleDebuggerStatement(Isolate* isolate) {
- return Builtins::CallableFor(isolate, Builtins::kHandleDebuggerStatement);
-}
-
-// static
Callable CodeFactory::FastNewFunctionContext(Isolate* isolate,
ScopeType scope_type) {
switch (scope_type) {
@@ -388,8 +388,8 @@ Callable CodeFactory::InterpreterPushArgsThenConstruct(
Callable CodeFactory::InterpreterCEntry(Isolate* isolate, int result_size) {
// Note: If we ever use fpregs in the interpreter then we will need to
// save fpregs too.
- Handle<Code> code = CodeFactory::CEntry(isolate, result_size, kDontSaveFPRegs,
- kArgvInRegister);
+ Handle<Code> code = CodeFactory::CEntry(
+ isolate, result_size, SaveFPRegsMode::kIgnore, ArgvMode::kRegister);
if (result_size == 1) {
return Callable(code, InterpreterCEntry1Descriptor{});
} else {
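The CEntry ladder above simply widens each condition for the scoped enums; the selected builtin remains a pure function of four inputs. A sketch of that mapping (the names are illustrative, not V8's builtin identifiers, which come from CENTRY_CODE):

    #include <cstdio>
    #include <string>

    enum class SaveFPRegsMode { kIgnore, kSave };
    enum class ArgvMode { kStack, kRegister };

    std::string SelectCEntry(int result_size, SaveFPRegsMode save,
                             ArgvMode argv, bool builtin_exit) {
      std::string name = "CEntry_Return" + std::to_string(result_size);
      name += (save == SaveFPRegsMode::kSave) ? "_SaveFPRegs" : "_DontSaveFPRegs";
      name += (argv == ArgvMode::kRegister) ? "_ArgvInRegister" : "_ArgvOnStack";
      name += builtin_exit ? "_BuiltinExit" : "_NoBuiltinExit";
      return name;  // the real code returns CENTRY_CODE(...) handles
    }

    int main() {
      std::printf("%s\n",
                  SelectCEntry(1, SaveFPRegsMode::kIgnore, ArgvMode::kStack, false)
                      .c_str());
    }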
diff --git a/deps/v8/src/codegen/code-factory.h b/deps/v8/src/codegen/code-factory.h
index aab2977045..e55de10533 100644
--- a/deps/v8/src/codegen/code-factory.h
+++ b/deps/v8/src/codegen/code-factory.h
@@ -28,10 +28,10 @@ class V8_EXPORT_PRIVATE CodeFactory final {
// is exported here.
static Handle<Code> RuntimeCEntry(Isolate* isolate, int result_size = 1);
- static Handle<Code> CEntry(Isolate* isolate, int result_size = 1,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs,
- ArgvMode argv_mode = kArgvOnStack,
- bool builtin_exit_frame = false);
+ static Handle<Code> CEntry(
+ Isolate* isolate, int result_size = 1,
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore,
+ ArgvMode argv_mode = ArgvMode::kStack, bool builtin_exit_frame = false);
// Initial states for ICs.
static Callable LoadGlobalIC(Isolate* isolate, TypeofMode typeof_mode);
@@ -49,9 +49,6 @@ class V8_EXPORT_PRIVATE CodeFactory final {
static Callable ResumeGenerator(Isolate* isolate);
- static Callable FrameDropperTrampoline(Isolate* isolate);
- static Callable HandleDebuggerStatement(Isolate* isolate);
-
static Callable BinaryOperation(Isolate* isolate, Operation op);
static Callable ApiGetter(Isolate* isolate);
diff --git a/deps/v8/src/codegen/code-stub-assembler.cc b/deps/v8/src/codegen/code-stub-assembler.cc
index 492e6aaf37..5995a766d1 100644
--- a/deps/v8/src/codegen/code-stub-assembler.cc
+++ b/deps/v8/src/codegen/code-stub-assembler.cc
@@ -103,7 +103,7 @@ void CodeStubAssembler::Check(const BranchGenerator& branch,
std::initializer_list<ExtraNode> extra_nodes) {
Label ok(this);
Label not_ok(this, Label::kDeferred);
- if (message != nullptr && FLAG_code_comments) {
+ if (message != nullptr) {
Comment("[ Assert: ", message);
} else {
Comment("[ Assert");
@@ -1368,6 +1368,7 @@ TNode<HeapObject> CodeStubAssembler::AllocateInNewSpace(
TNode<HeapObject> CodeStubAssembler::Allocate(TNode<IntPtrT> size_in_bytes,
AllocationFlags flags) {
Comment("Allocate");
+ if (FLAG_single_generation) flags |= kPretenured;
bool const new_space = !(flags & kPretenured);
bool const allow_large_objects = flags & kAllowLargeObjectAllocation;
// For optimized allocations, we don't allow the allocation to happen in a
@@ -1574,8 +1575,8 @@ TNode<RawPtrT> CodeStubAssembler::LoadExternalPointerFromObject(
TNode<UintPtrT> entry = Load<UintPtrT>(table, table_offset);
if (external_pointer_tag != 0) {
- TNode<UintPtrT> tag = UintPtrConstant(external_pointer_tag);
- entry = UncheckedCast<UintPtrT>(WordXor(entry, tag));
+ TNode<UintPtrT> tag = UintPtrConstant(~external_pointer_tag);
+ entry = UncheckedCast<UintPtrT>(WordAnd(entry, tag));
}
return UncheckedCast<RawPtrT>(UncheckedCast<WordT>(entry));
#else
@@ -1603,7 +1604,7 @@ void CodeStubAssembler::StoreExternalPointerToObject(
TNode<UintPtrT> value = UncheckedCast<UintPtrT>(pointer);
if (external_pointer_tag != 0) {
TNode<UintPtrT> tag = UintPtrConstant(external_pointer_tag);
- value = UncheckedCast<UintPtrT>(WordXor(pointer, tag));
+ value = UncheckedCast<UintPtrT>(WordOr(pointer, tag));
}
StoreNoWriteBarrier(MachineType::PointerRepresentation(), table, table_offset,
value);
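
The two hunks above switch external-pointer tagging from XOR to OR on store and AND-with-complement on load. A minimal 64-bit sketch of that scheme, with an illustrative tag value (the real tags live in include/v8-internal.h):

#include <cstdint>

// Illustrative only: tag bits sit above the address bits of the pointer.
constexpr uint64_t kFooTag = uint64_t{0xFF} << 48;

uint64_t StoreEntry(uint64_t pointer) { return pointer | kFooTag; }  // WordOr
uint64_t LoadEntry(uint64_t entry) { return entry & ~kFooTag; }      // WordAnd

Loading an entry with the wrong tag leaves the other tag's bits set, so a type confusion yields a non-canonical pointer rather than a usable one.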
@@ -1619,6 +1620,8 @@ TNode<Object> CodeStubAssembler::LoadFromParentFrame(int offset) {
TNode<IntPtrT> CodeStubAssembler::LoadAndUntagObjectField(
TNode<HeapObject> object, int offset) {
+ // Please use LoadMap(object) instead.
+ DCHECK_NE(offset, HeapObject::kMapOffset);
if (SmiValuesAre32Bits()) {
#if V8_TARGET_LITTLE_ENDIAN
offset += 4;
@@ -1631,6 +1634,8 @@ TNode<IntPtrT> CodeStubAssembler::LoadAndUntagObjectField(
TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ObjectField(
TNode<HeapObject> object, int offset) {
+ // Please use LoadMap(object) instead.
+ DCHECK_NE(offset, HeapObject::kMapOffset);
if (SmiValuesAre32Bits()) {
#if V8_TARGET_LITTLE_ENDIAN
offset += 4;
@@ -1656,7 +1661,15 @@ TNode<Map> CodeStubAssembler::GetInstanceTypeMap(InstanceType instance_type) {
}
TNode<Map> CodeStubAssembler::LoadMap(TNode<HeapObject> object) {
- return LoadObjectField<Map>(object, HeapObject::kMapOffset);
+ TNode<Map> map = LoadObjectField<Map>(object, HeapObject::kMapOffset);
+#ifdef V8_MAP_PACKING
+  // Check that the loaded map is unpacked, i.e. that the lowest two bits != 0b10.
+ CSA_ASSERT(this,
+ WordNotEqual(WordAnd(BitcastTaggedToWord(map),
+ IntPtrConstant(Internals::kMapWordXorMask)),
+ IntPtrConstant(Internals::kMapWordSignature)));
+#endif
+ return map;
}
TNode<Uint16T> CodeStubAssembler::LoadInstanceType(TNode<HeapObject> object) {
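
A sketch of what the new CSA_ASSERT in LoadMap checks, assuming the packed-map-word convention described in the comment (constants here are placeholders for Internals::kMapWordXorMask and Internals::kMapWordSignature):

#include <cstdint>

constexpr uintptr_t kMapWordXorMask = 0b11;    // placeholder value
constexpr uintptr_t kMapWordSignature = 0b10;  // placeholder value

// A still-packed map word would carry the signature in its low bits.
bool LooksUnpacked(uintptr_t map_word) {
  return (map_word & kMapWordXorMask) != kMapWordSignature;
}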
@@ -2033,6 +2046,13 @@ void CodeStubAssembler::DispatchMaybeObject(TNode<MaybeObject> maybe_object,
Goto(if_strong);
}
+void CodeStubAssembler::AssertHasValidMap(TNode<HeapObject> object) {
+#ifdef V8_MAP_PACKING
+  // Check that the map is unpacked and is a valid map.
+ CSA_ASSERT(this, IsMap(LoadMap(object)));
+#endif
+}
+
TNode<BoolT> CodeStubAssembler::IsStrong(TNode<MaybeObject> value) {
return Word32Equal(Word32And(TruncateIntPtrToInt32(
BitcastTaggedToWordForTagAndSmiBits(value)),
@@ -2943,12 +2963,14 @@ void CodeStubAssembler::StoreObjectField(TNode<HeapObject> object,
void CodeStubAssembler::UnsafeStoreObjectFieldNoWriteBarrier(
TNode<HeapObject> object, int offset, TNode<Object> value) {
+ DCHECK_NE(HeapObject::kMapOffset, offset); // Use StoreMap instead.
OptimizedStoreFieldUnsafeNoWriteBarrier(MachineRepresentation::kTagged,
object, offset, value);
}
void CodeStubAssembler::StoreMap(TNode<HeapObject> object, TNode<Map> map) {
OptimizedStoreMap(object, map);
+ AssertHasValidMap(object);
}
void CodeStubAssembler::StoreMapNoWriteBarrier(TNode<HeapObject> object,
@@ -2958,16 +2980,19 @@ void CodeStubAssembler::StoreMapNoWriteBarrier(TNode<HeapObject> object,
void CodeStubAssembler::StoreMapNoWriteBarrier(TNode<HeapObject> object,
TNode<Map> map) {
- OptimizedStoreFieldAssertNoWriteBarrier(MachineRepresentation::kTaggedPointer,
- object, HeapObject::kMapOffset, map);
+ OptimizedStoreMap(object, map);
+ AssertHasValidMap(object);
}
void CodeStubAssembler::StoreObjectFieldRoot(TNode<HeapObject> object,
int offset, RootIndex root_index) {
- if (RootsTable::IsImmortalImmovable(root_index)) {
- StoreObjectFieldNoWriteBarrier(object, offset, LoadRoot(root_index));
+ TNode<Object> root = LoadRoot(root_index);
+ if (offset == HeapObject::kMapOffset) {
+ StoreMap(object, CAST(root));
+ } else if (RootsTable::IsImmortalImmovable(root_index)) {
+ StoreObjectFieldNoWriteBarrier(object, offset, root);
} else {
- StoreObjectField(object, offset, LoadRoot(root_index));
+ StoreObjectField(object, offset, root);
}
}
@@ -4762,7 +4787,11 @@ void CodeStubAssembler::MoveElements(ElementsKind kind,
TNode<IntPtrT> length) {
Label finished(this);
Label needs_barrier(this);
+#ifdef V8_DISABLE_WRITE_BARRIERS
+ const bool needs_barrier_check = false;
+#else
const bool needs_barrier_check = !IsDoubleElementsKind(kind);
+#endif // V8_DISABLE_WRITE_BARRIERS
DCHECK(IsFastElementsKind(kind));
CSA_ASSERT(this, IsFixedArrayWithKind(elements, kind));
@@ -4847,7 +4876,11 @@ void CodeStubAssembler::CopyElements(ElementsKind kind,
WriteBarrierMode write_barrier) {
Label finished(this);
Label needs_barrier(this);
+#ifdef V8_DISABLE_WRITE_BARRIERS
+ const bool needs_barrier_check = false;
+#else
const bool needs_barrier_check = !IsDoubleElementsKind(kind);
+#endif // V8_DISABLE_WRITE_BARRIERS
DCHECK(IsFastElementsKind(kind));
CSA_ASSERT(this, IsFixedArrayWithKind(dst_elements, kind));
@@ -5294,6 +5327,10 @@ TNode<FixedArrayBase> CodeStubAssembler::GrowElementsCapacity(
return new_elements;
}
+template TNode<FixedArrayBase> CodeStubAssembler::GrowElementsCapacity<IntPtrT>(
+ TNode<HeapObject>, TNode<FixedArrayBase>, ElementsKind, ElementsKind,
+ TNode<IntPtrT>, TNode<IntPtrT>, compiler::CodeAssemblerLabel*);
+
void CodeStubAssembler::InitializeAllocationMemento(
TNode<HeapObject> base, TNode<IntPtrT> base_allocation_size,
TNode<AllocationSite> allocation_site) {
@@ -6036,6 +6073,13 @@ TNode<BoolT> CodeStubAssembler::IsNoElementsProtectorCellInvalid() {
return TaggedEqual(cell_value, invalid);
}
+TNode<BoolT> CodeStubAssembler::IsMegaDOMProtectorCellInvalid() {
+ TNode<Smi> invalid = SmiConstant(Protectors::kProtectorInvalid);
+ TNode<PropertyCell> cell = MegaDOMProtectorConstant();
+ TNode<Object> cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
+ return TaggedEqual(cell_value, invalid);
+}
+
TNode<BoolT> CodeStubAssembler::IsArrayIteratorProtectorCellInvalid() {
TNode<Smi> invalid = SmiConstant(Protectors::kProtectorInvalid);
TNode<PropertyCell> cell = ArrayIteratorProtectorConstant();
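
IsMegaDOMProtectorCellInvalid follows the same protector pattern as its neighbors: a PropertyCell holds a Smi that starts valid and is flipped once, permanently, when the guarded invariant breaks. A toy model of the idea (not V8 API):

struct Protector {
  static constexpr int kValid = 1;
  static constexpr int kInvalid = 0;
  int value = kValid;
  void Invalidate() { value = kInvalid; }               // one-way transition
  bool IsInvalid() const { return value == kInvalid; }
};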
@@ -6285,14 +6329,27 @@ TNode<BoolT> CodeStubAssembler::IsJSObjectInstanceType(
Int32Constant(FIRST_JS_OBJECT_TYPE));
}
+TNode<BoolT> CodeStubAssembler::IsJSApiObjectInstanceType(
+ TNode<Int32T> instance_type) {
+ return InstanceTypeEqual(instance_type, JS_API_OBJECT_TYPE);
+}
+
TNode<BoolT> CodeStubAssembler::IsJSObjectMap(TNode<Map> map) {
return IsJSObjectInstanceType(LoadMapInstanceType(map));
}
+TNode<BoolT> CodeStubAssembler::IsJSApiObjectMap(TNode<Map> map) {
+ return IsJSApiObjectInstanceType(LoadMapInstanceType(map));
+}
+
TNode<BoolT> CodeStubAssembler::IsJSObject(TNode<HeapObject> object) {
return IsJSObjectMap(LoadMap(object));
}
+TNode<BoolT> CodeStubAssembler::IsJSApiObject(TNode<HeapObject> object) {
+ return IsJSApiObjectMap(LoadMap(object));
+}
+
TNode<BoolT> CodeStubAssembler::IsJSFinalizationRegistryMap(TNode<Map> map) {
return InstanceTypeEqual(LoadMapInstanceType(map),
JS_FINALIZATION_REGISTRY_TYPE);
@@ -7672,15 +7729,25 @@ TNode<Object> CodeStubAssembler::OrdinaryToPrimitive(
TNode<Uint32T> CodeStubAssembler::DecodeWord32(TNode<Word32T> word32,
uint32_t shift, uint32_t mask) {
DCHECK_EQ((mask >> shift) << shift, mask);
- return Unsigned(Word32And(Word32Shr(word32, static_cast<int>(shift)),
- Int32Constant(mask >> shift)));
+ if ((std::numeric_limits<uint32_t>::max() >> shift) ==
+ ((std::numeric_limits<uint32_t>::max() & mask) >> shift)) {
+ return Unsigned(Word32Shr(word32, static_cast<int>(shift)));
+ } else {
+ return Unsigned(Word32And(Word32Shr(word32, static_cast<int>(shift)),
+ Int32Constant(mask >> shift)));
+ }
}
TNode<UintPtrT> CodeStubAssembler::DecodeWord(TNode<WordT> word, uint32_t shift,
uintptr_t mask) {
DCHECK_EQ((mask >> shift) << shift, mask);
- return Unsigned(WordAnd(WordShr(word, static_cast<int>(shift)),
- IntPtrConstant(mask >> shift)));
+ if ((std::numeric_limits<uintptr_t>::max() >> shift) ==
+ ((std::numeric_limits<uintptr_t>::max() & mask) >> shift)) {
+ return Unsigned(WordShr(word, static_cast<int>(shift)));
+ } else {
+ return Unsigned(WordAnd(WordShr(word, static_cast<int>(shift)),
+ IntPtrConstant(mask >> shift)));
+ }
}
TNode<Word32T> CodeStubAssembler::UpdateWord32(TNode<Word32T> word,
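
Both Decode changes above apply the same strength reduction: when the mask, shifted down, already covers every bit that survives the shift, the AND is redundant and a bare shift suffices. A standalone sketch with a worked case:

#include <cstdint>
#include <limits>

uint32_t DecodeField(uint32_t word, uint32_t shift, uint32_t mask) {
  // If (mask >> shift) keeps all remaining high bits, skip the AND.
  if ((std::numeric_limits<uint32_t>::max() >> shift) ==
      ((std::numeric_limits<uint32_t>::max() & mask) >> shift)) {
    return word >> shift;
  }
  return (word >> shift) & (mask >> shift);
}

// Example: shift = 24, mask = 0xFF000000. Then max >> 24 == 0xFF and
// (max & mask) >> 24 == 0xFF, so the field decodes as a plain word >> 24.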
@@ -8871,9 +8938,9 @@ void CodeStubAssembler::ForEachEnumerableOwnProperty(
{
Label slow_load(this, Label::kDeferred);
- var_value = CallGetterIfAccessor(var_value.value(), object,
- var_details.value(), context,
- object, &slow_load, kCallJSGetter);
+ var_value = CallGetterIfAccessor(
+ var_value.value(), object, var_details.value(), context, object,
+ next_key, &slow_load, kCallJSGetter);
Goto(&callback);
BIND(&slow_load);
@@ -9325,8 +9392,8 @@ template void CodeStubAssembler::LoadPropertyFromDictionary(
// result of the getter call.
TNode<Object> CodeStubAssembler::CallGetterIfAccessor(
TNode<Object> value, TNode<HeapObject> holder, TNode<Uint32T> details,
- TNode<Context> context, TNode<Object> receiver, Label* if_bailout,
- GetOwnPropertyMode mode) {
+ TNode<Context> context, TNode<Object> receiver, TNode<Object> name,
+ Label* if_bailout, GetOwnPropertyMode mode) {
TVARIABLE(Object, var_value, value);
Label done(this), if_accessor_info(this, Label::kDeferred);
@@ -9354,13 +9421,16 @@ TNode<Object> CodeStubAssembler::CallGetterIfAccessor(
BIND(&if_callable);
{
- // Call the accessor.
+ // Call the accessor. No need to check side-effect mode here, since it
+ // will be checked later in DebugOnFunctionCall.
var_value = Call(context, getter, receiver);
Goto(&done);
}
BIND(&if_function_template_info);
{
+ Label runtime(this, Label::kDeferred);
+ GotoIf(IsSideEffectFreeDebuggingActive(), &runtime);
TNode<HeapObject> cached_property_name = LoadObjectField<HeapObject>(
getter, FunctionTemplateInfo::kCachedPropertyNameOffset);
GotoIfNot(IsTheHole(cached_property_name), if_bailout);
@@ -9371,6 +9441,13 @@ TNode<Object> CodeStubAssembler::CallGetterIfAccessor(
Builtins::kCallFunctionTemplate_CheckAccessAndCompatibleReceiver,
creation_context, getter, IntPtrConstant(0), receiver);
Goto(&done);
+
+ BIND(&runtime);
+ {
+ var_value = CallRuntime(Runtime::kGetProperty, context, holder, name,
+ receiver);
+ Goto(&done);
+ }
}
} else {
Goto(&done);
@@ -9505,7 +9582,7 @@ void CodeStubAssembler::TryGetOwnProperty(
}
TNode<Object> value =
CallGetterIfAccessor(var_value->value(), object, var_details->value(),
- context, receiver, if_bailout, mode);
+ context, receiver, unique_name, if_bailout, mode);
*var_value = value;
Goto(if_found_value);
}
@@ -9554,6 +9631,7 @@ void CodeStubAssembler::TryLookupElement(
BIGUINT64_ELEMENTS,
BIGINT64_ELEMENTS,
};
+ // TODO(v8:11111): Support RAB / GSAB.
Label* labels[] = {
&if_isobjectorsmi, &if_isobjectorsmi, &if_isobjectorsmi,
&if_isobjectorsmi, &if_isobjectorsmi, &if_isobjectorsmi,
@@ -10811,6 +10889,12 @@ void CodeStubAssembler::EmitElementStore(
TNode<Context> context, TVariable<Object>* maybe_converted_value) {
CSA_ASSERT(this, Word32BinaryNot(IsJSProxy(object)));
+ // TODO(v8:11111): Fast path for RAB / GSAB backed TypedArrays.
+ if (IsRabGsabTypedArrayElementsKind(elements_kind)) {
+ GotoIf(Int32TrueConstant(), bailout);
+ return;
+ }
+
TNode<FixedArrayBase> elements = LoadElements(object);
if (!(IsSmiOrObjectElementsKind(elements_kind) ||
IsSealedElementsKind(elements_kind) ||
@@ -11057,6 +11141,8 @@ void CodeStubAssembler::TrapAllocationMemento(TNode<JSObject> object,
// Bail out if the object is not in new space.
TNode<IntPtrT> object_word = BitcastTaggedToWord(object);
+ // TODO(v8:11641): Skip TrapAllocationMemento when allocation-site
+ // tracking is disabled.
TNode<IntPtrT> object_page = PageFromAddress(object_word);
{
TNode<IntPtrT> page_flags =
@@ -11102,15 +11188,19 @@ void CodeStubAssembler::TrapAllocationMemento(TNode<JSObject> object,
// Memento map check.
BIND(&map_check);
{
- TNode<Object> memento_map = LoadObjectField(object, kMementoMapOffset);
- Branch(TaggedEqual(memento_map, AllocationMementoMapConstant()),
- memento_found, &no_memento_found);
+ TNode<AnyTaggedT> maybe_mapword =
+ LoadObjectField(object, kMementoMapOffset);
+ TNode<AnyTaggedT> memento_mapword =
+ LoadRootMapWord(RootIndex::kAllocationMementoMap);
+ Branch(TaggedEqual(maybe_mapword, memento_mapword), memento_found,
+ &no_memento_found);
}
BIND(&no_memento_found);
Comment("] TrapAllocationMemento");
}
TNode<IntPtrT> CodeStubAssembler::PageFromAddress(TNode<IntPtrT> address) {
+ if (FLAG_enable_third_party_heap) Unreachable();
return WordAnd(address, IntPtrConstant(~kPageAlignmentMask));
}
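
TrapAllocationMemento now compares raw map words: LoadRootMapWord returns the AllocationMemento map in its packed on-heap form, so no unpacking is needed before TaggedEqual. This is sound because XOR with a constant mask is injective; a sketch with a placeholder mask:

#include <cstdint>

constexpr uintptr_t kXorMask = 0b11;  // placeholder for the real packing mask
uintptr_t Pack(uintptr_t map_ptr) { return map_ptr ^ kXorMask; }

// Pack(a) == Pack(b) iff a == b, so packed words compare like unpacked ones.
bool SameMap(uintptr_t packed_a, uintptr_t packed_b) {
  return packed_a == packed_b;
}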
@@ -11323,7 +11413,12 @@ void CodeStubAssembler::InitializeFieldsWithRoot(TNode<HeapObject> object,
CSA_SLOW_ASSERT(this, TaggedIsNotSmi(object));
start_offset = IntPtrAdd(start_offset, IntPtrConstant(-kHeapObjectTag));
end_offset = IntPtrAdd(end_offset, IntPtrConstant(-kHeapObjectTag));
- TNode<Object> root_value = LoadRoot(root_index);
+ TNode<AnyTaggedT> root_value;
+ if (root_index == RootIndex::kOnePointerFillerMap) {
+ root_value = LoadRootMapWord(root_index);
+ } else {
+ root_value = LoadRoot(root_index);
+ }
BuildFastLoop<IntPtrT>(
end_offset, start_offset,
[=](TNode<IntPtrT> current) {
@@ -13597,6 +13692,149 @@ TNode<UintPtrT> CodeStubAssembler::LoadJSTypedArrayLength(
return LoadObjectField<UintPtrT>(typed_array, JSTypedArray::kLengthOffset);
}
+// ES #sec-integerindexedobjectlength
+TNode<UintPtrT> CodeStubAssembler::LoadVariableLengthJSTypedArrayLength(
+ TNode<JSTypedArray> array, TNode<JSArrayBuffer> buffer, Label* miss) {
+ Label is_gsab(this), is_rab(this), end(this);
+ TVARIABLE(UintPtrT, result);
+
+ Branch(IsSharedArrayBuffer(buffer), &is_gsab, &is_rab);
+ BIND(&is_gsab);
+ {
+ // Non-length-tracking GSAB-backed TypedArrays shouldn't end up here.
+ CSA_ASSERT(this, IsLengthTrackingTypedArray(array));
+ // Read the byte length from the BackingStore.
+ const TNode<ExternalReference> length_function = ExternalConstant(
+ ExternalReference::length_tracking_gsab_backed_typed_array_length());
+ TNode<ExternalReference> isolate_ptr =
+ ExternalConstant(ExternalReference::isolate_address(isolate()));
+ result = UncheckedCast<UintPtrT>(
+ CallCFunction(length_function, MachineType::UintPtr(),
+ std::make_pair(MachineType::Pointer(), isolate_ptr),
+ std::make_pair(MachineType::AnyTagged(), array)));
+ Goto(&end);
+ }
+
+ BIND(&is_rab);
+ {
+ GotoIf(IsDetachedBuffer(buffer), miss);
+
+ TNode<UintPtrT> buffer_byte_length = LoadJSArrayBufferByteLength(buffer);
+ TNode<UintPtrT> array_byte_offset = LoadJSArrayBufferViewByteOffset(array);
+
+ Label is_length_tracking(this), not_length_tracking(this);
+ Branch(IsLengthTrackingTypedArray(array), &is_length_tracking,
+ &not_length_tracking);
+
+ BIND(&is_length_tracking);
+ {
+ // The backing RAB might have been shrunk so that the start of the
+ // TypedArray is already out of bounds.
+ GotoIfNot(UintPtrLessThanOrEqual(array_byte_offset, buffer_byte_length),
+ miss);
+ // length = (buffer_byte_length - byte_offset) / element_size
+ // Conversion to signed is OK since buffer_byte_length <
+ // JSArrayBuffer::kMaxByteLength.
+ TNode<IntPtrT> element_size =
+ RabGsabElementsKindToElementByteSize(LoadElementsKind(array));
+ TNode<IntPtrT> length =
+ IntPtrDiv(Signed(UintPtrSub(buffer_byte_length, array_byte_offset)),
+ element_size);
+ result = Unsigned(length);
+ Goto(&end);
+ }
+
+ BIND(&not_length_tracking);
+ {
+ // Check if the backing RAB has shrunk so that the buffer is out of
+ // bounds.
+ TNode<UintPtrT> array_byte_length =
+ LoadJSArrayBufferViewByteLength(array);
+ GotoIfNot(UintPtrGreaterThanOrEqual(
+ buffer_byte_length,
+ UintPtrAdd(array_byte_offset, array_byte_length)),
+ miss);
+ result = LoadJSTypedArrayLength(array);
+ Goto(&end);
+ }
+ }
+ BIND(&end);
+ return result.value();
+}
+
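
In the length-tracking RAB case above, the element count is recomputed on every access from the buffer's current byte length. A standalone model with hypothetical names, including the out-of-bounds miss path:

#include <cstdint>
#include <optional>

std::optional<uintptr_t> TrackedLength(uintptr_t buffer_byte_length,
                                       uintptr_t byte_offset,
                                       uintptr_t element_size) {
  // The RAB may have been shrunk below the view's start: take the miss path.
  if (byte_offset > buffer_byte_length) return std::nullopt;
  return (buffer_byte_length - byte_offset) / element_size;
}

// E.g. a 4-byte-element view at byte offset 8 over a 40-byte RAB sees
// (40 - 8) / 4 = 8 elements; shrink the RAB to 4 bytes and the load misses.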
+// ES #sec-integerindexedobjectbytelength
+TNode<UintPtrT> CodeStubAssembler::LoadVariableLengthJSTypedArrayByteLength(
+ TNode<Context> context, TNode<JSTypedArray> array,
+ TNode<JSArrayBuffer> buffer) {
+ Label miss(this), end(this);
+ TVARIABLE(UintPtrT, result);
+
+ TNode<UintPtrT> length =
+ LoadVariableLengthJSTypedArrayLength(array, buffer, &miss);
+ TNode<IntPtrT> element_size =
+ RabGsabElementsKindToElementByteSize(LoadElementsKind(array));
+ // Conversion to signed is OK since length < JSArrayBuffer::kMaxByteLength.
+ TNode<IntPtrT> byte_length = IntPtrMul(Signed(length), element_size);
+ result = Unsigned(byte_length);
+ Goto(&end);
+ BIND(&miss);
+ {
+ result = UintPtrConstant(0);
+ Goto(&end);
+ }
+ BIND(&end);
+ return result.value();
+}
+
+TNode<IntPtrT> CodeStubAssembler::RabGsabElementsKindToElementByteSize(
+ TNode<Int32T> elements_kind) {
+ TVARIABLE(IntPtrT, result);
+ Label elements_8(this), elements_16(this), elements_32(this),
+ elements_64(this), not_found(this), end(this);
+ int32_t elements_kinds[] = {
+ RAB_GSAB_UINT8_ELEMENTS, RAB_GSAB_UINT8_CLAMPED_ELEMENTS,
+ RAB_GSAB_INT8_ELEMENTS, RAB_GSAB_UINT16_ELEMENTS,
+ RAB_GSAB_INT16_ELEMENTS, RAB_GSAB_UINT32_ELEMENTS,
+ RAB_GSAB_INT32_ELEMENTS, RAB_GSAB_FLOAT32_ELEMENTS,
+ RAB_GSAB_FLOAT64_ELEMENTS, RAB_GSAB_BIGINT64_ELEMENTS,
+ RAB_GSAB_BIGUINT64_ELEMENTS};
+ Label* elements_kind_labels[] = {&elements_8, &elements_8, &elements_8,
+ &elements_16, &elements_16, &elements_32,
+ &elements_32, &elements_32, &elements_64,
+ &elements_64, &elements_64};
+ const size_t kTypedElementsKindCount =
+ LAST_RAB_GSAB_FIXED_TYPED_ARRAY_ELEMENTS_KIND -
+ FIRST_RAB_GSAB_FIXED_TYPED_ARRAY_ELEMENTS_KIND + 1;
+ DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kinds));
+ DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kind_labels));
+ Switch(elements_kind, &not_found, elements_kinds, elements_kind_labels,
+ kTypedElementsKindCount);
+ BIND(&elements_8);
+ {
+ result = IntPtrConstant(1);
+ Goto(&end);
+ }
+ BIND(&elements_16);
+ {
+ result = IntPtrConstant(2);
+ Goto(&end);
+ }
+ BIND(&elements_32);
+ {
+ result = IntPtrConstant(4);
+ Goto(&end);
+ }
+ BIND(&elements_64);
+ {
+ result = IntPtrConstant(8);
+ Goto(&end);
+ }
+ BIND(&not_found);
+ { Unreachable(); }
+ BIND(&end);
+ return result.value();
+}
+
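
The Switch above is a jump table from the RAB/GSAB elements kinds to their byte sizes. A plain constexpr mirror with abbreviated, hypothetical enumerators (the real kinds live in src/objects/elements-kind.h):

enum class RabGsabKind { kU8, kU8Clamped, kI8, kU16, kI16,
                         kU32, kI32, kF32, kF64, kBigI64, kBigU64 };

constexpr int ElementByteSize(RabGsabKind k) {
  switch (k) {
    case RabGsabKind::kU8: case RabGsabKind::kU8Clamped:
    case RabGsabKind::kI8: return 1;
    case RabGsabKind::kU16: case RabGsabKind::kI16: return 2;
    case RabGsabKind::kU32: case RabGsabKind::kI32:
    case RabGsabKind::kF32: return 4;
    case RabGsabKind::kF64: case RabGsabKind::kBigI64:
    case RabGsabKind::kBigU64: return 8;
  }
  return 0;  // mirrors BIND(&not_found) -> Unreachable()
}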
TNode<JSArrayBuffer> CodeStubAssembler::GetTypedArrayBuffer(
TNode<Context> context, TNode<JSTypedArray> array) {
Label call_runtime(this), done(this);
@@ -13799,6 +14037,17 @@ TNode<BoolT> CodeStubAssembler::IsDebugActive() {
return Word32NotEqual(is_debug_active, Int32Constant(0));
}
+TNode<BoolT> CodeStubAssembler::IsSideEffectFreeDebuggingActive() {
+ TNode<Uint8T> debug_execution_mode = Load<Uint8T>(ExternalConstant(
+ ExternalReference::debug_execution_mode_address(isolate())));
+
+ TNode<BoolT> is_active =
+ Word32Equal(debug_execution_mode,
+ Int32Constant(DebugInfo::ExecutionMode::kSideEffects));
+
+ return is_active;
+}
+
TNode<BoolT> CodeStubAssembler::HasAsyncEventDelegate() {
const TNode<RawPtrT> async_event_delegate = Load<RawPtrT>(ExternalConstant(
ExternalReference::async_event_delegate_address(isolate())));
@@ -13837,18 +14086,8 @@ TNode<BoolT> CodeStubAssembler::
return IsSetWord32(flags, mask);
}
-TNode<BoolT> CodeStubAssembler::
- IsAnyPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(
- TNode<Uint32T> flags) {
- return Word32NotEqual(flags, Int32Constant(0));
-}
-
TNode<BoolT> CodeStubAssembler::NeedsAnyPromiseHooks(TNode<Uint32T> flags) {
- uint32_t mask = Isolate::PromiseHookFields::HasContextPromiseHook::kMask |
- Isolate::PromiseHookFields::HasIsolatePromiseHook::kMask |
- Isolate::PromiseHookFields::HasAsyncEventDelegate::kMask |
- Isolate::PromiseHookFields::IsDebugActive::kMask;
- return IsSetWord32(flags, mask);
+ return Word32NotEqual(flags, Int32Constant(0));
}
TNode<Code> CodeStubAssembler::LoadBuiltin(TNode<Smi> builtin_id) {
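
NeedsAnyPromiseHooks previously OR-ed together every field mask of Isolate::PromiseHookFields; since those fields exhaust the flags word, the masked test is the same as comparing against zero, and the now-redundant helper above it is dropped. A sketch of the equivalence, with illustrative masks:

#include <cstdint>

constexpr uint32_t kAllHookBits = 0b1111;  // union of the four field masks
bool Before(uint32_t flags) { return (flags & kAllHookBits) != 0; }
bool After(uint32_t flags) { return flags != 0; }
// Equivalent provided every defined bit of |flags| is one of the hook bits.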
diff --git a/deps/v8/src/codegen/code-stub-assembler.h b/deps/v8/src/codegen/code-stub-assembler.h
index 24204d82f8..9b54b5014e 100644
--- a/deps/v8/src/codegen/code-stub-assembler.h
+++ b/deps/v8/src/codegen/code-stub-assembler.h
@@ -67,6 +67,7 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
AsyncIteratorValueUnwrapSharedFun) \
V(MapIteratorProtector, map_iterator_protector, MapIteratorProtector) \
V(NoElementsProtector, no_elements_protector, NoElementsProtector) \
+ V(MegaDOMProtector, mega_dom_protector, MegaDOMProtector) \
V(NumberStringCache, number_string_cache, NumberStringCache) \
V(PromiseAllResolveElementSharedFun, promise_all_resolve_element_shared_fun, \
PromiseAllResolveElementSharedFun) \
@@ -157,6 +158,7 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
V(ManyClosuresCellMap, many_closures_cell_map, ManyClosuresCellMap) \
V(match_symbol, match_symbol, MatchSymbol) \
V(megamorphic_symbol, megamorphic_symbol, MegamorphicSymbol) \
+ V(mega_dom_symbol, mega_dom_symbol, MegaDOMSymbol) \
V(message_string, message_string, MessageString) \
V(minus_Infinity_string, minus_Infinity_string, MinusInfinityString) \
V(MinusZeroValue, minus_zero_value, MinusZero) \
@@ -1088,7 +1090,19 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
}
// Load a field from an object on the heap.
template <class T, typename std::enable_if<
- std::is_convertible<TNode<T>, TNode<Object>>::value,
+ std::is_convertible<TNode<T>, TNode<Object>>::value &&
+ std::is_base_of<T, Map>::value,
+ int>::type = 0>
+ TNode<T> LoadObjectField(TNode<HeapObject> object, int offset) {
+ const MachineType machine_type = offset == HeapObject::kMapOffset
+ ? MachineType::MapInHeader()
+ : MachineTypeOf<T>::value;
+ return CAST(LoadFromObject(machine_type, object,
+ IntPtrConstant(offset - kHeapObjectTag)));
+ }
+ template <class T, typename std::enable_if<
+ std::is_convertible<TNode<T>, TNode<Object>>::value &&
+ !std::is_base_of<T, Map>::value,
int>::type = 0>
TNode<T> LoadObjectField(TNode<HeapObject> object, int offset) {
return CAST(LoadFromObject(MachineTypeOf<T>::value, object,
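
The LoadObjectField split above uses enable_if to pick between two loads: for T that a Map could be stored as (Map itself or one of its bases) the load may touch the in-header map word, which needs MachineType::MapInHeader(); everything else is a plain tagged load. A minimal reproduction of the dispatch:

#include <type_traits>

struct Object {};
struct HeapObject : Object {};
struct Map : HeapObject {};

template <class T, std::enable_if_t<std::is_base_of<T, Map>::value, int> = 0>
const char* LoadKind() { return "possibly a map-header load"; }

template <class T, std::enable_if_t<!std::is_base_of<T, Map>::value, int> = 0>
const char* LoadKind() { return "plain tagged load"; }

// LoadKind<Map>() and LoadKind<HeapObject>() select the first overload;
// LoadKind<int>() selects the second.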
@@ -1163,6 +1177,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
std::is_convertible<TNode<T>, TNode<Object>>::value,
int>::type = 0>
TNode<T> LoadReference(Reference reference) {
+ if (IsMapOffsetConstant(reference.offset)) {
+ TNode<Map> map = LoadMap(CAST(reference.object));
+ DCHECK((std::is_base_of<T, Map>::value));
+ return ReinterpretCast<T>(map);
+ }
+
TNode<IntPtrT> offset =
IntPtrSub(reference.offset, IntPtrConstant(kHeapObjectTag));
CSA_ASSERT(this, TaggedIsNotSmi(reference.object));
@@ -1175,6 +1195,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
std::is_same<T, MaybeObject>::value,
int>::type = 0>
TNode<T> LoadReference(Reference reference) {
+ DCHECK(!IsMapOffsetConstant(reference.offset));
TNode<IntPtrT> offset =
IntPtrSub(reference.offset, IntPtrConstant(kHeapObjectTag));
return UncheckedCast<T>(
@@ -1185,6 +1206,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
std::is_same<T, MaybeObject>::value,
int>::type = 0>
void StoreReference(Reference reference, TNode<T> value) {
+ if (IsMapOffsetConstant(reference.offset)) {
+ DCHECK((std::is_base_of<T, Map>::value));
+ return StoreMap(CAST(reference.object), ReinterpretCast<Map>(value));
+ }
MachineRepresentation rep = MachineRepresentationOf<T>::value;
StoreToObjectWriteBarrier write_barrier = StoreToObjectWriteBarrier::kFull;
if (std::is_same<T, Smi>::value) {
@@ -1201,6 +1226,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
std::is_convertible<TNode<T>, TNode<UntaggedT>>::value,
int>::type = 0>
void StoreReference(Reference reference, TNode<T> value) {
+ DCHECK(!IsMapOffsetConstant(reference.offset));
TNode<IntPtrT> offset =
IntPtrSub(reference.offset, IntPtrConstant(kHeapObjectTag));
StoreToObject(MachineRepresentationOf<T>::value, reference.object, offset,
@@ -2346,6 +2372,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// JSProxy or an object with interceptors.
TNode<BoolT> InstanceTypeEqual(TNode<Int32T> instance_type, int type);
TNode<BoolT> IsNoElementsProtectorCellInvalid();
+ TNode<BoolT> IsMegaDOMProtectorCellInvalid();
TNode<BoolT> IsArrayIteratorProtectorCellInvalid();
TNode<BoolT> IsBigIntInstanceType(TNode<Int32T> instance_type);
TNode<BoolT> IsBigInt(TNode<HeapObject> object);
@@ -2395,6 +2422,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsJSObjectInstanceType(TNode<Int32T> instance_type);
TNode<BoolT> IsJSObjectMap(TNode<Map> map);
TNode<BoolT> IsJSObject(TNode<HeapObject> object);
+ TNode<BoolT> IsJSApiObjectInstanceType(TNode<Int32T> instance_type);
+ TNode<BoolT> IsJSApiObjectMap(TNode<Map> map);
+ TNode<BoolT> IsJSApiObject(TNode<HeapObject> object);
TNode<BoolT> IsJSFinalizationRegistryMap(TNode<Map> map);
TNode<BoolT> IsJSFinalizationRegistry(TNode<HeapObject> object);
TNode<BoolT> IsJSPromiseMap(TNode<Map> map);
@@ -3451,6 +3481,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Debug helpers
TNode<BoolT> IsDebugActive();
+ TNode<BoolT> IsSideEffectFreeDebuggingActive();
// JSArrayBuffer helpers
TNode<RawPtrT> LoadJSArrayBufferBackingStorePtr(
@@ -3464,6 +3495,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<JSArrayBufferView> array_buffer_view);
TNode<UintPtrT> LoadJSArrayBufferViewByteLength(
TNode<JSArrayBufferView> array_buffer_view);
+
TNode<UintPtrT> LoadJSArrayBufferViewByteOffset(
TNode<JSArrayBufferView> array_buffer_view);
void ThrowIfArrayBufferViewBufferIsDetached(
@@ -3472,6 +3504,17 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// JSTypedArray helpers
TNode<UintPtrT> LoadJSTypedArrayLength(TNode<JSTypedArray> typed_array);
+  // Helper for length-tracking JSTypedArrays and for JSTypedArrays backed by
+  // a ResizableArrayBuffer.
+ TNode<UintPtrT> LoadVariableLengthJSTypedArrayLength(
+ TNode<JSTypedArray> array, TNode<JSArrayBuffer> buffer, Label* miss);
+  // Helper for length-tracking JSTypedArrays and for JSTypedArrays backed by
+  // a ResizableArrayBuffer.
+ TNode<UintPtrT> LoadVariableLengthJSTypedArrayByteLength(
+ TNode<Context> context, TNode<JSTypedArray> array,
+ TNode<JSArrayBuffer> buffer);
+ TNode<IntPtrT> RabGsabElementsKindToElementByteSize(
+      TNode<Int32T> elements_kind);
TNode<RawPtrT> LoadJSTypedArrayDataPtr(TNode<JSTypedArray> typed_array);
TNode<JSArrayBuffer> GetTypedArrayBuffer(TNode<Context> context,
TNode<JSTypedArray> array);
@@ -3528,13 +3571,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return IsIsolatePromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(
PromiseHookFlags());
}
- TNode<BoolT> IsAnyPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(
- TNode<Uint32T> flags);
- TNode<BoolT>
- IsAnyPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate() {
- return IsAnyPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(
- PromiseHookFlags());
- }
TNode<BoolT> NeedsAnyPromiseHooks(TNode<Uint32T> flags);
TNode<BoolT> NeedsAnyPromiseHooks() {
@@ -3600,6 +3636,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
}
int32_t ConstexprWord32Or(int32_t a, int32_t b) { return a | b; }
+ uint32_t ConstexprWord32Shl(uint32_t a, int32_t b) { return a << b; }
bool ConstexprUintPtrLessThan(uintptr_t a, uintptr_t b) { return a < b; }
@@ -3712,12 +3749,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
const ForEachKeyValueFunction& body,
Label* bailout);
- TNode<Object> CallGetterIfAccessor(TNode<Object> value,
- TNode<HeapObject> holder,
- TNode<Uint32T> details,
- TNode<Context> context,
- TNode<Object> receiver, Label* if_bailout,
- GetOwnPropertyMode mode = kCallJSGetter);
+ TNode<Object> CallGetterIfAccessor(
+ TNode<Object> value, TNode<HeapObject> holder, TNode<Uint32T> details,
+ TNode<Context> context, TNode<Object> receiver, TNode<Object> name,
+ Label* if_bailout, GetOwnPropertyMode mode = kCallJSGetter);
TNode<IntPtrT> TryToIntptr(TNode<Object> key, Label* if_not_intptr,
TVariable<Int32T>* var_instance_type = nullptr);
@@ -3916,6 +3951,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return CodeAssembler::LoadRoot(root_index);
}
+ TNode<AnyTaggedT> LoadRootMapWord(RootIndex root_index) {
+ return CodeAssembler::LoadRootMapWord(root_index);
+ }
+
template <typename TIndex>
void StoreFixedArrayOrPropertyArrayElement(
TNode<UnionT<FixedArray, PropertyArray>> array, TNode<TIndex> index,
@@ -3955,6 +3994,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TVariable<Number>* var_result,
Label* if_bailout);
+ void AssertHasValidMap(TNode<HeapObject> object);
+
template <typename TValue>
void EmitElementStoreTypedArray(TNode<JSTypedArray> typed_array,
TNode<IntPtrT> key, TNode<Object> value,
diff --git a/deps/v8/src/codegen/compilation-cache.cc b/deps/v8/src/codegen/compilation-cache.cc
index 3941e56e6a..ee50f8b015 100644
--- a/deps/v8/src/codegen/compilation-cache.cc
+++ b/deps/v8/src/codegen/compilation-cache.cc
@@ -29,10 +29,9 @@ CompilationCache::CompilationCache(Isolate* isolate)
eval_global_(isolate),
eval_contextual_(isolate),
reg_exp_(isolate, kRegExpGenerations),
- code_(isolate),
enabled_script_and_eval_(true) {
CompilationSubCache* subcaches[kSubCacheCount] = {
- &script_, &eval_global_, &eval_contextual_, &reg_exp_, &code_};
+ &script_, &eval_global_, &eval_contextual_, &reg_exp_};
for (int i = 0; i < kSubCacheCount; ++i) {
subcaches_[i] = subcaches[i];
}
@@ -77,10 +76,6 @@ void CompilationCacheScript::Age() {
}
void CompilationCacheEval::Age() { AgeCustom(this); }
void CompilationCacheRegExp::Age() { AgeByGeneration(this); }
-void CompilationCacheCode::Age() {
- if (FLAG_trace_turbo_nci) CompilationCacheCode::TraceAgeing();
- AgeByGeneration(this);
-}
void CompilationSubCache::Iterate(RootVisitor* v) {
v->VisitRootPointers(Root::kCompilationCache, nullptr,
@@ -267,58 +262,6 @@ void CompilationCacheRegExp::Put(Handle<String> source, JSRegExp::Flags flags,
CompilationCacheTable::PutRegExp(isolate(), table, source, flags, data));
}
-MaybeHandle<Code> CompilationCacheCode::Lookup(Handle<SharedFunctionInfo> key) {
- // Make sure not to leak the table into the surrounding handle
- // scope. Otherwise, we risk keeping old tables around even after
- // having cleared the cache.
- HandleScope scope(isolate());
- MaybeHandle<Code> maybe_value;
- int generation = 0;
- for (; generation < generations(); generation++) {
- Handle<CompilationCacheTable> table = GetTable(generation);
- maybe_value = table->LookupCode(key);
- if (!maybe_value.is_null()) break;
- }
-
- if (maybe_value.is_null()) {
- isolate()->counters()->compilation_cache_misses()->Increment();
- return MaybeHandle<Code>();
- }
-
- Handle<Code> value = maybe_value.ToHandleChecked();
- if (generation != 0) Put(key, value); // Add to the first generation.
- isolate()->counters()->compilation_cache_hits()->Increment();
- return scope.CloseAndEscape(value);
-}
-
-void CompilationCacheCode::Put(Handle<SharedFunctionInfo> key,
- Handle<Code> value) {
- HandleScope scope(isolate());
- Handle<CompilationCacheTable> table = GetFirstTable();
- SetFirstTable(CompilationCacheTable::PutCode(isolate(), table, key, value));
-}
-
-void CompilationCacheCode::TraceAgeing() {
- DCHECK(FLAG_trace_turbo_nci);
- StdoutStream os;
- os << "NCI cache ageing: Removing oldest generation" << std::endl;
-}
-
-void CompilationCacheCode::TraceInsertion(Handle<SharedFunctionInfo> key,
- Handle<Code> value) {
- DCHECK(FLAG_trace_turbo_nci);
- StdoutStream os;
- os << "NCI cache insertion: " << Brief(*key) << ", " << Brief(*value)
- << std::endl;
-}
-
-void CompilationCacheCode::TraceHit(Handle<SharedFunctionInfo> key,
- Handle<Code> value) {
- DCHECK(FLAG_trace_turbo_nci);
- StdoutStream os;
- os << "NCI cache hit: " << Brief(*key) << ", " << Brief(*value) << std::endl;
-}
-
void CompilationCache::Remove(Handle<SharedFunctionInfo> function_info) {
if (!IsEnabledScriptAndEval()) return;
@@ -372,10 +315,6 @@ MaybeHandle<FixedArray> CompilationCache::LookupRegExp(Handle<String> source,
return reg_exp_.Lookup(source, flags);
}
-MaybeHandle<Code> CompilationCache::LookupCode(Handle<SharedFunctionInfo> sfi) {
- return code_.Lookup(sfi);
-}
-
void CompilationCache::PutScript(Handle<String> source,
LanguageMode language_mode,
Handle<SharedFunctionInfo> function_info) {
@@ -414,11 +353,6 @@ void CompilationCache::PutRegExp(Handle<String> source, JSRegExp::Flags flags,
reg_exp_.Put(source, flags, data);
}
-void CompilationCache::PutCode(Handle<SharedFunctionInfo> shared,
- Handle<Code> code) {
- code_.Put(shared, code);
-}
-
void CompilationCache::Clear() {
for (int i = 0; i < kSubCacheCount; i++) {
subcaches_[i]->Clear();
diff --git a/deps/v8/src/codegen/compilation-cache.h b/deps/v8/src/codegen/compilation-cache.h
index 0ed13e53b6..d4f4ae52dc 100644
--- a/deps/v8/src/codegen/compilation-cache.h
+++ b/deps/v8/src/codegen/compilation-cache.h
@@ -150,32 +150,6 @@ class CompilationCacheRegExp : public CompilationSubCache {
DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheRegExp);
};
-// Sub-cache for Code objects. All code inserted into this cache must
-// be usable across different native contexts.
-class CompilationCacheCode : public CompilationSubCache {
- public:
- explicit CompilationCacheCode(Isolate* isolate)
- : CompilationSubCache(isolate, kGenerations) {}
-
- MaybeHandle<Code> Lookup(Handle<SharedFunctionInfo> key);
- void Put(Handle<SharedFunctionInfo> key, Handle<Code> value);
-
- void Age() override;
-
- // TODO(jgruber,v8:8888): For simplicity we use the generational
- // approach here, but could consider something else (or more
- // generations) in the future.
- static constexpr int kGenerations = 2;
-
- static void TraceAgeing();
- static void TraceInsertion(Handle<SharedFunctionInfo> key,
- Handle<Code> value);
- static void TraceHit(Handle<SharedFunctionInfo> key, Handle<Code> value);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheCode);
-};
-
// The compilation cache keeps shared function infos for compiled
// scripts and evals. The shared function infos are looked up using
// the source string as the key. For regular expressions the
@@ -206,8 +180,6 @@ class V8_EXPORT_PRIVATE CompilationCache {
MaybeHandle<FixedArray> LookupRegExp(Handle<String> source,
JSRegExp::Flags flags);
- MaybeHandle<Code> LookupCode(Handle<SharedFunctionInfo> sfi);
-
// Associate the (source, kind) pair to the shared function
// info. This may overwrite an existing mapping.
void PutScript(Handle<String> source, LanguageMode language_mode,
@@ -225,8 +197,6 @@ class V8_EXPORT_PRIVATE CompilationCache {
void PutRegExp(Handle<String> source, JSRegExp::Flags flags,
Handle<FixedArray> data);
- void PutCode(Handle<SharedFunctionInfo> shared, Handle<Code> code);
-
// Clear the cache - also used to initialize the cache at startup.
void Clear();
@@ -269,9 +239,8 @@ class V8_EXPORT_PRIVATE CompilationCache {
CompilationCacheEval eval_global_;
CompilationCacheEval eval_contextual_;
CompilationCacheRegExp reg_exp_;
- CompilationCacheCode code_;
- static constexpr int kSubCacheCount = 5;
+ static constexpr int kSubCacheCount = 4;
CompilationSubCache* subcaches_[kSubCacheCount];
// Current enable state of the compilation cache for scripts and eval.
diff --git a/deps/v8/src/codegen/compiler.cc b/deps/v8/src/codegen/compiler.cc
index e46639d90a..9de4ae24a3 100644
--- a/deps/v8/src/codegen/compiler.cc
+++ b/deps/v8/src/codegen/compiler.cc
@@ -559,10 +559,10 @@ void InstallInterpreterTrampolineCopy(
script_name, line_num, column_num));
}
-template <typename LocalIsolate>
+template <typename IsolateT>
void InstallUnoptimizedCode(UnoptimizedCompilationInfo* compilation_info,
Handle<SharedFunctionInfo> shared_info,
- LocalIsolate* isolate) {
+ IsolateT* isolate) {
if (compilation_info->has_bytecode_array()) {
DCHECK(!shared_info->HasBytecodeArray()); // Only compiled once.
DCHECK(!compilation_info->has_asm_wasm_data());
@@ -585,7 +585,7 @@ void InstallUnoptimizedCode(UnoptimizedCompilationInfo* compilation_info,
#if V8_ENABLE_WEBASSEMBLY
DCHECK(compilation_info->has_asm_wasm_data());
// We should only have asm/wasm data when finalizing on the main thread.
- DCHECK((std::is_same<LocalIsolate, Isolate>::value));
+ DCHECK((std::is_same<IsolateT, Isolate>::value));
shared_info->set_asm_wasm_data(*compilation_info->asm_wasm_data());
shared_info->set_feedback_metadata(
ReadOnlyRoots(isolate).empty_feedback_metadata());
@@ -606,13 +606,15 @@ void LogUnoptimizedCompilation(Isolate* isolate,
RecordUnoptimizedCompilationStats(isolate, shared_info);
}
-template <typename LocalIsolate>
+template <typename IsolateT>
void EnsureSharedFunctionInfosArrayOnScript(Handle<Script> script,
ParseInfo* parse_info,
- LocalIsolate* isolate) {
+ IsolateT* isolate) {
DCHECK(parse_info->flags().is_toplevel());
- if (script->shared_function_infos().length() > 0) {
- DCHECK_EQ(script->shared_function_infos().length(),
+ if (script->shared_function_info_count() > 0) {
+ DCHECK_LE(script->shared_function_info_count(),
+ script->shared_function_infos().length());
+ DCHECK_EQ(script->shared_function_info_count(),
parse_info->max_function_literal_id() + 1);
return;
}
@@ -640,26 +642,6 @@ void UpdateSharedFunctionFlagsAfterCompilation(FunctionLiteral* literal,
shared_info.SetScopeInfo(*literal->scope()->scope_info());
}
-bool CanCompileWithBaseline(Isolate* isolate,
- Handle<SharedFunctionInfo> shared) {
- // Check if we actually have bytecode.
- if (!shared->HasBytecodeArray()) return false;
-
- // Do not optimize when debugger needs to hook into every call.
- if (isolate->debug()->needs_check_on_function_call()) return false;
-
- // Functions with breakpoints have to stay interpreted.
- if (shared->HasBreakInfo()) return false;
-
- // Do not baseline compile if sparkplug is disabled or function doesn't pass
- // sparkplug_filter.
- if (!FLAG_sparkplug || !shared->PassesFilter(FLAG_sparkplug_filter)) {
- return false;
- }
-
- return true;
-}
-
bool CompileSharedWithBaseline(Isolate* isolate,
Handle<SharedFunctionInfo> shared,
Compiler::ClearExceptionFlag flag,
@@ -671,7 +653,7 @@ bool CompileSharedWithBaseline(Isolate* isolate,
if (shared->HasBaselineData()) return true;
// Check if we actually can compile with baseline.
- if (!CanCompileWithBaseline(isolate, shared)) return false;
+ if (!CanCompileWithBaseline(isolate, *shared)) return false;
StackLimitCheck check(isolate);
if (check.JsHasOverflowed(kStackSpaceRequiredForCompilation * KB)) {
@@ -714,10 +696,10 @@ bool CompileSharedWithBaseline(Isolate* isolate,
// Finalize a single compilation job. This function can return
// RETRY_ON_MAIN_THREAD if the job cannot be finalized off-thread, in which case
// it should be safe to call it again on the main thread with the same job.
-template <typename LocalIsolate>
+template <typename IsolateT>
CompilationJob::Status FinalizeSingleUnoptimizedCompilationJob(
UnoptimizedCompilationJob* job, Handle<SharedFunctionInfo> shared_info,
- LocalIsolate* isolate,
+ IsolateT* isolate,
FinalizeUnoptimizedCompilationDataList*
finalize_unoptimized_compilation_data_list) {
UnoptimizedCompilationInfo* compilation_info = job->compilation_info();
@@ -736,9 +718,8 @@ CompilationJob::Status FinalizeSingleUnoptimizedCompilationJob(
isolate, shared_info, coverage_info, job->time_taken_to_execute(),
job->time_taken_to_finalize());
}
- DCHECK_IMPLIES(
- status == CompilationJob::RETRY_ON_MAIN_THREAD,
- (std::is_same<LocalIsolate, v8::internal::LocalIsolate>::value));
+ DCHECK_IMPLIES(status == CompilationJob::RETRY_ON_MAIN_THREAD,
+ (std::is_same<IsolateT, LocalIsolate>::value));
return status;
}
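
The LocalIsolate → IsolateT renames in this file are mechanical, but they remove a real hazard: a template parameter named LocalIsolate shadows the class of the same name inside the template, which is why the old DCHECK had to spell out v8::internal::LocalIsolate. Minimal illustration:

#include <type_traits>

class LocalIsolate {};  // stands in for v8::internal::LocalIsolate

// Old style: the parameter shadows the class, so "LocalIsolate" inside the
// body names the parameter, not the class.
template <typename LocalIsolate>
void OldStyle() {}

// New style: no shadowing, the class name remains usable unqualified.
template <typename IsolateT>
bool IsLocal() { return std::is_same<IsolateT, LocalIsolate>::value; }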
@@ -801,9 +782,9 @@ bool RecursivelyExecuteUnoptimizedCompilationJobs(
return true;
}
-template <typename LocalIsolate>
+template <typename IsolateT>
bool IterativelyExecuteAndFinalizeUnoptimizedCompilationJobs(
- LocalIsolate* isolate, Handle<SharedFunctionInfo> outer_shared_info,
+ IsolateT* isolate, Handle<SharedFunctionInfo> outer_shared_info,
Handle<Script> script, ParseInfo* parse_info,
AccountingAllocator* allocator, IsCompiledScope* is_compiled_scope,
FinalizeUnoptimizedCompilationDataList*
@@ -849,7 +830,7 @@ bool IterativelyExecuteAndFinalizeUnoptimizedCompilationJobs(
case CompilationJob::RETRY_ON_MAIN_THREAD:
// This should not happen on the main thread.
- DCHECK((!std::is_same<LocalIsolate, Isolate>::value));
+ DCHECK((!std::is_same<IsolateT, Isolate>::value));
DCHECK_NOT_NULL(jobs_to_retry_finalization_on_main_thread);
// Clear the literal and ParseInfo to prevent further attempts to
@@ -943,11 +924,9 @@ bool FinalizeDeferredUnoptimizedCompilationJobs(
V8_WARN_UNUSED_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeCache(
Handle<JSFunction> function, BytecodeOffset osr_offset,
CodeKind code_kind) {
- RuntimeCallTimerScope runtimeTimer(
- function->GetIsolate(),
- RuntimeCallCounterId::kCompileGetFromOptimizedCodeMap);
- Handle<SharedFunctionInfo> shared(function->shared(), function->GetIsolate());
Isolate* isolate = function->GetIsolate();
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kCompileGetFromOptimizedCodeMap);
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate);
DisallowGarbageCollection no_gc;
Code code;
if (osr_offset.IsNone() && function->has_feedback_vector()) {
@@ -975,8 +954,6 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeCache(
}
void ClearOptimizedCodeCache(OptimizedCompilationInfo* compilation_info) {
- DCHECK(!CodeKindIsNativeContextIndependentJSFunction(
- compilation_info->code_kind()));
Handle<JSFunction> function = compilation_info->closure();
if (compilation_info->osr_offset().IsNone()) {
Handle<FeedbackVector> vector =
@@ -1015,31 +992,6 @@ void InsertCodeIntoOptimizedCodeCache(
}
}
-void InsertCodeIntoCompilationCache(Isolate* isolate,
- OptimizedCompilationInfo* info) {
- if (!CodeKindIsNativeContextIndependentJSFunction(info->code_kind())) return;
-
- DCHECK(info->osr_offset().IsNone());
-
- Handle<Code> code = info->code();
- DCHECK(!info->function_context_specializing());
-
- Handle<SharedFunctionInfo> sfi = info->shared_info();
- CompilationCache* cache = isolate->compilation_cache();
- cache->PutCode(sfi, code);
- DCHECK(!cache->LookupCode(sfi).is_null());
-
- sfi->set_may_have_cached_code(true);
-
- if (FLAG_trace_turbo_nci) CompilationCacheCode::TraceInsertion(sfi, code);
-}
-
-V8_WARN_UNUSED_RESULT MaybeHandle<Code> GetCodeFromCompilationCache(
- Isolate* isolate, Handle<SharedFunctionInfo> shared) {
- if (!shared->may_have_cached_code()) return {};
- return shared->TryGetCachedCode(isolate);
-}
-
// Runs PrepareJob in the proper compilation & canonical scopes. Handles will be
// allocated in a persistent handle scope that is detached and handed off to the
// {compilation_info} after PrepareJob.
@@ -1054,8 +1006,7 @@ bool PrepareJobWithHandleScope(OptimizedCompilationJob* job, Isolate* isolate,
bool GetOptimizedCodeNow(OptimizedCompilationJob* job, Isolate* isolate,
OptimizedCompilationInfo* compilation_info) {
TimerEventScope<TimerEventRecompileSynchronous> timer(isolate);
- RuntimeCallTimerScope runtimeTimer(
- isolate, RuntimeCallCounterId::kOptimizeNonConcurrent);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kOptimizeNonConcurrent);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.OptimizeNonConcurrent");
@@ -1111,8 +1062,7 @@ bool GetOptimizedCodeLater(std::unique_ptr<OptimizedCompilationJob> job,
}
TimerEventScope<TimerEventRecompileSynchronous> timer(isolate);
- RuntimeCallTimerScope runtimeTimer(
- isolate, RuntimeCallCounterId::kOptimizeConcurrentPrepare);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kOptimizeConcurrentPrepare);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.OptimizeConcurrentPrepare");
@@ -1134,11 +1084,10 @@ bool GetOptimizedCodeLater(std::unique_ptr<OptimizedCompilationJob> job,
function->SetOptimizationMarker(OptimizationMarker::kInOptimizationQueue);
}
- // Note: Usually the active tier is expected to be Ignition or NCI at this
- // point (in other words we don't expect to optimize if the function is
- // already TF-optimized). There is a special case for OSR though, for which
- // we *can* reach this point even if we've already generated non-OSR'd TF
- // code.
+ // Note: Usually the active tier is expected to be Ignition at this point (in
+ // other words we don't expect to optimize if the function is already
+ // TF-optimized). There is a special case for OSR though, for which we *can*
+ // reach this point even if we've already generated non-OSR'd TF code.
DCHECK(function->shared().HasBytecodeArray());
return true;
}
@@ -1148,13 +1097,7 @@ bool GetOptimizedCodeLater(std::unique_ptr<OptimizedCompilationJob> job,
Handle<Code> ContinuationForConcurrentOptimization(
Isolate* isolate, Handle<JSFunction> function) {
Handle<Code> cached_code;
- if (FLAG_turbo_nci && function->NextTier() == CodeKindForTopTier() &&
- GetCodeFromCompilationCache(isolate, handle(function->shared(), isolate))
- .ToHandle(&cached_code)) {
- // Tiering up to Turbofan and cached optimized code exists. Continue
- // execution there until TF optimization has finished.
- return cached_code;
- } else if (FLAG_turboprop && function->HasAvailableOptimizedCode()) {
+ if (FLAG_turboprop && function->HasAvailableOptimizedCode()) {
DCHECK(!FLAG_turboprop_as_toptier);
DCHECK(function->NextTier() == CodeKind::TURBOFAN);
// It is possible that we have marked a closure for TurboFan optimization
@@ -1179,23 +1122,26 @@ Handle<Code> ContinuationForConcurrentOptimization(
return BUILTIN_CODE(isolate, InterpreterEntryTrampoline);
}
+enum class GetOptimizedCodeResultHandling {
+ // Default behavior, i.e. install the result, insert into caches, etc.
+ kDefault,
+ // Used only for stress testing. The compilation result should be discarded.
+ kDiscardForTesting,
+};
+
MaybeHandle<Code> GetOptimizedCode(
- Handle<JSFunction> function, ConcurrencyMode mode, CodeKind code_kind,
- BytecodeOffset osr_offset = BytecodeOffset::None(),
- JavaScriptFrame* osr_frame = nullptr) {
+ Isolate* isolate, Handle<JSFunction> function, ConcurrencyMode mode,
+ CodeKind code_kind, BytecodeOffset osr_offset = BytecodeOffset::None(),
+ JavaScriptFrame* osr_frame = nullptr,
+ GetOptimizedCodeResultHandling result_handling =
+ GetOptimizedCodeResultHandling::kDefault) {
DCHECK(CodeKindIsOptimizedJSFunction(code_kind));
- Isolate* isolate = function->GetIsolate();
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
// Make sure we clear the optimization marker on the function so that we
// don't try to re-optimize.
- // If compiling for NCI (which does not use the optimization marker), don't
- // touch the marker to avoid interfering with Turbofan compilation.
- if (!CodeKindIsNativeContextIndependentJSFunction(code_kind) &&
- function->HasOptimizationMarker()) {
- function->ClearOptimizationMarker();
- }
+ if (function->HasOptimizationMarker()) function->ClearOptimizationMarker();
if (shared->optimization_disabled() &&
shared->disable_optimization_reason() == BailoutReason::kNeverOptimize) {
@@ -1233,25 +1179,9 @@ MaybeHandle<Code> GetOptimizedCode(
DCHECK(shared->is_compiled());
function->feedback_vector().set_profiler_ticks(0);
- // Check the compilation cache (stored on the Isolate, shared between native
- // contexts).
- if (CodeKindIsNativeContextIndependentJSFunction(code_kind)) {
- DCHECK(osr_offset.IsNone());
-
- Handle<Code> cached_code;
- if (GetCodeFromCompilationCache(isolate, shared).ToHandle(&cached_code)) {
- CHECK_EQ(cached_code->kind(), CodeKind::NATIVE_CONTEXT_INDEPENDENT);
- if (FLAG_trace_turbo_nci) {
- CompilationCacheCode::TraceHit(shared, cached_code);
- }
- return cached_code;
- }
- }
-
VMState<COMPILER> state(isolate);
TimerEventScope<TimerEventOptimizeCode> optimize_code_timer(isolate);
- RuntimeCallTimerScope runtimeTimer(isolate,
- RuntimeCallCounterId::kOptimizeCode);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kOptimizeCode);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.OptimizeCode");
DCHECK(!isolate->has_pending_exception());
@@ -1265,6 +1195,10 @@ MaybeHandle<Code> GetOptimizedCode(
has_script, osr_offset, osr_frame));
OptimizedCompilationInfo* compilation_info = job->compilation_info();
+ if (result_handling == GetOptimizedCodeResultHandling::kDiscardForTesting) {
+ compilation_info->set_discard_result_for_testing();
+ }
+
// Prepare the job and launch concurrent compilation, or compile now.
if (mode == ConcurrencyMode::kConcurrent) {
if (GetOptimizedCodeLater(std::move(job), isolate, compilation_info,
@@ -1274,7 +1208,6 @@ MaybeHandle<Code> GetOptimizedCode(
} else {
DCHECK_EQ(mode, ConcurrencyMode::kNotConcurrent);
if (GetOptimizedCodeNow(job.get(), isolate, compilation_info)) {
- InsertCodeIntoCompilationCache(isolate, compilation_info);
return compilation_info->code();
}
}
@@ -1283,13 +1216,30 @@ MaybeHandle<Code> GetOptimizedCode(
return {};
}
+// When --stress-concurrent-inlining is enabled, spawn concurrent jobs in
+// addition to non-concurrent compiles to increase coverage in mjsunit tests
+// (where most interesting compiles are non-concurrent). The result of the
+// compilation is thrown out.
+void SpawnDuplicateConcurrentJobForStressTesting(Isolate* isolate,
+ Handle<JSFunction> function,
+ ConcurrencyMode mode,
+ CodeKind code_kind) {
+ DCHECK(FLAG_stress_concurrent_inlining &&
+ isolate->concurrent_recompilation_enabled() &&
+ mode == ConcurrencyMode::kNotConcurrent &&
+ isolate->node_observer() == nullptr);
+ USE(GetOptimizedCode(isolate, function, ConcurrencyMode::kConcurrent,
+ code_kind, BytecodeOffset::None(), nullptr,
+ GetOptimizedCodeResultHandling::kDiscardForTesting));
+}
+
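
Both call sites added later in this patch gate the helper with exactly the condition its DCHECK asserts; the pattern, as it appears in Compiler::CompileOptimized below:

if (FLAG_stress_concurrent_inlining &&
    isolate->concurrent_recompilation_enabled() &&
    mode == ConcurrencyMode::kNotConcurrent &&
    isolate->node_observer() == nullptr) {
  SpawnDuplicateConcurrentJobForStressTesting(isolate, function, mode,
                                              code_kind);
}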
bool FailAndClearPendingException(Isolate* isolate) {
isolate->clear_pending_exception();
return false;
}
-template <typename LocalIsolate>
-bool PreparePendingException(LocalIsolate* isolate, ParseInfo* parse_info) {
+template <typename IsolateT>
+bool PreparePendingException(IsolateT* isolate, ParseInfo* parse_info) {
if (parse_info->pending_error_handler()->has_pending_error()) {
parse_info->pending_error_handler()->PrepareErrors(
isolate, parse_info->ast_value_factory());
@@ -1389,8 +1339,7 @@ void FinalizeUnoptimizedScriptCompilation(
FunctionLiteral* literal = it.first;
CompilerDispatcher::JobId job_id = it.second;
MaybeHandle<SharedFunctionInfo> maybe_shared_for_task =
- script->FindSharedFunctionInfo(isolate,
- literal->function_literal_id());
+ Script::FindSharedFunctionInfo(script, isolate, literal);
Handle<SharedFunctionInfo> shared_for_task;
if (maybe_shared_for_task.ToHandle(&shared_for_task)) {
dispatcher->RegisterSharedFunctionInfo(job_id, *shared_for_task);
@@ -1412,7 +1361,7 @@ void CompileAllWithBaseline(Isolate* isolate,
Handle<SharedFunctionInfo> shared_info = finalize_data.function_handle();
IsCompiledScope is_compiled_scope(*shared_info, isolate);
if (!is_compiled_scope.is_compiled()) continue;
- if (!CanCompileWithBaseline(isolate, shared_info)) continue;
+ if (!CanCompileWithBaseline(isolate, *shared_info)) continue;
CompileSharedWithBaseline(isolate, shared_info, Compiler::CLEAR_EXCEPTION,
&is_compiled_scope);
}
@@ -1420,9 +1369,9 @@ void CompileAllWithBaseline(Isolate* isolate,
// Create shared function info for top level and shared function infos array for
// inner functions.
-template <typename LocalIsolate>
+template <typename IsolateT>
Handle<SharedFunctionInfo> CreateTopLevelSharedFunctionInfo(
- ParseInfo* parse_info, Handle<Script> script, LocalIsolate* isolate) {
+ ParseInfo* parse_info, Handle<Script> script, IsolateT* isolate) {
EnsureSharedFunctionInfosArrayOnScript(script, parse_info, isolate);
DCHECK_EQ(kNoSourcePosition,
parse_info->literal()->function_token_position());
@@ -1440,10 +1389,9 @@ MaybeHandle<SharedFunctionInfo> CompileToplevel(
PostponeInterruptsScope postpone(isolate);
DCHECK(!isolate->native_context().is_null());
- RuntimeCallTimerScope runtimeTimer(
- isolate, parse_info->flags().is_eval()
- ? RuntimeCallCounterId::kCompileEval
- : RuntimeCallCounterId::kCompileScript);
+ RCS_SCOPE(isolate, parse_info->flags().is_eval()
+ ? RuntimeCallCounterId::kCompileEval
+ : RuntimeCallCounterId::kCompileScript);
VMState<BYTECODE_COMPILER> state(isolate);
if (parse_info->literal() == nullptr &&
!parsing::ParseProgram(parse_info, script, maybe_outer_scope_info,
@@ -1494,6 +1442,7 @@ MaybeHandle<SharedFunctionInfo> CompileToplevel(
return shared_info;
}
+#ifdef V8_RUNTIME_CALL_STATS
RuntimeCallCounterId RuntimeCallCounterIdForCompileBackground(
ParseInfo* parse_info) {
if (parse_info->flags().is_toplevel()) {
@@ -1504,6 +1453,7 @@ RuntimeCallCounterId RuntimeCallCounterIdForCompileBackground(
}
return RuntimeCallCounterId::kCompileBackgroundFunction;
}
+#endif // V8_RUNTIME_CALL_STATS
MaybeHandle<SharedFunctionInfo> CompileAndFinalizeOnBackgroundThread(
ParseInfo* parse_info, AccountingAllocator* allocator,
@@ -1514,9 +1464,8 @@ MaybeHandle<SharedFunctionInfo> CompileAndFinalizeOnBackgroundThread(
IsCompiledScope* is_compiled_scope) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompileCodeBackground");
- RuntimeCallTimerScope runtimeTimer(
- parse_info->runtime_call_stats(),
- RuntimeCallCounterIdForCompileBackground(parse_info));
+ RCS_SCOPE(parse_info->runtime_call_stats(),
+ RuntimeCallCounterIdForCompileBackground(parse_info));
Handle<SharedFunctionInfo> shared_info =
CreateTopLevelSharedFunctionInfo(parse_info, script, isolate);
@@ -1541,9 +1490,8 @@ void CompileOnBackgroundThread(ParseInfo* parse_info,
DisallowHeapAccess no_heap_access;
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompileCodeBackground");
- RuntimeCallTimerScope runtimeTimer(
- parse_info->runtime_call_stats(),
- RuntimeCallCounterIdForCompileBackground(parse_info));
+ RCS_SCOPE(parse_info->runtime_call_stats(),
+ RuntimeCallCounterIdForCompileBackground(parse_info));
// Generate the unoptimized bytecode or asm-js data.
DCHECK(jobs->empty());
@@ -1557,6 +1505,7 @@ void CompileOnBackgroundThread(ParseInfo* parse_info,
// Character stream shouldn't be used again.
parse_info->ResetCharacterStream();
}
+
} // namespace
CompilationHandleScope::~CompilationHandleScope() {
@@ -1660,8 +1609,8 @@ class V8_NODISCARD OffThreadParseInfoScope {
ParseInfo* parse_info,
WorkerThreadRuntimeCallStats* worker_thread_runtime_stats, int stack_size)
: parse_info_(parse_info),
- original_runtime_call_stats_(parse_info_->runtime_call_stats()),
original_stack_limit_(parse_info_->stack_limit()),
+ original_runtime_call_stats_(parse_info_->runtime_call_stats()),
worker_thread_scope_(worker_thread_runtime_stats) {
parse_info_->SetPerThreadState(GetCurrentStackPosition() - stack_size * KB,
worker_thread_scope_.Get());
@@ -1678,8 +1627,8 @@ class V8_NODISCARD OffThreadParseInfoScope {
private:
ParseInfo* parse_info_;
- RuntimeCallStats* original_runtime_call_stats_;
uintptr_t original_stack_limit_;
+ RuntimeCallStats* original_runtime_call_stats_;
WorkerThreadRuntimeCallStatsScope worker_thread_scope_;
};
@@ -1692,9 +1641,8 @@ void BackgroundCompileTask::Run() {
stack_size_);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"BackgroundCompileTask::Run");
- RuntimeCallTimerScope runtimeTimer(
- info_->runtime_call_stats(),
- RuntimeCallCounterId::kCompileBackgroundCompileTask);
+ RCS_SCOPE(info_->runtime_call_stats(),
+ RuntimeCallCounterId::kCompileBackgroundCompileTask);
// Update the character stream's runtime call stats.
info_->character_stream()->set_runtime_call_stats(
@@ -1817,8 +1765,7 @@ bool Compiler::CollectSourcePositions(Isolate* isolate,
DCHECK(!isolate->has_pending_exception());
VMState<BYTECODE_COMPILER> state(isolate);
PostponeInterruptsScope postpone(isolate);
- RuntimeCallTimerScope runtimeTimer(
- isolate, RuntimeCallCounterId::kCompileCollectSourcePositions);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kCompileCollectSourcePositions);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CollectSourcePositions");
HistogramTimerScope timer(isolate->counters()->collect_source_positions());
@@ -1894,8 +1841,7 @@ bool Compiler::Compile(Isolate* isolate, Handle<SharedFunctionInfo> shared_info,
VMState<BYTECODE_COMPILER> state(isolate);
PostponeInterruptsScope postpone(isolate);
TimerEventScope<TimerEventCompileCode> compile_timer(isolate);
- RuntimeCallTimerScope runtimeTimer(isolate,
- RuntimeCallCounterId::kCompileFunction);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kCompileFunction);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileCode");
AggregatedHistogramTimerScope timer(isolate->counters()->compile_lazy());
@@ -2000,9 +1946,19 @@ bool Compiler::Compile(Isolate* isolate, Handle<JSFunction> function,
CompilerTracer::TraceOptimizeForAlwaysOpt(isolate, function,
CodeKindForTopTier());
+ const CodeKind code_kind = CodeKindForTopTier();
+ const ConcurrencyMode concurrency_mode = ConcurrencyMode::kNotConcurrent;
+
+ if (FLAG_stress_concurrent_inlining &&
+ isolate->concurrent_recompilation_enabled() &&
+ concurrency_mode == ConcurrencyMode::kNotConcurrent &&
+ isolate->node_observer() == nullptr) {
+ SpawnDuplicateConcurrentJobForStressTesting(isolate, function,
+ concurrency_mode, code_kind);
+ }
+
Handle<Code> maybe_code;
- if (GetOptimizedCode(function, ConcurrencyMode::kNotConcurrent,
- CodeKindForTopTier())
+ if (GetOptimizedCode(isolate, function, concurrency_mode, code_kind)
.ToHandle(&maybe_code)) {
code = maybe_code;
}
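
This guard reappears verbatim in Compiler::CompileOptimized below. Only
SpawnDuplicateConcurrentJobForStressTesting is real here; the helper and its
name are illustrative:

    // Hypothetical factoring of the shared stress-testing guard.
    void MaybeSpawnStressJob(Isolate* isolate, Handle<JSFunction> function,
                             ConcurrencyMode mode, CodeKind code_kind) {
      if (FLAG_stress_concurrent_inlining &&
          isolate->concurrent_recompilation_enabled() &&
          mode == ConcurrencyMode::kNotConcurrent &&
          isolate->node_observer() == nullptr) {
        // Kick off a throwaway concurrent job for the same function so the
        // concurrent pipeline is exercised even on synchronous compiles.
        SpawnDuplicateConcurrentJobForStressTesting(isolate, function, mode,
                                                    code_kind);
      }
    }
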
@@ -2058,8 +2014,8 @@ bool Compiler::FinalizeBackgroundCompileTask(
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.FinalizeBackgroundCompileTask");
- RuntimeCallTimerScope runtimeTimer(
- isolate, RuntimeCallCounterId::kCompileFinalizeBackgroundCompileTask);
+ RCS_SCOPE(isolate,
+ RuntimeCallCounterId::kCompileFinalizeBackgroundCompileTask);
HandleScope scope(isolate);
ParseInfo* parse_info = task->info();
DCHECK(!parse_info->flags().is_toplevel());
@@ -2099,8 +2055,16 @@ bool Compiler::CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
DCHECK(CodeKindIsOptimizedJSFunction(code_kind));
DCHECK(AllowCompilation::IsAllowed(isolate));
+ if (FLAG_stress_concurrent_inlining &&
+ isolate->concurrent_recompilation_enabled() &&
+ mode == ConcurrencyMode::kNotConcurrent &&
+ isolate->node_observer() == nullptr) {
+ SpawnDuplicateConcurrentJobForStressTesting(isolate, function, mode,
+ code_kind);
+ }
+
Handle<Code> code;
- if (!GetOptimizedCode(function, mode, code_kind).ToHandle(&code)) {
+ if (!GetOptimizedCode(isolate, function, mode, code_kind).ToHandle(&code)) {
// Optimization failed, get the existing code. We could have optimized code
// from a lower tier here. Unoptimized code must exist already if we are
// optimizing.
@@ -2110,23 +2074,18 @@ bool Compiler::CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
code = ContinuationForConcurrentOptimization(isolate, function);
}
- if (!CodeKindIsNativeContextIndependentJSFunction(code_kind)) {
- function->set_code(*code, kReleaseStore);
- }
+ function->set_code(*code, kReleaseStore);
// Check postconditions on success.
DCHECK(!isolate->has_pending_exception());
DCHECK(function->shared().is_compiled());
- DCHECK(CodeKindIsNativeContextIndependentJSFunction(code_kind) ||
- function->is_compiled());
- if (!CodeKindIsNativeContextIndependentJSFunction(code_kind)) {
- DCHECK_IMPLIES(function->HasOptimizationMarker(),
- function->IsInOptimizationQueue());
- DCHECK_IMPLIES(function->HasOptimizationMarker(),
- function->ChecksOptimizationMarker());
- DCHECK_IMPLIES(function->IsInOptimizationQueue(),
- mode == ConcurrencyMode::kConcurrent);
- }
+ DCHECK(function->is_compiled());
+ DCHECK_IMPLIES(function->HasOptimizationMarker(),
+ function->IsInOptimizationQueue());
+ DCHECK_IMPLIES(function->HasOptimizationMarker(),
+ function->ChecksOptimizationMarker());
+ DCHECK_IMPLIES(function->IsInOptimizationQueue(),
+ mode == ConcurrencyMode::kConcurrent);
return true;
}
@@ -2281,8 +2240,7 @@ bool CodeGenerationFromStringsAllowed(Isolate* isolate, Handle<Context> context,
// Callback set. Let it decide if code generation is allowed.
VMState<EXTERNAL> state(isolate);
- RuntimeCallTimerScope timer(
- isolate, RuntimeCallCounterId::kCodeGenerationFromStringsCallbacks);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kCodeGenerationFromStringsCallbacks);
AllowCodeGenerationFromStringsCallback callback =
isolate->allow_code_gen_callback();
return callback(v8::Utils::ToLocal(context), v8::Utils::ToLocal(source));
@@ -2301,8 +2259,7 @@ bool ModifyCodeGenerationFromStrings(Isolate* isolate, Handle<Context> context,
// Callback set. Run it, and use the return value as source, or block
// execution if it's not set.
VMState<EXTERNAL> state(isolate);
- RuntimeCallTimerScope timer(
- isolate, RuntimeCallCounterId::kCodeGenerationFromStringsCallbacks);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kCodeGenerationFromStringsCallbacks);
ModifyCodeGenerationFromStringsResult result =
isolate->modify_code_gen_callback()
? isolate->modify_code_gen_callback()(v8::Utils::ToLocal(context),
@@ -2881,8 +2838,7 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
compile_timer.set_consuming_code_cache();
// Then check cached code provided by embedder.
HistogramTimerScope timer(isolate->counters()->compile_deserialize());
- RuntimeCallTimerScope runtimeTimer(
- isolate, RuntimeCallCounterId::kCompileDeserialize);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kCompileDeserialize);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompileDeserialize");
Handle<SharedFunctionInfo> inner_result;
@@ -2894,7 +2850,6 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
is_compiled_scope = inner_result->is_compiled_scope(isolate);
DCHECK(is_compiled_scope.is_compiled());
compilation_cache->PutScript(source, language_mode, inner_result);
- Handle<Script> script(Script::cast(inner_result->script()), isolate);
maybe_result = inner_result;
} else {
// Deserializer failed. Fall through to compile.
@@ -2970,8 +2925,7 @@ MaybeHandle<JSFunction> Compiler::GetWrappedFunction(
compile_timer.set_consuming_code_cache();
// Then check cached code provided by embedder.
HistogramTimerScope timer(isolate->counters()->compile_deserialize());
- RuntimeCallTimerScope runtimeTimer(
- isolate, RuntimeCallCounterId::kCompileDeserialize);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kCompileDeserialize);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompileDeserialize");
maybe_result = CodeSerializer::Deserialize(isolate, cached_data, source,
@@ -3074,8 +3028,8 @@ Compiler::GetSharedFunctionInfoForStreamedScript(
Handle<Script> script;
if (FLAG_finalize_streaming_on_background && !origin_options.IsModule()) {
- RuntimeCallTimerScope runtimeTimerScope(
- isolate, RuntimeCallCounterId::kCompilePublishBackgroundFinalization);
+ RCS_SCOPE(isolate,
+ RuntimeCallCounterId::kCompilePublishBackgroundFinalization);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.OffThreadFinalization.Publish");
@@ -3163,15 +3117,14 @@ Compiler::GetSharedFunctionInfoForStreamedScript(
}
// static
-template <typename LocalIsolate>
+template <typename IsolateT>
Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
- FunctionLiteral* literal, Handle<Script> script, LocalIsolate* isolate) {
+ FunctionLiteral* literal, Handle<Script> script, IsolateT* isolate) {
// Precondition: code has been parsed and scopes have been analyzed.
MaybeHandle<SharedFunctionInfo> maybe_existing;
// Find any previously allocated shared function info for the given literal.
- maybe_existing =
- script->FindSharedFunctionInfo(isolate, literal->function_literal_id());
+ maybe_existing = Script::FindSharedFunctionInfo(script, isolate, literal);
// If we found an existing shared function info, return it.
Handle<SharedFunctionInfo> existing;
@@ -3215,12 +3168,13 @@ template Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
FunctionLiteral* literal, Handle<Script> script, LocalIsolate* isolate);
// static
-MaybeHandle<Code> Compiler::GetOptimizedCodeForOSR(Handle<JSFunction> function,
+MaybeHandle<Code> Compiler::GetOptimizedCodeForOSR(Isolate* isolate,
+ Handle<JSFunction> function,
BytecodeOffset osr_offset,
JavaScriptFrame* osr_frame) {
DCHECK(!osr_offset.IsNone());
DCHECK_NOT_NULL(osr_frame);
- return GetOptimizedCode(function, ConcurrencyMode::kNotConcurrent,
+ return GetOptimizedCode(isolate, function, ConcurrencyMode::kNotConcurrent,
CodeKindForOSR(), osr_offset, osr_frame);
}
@@ -3233,17 +3187,14 @@ bool Compiler::FinalizeOptimizedCompilationJob(OptimizedCompilationJob* job,
OptimizedCompilationInfo* compilation_info = job->compilation_info();
TimerEventScope<TimerEventRecompileSynchronous> timer(isolate);
- RuntimeCallTimerScope runtimeTimer(
- isolate, RuntimeCallCounterId::kOptimizeConcurrentFinalize);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kOptimizeConcurrentFinalize);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.OptimizeConcurrentFinalize");
Handle<SharedFunctionInfo> shared = compilation_info->shared_info();
- CodeKind code_kind = compilation_info->code_kind();
- const bool should_install_code_on_function =
- !CodeKindIsNativeContextIndependentJSFunction(code_kind);
- if (should_install_code_on_function) {
+ const bool use_result = !compilation_info->discard_result_for_testing();
+ if (V8_LIKELY(use_result)) {
// Reset profiler ticks, function is no longer considered hot.
compilation_info->closure()->feedback_vector().set_profiler_ticks(0);
}
@@ -3263,10 +3214,9 @@ bool Compiler::FinalizeOptimizedCompilationJob(OptimizedCompilationJob* job,
isolate);
job->RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG,
isolate);
- InsertCodeIntoOptimizedCodeCache(compilation_info);
- InsertCodeIntoCompilationCache(isolate, compilation_info);
- CompilerTracer::TraceCompletedJob(isolate, compilation_info);
- if (should_install_code_on_function) {
+ if (V8_LIKELY(use_result)) {
+ InsertCodeIntoOptimizedCodeCache(compilation_info);
+ CompilerTracer::TraceCompletedJob(isolate, compilation_info);
compilation_info->closure()->set_code(*compilation_info->code(),
kReleaseStore);
}
@@ -3276,11 +3226,12 @@ bool Compiler::FinalizeOptimizedCompilationJob(OptimizedCompilationJob* job,
DCHECK_EQ(job->state(), CompilationJob::State::kFailed);
CompilerTracer::TraceAbortedJob(isolate, compilation_info);
- compilation_info->closure()->set_code(shared->GetCode(), kReleaseStore);
- // Clear the InOptimizationQueue marker, if it exists.
- if (!CodeKindIsNativeContextIndependentJSFunction(code_kind) &&
- compilation_info->closure()->IsInOptimizationQueue()) {
- compilation_info->closure()->ClearOptimizationMarker();
+ if (V8_LIKELY(use_result)) {
+ compilation_info->closure()->set_code(shared->GetCode(), kReleaseStore);
+ // Clear the InOptimizationQueue marker, if it exists.
+ if (compilation_info->closure()->IsInOptimizationQueue()) {
+ compilation_info->closure()->ClearOptimizationMarker();
+ }
}
return CompilationJob::FAILED;
}
diff --git a/deps/v8/src/codegen/compiler.h b/deps/v8/src/codegen/compiler.h
index 7ff1b5eecd..e7d05b3ba3 100644
--- a/deps/v8/src/codegen/compiler.h
+++ b/deps/v8/src/codegen/compiler.h
@@ -190,9 +190,10 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
// Create a shared function info object for the given function literal
// node (the code may be lazily compiled).
- template <typename LocalIsolate>
- static Handle<SharedFunctionInfo> GetSharedFunctionInfo(
- FunctionLiteral* node, Handle<Script> script, LocalIsolate* isolate);
+ template <typename IsolateT>
+ static Handle<SharedFunctionInfo> GetSharedFunctionInfo(FunctionLiteral* node,
+ Handle<Script> script,
+ IsolateT* isolate);
// ===========================================================================
// The following family of methods provides support for OSR. Code generated
@@ -205,7 +206,7 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
// Generate and return optimized code for OSR, or empty handle on failure.
V8_WARN_UNUSED_RESULT static MaybeHandle<Code> GetOptimizedCodeForOSR(
- Handle<JSFunction> function, BytecodeOffset osr_offset,
+ Isolate* isolate, Handle<JSFunction> function, BytecodeOffset osr_offset,
JavaScriptFrame* osr_frame);
};
diff --git a/deps/v8/src/codegen/constants-arch.h b/deps/v8/src/codegen/constants-arch.h
index cea8dc068f..2417be5d4d 100644
--- a/deps/v8/src/codegen/constants-arch.h
+++ b/deps/v8/src/codegen/constants-arch.h
@@ -6,23 +6,23 @@
#define V8_CODEGEN_CONSTANTS_ARCH_H_
#if V8_TARGET_ARCH_ARM
-#include "src/codegen/arm/constants-arm.h" // NOLINT
+#include "src/codegen/arm/constants-arm.h"
#elif V8_TARGET_ARCH_ARM64
-#include "src/codegen/arm64/constants-arm64.h" // NOLINT
+#include "src/codegen/arm64/constants-arm64.h"
#elif V8_TARGET_ARCH_IA32
-#include "src/codegen/ia32/constants-ia32.h" // NOLINT
+#include "src/codegen/ia32/constants-ia32.h"
#elif V8_TARGET_ARCH_MIPS
-#include "src/codegen/mips/constants-mips.h" // NOLINT
+#include "src/codegen/mips/constants-mips.h"
#elif V8_TARGET_ARCH_MIPS64
-#include "src/codegen/mips64/constants-mips64.h" // NOLINT
+#include "src/codegen/mips64/constants-mips64.h"
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
-#include "src/codegen/ppc/constants-ppc.h" // NOLINT
+#include "src/codegen/ppc/constants-ppc.h"
#elif V8_TARGET_ARCH_S390
-#include "src/codegen/s390/constants-s390.h" // NOLINT
+#include "src/codegen/s390/constants-s390.h"
#elif V8_TARGET_ARCH_X64
-#include "src/codegen/x64/constants-x64.h" // NOLINT
+#include "src/codegen/x64/constants-x64.h"
#elif V8_TARGET_ARCH_RISCV64
-#include "src/codegen/riscv64/constants-riscv64.h" // NOLINT
+#include "src/codegen/riscv64/constants-riscv64.h"
#else
#error Unsupported target architecture.
#endif
diff --git a/deps/v8/src/codegen/cpu-features.h b/deps/v8/src/codegen/cpu-features.h
index b9a450ea3a..6833ee60d0 100644
--- a/deps/v8/src/codegen/cpu-features.h
+++ b/deps/v8/src/codegen/cpu-features.h
@@ -109,6 +109,9 @@ class V8_EXPORT_PRIVATE CpuFeatures : public AllStatic {
return (supported_ & (1u << f)) != 0;
}
+ static void SetSupported(CpuFeature f) { supported_ |= 1u << f; }
+ static void SetUnsupported(CpuFeature f) { supported_ &= ~(1u << f); }
+
static bool SupportsWasmSimd128();
static inline bool SupportsOptimizer();
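
SetSupported and SetUnsupported complete the bit-set interface around the
existing IsSupported test: each CpuFeature enumerator indexes one bit of the
supported_ mask. The three operations in isolation:

    uint32_t supported = 0;
    supported |= 1u << AVX;                     // SetSupported(AVX)
    supported &= ~(1u << AVX);                  // SetUnsupported(AVX)
    bool on = (supported & (1u << AVX)) != 0;   // IsSupported(AVX)
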
diff --git a/deps/v8/src/codegen/external-reference-table.cc b/deps/v8/src/codegen/external-reference-table.cc
index 2741bd8ec2..6c109861a2 100644
--- a/deps/v8/src/codegen/external-reference-table.cc
+++ b/deps/v8/src/codegen/external-reference-table.cc
@@ -33,20 +33,24 @@ namespace internal {
// clang-format off
const char* const
ExternalReferenceTable::ref_name_[ExternalReferenceTable::kSize] = {
+ // === Isolate independent ===
// Special references:
"nullptr",
- // External references:
+ // External references (without isolate):
EXTERNAL_REFERENCE_LIST(ADD_EXT_REF_NAME)
- EXTERNAL_REFERENCE_LIST_WITH_ISOLATE(ADD_EXT_REF_NAME)
// Builtins:
BUILTIN_LIST_C(ADD_BUILTIN_NAME)
// Runtime functions:
FOR_EACH_INTRINSIC(ADD_RUNTIME_FUNCTION)
- // Isolate addresses:
- FOR_EACH_ISOLATE_ADDRESS_NAME(ADD_ISOLATE_ADDR)
// Accessors:
ACCESSOR_INFO_LIST_GENERATOR(ADD_ACCESSOR_INFO_NAME, /* not used */)
ACCESSOR_SETTER_LIST(ADD_ACCESSOR_SETTER_NAME)
+
+ // === Isolate dependent ===
+ // External references (with isolate):
+ EXTERNAL_REFERENCE_LIST_WITH_ISOLATE(ADD_EXT_REF_NAME)
+ // Isolate addresses:
+ FOR_EACH_ISOLATE_ADDRESS_NAME(ADD_ISOLATE_ADDR)
// Stub cache:
"Load StubCache::primary_->key",
"Load StubCache::primary_->value",
@@ -72,6 +76,11 @@ const char* const
#undef ADD_ACCESSOR_SETTER_NAME
#undef ADD_STATS_COUNTER_NAME
+namespace {
+static Address ref_addr_isolate_independent_
+ [ExternalReferenceTable::kSizeIsolateIndependent] = {0};
+} // namespace
+
// Forward declarations for C++ builtins.
#define FORWARD_DECLARE(Name) \
Address Builtin_##Name(int argc, Address* args, Isolate* isolate);
@@ -81,13 +90,10 @@ BUILTIN_LIST_C(FORWARD_DECLARE)
void ExternalReferenceTable::Init(Isolate* isolate) {
int index = 0;
- // kNullAddress is preserved through serialization/deserialization.
- Add(kNullAddress, &index);
- AddReferences(isolate, &index);
- AddBuiltins(&index);
- AddRuntimeFunctions(&index);
+ CopyIsolateIndependentReferences(&index);
+
+ AddIsolateDependentReferences(isolate, &index);
AddIsolateAddresses(isolate, &index);
- AddAccessors(&index);
AddStubCache(isolate, &index);
AddNativeCodeStatsCounters(isolate, &index);
is_initialized_ = static_cast<uint32_t>(true);
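
Init() is now two-phase: InitializeOncePerProcess() (added below) fills a
static buffer with the isolate-independent prefix exactly once, and each
isolate's Init() copies that prefix before appending its isolate-dependent
tail. Roughly, under the names introduced in this diff:

    // Phase 1: once per process, e.g. during V8 startup.
    ExternalReferenceTable::InitializeOncePerProcess();

    // Phase 2: per isolate; copies the shared prefix, then appends the
    // isolate-dependent entries in the fixed layout order.
    isolate->external_reference_table()->Init(isolate);
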
@@ -108,28 +114,66 @@ const char* ExternalReferenceTable::ResolveSymbol(void* address) {
#endif // SYMBOLIZE_FUNCTION
}
+void ExternalReferenceTable::InitializeOncePerProcess() {
+ int index = 0;
+
+ // kNullAddress is preserved through serialization/deserialization.
+ AddIsolateIndependent(kNullAddress, &index);
+ AddIsolateIndependentReferences(&index);
+ AddBuiltins(&index);
+ AddRuntimeFunctions(&index);
+ AddAccessors(&index);
+
+ CHECK_EQ(kSizeIsolateIndependent, index);
+}
+
+const char* ExternalReferenceTable::NameOfIsolateIndependentAddress(
+ Address address) {
+ for (int i = 0; i < kSizeIsolateIndependent; i++) {
+ if (ref_addr_isolate_independent_[i] == address) {
+ return ref_name_[i];
+ }
+ }
+ return "<unknown>";
+}
+
void ExternalReferenceTable::Add(Address address, int* index) {
ref_addr_[(*index)++] = address;
}
-void ExternalReferenceTable::AddReferences(Isolate* isolate, int* index) {
+void ExternalReferenceTable::AddIsolateIndependent(Address address,
+ int* index) {
+ ref_addr_isolate_independent_[(*index)++] = address;
+}
+
+void ExternalReferenceTable::AddIsolateIndependentReferences(int* index) {
CHECK_EQ(kSpecialReferenceCount, *index);
#define ADD_EXTERNAL_REFERENCE(name, desc) \
- Add(ExternalReference::name().address(), index);
+ AddIsolateIndependent(ExternalReference::name().address(), index);
EXTERNAL_REFERENCE_LIST(ADD_EXTERNAL_REFERENCE)
#undef ADD_EXTERNAL_REFERENCE
+ CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCountIsolateIndependent,
+ *index);
+}
+
+void ExternalReferenceTable::AddIsolateDependentReferences(Isolate* isolate,
+ int* index) {
+ CHECK_EQ(kSizeIsolateIndependent, *index);
+
#define ADD_EXTERNAL_REFERENCE(name, desc) \
Add(ExternalReference::name(isolate).address(), index);
EXTERNAL_REFERENCE_LIST_WITH_ISOLATE(ADD_EXTERNAL_REFERENCE)
#undef ADD_EXTERNAL_REFERENCE
- CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount, *index);
+ CHECK_EQ(kSizeIsolateIndependent + kExternalReferenceCountIsolateDependent,
+ *index);
}
void ExternalReferenceTable::AddBuiltins(int* index) {
- CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount, *index);
+ CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCountIsolateIndependent,
+ *index);
static const Address c_builtins[] = {
#define DEF_ENTRY(Name, ...) FUNCTION_ADDR(&Builtin_##Name),
@@ -137,16 +181,16 @@ void ExternalReferenceTable::AddBuiltins(int* index) {
#undef DEF_ENTRY
};
for (Address addr : c_builtins) {
- Add(ExternalReference::Create(addr).address(), index);
+ AddIsolateIndependent(ExternalReference::Create(addr).address(), index);
}
- CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount +
+ CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCountIsolateIndependent +
kBuiltinsReferenceCount,
*index);
}
void ExternalReferenceTable::AddRuntimeFunctions(int* index) {
- CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount +
+ CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCountIsolateIndependent +
kBuiltinsReferenceCount,
*index);
@@ -157,33 +201,38 @@ void ExternalReferenceTable::AddRuntimeFunctions(int* index) {
};
for (Runtime::FunctionId fId : runtime_functions) {
- Add(ExternalReference::Create(fId).address(), index);
+ AddIsolateIndependent(ExternalReference::Create(fId).address(), index);
}
- CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount +
+ CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCountIsolateIndependent +
kBuiltinsReferenceCount + kRuntimeReferenceCount,
*index);
}
+void ExternalReferenceTable::CopyIsolateIndependentReferences(int* index) {
+ CHECK_EQ(0, *index);
+
+ std::copy(ref_addr_isolate_independent_,
+ ref_addr_isolate_independent_ + kSizeIsolateIndependent, ref_addr_);
+ *index += kSizeIsolateIndependent;
+}
+
void ExternalReferenceTable::AddIsolateAddresses(Isolate* isolate, int* index) {
- CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount +
- kBuiltinsReferenceCount + kRuntimeReferenceCount,
+ CHECK_EQ(kSizeIsolateIndependent + kExternalReferenceCountIsolateDependent,
*index);
for (int i = 0; i < IsolateAddressId::kIsolateAddressCount; ++i) {
Add(isolate->get_address_from_id(static_cast<IsolateAddressId>(i)), index);
}
- CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount +
- kBuiltinsReferenceCount + kRuntimeReferenceCount +
+ CHECK_EQ(kSizeIsolateIndependent + kExternalReferenceCountIsolateDependent +
kIsolateAddressReferenceCount,
*index);
}
void ExternalReferenceTable::AddAccessors(int* index) {
- CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount +
- kBuiltinsReferenceCount + kRuntimeReferenceCount +
- kIsolateAddressReferenceCount,
+ CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCountIsolateIndependent +
+ kBuiltinsReferenceCount + kRuntimeReferenceCount,
*index);
static const Address accessors[] = {
@@ -199,19 +248,18 @@ void ExternalReferenceTable::AddAccessors(int* index) {
};
for (Address addr : accessors) {
- Add(addr, index);
+ AddIsolateIndependent(addr, index);
}
- CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount +
+ CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCountIsolateIndependent +
kBuiltinsReferenceCount + kRuntimeReferenceCount +
- kIsolateAddressReferenceCount + kAccessorReferenceCount,
+ kAccessorReferenceCount,
*index);
}
void ExternalReferenceTable::AddStubCache(Isolate* isolate, int* index) {
- CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount +
- kBuiltinsReferenceCount + kRuntimeReferenceCount +
- kIsolateAddressReferenceCount + kAccessorReferenceCount,
+ CHECK_EQ(kSizeIsolateIndependent + kExternalReferenceCountIsolateDependent +
+ kIsolateAddressReferenceCount,
*index);
StubCache* load_stub_cache = isolate->load_stub_cache();
@@ -235,10 +283,8 @@ void ExternalReferenceTable::AddStubCache(Isolate* isolate, int* index) {
index);
Add(store_stub_cache->map_reference(StubCache::kSecondary).address(), index);
- CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount +
- kBuiltinsReferenceCount + kRuntimeReferenceCount +
- kIsolateAddressReferenceCount + kAccessorReferenceCount +
- kStubCacheReferenceCount,
+ CHECK_EQ(kSizeIsolateIndependent + kExternalReferenceCountIsolateDependent +
+ kIsolateAddressReferenceCount + kStubCacheReferenceCount,
*index);
}
@@ -251,10 +297,8 @@ Address ExternalReferenceTable::GetStatsCounterAddress(StatsCounter* counter) {
void ExternalReferenceTable::AddNativeCodeStatsCounters(Isolate* isolate,
int* index) {
- CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount +
- kBuiltinsReferenceCount + kRuntimeReferenceCount +
- kIsolateAddressReferenceCount + kAccessorReferenceCount +
- kStubCacheReferenceCount,
+ CHECK_EQ(kSizeIsolateIndependent + kExternalReferenceCountIsolateDependent +
+ kIsolateAddressReferenceCount + kStubCacheReferenceCount,
*index);
Counters* counters = isolate->counters();
@@ -263,10 +307,9 @@ void ExternalReferenceTable::AddNativeCodeStatsCounters(Isolate* isolate,
STATS_COUNTER_NATIVE_CODE_LIST(SC)
#undef SC
- CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount +
- kBuiltinsReferenceCount + kRuntimeReferenceCount +
- kIsolateAddressReferenceCount + kAccessorReferenceCount +
- kStubCacheReferenceCount + kStatsCountersReferenceCount,
+ CHECK_EQ(kSizeIsolateIndependent + kExternalReferenceCountIsolateDependent +
+ kIsolateAddressReferenceCount + kStubCacheReferenceCount +
+ kStatsCountersReferenceCount,
*index);
CHECK_EQ(kSize, *index);
}
diff --git a/deps/v8/src/codegen/external-reference-table.h b/deps/v8/src/codegen/external-reference-table.h
index 9f75d0fa74..0bf42477ae 100644
--- a/deps/v8/src/codegen/external-reference-table.h
+++ b/deps/v8/src/codegen/external-reference-table.h
@@ -24,8 +24,10 @@ class ExternalReferenceTable {
public:
// For the nullptr ref, see the constructor.
static constexpr int kSpecialReferenceCount = 1;
- static constexpr int kExternalReferenceCount =
- ExternalReference::kExternalReferenceCount;
+ static constexpr int kExternalReferenceCountIsolateIndependent =
+ ExternalReference::kExternalReferenceCountIsolateIndependent;
+ static constexpr int kExternalReferenceCountIsolateDependent =
+ ExternalReference::kExternalReferenceCountIsolateDependent;
static constexpr int kBuiltinsReferenceCount =
#define COUNT_C_BUILTIN(...) +1
BUILTIN_LIST_C(COUNT_C_BUILTIN);
@@ -42,11 +44,14 @@ class ExternalReferenceTable {
#define SC(...) +1
STATS_COUNTER_NATIVE_CODE_LIST(SC);
#undef SC
- static constexpr int kSize =
- kSpecialReferenceCount + kExternalReferenceCount +
+ static constexpr int kSizeIsolateIndependent =
+ kSpecialReferenceCount + kExternalReferenceCountIsolateIndependent +
kBuiltinsReferenceCount + kRuntimeReferenceCount +
- kIsolateAddressReferenceCount + kAccessorReferenceCount +
- kStubCacheReferenceCount + kStatsCountersReferenceCount;
+ kAccessorReferenceCount;
+ static constexpr int kSize =
+ kSizeIsolateIndependent + kExternalReferenceCountIsolateDependent +
+ kIsolateAddressReferenceCount + kStubCacheReferenceCount +
+ kStatsCountersReferenceCount;
static constexpr uint32_t kEntrySize =
static_cast<uint32_t>(kSystemPointerSize);
static constexpr uint32_t kSizeInBytes = kSize * kEntrySize + 2 * kUInt32Size;
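
kSize is now the sum of an isolate-independent prefix and an isolate-dependent
tail, matching the fill order in external-reference-table.cc. The invariant
can be spelled out as a compile-time check (illustrative only):

    static_assert(
        ExternalReferenceTable::kSize ==
            ExternalReferenceTable::kSizeIsolateIndependent +
                ExternalReferenceTable::kExternalReferenceCountIsolateDependent +
                ExternalReferenceTable::kIsolateAddressReferenceCount +
                ExternalReferenceTable::kStubCacheReferenceCount +
                ExternalReferenceTable::kStatsCountersReferenceCount,
        "isolate-independent prefix + isolate-dependent tail == full table");
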
@@ -63,6 +68,9 @@ class ExternalReferenceTable {
return i * kEntrySize;
}
+ static void InitializeOncePerProcess();
+ static const char* NameOfIsolateIndependentAddress(Address address);
+
const char* NameFromOffset(uint32_t offset) {
DCHECK_EQ(offset % kEntrySize, 0);
DCHECK_LT(offset, kSizeInBytes);
@@ -76,13 +84,18 @@ class ExternalReferenceTable {
void Init(Isolate* isolate);
private:
+ static void AddIsolateIndependent(Address address, int* index);
+
+ static void AddIsolateIndependentReferences(int* index);
+ static void AddBuiltins(int* index);
+ static void AddRuntimeFunctions(int* index);
+ static void AddAccessors(int* index);
+
void Add(Address address, int* index);
- void AddReferences(Isolate* isolate, int* index);
- void AddBuiltins(int* index);
- void AddRuntimeFunctions(int* index);
+ void CopyIsolateIndependentReferences(int* index);
+ void AddIsolateDependentReferences(Isolate* isolate, int* index);
void AddIsolateAddresses(Isolate* isolate, int* index);
- void AddAccessors(int* index);
void AddStubCache(Isolate* isolate, int* index);
Address GetStatsCounterAddress(StatsCounter* counter);
diff --git a/deps/v8/src/codegen/external-reference.cc b/deps/v8/src/codegen/external-reference.cc
index e992f1f285..3e91306b15 100644
--- a/deps/v8/src/codegen/external-reference.cc
+++ b/deps/v8/src/codegen/external-reference.cc
@@ -819,6 +819,9 @@ ExternalReference ExternalReference::search_string_raw() {
FUNCTION_REFERENCE(jsarray_array_join_concat_to_sequential_string,
JSArray::ArrayJoinConcatToSequentialString)
+FUNCTION_REFERENCE(length_tracking_gsab_backed_typed_array_length,
+ JSTypedArray::LengthTrackingGsabBackedTypedArrayLength)
+
ExternalReference ExternalReference::search_string_raw_one_one() {
return search_string_raw<const uint8_t, const uint8_t>();
}
@@ -1017,11 +1020,6 @@ ExternalReference ExternalReference::debug_suspended_generator_address(
return ExternalReference(isolate->debug()->suspended_generator_address());
}
-ExternalReference ExternalReference::debug_restart_fp_address(
- Isolate* isolate) {
- return ExternalReference(isolate->debug()->restart_fp_address());
-}
-
ExternalReference ExternalReference::fast_c_call_caller_fp_address(
Isolate* isolate) {
return ExternalReference(
diff --git a/deps/v8/src/codegen/external-reference.h b/deps/v8/src/codegen/external-reference.h
index 60603bdb71..f75a5c694a 100644
--- a/deps/v8/src/codegen/external-reference.h
+++ b/deps/v8/src/codegen/external-reference.h
@@ -62,7 +62,6 @@ class StatsCounter;
V(is_profiling_address, "Isolate::is_profiling") \
V(debug_suspended_generator_address, \
"Debug::step_suspended_generator_address()") \
- V(debug_restart_fp_address, "Debug::restart_fp_address()") \
V(fast_c_call_caller_fp_address, \
"IsolateData::fast_c_call_caller_fp_address") \
V(fast_c_call_caller_pc_address, \
@@ -170,6 +169,8 @@ class StatsCounter;
V(jsarray_array_join_concat_to_sequential_string, \
"jsarray_array_join_concat_to_sequential_string") \
V(jsreceiver_create_identity_hash, "jsreceiver_create_identity_hash") \
+ V(length_tracking_gsab_backed_typed_array_length, \
+ "LengthTrackingGsabBackedTypedArrayLength") \
V(libc_memchr_function, "libc_memchr") \
V(libc_memcpy_function, "libc_memcpy") \
V(libc_memmove_function, "libc_memmove") \
@@ -338,10 +339,11 @@ class ExternalReference {
PROFILING_GETTER_CALL
};
- static constexpr int kExternalReferenceCount =
#define COUNT_EXTERNAL_REFERENCE(name, desc) +1
- EXTERNAL_REFERENCE_LIST(COUNT_EXTERNAL_REFERENCE)
- EXTERNAL_REFERENCE_LIST_WITH_ISOLATE(COUNT_EXTERNAL_REFERENCE);
+ static constexpr int kExternalReferenceCountIsolateIndependent =
+ EXTERNAL_REFERENCE_LIST(COUNT_EXTERNAL_REFERENCE);
+ static constexpr int kExternalReferenceCountIsolateDependent =
+ EXTERNAL_REFERENCE_LIST_WITH_ISOLATE(COUNT_EXTERNAL_REFERENCE);
#undef COUNT_EXTERNAL_REFERENCE
ExternalReference() : address_(kNullAddress) {}
diff --git a/deps/v8/src/codegen/handler-table.h b/deps/v8/src/codegen/handler-table.h
index e1626e2be5..5b83bf4a69 100644
--- a/deps/v8/src/codegen/handler-table.h
+++ b/deps/v8/src/codegen/handler-table.h
@@ -97,8 +97,8 @@ class V8_EXPORT_PRIVATE HandlerTable {
int NumberOfReturnEntries() const;
#ifdef ENABLE_DISASSEMBLER
- void HandlerTableRangePrint(std::ostream& os); // NOLINT
- void HandlerTableReturnPrint(std::ostream& os); // NOLINT
+ void HandlerTableRangePrint(std::ostream& os);
+ void HandlerTableReturnPrint(std::ostream& os);
#endif
private:
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.cc b/deps/v8/src/codegen/ia32/assembler-ia32.cc
index 809df1daef..688b038e91 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/assembler-ia32.cc
@@ -138,39 +138,38 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
// Only use statically determined features for cross compile (snapshot).
if (cross_compile) return;
- // To deal with any combination of flags (e.g. --no-enable-sse4-1
- // --enable-sse-4-2), we start checking from the "highest" supported
- // extension, for each extension, enable if newer extension is supported.
- if (cpu.has_avx2() && FLAG_enable_avx2 && IsSupported(AVX)) {
- supported_ |= 1u << AVX2;
+ if (cpu.has_sse42()) SetSupported(SSE4_2);
+ if (cpu.has_sse41()) SetSupported(SSE4_1);
+ if (cpu.has_ssse3()) SetSupported(SSSE3);
+ if (cpu.has_sse3()) SetSupported(SSE3);
+ if (cpu.has_avx() && cpu.has_osxsave() && OSHasAVXSupport()) {
+ SetSupported(AVX);
+ if (cpu.has_avx2()) SetSupported(AVX2);
+ if (cpu.has_fma3()) SetSupported(FMA3);
}
- if (cpu.has_fma3() && FLAG_enable_fma3 && cpu.has_osxsave() &&
- OSHasAVXSupport()) {
- supported_ |= 1u << FMA3;
- }
- if ((cpu.has_avx() && FLAG_enable_avx && cpu.has_osxsave() &&
- OSHasAVXSupport()) ||
- IsSupported(AVX2) || IsSupported(FMA3)) {
- supported_ |= 1u << AVX;
- }
- if ((cpu.has_sse42() && FLAG_enable_sse4_2) || IsSupported(AVX))
- supported_ |= 1u << SSE4_2;
- if ((cpu.has_sse41() && FLAG_enable_sse4_1) || IsSupported(SSE4_2))
- supported_ |= 1u << SSE4_1;
- if ((cpu.has_ssse3() && FLAG_enable_ssse3) || IsSupported(SSE4_1))
- supported_ |= 1u << SSSE3;
- if ((cpu.has_sse3() && FLAG_enable_sse3) || IsSupported(SSSE3))
- supported_ |= 1u << SSE3;
- if (cpu.has_bmi1() && FLAG_enable_bmi1) supported_ |= 1u << BMI1;
- if (cpu.has_bmi2() && FLAG_enable_bmi2) supported_ |= 1u << BMI2;
- if (cpu.has_lzcnt() && FLAG_enable_lzcnt) supported_ |= 1u << LZCNT;
- if (cpu.has_popcnt() && FLAG_enable_popcnt) supported_ |= 1u << POPCNT;
+
+ if (cpu.has_bmi1() && FLAG_enable_bmi1) SetSupported(BMI1);
+ if (cpu.has_bmi2() && FLAG_enable_bmi2) SetSupported(BMI2);
+ if (cpu.has_lzcnt() && FLAG_enable_lzcnt) SetSupported(LZCNT);
+ if (cpu.has_popcnt() && FLAG_enable_popcnt) SetSupported(POPCNT);
if (strcmp(FLAG_mcpu, "auto") == 0) {
- if (cpu.is_atom()) supported_ |= 1u << ATOM;
+ if (cpu.is_atom()) SetSupported(ATOM);
} else if (strcmp(FLAG_mcpu, "atom") == 0) {
- supported_ |= 1u << ATOM;
+ SetSupported(ATOM);
}
+  // Ensure that supported cpu features make sense. E.g. it is wrong to
+  // support AVX but not SSE4_2: with --enable-avx and --no-enable-sse4-2,
+  // the probing above leaves AVX supported, the SSE4_2 check below clears
+  // SSE4_2, and the AVX check then clears AVX as well.
+ if (!FLAG_enable_sse3) SetUnsupported(SSE3);
+ if (!FLAG_enable_ssse3 || !IsSupported(SSE3)) SetUnsupported(SSSE3);
+ if (!FLAG_enable_sse4_1 || !IsSupported(SSSE3)) SetUnsupported(SSE4_1);
+ if (!FLAG_enable_sse4_2 || !IsSupported(SSE4_1)) SetUnsupported(SSE4_2);
+ if (!FLAG_enable_avx || !IsSupported(SSE4_2)) SetUnsupported(AVX);
+ if (!FLAG_enable_avx2 || !IsSupported(AVX)) SetUnsupported(AVX2);
+ if (!FLAG_enable_fma3 || !IsSupported(AVX)) SetUnsupported(FMA3);
+
// Set a static value on whether Simd is supported.
// This variable is only used for certain archs to query SupportWasmSimd128()
// at runtime in builtins using an extern ref. Other callers should use
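
A worked trace of the interaction the comment above describes, for a
hypothetical AVX2-capable machine run with --enable-avx --no-enable-sse4-2:

    // Probe phase (CPUID):  SSE3, SSSE3, SSE4_1, SSE4_2, AVX, AVX2, FMA3 set.
    // Flag phase, in order:
    //   !FLAG_enable_sse4_2   -> SetUnsupported(SSE4_2)
    //   !IsSupported(SSE4_2)  -> SetUnsupported(AVX)
    //   !IsSupported(AVX)     -> SetUnsupported(AVX2) and SetUnsupported(FMA3)
    // Net effect: disabling a prerequisite transitively disables everything
    // built on it, which the old highest-extension-first cascade had to
    // approximate case by case.
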
@@ -2489,6 +2488,13 @@ void Assembler::movhlps(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
+void Assembler::movlhps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0x16);
+ emit_sse_operand(dst, src);
+}
+
void Assembler::movlps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
@@ -2980,6 +2986,10 @@ void Assembler::vmovhlps(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vinstr(0x12, dst, src1, src2, kNone, k0F, kWIG);
}
+void Assembler::vmovlhps(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+ vinstr(0x16, dst, src1, src2, kNone, k0F, kWIG);
+}
+
void Assembler::vmovlps(XMMRegister dst, XMMRegister src1, Operand src2) {
vinstr(0x12, dst, src1, src2, kNone, k0F, kWIG);
}
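
movlhps/vmovlhps reuse opcode 0x16, which x86 overloads by mandatory prefix
and operand kind; standard encoding background, not specific to this diff:

    // 0F 16 /r  xmm, xmm  -> MOVLHPS   (no prefix, register source)
    // 0F 16 /r  xmm, m64  -> MOVHPS    (no prefix, memory source)
    // F3 0F 16            -> MOVSHDUP  (cf. vmovshdup elsewhere in this file)
    // 66 0F 16            -> MOVHPD
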
@@ -3276,9 +3286,9 @@ void Assembler::sse4_instr(XMMRegister dst, Operand src, byte prefix,
}
void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1,
- XMMRegister src2, SIMDPrefix pp, LeadingOpcode m,
- VexW w) {
- DCHECK(IsEnabled(AVX));
+ XMMRegister src2, SIMDPrefix pp, LeadingOpcode m, VexW w,
+ CpuFeature feature) {
+ DCHECK(IsEnabled(feature));
EnsureSpace ensure_space(this);
emit_vex_prefix(src1, kL128, pp, m, w);
EMIT(op);
@@ -3286,8 +3296,9 @@ void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1,
}
void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1, Operand src2,
- SIMDPrefix pp, LeadingOpcode m, VexW w) {
- DCHECK(IsEnabled(AVX));
+ SIMDPrefix pp, LeadingOpcode m, VexW w,
+ CpuFeature feature) {
+ DCHECK(IsEnabled(feature));
EnsureSpace ensure_space(this);
emit_vex_prefix(src1, kL128, pp, m, w);
EMIT(op);
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.h b/deps/v8/src/codegen/ia32/assembler-ia32.h
index 2a8fd3ee28..806d17a2d4 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32.h
+++ b/deps/v8/src/codegen/ia32/assembler-ia32.h
@@ -868,6 +868,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void shufpd(XMMRegister dst, XMMRegister src, byte imm8);
void movhlps(XMMRegister dst, XMMRegister src);
+ void movlhps(XMMRegister dst, XMMRegister src);
void movlps(XMMRegister dst, Operand src);
void movlps(Operand dst, XMMRegister src);
void movhps(XMMRegister dst, Operand src);
@@ -1398,6 +1399,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vshufpd(XMMRegister dst, XMMRegister src1, Operand src2, byte imm8);
void vmovhlps(XMMRegister dst, XMMRegister src1, XMMRegister src2);
+ void vmovlhps(XMMRegister dst, XMMRegister src1, XMMRegister src2);
void vmovlps(XMMRegister dst, XMMRegister src1, Operand src2);
void vmovlps(Operand dst, XMMRegister src);
void vmovhps(XMMRegister dst, XMMRegister src1, Operand src2);
@@ -1516,6 +1518,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vmovshdup(XMMRegister dst, XMMRegister src) {
vinstr(0x16, dst, xmm0, src, kF3, k0F, kWIG);
}
+ void vbroadcastss(XMMRegister dst, XMMRegister src) {
+ vinstr(0x18, dst, xmm0, src, k66, k0F38, kW0, AVX2);
+ }
void vbroadcastss(XMMRegister dst, Operand src) {
vinstr(0x18, dst, xmm0, src, k66, k0F38, kW0);
}
@@ -1892,9 +1897,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void sse4_instr(XMMRegister dst, Operand src, byte prefix, byte escape1,
byte escape2, byte opcode);
void vinstr(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2,
- SIMDPrefix pp, LeadingOpcode m, VexW w);
+ SIMDPrefix pp, LeadingOpcode m, VexW w, CpuFeature = AVX);
void vinstr(byte op, XMMRegister dst, XMMRegister src1, Operand src2,
- SIMDPrefix pp, LeadingOpcode m, VexW w);
+ SIMDPrefix pp, LeadingOpcode m, VexW w, CpuFeature = AVX);
// Most BMI instructions are similar.
void bmi1(byte op, Register reg, Register vreg, Operand rm);
void bmi2(SIMDPrefix pp, byte op, Register reg, Register vreg, Operand rm);
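
The new trailing CpuFeature parameter lets VEX instructions gated on something
stronger than AVX assert the right feature, while existing call sites keep the
AVX default:

    vinstr(0x12, dst, src1, src2, kNone, k0F, kWIG);      // DCHECKs IsEnabled(AVX)
    vinstr(0x18, dst, xmm0, src, k66, k0F38, kW0, AVX2);  // DCHECKs IsEnabled(AVX2)
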
@@ -1933,10 +1938,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// checks that we did not generate too much.
class EnsureSpace {
public:
- explicit EnsureSpace(Assembler* assembler) : assembler_(assembler) {
- if (assembler_->buffer_overflow()) assembler_->GrowBuffer();
+ explicit V8_INLINE EnsureSpace(Assembler* assembler) : assembler_(assembler) {
+ if (V8_UNLIKELY(assembler_->buffer_overflow())) assembler_->GrowBuffer();
#ifdef DEBUG
- space_before_ = assembler_->available_space();
+ space_before_ = assembler->available_space();
#endif
}
@@ -1948,7 +1953,7 @@ class EnsureSpace {
#endif
private:
- Assembler* assembler_;
+ Assembler* const assembler_;
#ifdef DEBUG
int space_before_;
#endif
diff --git a/deps/v8/src/codegen/ia32/interface-descriptors-ia32-inl.h b/deps/v8/src/codegen/ia32/interface-descriptors-ia32-inl.h
new file mode 100644
index 0000000000..d079dfd725
--- /dev/null
+++ b/deps/v8/src/codegen/ia32/interface-descriptors-ia32-inl.h
@@ -0,0 +1,267 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_IA32_INTERFACE_DESCRIPTORS_IA32_INL_H_
+#define V8_CODEGEN_IA32_INTERFACE_DESCRIPTORS_IA32_INL_H_
+
+#if V8_TARGET_ARCH_IA32
+
+#include "src/codegen/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+
+constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() {
+ auto registers = RegisterArray(eax, ecx, edx, edi);
+ STATIC_ASSERT(registers.size() == kMaxBuiltinRegisterParams);
+ return registers;
+}
+
+// static
+constexpr auto RecordWriteDescriptor::registers() {
+ return RegisterArray(ecx, edx, esi, edi, kReturnRegister0);
+}
+
+// static
+constexpr auto DynamicCheckMapsDescriptor::registers() {
+ return RegisterArray(eax, ecx, edx, edi, esi);
+}
+
+// static
+constexpr auto EphemeronKeyBarrierDescriptor::registers() {
+ return RegisterArray(ecx, edx, esi, edi, kReturnRegister0);
+}
+
+// static
+constexpr Register LoadDescriptor::ReceiverRegister() { return edx; }
+// static
+constexpr Register LoadDescriptor::NameRegister() { return ecx; }
+// static
+constexpr Register LoadDescriptor::SlotRegister() { return eax; }
+
+// static
+constexpr Register LoadWithVectorDescriptor::VectorRegister() { return no_reg; }
+
+// static
+constexpr Register
+LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
+ return edi;
+}
+
+// static
+constexpr Register StoreDescriptor::ReceiverRegister() { return edx; }
+// static
+constexpr Register StoreDescriptor::NameRegister() { return ecx; }
+// static
+constexpr Register StoreDescriptor::ValueRegister() { return no_reg; }
+// static
+constexpr Register StoreDescriptor::SlotRegister() { return no_reg; }
+
+// static
+constexpr Register StoreWithVectorDescriptor::VectorRegister() {
+ return no_reg;
+}
+
+// static
+constexpr Register StoreTransitionDescriptor::MapRegister() { return edi; }
+
+// static
+constexpr Register ApiGetterDescriptor::HolderRegister() { return ecx; }
+// static
+constexpr Register ApiGetterDescriptor::CallbackRegister() { return eax; }
+
+// static
+constexpr Register GrowArrayElementsDescriptor::ObjectRegister() { return eax; }
+// static
+constexpr Register GrowArrayElementsDescriptor::KeyRegister() { return ecx; }
+
+// static
+constexpr Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
+ return esi;
+}
+// static
+constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() {
+ return edi;
+}
+
+// static
+constexpr Register TypeConversionDescriptor::ArgumentRegister() { return eax; }
+
+// static
+constexpr auto TypeofDescriptor::registers() { return RegisterArray(ecx); }
+
+// static
+constexpr auto CallTrampolineDescriptor::registers() {
+ // eax : number of arguments
+ // edi : the target to call
+ return RegisterArray(edi, eax);
+}
+
+// static
+constexpr auto CallVarargsDescriptor::registers() {
+ // eax : number of arguments (on the stack, not including receiver)
+ // edi : the target to call
+ // ecx : arguments list length (untagged)
+ // On the stack : arguments list (FixedArray)
+ return RegisterArray(edi, eax, ecx);
+}
+
+// static
+constexpr auto CallForwardVarargsDescriptor::registers() {
+ // eax : number of arguments
+ // ecx : start index (to support rest parameters)
+ // edi : the target to call
+ return RegisterArray(edi, eax, ecx);
+}
+
+// static
+constexpr auto CallFunctionTemplateDescriptor::registers() {
+ // edx : function template info
+ // ecx : number of arguments (on the stack, not including receiver)
+ return RegisterArray(edx, ecx);
+}
+
+// static
+constexpr auto CallWithSpreadDescriptor::registers() {
+ // eax : number of arguments (on the stack, not including receiver)
+ // edi : the target to call
+ // ecx : the object to spread
+ return RegisterArray(edi, eax, ecx);
+}
+
+// static
+constexpr auto CallWithArrayLikeDescriptor::registers() {
+ // edi : the target to call
+ // edx : the arguments list
+ return RegisterArray(edi, edx);
+}
+
+// static
+constexpr auto ConstructVarargsDescriptor::registers() {
+ // eax : number of arguments (on the stack, not including receiver)
+ // edi : the target to call
+ // edx : the new target
+ // ecx : arguments list length (untagged)
+ // On the stack : arguments list (FixedArray)
+ return RegisterArray(edi, edx, eax, ecx);
+}
+
+// static
+constexpr auto ConstructForwardVarargsDescriptor::registers() {
+ // eax : number of arguments
+ // edx : the new target
+ // ecx : start index (to support rest parameters)
+ // edi : the target to call
+ return RegisterArray(edi, edx, eax, ecx);
+}
+
+// static
+constexpr auto ConstructWithSpreadDescriptor::registers() {
+ // eax : number of arguments (on the stack, not including receiver)
+ // edi : the target to call
+ // edx : the new target
+ // ecx : the object to spread
+ return RegisterArray(edi, edx, eax, ecx);
+}
+
+// static
+constexpr auto ConstructWithArrayLikeDescriptor::registers() {
+ // edi : the target to call
+ // edx : the new target
+ // ecx : the arguments list
+ return RegisterArray(edi, edx, ecx);
+}
+
+// static
+constexpr auto ConstructStubDescriptor::registers() {
+ // eax : number of arguments
+ // edx : the new target
+ // edi : the target to call
+ // ecx : allocation site or undefined
+ // TODO(jgruber): Remove the unused allocation site parameter.
+ return RegisterArray(edi, edx, eax, ecx);
+}
+
+// static
+constexpr auto AbortDescriptor::registers() { return RegisterArray(edx); }
+
+// static
+constexpr auto CompareDescriptor::registers() {
+ return RegisterArray(edx, eax);
+}
+
+// static
+constexpr auto Compare_BaselineDescriptor::registers() {
+ return RegisterArray(edx, eax, ecx);
+}
+
+// static
+constexpr auto BinaryOpDescriptor::registers() {
+ return RegisterArray(edx, eax);
+}
+
+// static
+constexpr auto BinaryOp_BaselineDescriptor::registers() {
+ return RegisterArray(edx, eax, ecx);
+}
+
+// static
+constexpr auto ApiCallbackDescriptor::registers() {
+ return RegisterArray(edx, // kApiFunctionAddress
+ ecx, // kArgc
+ eax, // kCallData
+ edi); // kHolder
+}
+
+// static
+constexpr auto InterpreterDispatchDescriptor::registers() {
+ return RegisterArray(
+ kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister);
+}
+
+// static
+constexpr auto InterpreterPushArgsThenCallDescriptor::registers() {
+ return RegisterArray(eax, // argument count (not including receiver)
+ ecx, // address of first argument
+                       edi); // the target callable to be called
+}
+
+// static
+constexpr auto InterpreterPushArgsThenConstructDescriptor::registers() {
+ return RegisterArray(eax, // argument count (not including receiver)
+ ecx); // address of first argument
+}
+
+// static
+constexpr auto ResumeGeneratorDescriptor::registers() {
+ return RegisterArray(eax, // the value to pass to the generator
+ edx); // the JSGeneratorObject to resume
+}
+
+// static
+constexpr auto RunMicrotasksEntryDescriptor::registers() {
+ return RegisterArray();
+}
+
+// static
+constexpr auto WasmFloat32ToNumberDescriptor::registers() {
+  // Work around using eax: its register code is 0, so the FP parameter would
+  // be passed via xmm0, which is not allocatable on ia32.
+ return RegisterArray(ecx);
+}
+
+// static
+constexpr auto WasmFloat64ToNumberDescriptor::registers() {
+  // Work around using eax: its register code is 0, so the FP parameter would
+  // be passed via xmm0, which is not allocatable on ia32.
+ return RegisterArray(ecx);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_IA32
+
+#endif // V8_CODEGEN_IA32_INTERFACE_DESCRIPTORS_IA32_INL_H_
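
The constexpr descriptors in the new file above all funnel through a
RegisterArray helper whose definition is outside this diff; a plausible
minimal shape, assuming it just materializes a constexpr std::array:

    #include <array>

    template <typename... Registers>
    constexpr std::array<Register, sizeof...(Registers)> RegisterArray(
        Registers... regs) {
      return {regs...};
    }

    // e.g. TypeofDescriptor::registers() would yield std::array<Register, 1>
    // {ecx}, usable in static_asserts such as the registers.size() check in
    // DefaultRegisterArray() above.
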
diff --git a/deps/v8/src/codegen/ia32/interface-descriptors-ia32.cc b/deps/v8/src/codegen/ia32/interface-descriptors-ia32.cc
deleted file mode 100644
index fd76e01590..0000000000
--- a/deps/v8/src/codegen/ia32/interface-descriptors-ia32.cc
+++ /dev/null
@@ -1,318 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_IA32
-
-#include "src/codegen/interface-descriptors.h"
-
-#include "src/execution/frames.h"
-
-namespace v8 {
-namespace internal {
-
-const Register CallInterfaceDescriptor::ContextRegister() { return esi; }
-
-void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
- CallInterfaceDescriptorData* data, int register_parameter_count) {
- constexpr Register default_stub_registers[] = {eax, ecx, edx, edi};
- STATIC_ASSERT(arraysize(default_stub_registers) == kMaxBuiltinRegisterParams);
- CHECK_LE(static_cast<size_t>(register_parameter_count),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(register_parameter_count,
- default_stub_registers);
-}
-
-void RecordWriteDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- static const Register default_stub_registers[] = {ecx, edx, esi, edi,
- kReturnRegister0};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void DynamicCheckMapsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register default_stub_registers[] = {eax, ecx, edx, edi, esi};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void EphemeronKeyBarrierDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- static const Register default_stub_registers[] = {ecx, edx, esi, edi,
- kReturnRegister0};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-const Register LoadDescriptor::ReceiverRegister() { return edx; }
-const Register LoadDescriptor::NameRegister() { return ecx; }
-const Register LoadDescriptor::SlotRegister() { return eax; }
-
-const Register LoadWithVectorDescriptor::VectorRegister() { return no_reg; }
-
-const Register
-LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
- return edi;
-}
-
-const Register StoreDescriptor::ReceiverRegister() { return edx; }
-const Register StoreDescriptor::NameRegister() { return ecx; }
-const Register StoreDescriptor::ValueRegister() { return no_reg; }
-const Register StoreDescriptor::SlotRegister() { return no_reg; }
-
-const Register StoreWithVectorDescriptor::VectorRegister() { return no_reg; }
-
-const Register StoreTransitionDescriptor::SlotRegister() { return no_reg; }
-const Register StoreTransitionDescriptor::VectorRegister() { return no_reg; }
-const Register StoreTransitionDescriptor::MapRegister() { return edi; }
-
-const Register ApiGetterDescriptor::HolderRegister() { return ecx; }
-const Register ApiGetterDescriptor::CallbackRegister() { return eax; }
-
-const Register GrowArrayElementsDescriptor::ObjectRegister() { return eax; }
-const Register GrowArrayElementsDescriptor::KeyRegister() { return ecx; }
-
-const Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
- return esi;
-}
-const Register BaselineLeaveFrameDescriptor::WeightRegister() { return edi; }
-
-// static
-const Register TypeConversionDescriptor::ArgumentRegister() { return eax; }
-
-void TypeofDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ecx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // eax : number of arguments
- // edi : the target to call
- Register registers[] = {edi, eax};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // eax : number of arguments (on the stack, not including receiver)
- // edi : the target to call
- // ecx : arguments list length (untagged)
- // On the stack : arguments list (FixedArray)
- Register registers[] = {edi, eax, ecx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallForwardVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // eax : number of arguments
- // ecx : start index (to support rest parameters)
- // edi : the target to call
- Register registers[] = {edi, eax, ecx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallFunctionTemplateDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // edx : function template info
- // ecx : number of arguments (on the stack, not including receiver)
- Register registers[] = {edx, ecx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallWithSpreadDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // eax : number of arguments (on the stack, not including receiver)
- // edi : the target to call
- // ecx : the object to spread
- Register registers[] = {edi, eax, ecx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // edi : the target to call
- // edx : the arguments list
- Register registers[] = {edi, edx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // eax : number of arguments (on the stack, not including receiver)
- // edi : the target to call
- // edx : the new target
- // ecx : arguments list length (untagged)
- // On the stack : arguments list (FixedArray)
- Register registers[] = {edi, edx, eax, ecx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // eax : number of arguments
- // edx : the new target
- // ecx : start index (to support rest parameters)
- // edi : the target to call
- Register registers[] = {edi, edx, eax, ecx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructWithSpreadDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // eax : number of arguments (on the stack, not including receiver)
- // edi : the target to call
- // edx : the new target
- // ecx : the object to spread
- Register registers[] = {edi, edx, eax, ecx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // edi : the target to call
- // edx : the new target
- // ecx : the arguments list
- Register registers[] = {edi, edx, ecx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructStubDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // eax : number of arguments
- // edx : the new target
- // edi : the target to call
- // ecx : allocation site or undefined
- // TODO(jgruber): Remove the unused allocation site parameter.
- Register registers[] = {edi, edx, eax, ecx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void AbortDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {edx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CompareDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {edx, eax};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void Compare_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {edx, eax, ecx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void BinaryOpDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {edx, eax};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void BinaryOp_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {edx, eax, ecx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ApiCallbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- edx, // kApiFunctionAddress
- ecx, // kArgc
- eax, // kCallData
- edi, // kHolder
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterDispatchDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
- kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterPushArgsThenCallDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- eax, // argument count (not including receiver)
- ecx, // address of first argument
- edi // the target callable to be call
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- eax, // argument count (not including receiver)
- ecx, // address of first argument
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ResumeGeneratorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- eax, // the value to pass to the generator
- edx // the JSGeneratorObject to resume
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- eax, // loaded new FP
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- data->InitializePlatformSpecific(0, nullptr);
-}
-
-void WasmFloat32ToNumberDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // Work around using eax, whose register code is 0, and leads to the FP
- // parameter being passed via xmm0, which is not allocatable on ia32.
- Register registers[] = {ecx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void WasmFloat64ToNumberDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // Work around using eax, whose register code is 0, and leads to the FP
- // parameter being passed via xmm0, which is not allocatable on ia32.
- Register registers[] = {ecx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
index 9892eb9470..7c8af3fde0 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
@@ -19,7 +19,7 @@
#include "src/codegen/external-reference.h"
#include "src/codegen/ia32/assembler-ia32.h"
#include "src/codegen/ia32/register-ia32.h"
-#include "src/codegen/interface-descriptors.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/label.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/register.h"
@@ -294,7 +294,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
}
}
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
// Count all XMM registers except XMM0.
bytes += kDoubleSize * (XMMRegister::kNumRegisters - 1);
}
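
For concreteness, the arithmetic behind that save area, as a self-contained sketch (assuming ia32's eight XMM registers and an 8-byte kDoubleSize):

  constexpr int kDoubleSize = 8;       // bytes per double
  constexpr int kNumXMMRegisters = 8;  // xmm0..xmm7 on ia32
  // xmm0 is excluded as scratch, leaving seven registers to spill.
  constexpr int kFpSaveBytes = kDoubleSize * (kNumXMMRegisters - 1);
  static_assert(kFpSaveBytes == 56, "seven saved XMM registers");
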
@@ -316,7 +316,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
}
}
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
// Save all XMM registers except XMM0.
int delta = kDoubleSize * (XMMRegister::kNumRegisters - 1);
AllocateStackSpace(delta);
@@ -333,7 +333,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
int bytes = 0;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
// Restore all XMM registers except XMM0.
int delta = kDoubleSize * (XMMRegister::kNumRegisters - 1);
for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
@@ -365,7 +365,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
Label done;
// Skip barrier if writing a smi.
- if (smi_check == INLINE_SMI_CHECK) {
+ if (smi_check == SmiCheck::kInline) {
JumpIfSmi(value, &done);
}
@@ -374,7 +374,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
DCHECK(IsAligned(offset, kTaggedSize));
lea(dst, FieldOperand(object, offset));
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Label ok;
test_b(dst, Immediate(kTaggedSize - 1));
j(zero, &ok, Label::kNear);
@@ -383,13 +383,13 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
RecordWrite(object, dst, value, save_fp, remembered_set_action,
- OMIT_SMI_CHECK);
+ SmiCheck::kOmit);
bind(&done);
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
mov(dst, Immediate(bit_cast<int32_t>(kZapValue)));
}
@@ -511,13 +511,13 @@ void MacroAssembler::RecordWrite(Register object, Register address,
DCHECK(value != address);
AssertNotSmi(object);
- if ((remembered_set_action == OMIT_REMEMBERED_SET &&
+ if ((remembered_set_action == RememberedSetAction::kOmit &&
!FLAG_incremental_marking) ||
FLAG_disable_write_barriers) {
return;
}
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Label ok;
cmp(value, Operand(address, 0));
j(equal, &ok, Label::kNear);
@@ -529,7 +529,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
// catch stores of Smis and stores into young gen.
Label done;
- if (smi_check == INLINE_SMI_CHECK) {
+ if (smi_check == SmiCheck::kInline) {
// Skip barrier if writing a smi.
JumpIfSmi(value, &done, Label::kNear);
}
@@ -549,25 +549,12 @@ void MacroAssembler::RecordWrite(Register object, Register address,
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
mov(address, Immediate(bit_cast<int32_t>(kZapValue)));
mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
}
}
-void MacroAssembler::MaybeDropFrames() {
- // Check whether we need to drop frames to restart a function on the stack.
- Label dont_drop;
- ExternalReference restart_fp =
- ExternalReference::debug_restart_fp_address(isolate());
- mov(eax, ExternalReferenceAsOperand(restart_fp, eax));
- test(eax, eax);
- j(zero, &dont_drop, Label::kNear);
-
- Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET);
- bind(&dont_drop);
-}
-
void TurboAssembler::Cvtsi2ss(XMMRegister dst, Operand src) {
xorps(dst, dst);
cvtsi2ss(dst, src);
@@ -1029,14 +1016,14 @@ void MacroAssembler::CmpInstanceTypeRange(Register map, Register scratch,
}
void MacroAssembler::AssertSmi(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
test(object, Immediate(kSmiTagMask));
Check(equal, AbortReason::kOperandIsNotASmi);
}
}
void MacroAssembler::AssertConstructor(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
test(object, Immediate(kSmiTagMask));
Check(not_equal, AbortReason::kOperandIsASmiAndNotAConstructor);
Push(object);
@@ -1049,7 +1036,7 @@ void MacroAssembler::AssertConstructor(Register object) {
}
void MacroAssembler::AssertFunction(Register object, Register scratch) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
test(object, Immediate(kSmiTagMask));
Check(not_equal, AbortReason::kOperandIsASmiAndNotAFunction);
Push(object);
@@ -1062,7 +1049,7 @@ void MacroAssembler::AssertFunction(Register object, Register scratch) {
}
void MacroAssembler::AssertBoundFunction(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
test(object, Immediate(kSmiTagMask));
Check(not_equal, AbortReason::kOperandIsASmiAndNotABoundFunction);
Push(object);
@@ -1073,7 +1060,7 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
void MacroAssembler::AssertGeneratorObject(Register object) {
- if (!emit_debug_code()) return;
+ if (!FLAG_debug_code) return;
test(object, Immediate(kSmiTagMask));
Check(not_equal, AbortReason::kOperandIsASmiAndNotAGeneratorObject);
@@ -1105,7 +1092,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Label done_checking;
AssertNotSmi(object);
CompareRoot(object, scratch, RootIndex::kUndefinedValue);
@@ -1118,7 +1105,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
}
void MacroAssembler::AssertNotSmi(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
test(object, Immediate(kSmiTagMask));
Check(not_equal, AbortReason::kOperandIsASmi);
}
@@ -1147,7 +1134,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
}
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
- if (emit_debug_code() && !StackFrame::IsJavaScript(type)) {
+ if (FLAG_debug_code && !StackFrame::IsJavaScript(type)) {
cmp(Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset),
Immediate(StackFrame::TypeToMarker(type)));
Check(equal, AbortReason::kStackFrameTypesMustMatch);
@@ -1389,8 +1376,8 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
bool builtin_exit_frame) {
// Set the entry point and jump to the C entry runtime stub.
Move(kRuntimeCallFunctionRegister, Immediate(ext));
- Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
- kArgvOnStack, builtin_exit_frame);
+ Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
+ ArgvMode::kStack, builtin_exit_frame);
Jump(code, RelocInfo::CODE_TARGET);
}
@@ -1494,7 +1481,7 @@ void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch,
void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count,
- Label* done, InvokeFlag flag) {
+ Label* done, InvokeType type) {
if (expected_parameter_count != actual_parameter_count) {
DCHECK_EQ(actual_parameter_count, eax);
DCHECK_EQ(expected_parameter_count, ecx);
@@ -1531,9 +1518,9 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Operand(expected_parameter_count, times_system_pointer_size, 0));
AllocateStackSpace(scratch);
// Extra words are the receiver and the return address (if a jump).
- int extra_words = flag == CALL_FUNCTION ? 1 : 2;
+ int extra_words = type == InvokeType::kCall ? 1 : 2;
lea(num, Operand(eax, extra_words)); // Number of words to copy.
- Set(current, 0);
+ Move(current, 0);
// Fall-through to the loop body because there are non-zero words to copy.
bind(&copy);
mov(scratch, Operand(src, current, times_system_pointer_size, 0));
@@ -1610,9 +1597,9 @@ void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count,
Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
DCHECK_EQ(function, edi);
DCHECK_IMPLIES(new_target.is_valid(), new_target == edx);
DCHECK(expected_parameter_count == ecx || expected_parameter_count == eax);
@@ -1636,17 +1623,19 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
}
Label done;
- InvokePrologue(expected_parameter_count, actual_parameter_count, &done, flag);
+ InvokePrologue(expected_parameter_count, actual_parameter_count, &done, type);
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
mov(ecx, FieldOperand(function, JSFunction::kCodeOffset));
- if (flag == CALL_FUNCTION) {
- CallCodeObject(ecx);
- } else {
- DCHECK(flag == JUMP_FUNCTION);
- JumpCodeObject(ecx);
+ switch (type) {
+ case InvokeType::kCall:
+ CallCodeObject(ecx);
+ break;
+ case InvokeType::kJump:
+ JumpCodeObject(ecx);
+ break;
}
jmp(&done, Label::kNear);
@@ -1661,9 +1650,9 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
void MacroAssembler::InvokeFunction(Register fun, Register new_target,
Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(type == InvokeType::kJump || has_frame());
DCHECK(fun == edi);
mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
@@ -1671,7 +1660,7 @@ void MacroAssembler::InvokeFunction(Register fun, Register new_target,
movzx_w(ecx,
FieldOperand(ecx, SharedFunctionInfo::kFormalParameterCountOffset));
- InvokeFunctionCode(edi, new_target, ecx, actual_parameter_count, flag);
+ InvokeFunctionCode(edi, new_target, ecx, actual_parameter_count, type);
}
void MacroAssembler::LoadGlobalProxy(Register dst) {
@@ -1852,34 +1841,6 @@ void TurboAssembler::Pshufb(XMMRegister dst, XMMRegister src, Operand mask) {
pshufb(dst, mask);
}
-void TurboAssembler::Pblendw(XMMRegister dst, Operand src, uint8_t imm8) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpblendw(dst, dst, src, imm8);
- return;
- }
- if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatureScope sse_scope(this, SSE4_1);
- pblendw(dst, src, imm8);
- return;
- }
- FATAL("no AVX or SSE4.1 support");
-}
-
-void TurboAssembler::Palignr(XMMRegister dst, Operand src, uint8_t imm8) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpalignr(dst, dst, src, imm8);
- return;
- }
- if (CpuFeatures::IsSupported(SSSE3)) {
- CpuFeatureScope sse_scope(this, SSSE3);
- palignr(dst, src, imm8);
- return;
- }
- FATAL("no AVX or SSSE3 support");
-}
-
void TurboAssembler::Pextrd(Register dst, XMMRegister src, uint8_t imm8) {
if (imm8 == 0) {
Movd(dst, src);
@@ -1994,19 +1955,6 @@ void TurboAssembler::Vbroadcastss(XMMRegister dst, Operand src) {
shufps(dst, dst, static_cast<byte>(0));
}
-void TurboAssembler::Shufps(XMMRegister dst, XMMRegister src1, XMMRegister src2,
- uint8_t imm8) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vshufps(dst, src1, src2, imm8);
- } else {
- if (dst != src1) {
- movaps(dst, src1);
- }
- shufps(dst, src2, imm8);
- }
-}
-
void TurboAssembler::Lzcnt(Register dst, Operand src) {
if (CpuFeatures::IsSupported(LZCNT)) {
CpuFeatureScope scope(this, LZCNT);
@@ -2079,11 +2027,11 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
}
void TurboAssembler::Assert(Condition cc, AbortReason reason) {
- if (emit_debug_code()) Check(cc, reason);
+ if (FLAG_debug_code) Check(cc, reason);
}
void TurboAssembler::AssertUnreachable(AbortReason reason) {
- if (emit_debug_code()) Abort(reason);
+ if (FLAG_debug_code) Abort(reason);
}
void TurboAssembler::Check(Condition cc, AbortReason reason) {
@@ -2109,11 +2057,11 @@ void TurboAssembler::CheckStackAlignment() {
}
void TurboAssembler::Abort(AbortReason reason) {
-#ifdef DEBUG
- const char* msg = GetAbortReason(reason);
- RecordComment("Abort message: ");
- RecordComment(msg);
-#endif
+ if (FLAG_code_comments) {
+ const char* msg = GetAbortReason(reason);
+ RecordComment("Abort message: ");
+ RecordComment(msg);
+ }
// Avoid emitting call to builtin if requested.
if (trap_on_abort()) {
@@ -2171,7 +2119,7 @@ void TurboAssembler::CallCFunction(Register function, int num_arguments) {
DCHECK_LE(num_arguments, kMaxCParameters);
DCHECK(has_frame());
// Check stack alignment.
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
CheckStackAlignment();
}
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
index 4c5c3ade02..a21a355568 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
@@ -44,9 +44,6 @@ class StatsCounter;
// distinguish memory operands from other operands on ia32.
using MemOperand = Operand;
-enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
-enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
-
// TODO(victorgomes): Move definition to macro-assembler.h, once all other
// platforms are updated.
enum class StackLimitKind { kInterruptStackLimit, kRealStackLimit };
@@ -122,6 +119,13 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
void CheckStackAlignment();
// Move a constant into a destination using the most efficient encoding.
+ void Move(Register dst, int32_t x) {
+ if (x == 0) {
+ xor_(dst, dst);
+ } else {
+ mov(dst, Immediate(x));
+ }
+ }
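
This new overload replaces MacroAssembler::Set (removed later in this patch) and keeps its trick of zeroing via xor; a usage sketch, with the byte counts as assumptions about ia32 encodings:

  void ZeroVersusImmediate(TurboAssembler* tasm) {
    tasm->Move(eax, 0);   // emits xor eax, eax: 2 bytes, no immediate
    tasm->Move(eax, 42);  // emits mov eax, 42: 5 bytes, 32-bit immediate
  }
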
void Move(Register dst, const Immediate& src);
void Move(Register dst, Smi src) { Move(dst, Immediate(src)); }
void Move(Register dst, Handle<HeapObject> src);
@@ -301,152 +305,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
// may be bigger than 2^16 - 1. Requires a scratch register.
void Ret(int bytes_dropped, Register scratch);
-// Only use these macros when non-destructive source of AVX version is not
-// needed.
-#define AVX_OP3_WITH_TYPE(macro_name, name, dst_type, src_type) \
- void macro_name(dst_type dst, src_type src) { \
- if (CpuFeatures::IsSupported(AVX)) { \
- CpuFeatureScope scope(this, AVX); \
- v##name(dst, dst, src); \
- } else { \
- name(dst, src); \
- } \
- }
-#define AVX_OP3_XO(macro_name, name) \
- AVX_OP3_WITH_TYPE(macro_name, name, XMMRegister, XMMRegister) \
- AVX_OP3_WITH_TYPE(macro_name, name, XMMRegister, Operand)
-
- AVX_OP3_XO(Packsswb, packsswb)
- AVX_OP3_XO(Packuswb, packuswb)
- AVX_OP3_XO(Paddusb, paddusb)
- AVX_OP3_XO(Pand, pand)
- AVX_OP3_XO(Pcmpeqb, pcmpeqb)
- AVX_OP3_XO(Pcmpeqw, pcmpeqw)
- AVX_OP3_XO(Pcmpeqd, pcmpeqd)
- AVX_OP3_XO(Por, por)
- AVX_OP3_XO(Psubb, psubb)
- AVX_OP3_XO(Psubw, psubw)
- AVX_OP3_XO(Psubd, psubd)
- AVX_OP3_XO(Psubq, psubq)
- AVX_OP3_XO(Punpcklbw, punpcklbw)
- AVX_OP3_XO(Punpckhbw, punpckhbw)
- AVX_OP3_XO(Punpckldq, punpckldq)
- AVX_OP3_XO(Punpcklqdq, punpcklqdq)
- AVX_OP3_XO(Pxor, pxor)
- AVX_OP3_XO(Andps, andps)
- AVX_OP3_XO(Andpd, andpd)
- AVX_OP3_XO(Xorps, xorps)
- AVX_OP3_XO(Xorpd, xorpd)
- AVX_OP3_XO(Sqrtss, sqrtss)
- AVX_OP3_XO(Sqrtsd, sqrtsd)
- AVX_OP3_XO(Orps, orps)
- AVX_OP3_XO(Orpd, orpd)
- AVX_OP3_XO(Andnpd, andnpd)
- AVX_OP3_WITH_TYPE(Movhlps, movhlps, XMMRegister, XMMRegister)
- AVX_OP3_WITH_TYPE(Psraw, psraw, XMMRegister, uint8_t)
- AVX_OP3_WITH_TYPE(Psrlq, psrlq, XMMRegister, uint8_t)
-
-#undef AVX_OP3_XO
-#undef AVX_OP3_WITH_TYPE
-
-// Same as AVX_OP3_WITH_TYPE but supports a CpuFeatureScope
-#define AVX_OP2_WITH_TYPE_SCOPE(macro_name, name, dst_type, src_type, \
- sse_scope) \
- void macro_name(dst_type dst, src_type src) { \
- if (CpuFeatures::IsSupported(AVX)) { \
- CpuFeatureScope scope(this, AVX); \
- v##name(dst, dst, src); \
- } else if (CpuFeatures::IsSupported(sse_scope)) { \
- CpuFeatureScope scope(this, sse_scope); \
- name(dst, src); \
- } \
- }
-#define AVX_OP2_XO(macro_name, name, sse_scope) \
- AVX_OP2_WITH_TYPE_SCOPE(macro_name, name, XMMRegister, XMMRegister, \
- sse_scope) \
- AVX_OP2_WITH_TYPE_SCOPE(macro_name, name, XMMRegister, Operand, sse_scope)
- AVX_OP2_XO(Psignb, psignb, SSSE3)
- AVX_OP2_XO(Psignw, psignw, SSSE3)
- AVX_OP2_XO(Psignd, psignd, SSSE3)
- AVX_OP2_XO(Pcmpeqq, pcmpeqq, SSE4_1)
-#undef AVX_OP2_XO
-#undef AVX_OP2_WITH_TYPE_SCOPE
-
-// Only use this macro when dst and src1 are the same in the SSE case.
-#define AVX_PACKED_OP3_WITH_TYPE(macro_name, name, dst_type, src_type) \
- void macro_name(dst_type dst, dst_type src1, src_type src2) { \
- if (CpuFeatures::IsSupported(AVX)) { \
- CpuFeatureScope scope(this, AVX); \
- v##name(dst, src1, src2); \
- } else { \
- DCHECK_EQ(dst, src1); \
- name(dst, src2); \
- } \
- }
-#define AVX_PACKED_OP3(macro_name, name) \
- AVX_PACKED_OP3_WITH_TYPE(macro_name, name, XMMRegister, XMMRegister) \
- AVX_PACKED_OP3_WITH_TYPE(macro_name, name, XMMRegister, Operand)
-
- AVX_PACKED_OP3(Unpcklps, unpcklps)
- AVX_PACKED_OP3(Andnps, andnps)
- AVX_PACKED_OP3(Addps, addps)
- AVX_PACKED_OP3(Addpd, addpd)
- AVX_PACKED_OP3(Subps, subps)
- AVX_PACKED_OP3(Subpd, subpd)
- AVX_PACKED_OP3(Mulps, mulps)
- AVX_PACKED_OP3(Mulpd, mulpd)
- AVX_PACKED_OP3(Divps, divps)
- AVX_PACKED_OP3(Divpd, divpd)
- AVX_PACKED_OP3(Cmpeqpd, cmpeqpd)
- AVX_PACKED_OP3(Cmpneqpd, cmpneqpd)
- AVX_PACKED_OP3(Cmpltpd, cmpltpd)
- AVX_PACKED_OP3(Cmpleps, cmpleps)
- AVX_PACKED_OP3(Cmplepd, cmplepd)
- AVX_PACKED_OP3(Minps, minps)
- AVX_PACKED_OP3(Minpd, minpd)
- AVX_PACKED_OP3(Maxps, maxps)
- AVX_PACKED_OP3(Maxpd, maxpd)
- AVX_PACKED_OP3(Cmpunordps, cmpunordps)
- AVX_PACKED_OP3(Cmpunordpd, cmpunordpd)
- AVX_PACKED_OP3(Psllw, psllw)
- AVX_PACKED_OP3(Pslld, pslld)
- AVX_PACKED_OP3(Psllq, psllq)
- AVX_PACKED_OP3(Psrlw, psrlw)
- AVX_PACKED_OP3(Psrld, psrld)
- AVX_PACKED_OP3(Psrlq, psrlq)
- AVX_PACKED_OP3(Psraw, psraw)
- AVX_PACKED_OP3(Psrad, psrad)
- AVX_PACKED_OP3(Paddd, paddd)
- AVX_PACKED_OP3(Paddq, paddq)
- AVX_PACKED_OP3(Psubd, psubd)
- AVX_PACKED_OP3(Psubq, psubq)
- AVX_PACKED_OP3(Pmuludq, pmuludq)
- AVX_PACKED_OP3(Pavgb, pavgb)
- AVX_PACKED_OP3(Pavgw, pavgw)
- AVX_PACKED_OP3(Pand, pand)
- AVX_PACKED_OP3(Pminub, pminub)
- AVX_PACKED_OP3(Pmaxub, pmaxub)
- AVX_PACKED_OP3(Paddusb, paddusb)
- AVX_PACKED_OP3(Psubusb, psubusb)
- AVX_PACKED_OP3(Pcmpgtb, pcmpgtb)
- AVX_PACKED_OP3(Pcmpeqb, pcmpeqb)
- AVX_PACKED_OP3(Paddb, paddb)
- AVX_PACKED_OP3(Paddsb, paddsb)
- AVX_PACKED_OP3(Psubb, psubb)
- AVX_PACKED_OP3(Psubsb, psubsb)
-
-#undef AVX_PACKED_OP3
-
- AVX_PACKED_OP3_WITH_TYPE(Psllw, psllw, XMMRegister, uint8_t)
- AVX_PACKED_OP3_WITH_TYPE(Pslld, pslld, XMMRegister, uint8_t)
- AVX_PACKED_OP3_WITH_TYPE(Psllq, psllq, XMMRegister, uint8_t)
- AVX_PACKED_OP3_WITH_TYPE(Psrlw, psrlw, XMMRegister, uint8_t)
- AVX_PACKED_OP3_WITH_TYPE(Psrld, psrld, XMMRegister, uint8_t)
- AVX_PACKED_OP3_WITH_TYPE(Psrlq, psrlq, XMMRegister, uint8_t)
- AVX_PACKED_OP3_WITH_TYPE(Psraw, psraw, XMMRegister, uint8_t)
- AVX_PACKED_OP3_WITH_TYPE(Psrad, psrad, XMMRegister, uint8_t)
-
-#undef AVX_PACKED_OP3_WITH_TYPE
+ // Defined here because some callers take a pointer to member functions.
+ AVX_OP(Pcmpeqb, pcmpeqb)
+ AVX_OP(Pcmpeqw, pcmpeqw)
+ AVX_OP(Pcmpeqd, pcmpeqd)
+ AVX_OP_SSE4_1(Pcmpeqq, pcmpeqq)
// Macro for instructions that have 2 operands for AVX version and 1 operand for
// SSE version. Will move src1 to dst if dst != src1.
@@ -468,35 +331,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
AVX_OP3_WITH_MOVE(Pmaddwd, pmaddwd, XMMRegister, Operand)
#undef AVX_OP3_WITH_MOVE
-#define AVX_OP3_WITH_TYPE_SCOPE(macro_name, name, dst_type, src_type, \
- sse_scope) \
- void macro_name(dst_type dst, dst_type src1, src_type src2) { \
- if (CpuFeatures::IsSupported(AVX)) { \
- CpuFeatureScope scope(this, AVX); \
- v##name(dst, src1, src2); \
- return; \
- } \
- if (CpuFeatures::IsSupported(sse_scope)) { \
- CpuFeatureScope scope(this, sse_scope); \
- DCHECK_EQ(dst, src1); \
- name(dst, src2); \
- return; \
- } \
- UNREACHABLE(); \
- }
-#define AVX_OP3_XO_SSE4(macro_name, name) \
- AVX_OP3_WITH_TYPE_SCOPE(macro_name, name, XMMRegister, XMMRegister, SSE4_1) \
- AVX_OP3_WITH_TYPE_SCOPE(macro_name, name, XMMRegister, Operand, SSE4_1)
-
- AVX_OP3_WITH_TYPE_SCOPE(Haddps, haddps, XMMRegister, Operand, SSE3)
- AVX_OP3_XO_SSE4(Pmaxsd, pmaxsd)
- AVX_OP3_XO_SSE4(Pminsb, pminsb)
- AVX_OP3_XO_SSE4(Pmaxsb, pmaxsb)
- AVX_OP3_XO_SSE4(Pcmpeqq, pcmpeqq)
-
-#undef AVX_OP3_XO_SSE4
-#undef AVX_OP3_WITH_TYPE_SCOPE
-
// TODO(zhin): Remove after moving more definitions into SharedTurboAssembler.
void Movlps(Operand dst, XMMRegister src) {
SharedTurboAssembler::Movlps(dst, src);
@@ -513,16 +347,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
}
void Pshufb(XMMRegister dst, XMMRegister src, Operand mask);
- void Pblendw(XMMRegister dst, XMMRegister src, uint8_t imm8) {
- Pblendw(dst, Operand(src), imm8);
- }
- void Pblendw(XMMRegister dst, Operand src, uint8_t imm8);
-
- void Palignr(XMMRegister dst, XMMRegister src, uint8_t imm8) {
- Palignr(dst, Operand(src), imm8);
- }
- void Palignr(XMMRegister dst, Operand src, uint8_t imm8);
-
void Pextrd(Register dst, XMMRegister src, uint8_t imm8);
void Pinsrb(XMMRegister dst, Register src, int8_t imm8) {
Pinsrb(dst, Operand(src), imm8);
@@ -544,10 +368,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
void Pinsrw(XMMRegister dst, XMMRegister src1, Operand src2, int8_t imm8);
void Vbroadcastss(XMMRegister dst, Operand src);
- // Shufps that will mov src1 into dst if AVX is not supported.
- void Shufps(XMMRegister dst, XMMRegister src1, XMMRegister src2,
- uint8_t imm8);
-
// Expression support
// cvtsi2sd instruction only writes to the low 64-bit of dst register, which
// hinders register renaming and makes dependence chains longer. So we use
@@ -680,15 +500,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
public:
using TurboAssembler::TurboAssembler;
- // Load a register with a long value as efficiently as possible.
- void Set(Register dst, int32_t x) {
- if (x == 0) {
- xor_(dst, dst);
- } else {
- mov(dst, Immediate(x));
- }
- }
-
void PushRoot(RootIndex index);
// Compare the object in a register to a value and jump if they are equal.
@@ -722,8 +533,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void RecordWriteField(
Register object, int offset, Register value, Register scratch,
SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
// For page containing |object| mark region covering |address|
// dirty. |object| is the object being stored into, |value| is the
@@ -732,11 +543,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// write barrier if the value is a smi.
void RecordWrite(
Register object, Register address, Register value, SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
-
- // Frame restart support
- void MaybeDropFrames();
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
// Enter specific kind of exit frame. Expects the number of
// arguments in register eax and sets up the number of arguments in
@@ -768,7 +576,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count,
- Register actual_parameter_count, InvokeFlag flag);
+ Register actual_parameter_count, InvokeType type);
// On function call, call into the debugger.
// This may clobber ecx.
@@ -779,7 +587,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function, Register new_target,
- Register actual_parameter_count, InvokeFlag flag);
+ Register actual_parameter_count, InvokeType type);
// Compare object type for heap object.
// Incoming register is heap_object and outgoing register is map.
@@ -865,18 +673,18 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Call a runtime routine.
void CallRuntime(const Runtime::Function* f, int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore);
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, save_doubles);
}
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid, int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
}
@@ -921,7 +729,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Helper functions for generating invokes.
void InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count, Label* done,
- InvokeFlag flag);
+ InvokeType type);
void EnterExitFramePrologue(StackFrame::Type frame_type, Register scratch);
void EnterExitFrameEpilogue(int argc, bool save_doubles);
diff --git a/deps/v8/src/codegen/interface-descriptors-inl.h b/deps/v8/src/codegen/interface-descriptors-inl.h
new file mode 100644
index 0000000000..273e9d3e8e
--- /dev/null
+++ b/deps/v8/src/codegen/interface-descriptors-inl.h
@@ -0,0 +1,484 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_INTERFACE_DESCRIPTORS_INL_H_
+#define V8_CODEGEN_INTERFACE_DESCRIPTORS_INL_H_
+
+#include <utility>
+
+#include "src/base/logging.h"
+#include "src/codegen/interface-descriptors.h"
+#include "src/codegen/register-arch.h"
+
+#if V8_TARGET_ARCH_X64
+#include "src/codegen/x64/interface-descriptors-x64-inl.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "src/codegen/arm64/interface-descriptors-arm64-inl.h"
+#elif V8_TARGET_ARCH_IA32
+#include "src/codegen/ia32/interface-descriptors-ia32-inl.h"
+#elif V8_TARGET_ARCH_ARM
+#include "src/codegen/arm/interface-descriptors-arm-inl.h"
+#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
+#include "src/codegen/ppc/interface-descriptors-ppc-inl.h"
+#elif V8_TARGET_ARCH_S390
+#include "src/codegen/s390/interface-descriptors-s390-inl.h"
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/codegen/mips64/interface-descriptors-mips64-inl.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "src/codegen/mips/interface-descriptors-mips-inl.h"
+#elif V8_TARGET_ARCH_RISCV64
+#include "src/codegen/riscv64/interface-descriptors-riscv64-inl.h"
+#else
+#error Unsupported target architecture.
+#endif
+
+namespace v8 {
+namespace internal {
+
+// static
+constexpr std::array<Register, kJSBuiltinRegisterParams>
+CallInterfaceDescriptor::DefaultJSRegisterArray() {
+ return RegisterArray(
+ kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
+ kJavaScriptCallArgCountRegister, kJavaScriptCallExtraArg1Register);
+}
+
+// static
+template <typename DerivedDescriptor>
+constexpr auto StaticCallInterfaceDescriptor<DerivedDescriptor>::registers() {
+ return CallInterfaceDescriptor::DefaultRegisterArray();
+}
+
+// static
+template <typename DerivedDescriptor>
+constexpr auto StaticJSCallInterfaceDescriptor<DerivedDescriptor>::registers() {
+ return CallInterfaceDescriptor::DefaultJSRegisterArray();
+}
+
+template <typename DerivedDescriptor>
+void StaticCallInterfaceDescriptor<DerivedDescriptor>::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // Static local copy of the registers array, for platform-specific
+ // initialization.
+ static auto registers = DerivedDescriptor::registers();
+
+ // The passed pointer should be a modifiable pointer to our own data.
+ DCHECK_EQ(data, this->data());
+ DCHECK(!data->IsInitialized());
+
+ if (DerivedDescriptor::kRestrictAllocatableRegisters) {
+ data->RestrictAllocatableRegisters(registers.data(), registers.size());
+ }
+
+ data->InitializeRegisters(
+ DerivedDescriptor::flags(), DerivedDescriptor::kReturnCount,
+ DerivedDescriptor::GetParameterCount(),
+ DerivedDescriptor::kStackArgumentOrder,
+ DerivedDescriptor::GetRegisterParameterCount(), registers.data());
+
+ // InitializeTypes is customizable by the DerivedDescriptor subclass.
+ DerivedDescriptor::InitializeTypes(data);
+
+ DCHECK(data->IsInitialized());
+ DCHECK(this->CheckFloatingPointParameters(data));
+}
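
This templated Initialize() is what retires the per-platform InitializePlatformSpecific() overrides deleted elsewhere in this patch: a subclass now only supplies a registers() array and the base template wires up the data. A hypothetical descriptor, names illustrative (a real one would also declare its parameters via the DEFINE_PARAMETERS family of macros):

  class MyExampleDescriptor
      : public StaticCallInterfaceDescriptor<MyExampleDescriptor> {
   public:
    static constexpr auto registers() {
      return RegisterArray(eax, ecx);  // first two parameters in registers
    }
  };
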
+
+// static
+template <typename DerivedDescriptor>
+constexpr int
+StaticCallInterfaceDescriptor<DerivedDescriptor>::GetReturnCount() {
+ static_assert(
+ DerivedDescriptor::kReturnCount >= 0,
+ "DerivedDescriptor subclass should override return count with a "
+ "non-negative value");
+
+ return DerivedDescriptor::kReturnCount;
+}
+
+// static
+template <typename DerivedDescriptor>
+constexpr int
+StaticCallInterfaceDescriptor<DerivedDescriptor>::GetParameterCount() {
+ static_assert(
+ DerivedDescriptor::kParameterCount >= 0,
+ "DerivedDescriptor subclass should override parameter count with a "
+ "non-negative value");
+
+ return DerivedDescriptor::kParameterCount;
+}
+
+namespace detail {
+
+// Helper trait for statically checking if a type is a std::array<Register,N>.
+template <typename T>
+struct IsRegisterArray : public std::false_type {};
+template <size_t N>
+struct IsRegisterArray<std::array<Register, N>> : public std::true_type {};
+template <>
+struct IsRegisterArray<EmptyRegisterArray> : public std::true_type {};
+
+// Helper for finding the index of the first invalid register in a register
+// array.
+template <size_t N, size_t Index>
+struct FirstInvalidRegisterHelper {
+ static constexpr int Call(std::array<Register, N> regs) {
+ if (!std::get<Index>(regs).is_valid()) {
+ // All registers after the first invalid one have to also be invalid (this
+ // DCHECK will be checked recursively).
+ DCHECK_EQ((FirstInvalidRegisterHelper<N, Index + 1>::Call(regs)),
+ Index + 1);
+ return Index;
+ }
+ return FirstInvalidRegisterHelper<N, Index + 1>::Call(regs);
+ }
+};
+template <size_t N>
+struct FirstInvalidRegisterHelper<N, N> {
+ static constexpr int Call(std::array<Register, N> regs) { return N; }
+};
+template <size_t N, size_t Index = 0>
+constexpr size_t FirstInvalidRegister(std::array<Register, N> regs) {
+ return FirstInvalidRegisterHelper<N, 0>::Call(regs);
+}
+constexpr size_t FirstInvalidRegister(EmptyRegisterArray regs) { return 0; }
+
+} // namespace detail
+
+// static
+template <typename DerivedDescriptor>
+constexpr int
+StaticCallInterfaceDescriptor<DerivedDescriptor>::GetRegisterParameterCount() {
+ static_assert(
+ detail::IsRegisterArray<decltype(DerivedDescriptor::registers())>::value,
+ "DerivedDescriptor subclass should define a registers() function "
+ "returning a std::array<Register>");
+
+ // The register parameter count is the minimum of:
+ // 1. The number of named parameters in the descriptor, and
+ // 2. The number of valid registers the descriptor provides with its
+ // registers() function, e.g. for {rax, rbx, no_reg} this number is 2.
+ // 3. The maximum number of register parameters allowed (
+ // kMaxBuiltinRegisterParams for most builtins,
+ // kMaxTFSBuiltinRegisterParams for TFS builtins, customizable by the
+ // subclass otherwise).
+ return std::min<int>({DerivedDescriptor::GetParameterCount(),
+ static_cast<int>(detail::FirstInvalidRegister(
+ DerivedDescriptor::registers())),
+ DerivedDescriptor::kMaxRegisterParams});
+}
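
A worked instance of that minimum, with hypothetical counts (five named parameters, a registers() array whose third entry is no_reg, a cap of four):

  #include <algorithm>

  constexpr int RegisterParams(int named, int first_invalid, int cap) {
    return std::min({named, first_invalid, cap});
  }
  static_assert(RegisterParams(5, 2, 4) == 2, "two land in registers");
  // GetStackParameterCount() then yields the remaining 5 - 2 = 3.
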
+
+// static
+template <typename DerivedDescriptor>
+constexpr int
+StaticCallInterfaceDescriptor<DerivedDescriptor>::GetStackParameterCount() {
+ return DerivedDescriptor::GetParameterCount() -
+ DerivedDescriptor::GetRegisterParameterCount();
+}
+
+// static
+constexpr Register FastNewObjectDescriptor::TargetRegister() {
+ return kJSFunctionRegister;
+}
+
+// static
+constexpr Register FastNewObjectDescriptor::NewTargetRegister() {
+ return kJavaScriptCallNewTargetRegister;
+}
+
+// static
+constexpr Register ApiGetterDescriptor::ReceiverRegister() {
+ return LoadDescriptor::ReceiverRegister();
+}
+
+// static
+constexpr Register LoadGlobalNoFeedbackDescriptor::ICKindRegister() {
+ return LoadDescriptor::SlotRegister();
+}
+
+// static
+constexpr Register LoadNoFeedbackDescriptor::ICKindRegister() {
+ return LoadGlobalNoFeedbackDescriptor::ICKindRegister();
+}
+
+#if V8_TARGET_ARCH_IA32
+// On ia32, LoadWithVectorDescriptor passes vector on the stack and thus we
+// need to choose a new register here.
+// static
+constexpr Register LoadGlobalWithVectorDescriptor::VectorRegister() {
+ STATIC_ASSERT(!LoadWithVectorDescriptor::VectorRegister().is_valid());
+ return LoadDescriptor::ReceiverRegister();
+}
+#else
+// static
+constexpr Register LoadGlobalWithVectorDescriptor::VectorRegister() {
+ return LoadWithVectorDescriptor::VectorRegister();
+}
+#endif
+
+// static
+constexpr auto LoadDescriptor::registers() {
+ return RegisterArray(ReceiverRegister(), NameRegister(), SlotRegister());
+}
+
+// static
+constexpr auto LoadBaselineDescriptor::registers() {
+ return LoadDescriptor::registers();
+}
+
+// static
+constexpr auto LoadGlobalDescriptor::registers() {
+ return RegisterArray(LoadDescriptor::NameRegister(),
+ LoadDescriptor::SlotRegister());
+}
+
+// static
+constexpr auto LoadGlobalBaselineDescriptor::registers() {
+ return LoadGlobalDescriptor::registers();
+}
+
+// static
+constexpr auto StoreDescriptor::registers() {
+ return RegisterArray(ReceiverRegister(), NameRegister(), ValueRegister(),
+ SlotRegister());
+}
+
+// static
+constexpr auto StoreBaselineDescriptor::registers() {
+ return StoreDescriptor::registers();
+}
+
+// static
+constexpr auto StoreGlobalDescriptor::registers() {
+ return RegisterArray(StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister(),
+ StoreDescriptor::SlotRegister());
+}
+
+// static
+constexpr auto StoreGlobalBaselineDescriptor::registers() {
+ return StoreGlobalDescriptor::registers();
+}
+
+// static
+constexpr auto LoadWithReceiverBaselineDescriptor::registers() {
+ return RegisterArray(
+ LoadDescriptor::ReceiverRegister(),
+ LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister(),
+ LoadDescriptor::NameRegister(), LoadDescriptor::SlotRegister());
+}
+
+// static
+constexpr auto BaselineOutOfLinePrologueDescriptor::registers() {
+ // TODO(v8:11421): Implement on other platforms.
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_IA32 || \
+ V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || \
+ V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_RISCV64
+ return RegisterArray(
+ kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
+ kJavaScriptCallExtraArg1Register, kJavaScriptCallNewTargetRegister,
+ kInterpreterBytecodeArrayRegister);
+#else
+ return DefaultRegisterArray();
+#endif
+}
+
+// static
+constexpr auto BaselineLeaveFrameDescriptor::registers() {
+ // TODO(v8:11421): Implement on other platforms.
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
+ V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || \
+ V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_RISCV64
+ return RegisterArray(ParamsSizeRegister(), WeightRegister());
+#else
+ return DefaultRegisterArray();
+#endif
+}
+
+// static
+constexpr auto VoidDescriptor::registers() { return RegisterArray(); }
+
+// static
+constexpr auto AllocateDescriptor::registers() {
+ return RegisterArray(kAllocateSizeRegister);
+}
+
+// static
+constexpr auto CEntry1ArgvOnStackDescriptor::registers() {
+ return RegisterArray(kRuntimeCallArgCountRegister,
+ kRuntimeCallFunctionRegister);
+}
+
+// static
+constexpr auto InterpreterCEntry1Descriptor::registers() {
+ return RegisterArray(kRuntimeCallArgCountRegister, kRuntimeCallArgvRegister,
+ kRuntimeCallFunctionRegister);
+}
+
+// static
+constexpr auto InterpreterCEntry2Descriptor::registers() {
+ return RegisterArray(kRuntimeCallArgCountRegister, kRuntimeCallArgvRegister,
+ kRuntimeCallFunctionRegister);
+}
+
+// static
+constexpr auto FastNewObjectDescriptor::registers() {
+ return RegisterArray(TargetRegister(), NewTargetRegister());
+}
+
+// static
+constexpr auto TailCallOptimizedCodeSlotDescriptor::registers() {
+ return RegisterArray(kJavaScriptCallCodeStartRegister);
+}
+
+// static
+constexpr auto LoadNoFeedbackDescriptor::registers() {
+ return RegisterArray(LoadDescriptor::ReceiverRegister(),
+ LoadDescriptor::NameRegister(), ICKindRegister());
+}
+
+// static
+constexpr auto LoadGlobalNoFeedbackDescriptor::registers() {
+ return RegisterArray(LoadDescriptor::NameRegister(), ICKindRegister());
+}
+
+// static
+constexpr auto LoadGlobalWithVectorDescriptor::registers() {
+ return RegisterArray(LoadDescriptor::NameRegister(),
+ LoadDescriptor::SlotRegister(), VectorRegister());
+}
+
+// static
+constexpr auto LoadWithReceiverAndVectorDescriptor::registers() {
+ return RegisterArray(
+ LoadDescriptor::ReceiverRegister(), LookupStartObjectRegister(),
+ LoadDescriptor::NameRegister(), LoadDescriptor::SlotRegister(),
+ LoadWithVectorDescriptor::VectorRegister());
+}
+
+// static
+constexpr auto StoreGlobalWithVectorDescriptor::registers() {
+ return RegisterArray(StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister(),
+ StoreDescriptor::SlotRegister(),
+ StoreWithVectorDescriptor::VectorRegister());
+}
+
+// static
+constexpr auto StoreTransitionDescriptor::registers() {
+ return RegisterArray(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister(), MapRegister(),
+ StoreDescriptor::ValueRegister(),
+ StoreDescriptor::SlotRegister(),
+ StoreWithVectorDescriptor::VectorRegister());
+}
+
+// static
+constexpr auto TypeConversionDescriptor::registers() {
+ return RegisterArray(ArgumentRegister());
+}
+
+// static
+constexpr auto TypeConversionNoContextDescriptor::registers() {
+ return RegisterArray(TypeConversionDescriptor::ArgumentRegister());
+}
+
+// static
+constexpr auto SingleParameterOnStackDescriptor::registers() {
+ return RegisterArray();
+}
+
+// static
+constexpr auto AsyncFunctionStackParameterDescriptor::registers() {
+ return RegisterArray();
+}
+
+// static
+constexpr auto GetIteratorStackParameterDescriptor::registers() {
+ return RegisterArray();
+}
+
+// static
+constexpr auto LoadWithVectorDescriptor::registers() {
+ return RegisterArray(LoadDescriptor::ReceiverRegister(),
+ LoadDescriptor::NameRegister(),
+ LoadDescriptor::SlotRegister(), VectorRegister());
+}
+
+// static
+constexpr auto StoreWithVectorDescriptor::registers() {
+ return RegisterArray(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister(),
+ StoreDescriptor::SlotRegister(), VectorRegister());
+}
+
+// static
+constexpr auto ApiGetterDescriptor::registers() {
+ return RegisterArray(ReceiverRegister(), HolderRegister(),
+ CallbackRegister());
+}
+
+// static
+constexpr auto ContextOnlyDescriptor::registers() { return RegisterArray(); }
+
+// static
+constexpr auto NoContextDescriptor::registers() { return RegisterArray(); }
+
+// static
+constexpr auto GrowArrayElementsDescriptor::registers() {
+ return RegisterArray(ObjectRegister(), KeyRegister());
+}
+
+// static
+constexpr auto ArrayNArgumentsConstructorDescriptor::registers() {
+ // Keep the arguments on the same registers as they were in
+ // ArrayConstructorDescriptor to avoid unnecessary register moves.
+ // kFunction, kAllocationSite, kActualArgumentsCount
+ return RegisterArray(kJavaScriptCallTargetRegister,
+ kJavaScriptCallExtraArg1Register,
+ kJavaScriptCallArgCountRegister);
+}
+
+// static
+constexpr auto ArrayNoArgumentConstructorDescriptor::registers() {
+ // This descriptor must use the same set of registers as the
+ // ArrayNArgumentsConstructorDescriptor.
+ return ArrayNArgumentsConstructorDescriptor::registers();
+}
+
+// static
+constexpr auto ArraySingleArgumentConstructorDescriptor::registers() {
+ // This descriptor must use the same set of registers as the
+ // ArrayNArgumentsConstructorDescriptor.
+ return ArrayNArgumentsConstructorDescriptor::registers();
+}
+
+// static
+constexpr Register RunMicrotasksDescriptor::MicrotaskQueueRegister() {
+ return GetRegisterParameter(0);
+}
+
+#define DEFINE_STATIC_BUILTIN_DESCRIPTOR_GETTER(Name, DescriptorName) \
+ template <> \
+ struct CallInterfaceDescriptorFor<Builtins::k##Name> { \
+ using type = DescriptorName##Descriptor; \
+ };
+BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN,
+ /*TFC*/ DEFINE_STATIC_BUILTIN_DESCRIPTOR_GETTER, IGNORE_BUILTIN,
+ /*TFH*/ DEFINE_STATIC_BUILTIN_DESCRIPTOR_GETTER, IGNORE_BUILTIN,
+ /*ASM*/ DEFINE_STATIC_BUILTIN_DESCRIPTOR_GETTER)
+#undef DEFINE_STATIC_BUILTIN_DESCRIPTOR_GETTER
+#define DEFINE_STATIC_BUILTIN_DESCRIPTOR_GETTER(Name, ...) \
+ template <> \
+ struct CallInterfaceDescriptorFor<Builtins::k##Name> { \
+ using type = Name##Descriptor; \
+ };
+BUILTIN_LIST_TFS(DEFINE_STATIC_BUILTIN_DESCRIPTOR_GETTER)
+#undef DEFINE_STATIC_BUILTIN_DESCRIPTOR_GETTER
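
These specializations give compile-time builtin-to-descriptor lookup. A self-contained analogue of the pattern (illustrative names, not V8's):

  enum class Builtin { kFoo };
  template <Builtin B>
  struct DescriptorFor;  // primary template left undefined

  struct FooDescriptor {};
  template <>
  struct DescriptorFor<Builtin::kFoo> {
    using type = FooDescriptor;
  };

  using D = DescriptorFor<Builtin::kFoo>::type;  // resolves to FooDescriptor
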
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_INTERFACE_DESCRIPTORS_INL_H_
diff --git a/deps/v8/src/codegen/interface-descriptors.cc b/deps/v8/src/codegen/interface-descriptors.cc
index 53b678580e..2cafcae344 100644
--- a/deps/v8/src/codegen/interface-descriptors.cc
+++ b/deps/v8/src/codegen/interface-descriptors.cc
@@ -4,49 +4,48 @@
#include "src/codegen/interface-descriptors.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/macro-assembler.h"
namespace v8 {
namespace internal {
-void CallInterfaceDescriptorData::InitializePlatformSpecific(
- int register_parameter_count, const Register* registers) {
- DCHECK(!IsInitializedPlatformIndependent());
-
- register_param_count_ = register_parameter_count;
-
- // UBSan doesn't like creating zero-length arrays.
- if (register_parameter_count == 0) return;
+void CallInterfaceDescriptorData::InitializeRegisters(
+ Flags flags, int return_count, int parameter_count,
+ StackArgumentOrder stack_order, int register_parameter_count,
+ const Register* registers) {
+ DCHECK(!IsInitializedTypes());
- // InterfaceDescriptor owns a copy of the registers array.
- register_params_ = NewArray<Register>(register_parameter_count, no_reg);
- for (int i = 0; i < register_parameter_count; i++) {
- // The value of the root register must be reserved, thus any uses
- // within the calling convention are disallowed.
#ifdef DEBUG
- CHECK_NE(registers[i], kRootRegister);
+ {
+ // Make sure that the registers are all valid, and don't alias each other.
+ RegList reglist = 0;
+ for (int i = 0; i < register_parameter_count; ++i) {
+ Register reg = registers[i];
+ DCHECK(reg.is_valid());
+ DCHECK_EQ(reglist & reg.bit(), 0);
+ DCHECK_NE(reg, kRootRegister);
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
- CHECK_NE(registers[i], kPointerCageBaseRegister);
+ DCHECK_NE(reg, kPtrComprCageBaseRegister);
#endif
- // Check for duplicated registers.
- for (int j = i + 1; j < register_parameter_count; j++) {
- CHECK_NE(registers[i], registers[j]);
+ reglist = CombineRegLists(reglist, reg.bit());
}
-#endif
- register_params_[i] = registers[i];
}
-}
-
-void CallInterfaceDescriptorData::InitializePlatformIndependent(
- Flags flags, int return_count, int parameter_count,
- const MachineType* machine_types, int machine_types_length,
- StackArgumentOrder stack_order) {
- DCHECK(IsInitializedPlatformSpecific());
+#endif
flags_ = flags;
stack_order_ = stack_order;
return_count_ = return_count;
param_count_ = parameter_count;
+ register_param_count_ = register_parameter_count;
+
+ // The caller owns the registers array, so we just set the pointer.
+ register_params_ = registers;
+}
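
Note the lifetime contract flips here: the deleted code deep-copied the registers into NewArray and freed the copy in Reset(), whereas now the pointer must outlive the descriptor data. Initialize() satisfies that by passing a function-local static array; a sketch of the contract, with argument names assumed from the signature above:

  // Storage with static duration; the descriptor data only points at it.
  static const Register kRegs[] = {eax, ecx};
  data->InitializeRegisters(flags, /*return_count=*/1, /*parameter_count=*/2,
                            StackArgumentOrder::kDefault,
                            /*register_parameter_count=*/2, kRegs);
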
+
+void CallInterfaceDescriptorData::InitializeTypes(
+ const MachineType* machine_types, int machine_types_length) {
+ DCHECK(IsInitializedRegisters());
const int types_length = return_count_ + param_count_;
// Machine types are either fully initialized or null.
@@ -77,7 +76,6 @@ bool CallInterfaceDescriptorData::AllStackParametersAreTagged() const {
void CallInterfaceDescriptorData::Reset() {
delete[] machine_types_;
machine_types_ = nullptr;
- delete[] register_params_;
register_params_ = nullptr;
}
@@ -105,27 +103,6 @@ void CallDescriptors::TearDown() {
}
}
-void CallInterfaceDescriptor::JSDefaultInitializePlatformSpecific(
- CallInterfaceDescriptorData* data, int non_js_register_parameter_count) {
- DCHECK_LE(static_cast<unsigned>(non_js_register_parameter_count), 1);
-
- // 3 is for kTarget, kNewTarget and kActualArgumentsCount
- int register_parameter_count = 3 + non_js_register_parameter_count;
-
- DCHECK(!AreAliased(
- kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
- kJavaScriptCallArgCountRegister, kJavaScriptCallExtraArg1Register));
-
- const Register default_js_stub_registers[] = {
- kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
- kJavaScriptCallArgCountRegister, kJavaScriptCallExtraArg1Register};
-
- CHECK_LE(static_cast<size_t>(register_parameter_count),
- arraysize(default_js_stub_registers));
- data->InitializePlatformSpecific(register_parameter_count,
- default_js_stub_registers);
-}
-
const char* CallInterfaceDescriptor::DebugName() const {
CallDescriptors::Key key = CallDescriptors::GetKey(data_);
switch (key) {
@@ -140,492 +117,12 @@ const char* CallInterfaceDescriptor::DebugName() const {
return "";
}
-#if !defined(V8_TARGET_ARCH_MIPS) && !defined(V8_TARGET_ARCH_MIPS64)
bool CallInterfaceDescriptor::IsValidFloatParameterRegister(Register reg) {
- return true;
-}
-#endif
-
-void VoidDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- data->InitializePlatformSpecific(0, nullptr);
-}
-
-void AllocateDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {kAllocateSizeRegister};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CEntry1ArgvOnStackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {kRuntimeCallArgCountRegister,
- kRuntimeCallFunctionRegister};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-namespace {
-
-void InterpreterCEntryDescriptor_InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {kRuntimeCallArgCountRegister,
- kRuntimeCallArgvRegister,
- kRuntimeCallFunctionRegister};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-} // namespace
-
-void InterpreterCEntry1Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- InterpreterCEntryDescriptor_InitializePlatformSpecific(data);
-}
-
-void InterpreterCEntry2Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- InterpreterCEntryDescriptor_InitializePlatformSpecific(data);
-}
-
-void FastNewObjectDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {TargetRegister(), NewTargetRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-const Register FastNewObjectDescriptor::TargetRegister() {
- return kJSFunctionRegister;
-}
-
-const Register FastNewObjectDescriptor::NewTargetRegister() {
- return kJavaScriptCallNewTargetRegister;
-}
-
-void TailCallOptimizedCodeSlotDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {kJavaScriptCallCodeStartRegister};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void LoadDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister(), NameRegister(), SlotRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void LoadBaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {LoadDescriptor::ReceiverRegister(),
- LoadDescriptor::NameRegister(),
- LoadDescriptor::SlotRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void LoadNoFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister(), NameRegister(), ICKindRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void LoadGlobalDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {NameRegister(), SlotRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void LoadGlobalBaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {LoadGlobalDescriptor::NameRegister(),
- LoadGlobalDescriptor::SlotRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void LookupBaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
-void LoadGlobalNoFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {NameRegister(), ICKindRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void LoadGlobalWithVectorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {NameRegister(), SlotRegister(), VectorRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void LoadWithReceiverAndVectorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DCHECK(!AreAliased(ReceiverRegister(), LookupStartObjectRegister(),
- NameRegister(), SlotRegister(), VectorRegister()));
- Register registers[] = {ReceiverRegister(), LookupStartObjectRegister(),
- NameRegister(), SlotRegister(), VectorRegister()};
- int len = arraysize(registers) - kStackArgumentsCount;
- data->InitializePlatformSpecific(len, registers);
-}
-
-void LoadWithReceiverBaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- LoadWithReceiverAndVectorDescriptor::ReceiverRegister(),
- LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister(),
- LoadWithReceiverAndVectorDescriptor::NameRegister(),
- LoadWithReceiverAndVectorDescriptor::SlotRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void StoreGlobalDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {NameRegister(), ValueRegister(), SlotRegister()};
-
- int len = arraysize(registers) - kStackArgumentsCount;
- data->InitializePlatformSpecific(len, registers);
-}
-
-void StoreGlobalBaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {StoreGlobalDescriptor::NameRegister(),
- StoreGlobalDescriptor::ValueRegister(),
- StoreGlobalDescriptor::SlotRegister()};
-
- int len = arraysize(registers) - kStackArgumentsCount;
- data->InitializePlatformSpecific(len, registers);
-}
-
-void StoreGlobalWithVectorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {NameRegister(), ValueRegister(), SlotRegister(),
- VectorRegister()};
- int len = arraysize(registers) - kStackArgumentsCount;
- data->InitializePlatformSpecific(len, registers);
-}
-
-void StoreDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
- SlotRegister()};
-
- int len = arraysize(registers) - kStackArgumentsCount;
- data->InitializePlatformSpecific(len, registers);
-}
-
-void StoreBaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister(), StoreDescriptor::SlotRegister()};
-
- int len = arraysize(registers) - kStackArgumentsCount;
- data->InitializePlatformSpecific(len, registers);
-}
-
-void StoreTransitionDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- ReceiverRegister(), NameRegister(), MapRegister(),
- ValueRegister(), SlotRegister(), VectorRegister(),
- };
- int len = arraysize(registers) - kStackArgumentsCount;
- data->InitializePlatformSpecific(len, registers);
-}
-
-void BaselineOutOfLinePrologueDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:11421): Implement on other platforms.
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_IA32 || \
- V8_TARGET_ARCH_ARM
- Register registers[] = {kContextRegister,
- kJSFunctionRegister,
- kJavaScriptCallArgCountRegister,
- kJavaScriptCallExtraArg1Register,
- kJavaScriptCallNewTargetRegister,
- kInterpreterBytecodeArrayRegister};
- data->InitializePlatformSpecific(kParameterCount - kStackArgumentsCount,
- registers);
-#else
- InitializePlatformUnimplemented(data, kParameterCount);
-#endif
-}
-
-void BaselineLeaveFrameDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:11421): Implement on other platforms.
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
- V8_TARGET_ARCH_ARM
- Register registers[] = {ParamsSizeRegister(), WeightRegister()};
- data->InitializePlatformSpecific(kParameterCount, registers);
+#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64)
+ return reg.code() % 2 == 0;
#else
- InitializePlatformUnimplemented(data, kParameterCount);
-#endif
-}
-
-void StringAtDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
-void StringAtAsStringDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
-void StringSubstringDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
-void TypeConversionDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ArgumentRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void TypeConversionNoContextDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {TypeConversionDescriptor::ArgumentRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void TypeConversion_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
-void SingleParameterOnStackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- data->InitializePlatformSpecific(0, nullptr);
-}
-
-void AsyncFunctionStackParameterDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- data->InitializePlatformSpecific(0, nullptr);
-}
-
-void GetIteratorStackParameterDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- data->InitializePlatformSpecific(0, nullptr);
-}
-
-void LoadWithVectorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister(), NameRegister(), SlotRegister(),
- VectorRegister()};
- // TODO(jgruber): This DCHECK could be enabled if RegisterBase::ListOf were
- // to allow no_reg entries.
- // DCHECK(!AreAliased(ReceiverRegister(), NameRegister(), SlotRegister(),
- // VectorRegister(), kRootRegister));
- int len = arraysize(registers) - kStackArgumentsCount;
- data->InitializePlatformSpecific(len, registers);
-}
-
-void StoreWithVectorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
- SlotRegister(), VectorRegister()};
- // TODO(jgruber): This DCHECK could be enabled if RegisterBase::ListOf were
- // to allow no_reg entries.
- // DCHECK(!AreAliased(ReceiverRegister(), NameRegister(), kRootRegister));
- int len = arraysize(registers) - kStackArgumentsCount;
- data->InitializePlatformSpecific(len, registers);
-}
-
-const Register ApiGetterDescriptor::ReceiverRegister() {
- return LoadDescriptor::ReceiverRegister();
-}
-
-void ApiGetterDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister(), HolderRegister(),
- CallbackRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ContextOnlyDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- data->InitializePlatformSpecific(0, nullptr);
-}
-
-void NoContextDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- data->InitializePlatformSpecific(0, nullptr);
-}
-
-void GrowArrayElementsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ObjectRegister(), KeyRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // This descriptor must use the same set of registers as the
- // ArrayNArgumentsConstructorDescriptor.
- ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(data);
-}
-
-void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // This descriptor must use the same set of registers as the
- // ArrayNArgumentsConstructorDescriptor.
- ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(data);
-}
-
-void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // Keep the arguments on the same registers as they were in
- // ArrayConstructorDescriptor to avoid unnecessary register moves.
- // kFunction, kAllocationSite, kActualArgumentsCount
- Register registers[] = {kJavaScriptCallTargetRegister,
- kJavaScriptCallExtraArg1Register,
- kJavaScriptCallArgCountRegister};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-#if !V8_TARGET_ARCH_IA32
-// We need a custom descriptor on ia32 to avoid using xmm0.
-void WasmFloat32ToNumberDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
-// We need a custom descriptor on ia32 to avoid using xmm0.
-void WasmFloat64ToNumberDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-#endif // !V8_TARGET_ARCH_IA32
-
-#if !defined(V8_TARGET_ARCH_MIPS) && !defined(V8_TARGET_ARCH_MIPS64) && \
- !defined(V8_TARGET_ARCH_RISCV64)
-void WasmI32AtomicWait32Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
-void WasmI64AtomicWait32Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data,
- kParameterCount - kStackArgumentsCount);
-}
+ return true;
#endif
-
-void CloneObjectWithVectorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
-void CloneObjectBaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
-// static
-Register RunMicrotasksDescriptor::MicrotaskQueueRegister() {
- return CallDescriptors::call_descriptor_data(CallDescriptors::RunMicrotasks)
- ->register_param(0);
-}
-
-void RunMicrotasksDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
-void I64ToBigIntDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
-void I32PairToBigIntDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
-void BigIntToI64Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
-void BigIntToI32PairDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
-void BinaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallTrampoline_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
-void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallWithSpread_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
-void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void ConstructWithSpread_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data,
- kParameterCount - kStackArgumentsCount);
-}
-
-void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void UnaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, 3);
-}
-
-void UnaryOp_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, 2);
-}
-
-void ForInPrepareDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
-void SuspendGeneratorBaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
-void ResumeGeneratorBaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
}
} // namespace internal
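
For readability, here is the predicate above reassembled from the hunk (a sketch; the enclosing function is presumably CallInterfaceDescriptor::IsValidFloatParameterRegister, per the declaration later in the header):

bool CallInterfaceDescriptor::IsValidFloatParameterRegister(Register reg) {
#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64)
  // MIPS is assumed to pass FP parameters only in even-numbered FPU registers.
  return reg.code() % 2 == 0;
#else
  return true;
#endif
}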
diff --git a/deps/v8/src/codegen/interface-descriptors.h b/deps/v8/src/codegen/interface-descriptors.h
index 8d03907efc..e64826e6fc 100644
--- a/deps/v8/src/codegen/interface-descriptors.h
+++ b/deps/v8/src/codegen/interface-descriptors.h
@@ -7,6 +7,7 @@
#include <memory>
+#include "src/base/logging.h"
#include "src/codegen/machine-type.h"
#include "src/codegen/register-arch.h"
#include "src/codegen/tnode.h"
@@ -39,6 +40,7 @@ namespace internal {
V(CallFunctionTemplate) \
V(CallTrampoline) \
V(CallTrampoline_Baseline) \
+ V(CallTrampoline_Baseline_Compact) \
V(CallTrampoline_WithFeedback) \
V(CallVarargs) \
V(CallWithArrayLike) \
@@ -68,7 +70,6 @@ namespace internal {
V(EphemeronKeyBarrier) \
V(FastNewObject) \
V(ForInPrepare) \
- V(FrameDropperTrampoline) \
V(GetIteratorStackParameter) \
V(GetProperty) \
V(GrowArrayElements) \
@@ -151,17 +152,21 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
};
using Flags = base::Flags<Flag>;
+ static constexpr int kUninitializedCount = -1;
+
CallInterfaceDescriptorData() = default;
CallInterfaceDescriptorData(const CallInterfaceDescriptorData&) = delete;
CallInterfaceDescriptorData& operator=(const CallInterfaceDescriptorData&) =
delete;
- // A copy of the passed in registers and param_representations is made
- // and owned by the CallInterfaceDescriptorData.
-
- void InitializePlatformSpecific(int register_parameter_count,
- const Register* registers);
+  // The passed registers are owned by the caller, and their lifetime is
+  // expected to exceed that of this data. In practice, they are expected to
+  // be a function-local static array.
+ void InitializeRegisters(Flags flags, int return_count, int parameter_count,
+ StackArgumentOrder stack_order,
+ int register_parameter_count,
+ const Register* registers);
// if machine_types is null, then an array of size
// (return_count + parameter_count) will be created with
@@ -171,17 +176,13 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
// (return_count + parameter_count). Those members of the parameter array will
// be initialized from {machine_types}, and the rest initialized to
// MachineType::AnyTagged().
- void InitializePlatformIndependent(Flags flags, int return_count,
- int parameter_count,
- const MachineType* machine_types,
- int machine_types_length,
- StackArgumentOrder stack_order);
+ void InitializeTypes(const MachineType* machine_types,
+ int machine_types_length);
void Reset();
bool IsInitialized() const {
- return IsInitializedPlatformSpecific() &&
- IsInitializedPlatformIndependent();
+ return IsInitializedRegisters() && IsInitializedTypes();
}
Flags flags() const { return flags_; }
@@ -189,7 +190,6 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
int param_count() const { return param_count_; }
int register_param_count() const { return register_param_count_; }
Register register_param(int index) const { return register_params_[index]; }
- Register* register_params() const { return register_params_; }
MachineType return_type(int index) const {
DCHECK_LT(index, return_count_);
return machine_types_[index];
@@ -200,9 +200,9 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
}
StackArgumentOrder stack_order() const { return stack_order_; }
- void RestrictAllocatableRegisters(const Register* registers, int num) {
+ void RestrictAllocatableRegisters(const Register* registers, size_t num) {
DCHECK_EQ(allocatable_registers_, 0);
- for (int i = 0; i < num; ++i) {
+ for (size_t i = 0; i < num; ++i) {
allocatable_registers_ |= registers[i].bit();
}
DCHECK_GT(NumRegs(allocatable_registers_), 0);
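
RestrictAllocatableRegisters above accumulates the registers into a RegList bitmask. A minimal self-contained sketch of that accumulation, using stand-in Register/RegList types rather than V8's own:

#include <cstddef>
#include <cstdint>

struct Register {
  int code;
  constexpr uint64_t bit() const { return uint64_t{1} << code; }
};
using RegList = uint64_t;  // stand-in for V8's RegList bitmask

constexpr RegList Restrict(const Register* registers, size_t num) {
  RegList allocatable = 0;
  for (size_t i = 0; i < num; ++i) allocatable |= registers[i].bit();
  return allocatable;  // non-zero iff at least one register was passed
}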
@@ -211,17 +211,17 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
RegList allocatable_registers() const { return allocatable_registers_; }
private:
- bool IsInitializedPlatformSpecific() const {
+ bool IsInitializedRegisters() const {
const bool initialized =
- (register_param_count_ == 0 && register_params_ == nullptr) ||
- (register_param_count_ > 0 && register_params_ != nullptr);
- // Platform-specific initialization happens before platform-independent.
+ return_count_ != kUninitializedCount &&
+ param_count_ != kUninitializedCount &&
+ (register_param_count_ == 0 || register_params_ != nullptr);
+ // Register initialization happens before type initialization.
return initialized;
}
- bool IsInitializedPlatformIndependent() const {
- const bool initialized =
- return_count_ >= 0 && param_count_ >= 0 && machine_types_ != nullptr;
- // Platform-specific initialization happens before platform-independent.
+ bool IsInitializedTypes() const {
+ const bool initialized = machine_types_ != nullptr;
+ // Register initialization happens before type initialization.
return initialized;
}
@@ -229,9 +229,9 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
bool AllStackParametersAreTagged() const;
#endif // DEBUG
- int register_param_count_ = -1;
- int return_count_ = -1;
- int param_count_ = -1;
+ int register_param_count_ = kUninitializedCount;
+ int return_count_ = kUninitializedCount;
+ int param_count_ = kUninitializedCount;
Flags flags_ = kNoFlags;
StackArgumentOrder stack_order_ = StackArgumentOrder::kDefault;
@@ -242,10 +242,10 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
// |register_params_| defines registers that are used for parameter passing.
// |machine_types_| defines machine types for resulting values and incoming
// parameters.
- // Both arrays are allocated dynamically by the InterfaceDescriptor and
- // freed on destruction. This is because static arrays cause creation of
- // runtime static initializers which we don't want.
- Register* register_params_ = nullptr;
+  // The register params array is owned by the caller and is expected to be a
+  // static local in the calling function. The machine types are allocated
+  // dynamically by the InterfaceDescriptor and freed on destruction.
+ const Register* register_params_ = nullptr;
MachineType* machine_types_ = nullptr;
};
@@ -278,12 +278,35 @@ class V8_EXPORT_PRIVATE CallDescriptors : public AllStatic {
call_descriptor_data_[NUMBER_OF_DESCRIPTORS];
};
+#if defined(V8_TARGET_ARCH_IA32)
+// To support all possible cases, we must limit the number of register args for
+// TFS builtins on ia32 to 3. Out of the 6 allocatable registers, esi is taken
+// as the context register and ebx is the root register. One register must
+// remain available to store the jump/call target. Thus 3 registers remain for
+// arguments. This applies to TFS builtins specifically because they can be
+// used as targets of Torque function pointers (which must have a register
+// available to store the target).
+// TODO(jgruber): Ideally we should just decrement kMaxBuiltinRegisterParams but
+// that comes with its own set of complications. It's possible, but requires
+// refactoring the calling convention of other existing stubs.
+constexpr int kMaxBuiltinRegisterParams = 4;
+constexpr int kMaxTFSBuiltinRegisterParams = 3;
+#else
+constexpr int kMaxBuiltinRegisterParams = 5;
+constexpr int kMaxTFSBuiltinRegisterParams = kMaxBuiltinRegisterParams;
+#endif
+STATIC_ASSERT(kMaxTFSBuiltinRegisterParams <= kMaxBuiltinRegisterParams);
+constexpr int kJSBuiltinRegisterParams = 4;
+
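
To make the limits concrete, here is a small sketch (function names hypothetical) of how a builtin's parameter count splits into register and stack parameters under kMaxTFSBuiltinRegisterParams:

constexpr int RegisterParams(int param_count, int max_register_params) {
  return param_count < max_register_params ? param_count : max_register_params;
}
constexpr int StackParams(int param_count, int max_register_params) {
  return param_count - RegisterParams(param_count, max_register_params);
}
// A 5-parameter TFS builtin on ia32 (kMaxTFSBuiltinRegisterParams == 3):
static_assert(RegisterParams(5, 3) == 3, "three parameters in registers");
static_assert(StackParams(5, 3) == 2, "two parameters on the stack");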
+// Polymorphic base class for call interface descriptors, which defines getters
+// for the various descriptor properties via a runtime-loaded
+// CallInterfaceDescriptorData field.
class V8_EXPORT_PRIVATE CallInterfaceDescriptor {
public:
using Flags = CallInterfaceDescriptorData::Flags;
CallInterfaceDescriptor() : data_(nullptr) {}
- virtual ~CallInterfaceDescriptor() = default;
+ ~CallInterfaceDescriptor() = default;
explicit CallInterfaceDescriptor(CallDescriptors::Key key)
: data_(CallDescriptors::call_descriptor_data(key)) {}
@@ -333,7 +356,9 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptor {
return data()->stack_order();
}
- static const Register ContextRegister();
+ static constexpr inline Register ContextRegister() {
+ return kContextRegister;
+ }
const char* DebugName() const;
@@ -344,39 +369,13 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptor {
protected:
const CallInterfaceDescriptorData* data() const { return data_; }
- virtual void InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
- UNREACHABLE();
- }
-
- // Initializes |data| to an unspecified state, for platforms that haven't
- // implemented a given builtin.
- static void InitializePlatformUnimplemented(CallInterfaceDescriptorData* data,
- int register_parameter_count) {
- DefaultInitializePlatformSpecific(data,
- std::min(register_parameter_count, 4));
- }
-
- virtual void InitializePlatformIndependent(
- CallInterfaceDescriptorData* data) {
- // Default descriptor configuration: one result, all parameters are passed
- // in registers and all parameters have MachineType::AnyTagged() type.
- data->InitializePlatformIndependent(
- CallInterfaceDescriptorData::kNoFlags, 1, data->register_param_count(),
- nullptr, 0, StackArgumentOrder::kDefault);
- }
-
- // Initializes |data| using the platform dependent default set of registers.
- // It is intended to be used for TurboFan stubs when particular set of
- // registers does not matter.
- static void DefaultInitializePlatformSpecific(
- CallInterfaceDescriptorData* data, int register_parameter_count);
-
- // Initializes |data| using the platform dependent default set of registers
- // for JavaScript-compatible calling convention.
- // It is intended to be used for TurboFan stubs being called with JavaScript
- // linkage + additional parameters on registers and stack.
- static void JSDefaultInitializePlatformSpecific(
- CallInterfaceDescriptorData* data, int non_js_register_parameter_count);
+ // Helper for defining the default register set.
+ //
+ // Use auto for the return type to allow different architectures to have
+ // differently sized default register arrays.
+ static constexpr inline auto DefaultRegisterArray();
+ static constexpr inline std::array<Register, kJSBuiltinRegisterParams>
+ DefaultJSRegisterArray();
// Checks if float parameters are not assigned invalid registers.
bool CheckFloatingPointParameters(CallInterfaceDescriptorData* data) {
@@ -393,104 +392,164 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptor {
bool IsValidFloatParameterRegister(Register reg);
private:
+ const CallInterfaceDescriptorData* data_;
+};
+
+// CRTP base class for call interface descriptors, which defines static getters
+// for the various descriptor properties based on static values defined in the
+// subclass.
+template <typename DerivedDescriptor>
+class StaticCallInterfaceDescriptor : public CallInterfaceDescriptor {
+ public:
+ // ===========================================================================
+ // The following are the descriptor's CRTP configuration points, overwritable
+ // by DerivedDescriptor.
+ static constexpr int kReturnCount =
+ CallInterfaceDescriptorData::kUninitializedCount;
+ static constexpr int kParameterCount =
+ CallInterfaceDescriptorData::kUninitializedCount;
+ static constexpr bool kNoContext = false;
+ static constexpr bool kAllowVarArgs = false;
+ static constexpr bool kNoStackScan = false;
+ static constexpr auto kStackArgumentOrder = StackArgumentOrder::kDefault;
+
+ // The set of registers available to the parameters, as a
+ // std::array<Register,N>. Can be larger or smaller than kParameterCount; if
+  // larger, any remaining registers are ignored; if smaller, any parameters
+  // after registers().size() are passed on the stack.
+ //
+ // Defaults to CallInterfaceDescriptor::DefaultRegisterArray().
+ static constexpr inline auto registers();
+
+ // An additional limit on the number of register parameters allowed. This is
+ // here so that it can be overwritten to kMaxTFSBuiltinRegisterParams for TFS
+ // builtins, see comment on kMaxTFSBuiltinRegisterParams above.
+ static constexpr int kMaxRegisterParams = kMaxBuiltinRegisterParams;
+
+ // If set to true, the descriptor will restrict the set of allocatable
+  // registers to the set returned by registers(). In that case, the first
+  // kParameterCount entries of registers() are expected to be the builtin's
+  // parameters.
+ static constexpr bool kRestrictAllocatableRegisters = false;
+
+ // End of customization points.
+ // ===========================================================================
+
+ static constexpr inline Flags flags() {
+ return Flags((DerivedDescriptor::kNoContext
+ ? CallInterfaceDescriptorData::kNoContext
+ : 0) |
+ (DerivedDescriptor::kAllowVarArgs
+ ? CallInterfaceDescriptorData::kAllowVarArgs
+ : 0) |
+ (DerivedDescriptor::kNoStackScan
+ ? CallInterfaceDescriptorData::kNoStackScan
+ : 0));
+ }
+ static constexpr inline bool AllowVarArgs() {
+ return DerivedDescriptor::kAllowVarArgs;
+ }
+ static constexpr inline bool HasContextParameter() {
+ return !DerivedDescriptor::kNoContext;
+ }
+
+ static constexpr inline int GetReturnCount();
+ static constexpr inline int GetParameterCount();
+ static constexpr inline int GetRegisterParameterCount();
+ static constexpr inline int GetStackParameterCount();
+ static constexpr inline Register* GetRegisterData();
+
+ static constexpr inline Register GetRegisterParameter(int i) {
+ return DerivedDescriptor::registers()[i];
+ }
+
+ explicit StaticCallInterfaceDescriptor(CallDescriptors::Key key)
+ : CallInterfaceDescriptor(key) {}
+
+ private:
// {CallDescriptors} is allowed to call the private {Initialize} method.
friend class CallDescriptors;
- const CallInterfaceDescriptorData* data_;
+ inline void Initialize(CallInterfaceDescriptorData* data);
- void Initialize(CallInterfaceDescriptorData* data) {
- // The passed pointer should be a modifiable pointer to our own data.
- DCHECK_EQ(data, data_);
- DCHECK(!data->IsInitialized());
- InitializePlatformSpecific(data);
- InitializePlatformIndependent(data);
- DCHECK(data->IsInitialized());
- DCHECK(CheckFloatingPointParameters(data));
+ // Set up the types of the descriptor. This is a static function, so that it
+ // is overwritable by subclasses. By default, all parameters have
+ // MachineType::AnyTagged() type.
+ static void InitializeTypes(CallInterfaceDescriptorData* data) {
+ data->InitializeTypes(nullptr, 0);
}
};
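
The CRTP mechanics can be hard to see in diff form. A minimal standalone sketch of the same idea, with stub types rather than V8's actual classes: the base template reads constants and registers() from the derived class entirely at compile time, and a descriptor only declares its configuration points.

#include <array>

struct Register { int code; };

template <typename Derived>
struct StaticDescriptorBase {
  static constexpr int GetRegisterParameterCount() {
    return Derived::kParameterCount <
                   static_cast<int>(Derived::registers().size())
               ? Derived::kParameterCount
               : static_cast<int>(Derived::registers().size());
  }
  static constexpr int GetStackParameterCount() {
    return Derived::kParameterCount - GetRegisterParameterCount();
  }
  static constexpr Register GetRegisterParameter(int i) {
    return Derived::registers()[i];
  }
};

// A derived descriptor only declares constants and a register set.
struct MyLoadDescriptor : StaticDescriptorBase<MyLoadDescriptor> {
  static constexpr int kParameterCount = 3;
  static constexpr std::array<Register, 2> registers() {
    return {{Register{2}, Register{3}}};
  }
};

static_assert(MyLoadDescriptor::GetRegisterParameterCount() == 2, "");
static_assert(MyLoadDescriptor::GetStackParameterCount() == 1, "");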
-#define DECLARE_DESCRIPTOR_WITH_BASE(name, base) \
- public: \
- explicit name() : base(key()) {} \
- static inline CallDescriptors::Key key();
+template <typename Descriptor>
+class StaticJSCallInterfaceDescriptor
+ : public StaticCallInterfaceDescriptor<Descriptor> {
+ public:
+ static constexpr auto kStackArgumentOrder = StackArgumentOrder::kJS;
+ static constexpr inline auto registers();
-#if defined(V8_TARGET_ARCH_IA32)
-// To support all possible cases, we must limit the number of register args for
-// TFS builtins on ia32 to 3. Out of the 6 allocatable registers, esi is taken
-// as the context register and ebx is the root register. One register must
-// remain available to store the jump/call target. Thus 3 registers remain for
-// arguments. The reason this applies to TFS builtins specifically is because
-// this becomes relevant for builtins used as targets of Torque function
-// pointers (which must have a register available to store the target).
-// TODO(jgruber): Ideally we should just decrement kMaxBuiltinRegisterParams but
-// that comes with its own set of complications. It's possible, but requires
-// refactoring the calling convention of other existing stubs.
-constexpr int kMaxBuiltinRegisterParams = 4;
-constexpr int kMaxTFSBuiltinRegisterParams = 3;
-#else
-constexpr int kMaxBuiltinRegisterParams = 5;
-constexpr int kMaxTFSBuiltinRegisterParams = kMaxBuiltinRegisterParams;
-#endif
-STATIC_ASSERT(kMaxTFSBuiltinRegisterParams <= kMaxBuiltinRegisterParams);
+ using StaticCallInterfaceDescriptor<
+ Descriptor>::StaticCallInterfaceDescriptor;
+};
-#define DECLARE_DEFAULT_DESCRIPTOR(name, base) \
- DECLARE_DESCRIPTOR_WITH_BASE(name, base) \
- protected: \
- static const int kRegisterParams = \
- kParameterCount > kMaxTFSBuiltinRegisterParams \
- ? kMaxTFSBuiltinRegisterParams \
- : kParameterCount; \
- static const int kStackParams = kParameterCount - kRegisterParams; \
- void InitializePlatformSpecific(CallInterfaceDescriptorData* data) \
- override { \
- DefaultInitializePlatformSpecific(data, kRegisterParams); \
- } \
- void InitializePlatformIndependent(CallInterfaceDescriptorData* data) \
- override { \
- data->InitializePlatformIndependent(Flags(kDescriptorFlags), kReturnCount, \
- kParameterCount, nullptr, 0, \
- kStackArgumentOrder); \
- } \
- name(CallDescriptors::Key key) : base(key) {} \
- \
- public:
-
-#define DECLARE_JS_COMPATIBLE_DESCRIPTOR(name, base, \
- non_js_reg_parameters_count) \
- DECLARE_DESCRIPTOR_WITH_BASE(name, base) \
- protected: \
- void InitializePlatformSpecific(CallInterfaceDescriptorData* data) \
- override { \
- JSDefaultInitializePlatformSpecific(data, non_js_reg_parameters_count); \
- } \
- name(CallDescriptors::Key key) : base(key) {} \
- \
- public:
-
-#define DEFINE_FLAGS_AND_RESULT_AND_PARAMETERS(flags, stack_order, \
- return_count, ...) \
- static constexpr int kDescriptorFlags = flags; \
- static constexpr int kReturnCount = return_count; \
- static constexpr StackArgumentOrder kStackArgumentOrder = stack_order; \
- enum ParameterIndices { \
- __dummy = -1, /* to be able to pass zero arguments */ \
- ##__VA_ARGS__, \
- \
- kParameterCount, \
- kContext = kParameterCount /* implicit parameter */ \
- };
+template <Builtins::Name kBuiltin>
+struct CallInterfaceDescriptorFor;
+
+// Stub class replacing std::array<Register, 0>, as a workaround for MSVC's
+// https://github.com/microsoft/STL/issues/942
+struct EmptyRegisterArray {
+ Register* data() { return nullptr; }
+ size_t size() const { return 0; }
+ Register operator[](size_t i) const { UNREACHABLE(); }
+};
-#define DEFINE_RESULT_AND_PARAMETERS(return_count, ...) \
- DEFINE_FLAGS_AND_RESULT_AND_PARAMETERS( \
- CallInterfaceDescriptorData::kNoFlags, StackArgumentOrder::kDefault, \
- return_count, ##__VA_ARGS__)
+// Helper function for defining an array of registers for the various
+// Descriptor::registers() methods.
+template <typename... Registers>
+constexpr std::array<Register, 1 + sizeof...(Registers)> RegisterArray(
+ Register first_reg, Registers... regs) {
+ return {first_reg, regs...};
+}
+constexpr EmptyRegisterArray RegisterArray() { return {}; }
+
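
A self-contained sketch mirroring the overload pair above (stub Register type): the variadic overload deduces the array size from the argument count, while the zero-argument overload returns the EmptyRegisterArray stub instead of std::array<Register, 0>, sidestepping the MSVC STL issue linked above.

#include <array>
#include <cstddef>

struct Register { int code; };

struct EmptyRegisterArray {
  constexpr size_t size() const { return 0; }
};

template <typename... Registers>
constexpr std::array<Register, 1 + sizeof...(Registers)> RegisterArray(
    Register first_reg, Registers... regs) {
  return {{first_reg, regs...}};
}
constexpr EmptyRegisterArray RegisterArray() { return {}; }

constexpr auto two = RegisterArray(Register{1}, Register{2});
static_assert(two.size() == 2, "arity deduced from the argument count");
static_assert(RegisterArray().size() == 0, "empty overload avoids array<R,0>");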
+#define DECLARE_DESCRIPTOR_WITH_BASE(name, base) \
+ public: \
+ /* StaticCallInterfaceDescriptor can call Initialize methods */ \
+ friend class StaticCallInterfaceDescriptor<name>; \
+ explicit name() : base(key()) {} \
+ static inline CallDescriptors::Key key();
+
+#define DECLARE_DEFAULT_DESCRIPTOR(name) \
+ DECLARE_DESCRIPTOR_WITH_BASE(name, StaticCallInterfaceDescriptor) \
+ static constexpr int kMaxRegisterParams = kMaxTFSBuiltinRegisterParams; \
+ \
+ protected: \
+ explicit name(CallDescriptors::Key key) \
+ : StaticCallInterfaceDescriptor(key) {} \
+ \
+ public:
+
+#define DECLARE_JS_COMPATIBLE_DESCRIPTOR(name) \
+ DECLARE_DESCRIPTOR_WITH_BASE(name, StaticJSCallInterfaceDescriptor) \
+ protected: \
+ explicit name(CallDescriptors::Key key) \
+ : StaticJSCallInterfaceDescriptor(key) {} \
+ \
+ public:
+
+#define DEFINE_RESULT_AND_PARAMETERS(return_count, ...) \
+ static constexpr int kReturnCount = return_count; \
+ enum ParameterIndices { \
+ __dummy = -1, /* to be able to pass zero arguments */ \
+ ##__VA_ARGS__, \
+ \
+ kParameterCount, \
+ kContext = kParameterCount /* implicit parameter */ \
+ };
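
For illustration, a hypothetical expansion of DEFINE_PARAMETERS(kReceiver, kName), i.e. DEFINE_RESULT_AND_PARAMETERS(1, kReceiver, kName), inside a descriptor class; the __dummy = -1 entry lets the macro accept an empty parameter list, and kParameterCount falls out of the enum's auto-increment:

static constexpr int kReturnCount = 1;
enum ParameterIndices {
  __dummy = -1,  // allows passing zero named parameters
  kReceiver,     // 0
  kName,         // 1
  kParameterCount,             // 2: one past the last named parameter
  kContext = kParameterCount   // implicit trailing context parameter
};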
// This is valid only for builtins that use EntryFrame, which does not scan
// stack arguments on GC.
#define DEFINE_PARAMETERS_ENTRY(...) \
- static constexpr int kDescriptorFlags = \
- CallInterfaceDescriptorData::kNoContext | \
- CallInterfaceDescriptorData::kNoStackScan; \
+ static constexpr bool kNoContext = true; \
+ static constexpr bool kNoStackScan = true; \
static constexpr StackArgumentOrder kStackArgumentOrder = \
StackArgumentOrder::kDefault; \
static constexpr int kReturnCount = 1; \
@@ -501,37 +560,37 @@ STATIC_ASSERT(kMaxTFSBuiltinRegisterParams <= kMaxBuiltinRegisterParams);
kParameterCount \
};
-#define DEFINE_PARAMETERS(...) \
- DEFINE_FLAGS_AND_RESULT_AND_PARAMETERS( \
- CallInterfaceDescriptorData::kNoFlags, StackArgumentOrder::kDefault, 1, \
- ##__VA_ARGS__)
+#define DEFINE_PARAMETERS(...) DEFINE_RESULT_AND_PARAMETERS(1, ##__VA_ARGS__)
+
+#define DEFINE_PARAMETERS_NO_CONTEXT(...) \
+ DEFINE_PARAMETERS(__VA_ARGS__) \
+ static constexpr bool kNoContext = true;
+
+#define DEFINE_PARAMETERS_VARARGS(...) \
+ DEFINE_PARAMETERS(__VA_ARGS__) \
+ static constexpr bool kAllowVarArgs = true; \
+ static constexpr StackArgumentOrder kStackArgumentOrder = \
+ StackArgumentOrder::kJS;
-#define DEFINE_PARAMETERS_NO_CONTEXT(...) \
- DEFINE_FLAGS_AND_RESULT_AND_PARAMETERS( \
- CallInterfaceDescriptorData::kNoContext, StackArgumentOrder::kDefault, \
- 1, ##__VA_ARGS__)
+#define DEFINE_PARAMETERS_NO_CONTEXT_VARARGS(...) \
+ DEFINE_PARAMETERS_NO_CONTEXT(__VA_ARGS__) \
+ static constexpr bool kAllowVarArgs = true; \
+ static constexpr StackArgumentOrder kStackArgumentOrder = \
+ StackArgumentOrder::kJS;
-#define DEFINE_PARAMETERS_VARARGS(...) \
- DEFINE_FLAGS_AND_RESULT_AND_PARAMETERS( \
- CallInterfaceDescriptorData::kAllowVarArgs, StackArgumentOrder::kJS, 1, \
- ##__VA_ARGS__)
+#define DEFINE_RESULT_AND_PARAMETERS_NO_CONTEXT(return_count, ...) \
+ DEFINE_RESULT_AND_PARAMETERS(return_count, ##__VA_ARGS__) \
+ static constexpr bool kNoContext = true;
-#define DEFINE_RESULT_AND_PARAMETER_TYPES_WITH_FLAG(flag, ...) \
- void InitializePlatformIndependent(CallInterfaceDescriptorData* data) \
- override { \
+#define DEFINE_RESULT_AND_PARAMETER_TYPES(...) \
+ static void InitializeTypes(CallInterfaceDescriptorData* data) { \
MachineType machine_types[] = {__VA_ARGS__}; \
static_assert( \
kReturnCount + kParameterCount == arraysize(machine_types), \
"Parameter names definition is not consistent with parameter types"); \
- data->InitializePlatformIndependent( \
- Flags(flag | kDescriptorFlags), kReturnCount, kParameterCount, \
- machine_types, arraysize(machine_types), kStackArgumentOrder); \
+ data->InitializeTypes(machine_types, arraysize(machine_types)); \
}
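
And a hypothetical expansion of the macro above for a descriptor with kReturnCount == 1 and kParameterCount == 3 (the types shown match LoadDescriptor below); the static_assert catches any mismatch between the parameter-name enum and the type list:

static void InitializeTypes(CallInterfaceDescriptorData* data) {
  MachineType machine_types[] = {
      MachineType::AnyTagged(),     // result
      MachineType::AnyTagged(),     // kReceiver
      MachineType::AnyTagged(),     // kName
      MachineType::TaggedSigned(),  // kSlot
  };
  static_assert(
      1 + 3 == arraysize(machine_types),
      "Parameter names definition is not consistent with parameter types");
  data->InitializeTypes(machine_types, arraysize(machine_types));
}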
-#define DEFINE_RESULT_AND_PARAMETER_TYPES(...) \
- DEFINE_RESULT_AND_PARAMETER_TYPES_WITH_FLAG( \
- CallInterfaceDescriptorData::kNoFlags, __VA_ARGS__)
-
#define DEFINE_PARAMETER_TYPES(...) \
DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::AnyTagged() /* result */, \
##__VA_ARGS__)
@@ -539,8 +598,7 @@ STATIC_ASSERT(kMaxTFSBuiltinRegisterParams <= kMaxBuiltinRegisterParams);
// When the extra arguments described here are located on the stack, they are
// just above the return address in the frame (first arguments).
#define DEFINE_JS_PARAMETERS(...) \
- static constexpr int kDescriptorFlags = \
- CallInterfaceDescriptorData::kAllowVarArgs; \
+ static constexpr bool kAllowVarArgs = true; \
static constexpr int kReturnCount = 1; \
static constexpr StackArgumentOrder kStackArgumentOrder = \
StackArgumentOrder::kJS; \
@@ -554,9 +612,8 @@ STATIC_ASSERT(kMaxTFSBuiltinRegisterParams <= kMaxBuiltinRegisterParams);
};
#define DEFINE_JS_PARAMETERS_NO_CONTEXT(...) \
- static constexpr int kDescriptorFlags = \
- CallInterfaceDescriptorData::kAllowVarArgs | \
- CallInterfaceDescriptorData::kNoContext; \
+ static constexpr bool kAllowVarArgs = true; \
+ static constexpr bool kNoContext = true; \
static constexpr int kReturnCount = 1; \
static constexpr StackArgumentOrder kStackArgumentOrder = \
StackArgumentOrder::kJS; \
@@ -574,63 +631,22 @@ STATIC_ASSERT(kMaxTFSBuiltinRegisterParams <= kMaxBuiltinRegisterParams);
MachineType::Int32(), /* kActualArgumentsCount */ \
##__VA_ARGS__)
-#define DECLARE_DESCRIPTOR(name, base) \
- DECLARE_DESCRIPTOR_WITH_BASE(name, base) \
- protected: \
- void InitializePlatformSpecific(CallInterfaceDescriptorData* data) override; \
- name(CallDescriptors::Key key) : base(key) {} \
- \
+#define DECLARE_DESCRIPTOR(name) \
+ DECLARE_DESCRIPTOR_WITH_BASE(name, StaticCallInterfaceDescriptor) \
+ protected: \
+ explicit name(CallDescriptors::Key key) \
+ : StaticCallInterfaceDescriptor(key) {} \
+ \
public:
-class V8_EXPORT_PRIVATE VoidDescriptor : public CallInterfaceDescriptor {
+class V8_EXPORT_PRIVATE VoidDescriptor
+ : public StaticCallInterfaceDescriptor<VoidDescriptor> {
public:
DEFINE_PARAMETERS()
DEFINE_PARAMETER_TYPES()
- DECLARE_DESCRIPTOR(VoidDescriptor, CallInterfaceDescriptor)
-};
-
-// This class is subclassed by Torque-generated call interface descriptors.
-template <int return_count, int parameter_count, bool has_context_parameter>
-class TorqueInterfaceDescriptor : public CallInterfaceDescriptor {
- public:
- static constexpr int kDescriptorFlags =
- has_context_parameter ? CallInterfaceDescriptorData::kNoFlags
- : CallInterfaceDescriptorData::kNoContext;
- static constexpr int kParameterCount = parameter_count;
- enum ParameterIndices { kContext = kParameterCount };
- template <int i>
- static ParameterIndices ParameterIndex() {
- STATIC_ASSERT(0 <= i && i < kParameterCount);
- return static_cast<ParameterIndices>(i);
- }
- static constexpr int kReturnCount = return_count;
-
- using CallInterfaceDescriptor::CallInterfaceDescriptor;
+ DECLARE_DESCRIPTOR(VoidDescriptor)
- protected:
- static const int kRegisterParams =
- kParameterCount > kMaxTFSBuiltinRegisterParams
- ? kMaxTFSBuiltinRegisterParams
- : kParameterCount;
- static const int kStackParams = kParameterCount - kRegisterParams;
- virtual std::vector<MachineType> ReturnType() = 0;
- virtual std::array<MachineType, kParameterCount> ParameterTypes() = 0;
- void InitializePlatformSpecific(CallInterfaceDescriptorData* data) override {
- DefaultInitializePlatformSpecific(data, kRegisterParams);
- }
- void InitializePlatformIndependent(
- CallInterfaceDescriptorData* data) override {
- std::vector<MachineType> machine_types = ReturnType();
- DCHECK_EQ(kReturnCount, machine_types.size());
- auto parameter_types = ParameterTypes();
- machine_types.insert(machine_types.end(), parameter_types.begin(),
- parameter_types.end());
- DCHECK_EQ(kReturnCount + kParameterCount, machine_types.size());
- data->InitializePlatformIndependent(Flags(kDescriptorFlags), kReturnCount,
- kParameterCount, machine_types.data(),
- static_cast<int>(machine_types.size()),
- StackArgumentOrder::kDefault);
- }
+ static constexpr auto registers();
};
// Dummy descriptor used to mark builtins that don't yet have their proper
@@ -646,180 +662,171 @@ using CCallDescriptor = VoidDescriptor;
// here.
using DeoptimizationEntryDescriptor = VoidDescriptor;
-class AllocateDescriptor : public CallInterfaceDescriptor {
+class AllocateDescriptor
+ : public StaticCallInterfaceDescriptor<AllocateDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kRequestedSize)
DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::TaggedPointer(), // result 1
MachineType::IntPtr()) // kRequestedSize
- DECLARE_DESCRIPTOR(AllocateDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(AllocateDescriptor)
+
+ static constexpr auto registers();
};
// This descriptor defines the JavaScript calling convention that can be used
// by stubs: target, new.target, argc (not including the receiver) and context
// are passed in registers while receiver and the rest of the JS arguments are
// passed on the stack.
-class JSTrampolineDescriptor : public CallInterfaceDescriptor {
+class JSTrampolineDescriptor
+ : public StaticJSCallInterfaceDescriptor<JSTrampolineDescriptor> {
public:
DEFINE_JS_PARAMETERS()
DEFINE_JS_PARAMETER_TYPES()
- DECLARE_JS_COMPATIBLE_DESCRIPTOR(JSTrampolineDescriptor,
- CallInterfaceDescriptor, 0)
+ DECLARE_JS_COMPATIBLE_DESCRIPTOR(JSTrampolineDescriptor)
};
-class ContextOnlyDescriptor : public CallInterfaceDescriptor {
+class ContextOnlyDescriptor
+ : public StaticCallInterfaceDescriptor<ContextOnlyDescriptor> {
public:
DEFINE_PARAMETERS()
DEFINE_PARAMETER_TYPES()
- DECLARE_DESCRIPTOR(ContextOnlyDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(ContextOnlyDescriptor)
+
+ static constexpr auto registers();
};
-class NoContextDescriptor : public CallInterfaceDescriptor {
+class NoContextDescriptor
+ : public StaticCallInterfaceDescriptor<NoContextDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT()
DEFINE_PARAMETER_TYPES()
- DECLARE_DESCRIPTOR(NoContextDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(NoContextDescriptor)
+
+ static constexpr auto registers();
};
// LoadDescriptor is used by all stubs that implement Load/KeyedLoad ICs.
-class LoadDescriptor : public CallInterfaceDescriptor {
+class LoadDescriptor : public StaticCallInterfaceDescriptor<LoadDescriptor> {
public:
DEFINE_PARAMETERS(kReceiver, kName, kSlot)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver
MachineType::AnyTagged(), // kName
MachineType::TaggedSigned()) // kSlot
- DECLARE_DESCRIPTOR(LoadDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(LoadDescriptor)
+
+ static constexpr inline Register ReceiverRegister();
+ static constexpr inline Register NameRegister();
+ static constexpr inline Register SlotRegister();
- static const Register ReceiverRegister();
- static const Register NameRegister();
- static const Register SlotRegister();
+ static constexpr auto registers();
};
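
Under the new scheme these accessors move out of the per-platform .cc files and into the per-architecture interface-descriptors-*-inl.h headers as constexpr definitions. A sketch of the shape such definitions might take (the register choices shown are illustrative, not a statement of the actual platform assignment):

// In a hypothetical interface-descriptors-x64-inl.h:
// static
constexpr Register LoadDescriptor::ReceiverRegister() { return rdx; }
// static
constexpr Register LoadDescriptor::NameRegister() { return rcx; }
// static
constexpr Register LoadDescriptor::SlotRegister() { return rax; }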
// LoadBaselineDescriptor is a load descriptor that does not take a context as
// input.
-class LoadBaselineDescriptor : public CallInterfaceDescriptor {
+class LoadBaselineDescriptor
+ : public StaticCallInterfaceDescriptor<LoadBaselineDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kReceiver, kName, kSlot)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver
MachineType::AnyTagged(), // kName
MachineType::TaggedSigned()) // kSlot
- DECLARE_DESCRIPTOR(LoadBaselineDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(LoadBaselineDescriptor)
+
+ static constexpr auto registers();
};
-class LoadGlobalNoFeedbackDescriptor : public CallInterfaceDescriptor {
+class LoadGlobalNoFeedbackDescriptor
+ : public StaticCallInterfaceDescriptor<LoadGlobalNoFeedbackDescriptor> {
public:
DEFINE_PARAMETERS(kName, kICKind)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kName
MachineType::TaggedSigned()) // kICKind
- DECLARE_DESCRIPTOR(LoadGlobalNoFeedbackDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(LoadGlobalNoFeedbackDescriptor)
- static const Register NameRegister() {
- return LoadDescriptor::NameRegister();
- }
+ static constexpr inline Register ICKindRegister();
- static const Register ICKindRegister() {
- return LoadDescriptor::SlotRegister();
- }
+ static constexpr auto registers();
};
-class LoadNoFeedbackDescriptor : public LoadGlobalNoFeedbackDescriptor {
+class LoadNoFeedbackDescriptor
+ : public StaticCallInterfaceDescriptor<LoadNoFeedbackDescriptor> {
public:
DEFINE_PARAMETERS(kReceiver, kName, kICKind)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver
MachineType::AnyTagged(), // kName
MachineType::TaggedSigned()) // kICKind
- DECLARE_DESCRIPTOR(LoadNoFeedbackDescriptor, LoadGlobalNoFeedbackDescriptor)
-
- static const Register ReceiverRegister() {
- return LoadDescriptor::ReceiverRegister();
- }
+ DECLARE_DESCRIPTOR(LoadNoFeedbackDescriptor)
- static const Register NameRegister() {
- return LoadGlobalNoFeedbackDescriptor::NameRegister();
- }
+ static constexpr inline Register ICKindRegister();
- static const Register ICKindRegister() {
- return LoadGlobalNoFeedbackDescriptor::ICKindRegister();
- }
+ static constexpr auto registers();
};
-class LoadGlobalDescriptor : public CallInterfaceDescriptor {
+class LoadGlobalDescriptor
+ : public StaticCallInterfaceDescriptor<LoadGlobalDescriptor> {
public:
DEFINE_PARAMETERS(kName, kSlot)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kName
MachineType::TaggedSigned()) // kSlot
- DECLARE_DESCRIPTOR(LoadGlobalDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(LoadGlobalDescriptor)
- static const Register NameRegister() {
- return LoadDescriptor::NameRegister();
- }
-
- static const Register SlotRegister() {
- return LoadDescriptor::SlotRegister();
- }
+ static constexpr auto registers();
};
-class LoadGlobalBaselineDescriptor : public CallInterfaceDescriptor {
+class LoadGlobalBaselineDescriptor
+ : public StaticCallInterfaceDescriptor<LoadGlobalBaselineDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kName, kSlot)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kName
MachineType::TaggedSigned()) // kSlot
- DECLARE_DESCRIPTOR(LoadGlobalBaselineDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(LoadGlobalBaselineDescriptor)
+
+ static constexpr auto registers();
};
-class LookupBaselineDescriptor : public CallInterfaceDescriptor {
+class LookupBaselineDescriptor
+ : public StaticCallInterfaceDescriptor<LookupBaselineDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kName, kDepth, kSlot)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kName
MachineType::AnyTagged(), // kDepth
MachineType::AnyTagged()) // kSlot
- DECLARE_DESCRIPTOR(LookupBaselineDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(LookupBaselineDescriptor)
};
-class StoreDescriptor : public CallInterfaceDescriptor {
+class StoreDescriptor : public StaticCallInterfaceDescriptor<StoreDescriptor> {
public:
DEFINE_PARAMETERS(kReceiver, kName, kValue, kSlot)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver
MachineType::AnyTagged(), // kName
MachineType::AnyTagged(), // kValue
MachineType::TaggedSigned()) // kSlot
- DECLARE_DESCRIPTOR(StoreDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(StoreDescriptor)
- static const Register ReceiverRegister();
- static const Register NameRegister();
- static const Register ValueRegister();
- static const Register SlotRegister();
+ static constexpr inline Register ReceiverRegister();
+ static constexpr inline Register NameRegister();
+ static constexpr inline Register ValueRegister();
+ static constexpr inline Register SlotRegister();
-#if V8_TARGET_ARCH_IA32
- static const bool kPassLastArgsOnStack = true;
-#else
- static const bool kPassLastArgsOnStack = false;
-#endif
-
- // Pass value and slot through the stack.
- static const int kStackArgumentsCount = kPassLastArgsOnStack ? 2 : 0;
+ static constexpr auto registers();
};
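
The deleted kPassLastArgsOnStack/kStackArgumentsCount constants are subsumed by the registers() array: when registers().size() is smaller than kParameterCount, the remaining parameters are stack parameters, so ia32 can simply return a shorter array. A sketch of the arithmetic, with a hypothetical ia32-style register count:

constexpr int kParameterCount = 4;     // kReceiver, kName, kValue, kSlot
constexpr int kRegisterArraySize = 2;  // hypothetical ia32 registers().size()
constexpr int kStackArguments =
    kParameterCount > kRegisterArraySize
        ? kParameterCount - kRegisterArraySize
        : 0;
static_assert(kStackArguments == 2, "value and slot are passed on the stack");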
-class StoreBaselineDescriptor : public CallInterfaceDescriptor {
+class StoreBaselineDescriptor
+ : public StaticCallInterfaceDescriptor<StoreBaselineDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kReceiver, kName, kValue, kSlot)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver
MachineType::AnyTagged(), // kName
MachineType::AnyTagged(), // kValue
MachineType::TaggedSigned()) // kSlot
- DECLARE_DESCRIPTOR(StoreBaselineDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(StoreBaselineDescriptor)
-#if V8_TARGET_ARCH_IA32
- static const bool kPassLastArgsOnStack = true;
-#else
- static const bool kPassLastArgsOnStack = false;
-#endif
-
- // Pass value and slot through the stack.
- static const int kStackArgumentsCount = kPassLastArgsOnStack ? 2 : 0;
+ static constexpr auto registers();
};
-class StoreTransitionDescriptor : public StoreDescriptor {
+class StoreTransitionDescriptor
+ : public StaticCallInterfaceDescriptor<StoreTransitionDescriptor> {
public:
DEFINE_PARAMETERS(kReceiver, kName, kMap, kValue, kSlot, kVector)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver
@@ -828,17 +835,15 @@ class StoreTransitionDescriptor : public StoreDescriptor {
MachineType::AnyTagged(), // kValue
MachineType::TaggedSigned(), // kSlot
MachineType::AnyTagged()) // kVector
- DECLARE_DESCRIPTOR(StoreTransitionDescriptor, StoreDescriptor)
+ DECLARE_DESCRIPTOR(StoreTransitionDescriptor)
- static const Register MapRegister();
- static const Register SlotRegister();
- static const Register VectorRegister();
+ static constexpr inline Register MapRegister();
- // Pass value, slot and vector through the stack.
- static const int kStackArgumentsCount = kPassLastArgsOnStack ? 3 : 0;
+ static constexpr auto registers();
};
-class StoreWithVectorDescriptor : public StoreDescriptor {
+class StoreWithVectorDescriptor
+ : public StaticCallInterfaceDescriptor<StoreWithVectorDescriptor> {
public:
DEFINE_PARAMETERS(kReceiver, kName, kValue, kSlot, kVector)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver
@@ -846,72 +851,52 @@ class StoreWithVectorDescriptor : public StoreDescriptor {
MachineType::AnyTagged(), // kValue
MachineType::TaggedSigned(), // kSlot
MachineType::AnyTagged()) // kVector
- DECLARE_DESCRIPTOR(StoreWithVectorDescriptor, StoreDescriptor)
+ DECLARE_DESCRIPTOR(StoreWithVectorDescriptor)
- static const Register VectorRegister();
+ static constexpr inline Register VectorRegister();
- // Pass value, slot and vector through the stack.
- static const int kStackArgumentsCount = kPassLastArgsOnStack ? 3 : 0;
+ static constexpr auto registers();
};
-class StoreGlobalDescriptor : public CallInterfaceDescriptor {
+class StoreGlobalDescriptor
+ : public StaticCallInterfaceDescriptor<StoreGlobalDescriptor> {
public:
DEFINE_PARAMETERS(kName, kValue, kSlot)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kName
MachineType::AnyTagged(), // kValue
MachineType::TaggedSigned()) // kSlot
- DECLARE_DESCRIPTOR(StoreGlobalDescriptor, CallInterfaceDescriptor)
-
- static const bool kPassLastArgsOnStack =
- StoreDescriptor::kPassLastArgsOnStack;
- // Pass value and slot through the stack.
- static const int kStackArgumentsCount = kPassLastArgsOnStack ? 2 : 0;
-
- static const Register NameRegister() {
- return StoreDescriptor::NameRegister();
- }
-
- static const Register ValueRegister() {
- return StoreDescriptor::ValueRegister();
- }
+ DECLARE_DESCRIPTOR(StoreGlobalDescriptor)
- static const Register SlotRegister() {
- return StoreDescriptor::SlotRegister();
- }
+ static constexpr auto registers();
};
-class StoreGlobalBaselineDescriptor : public CallInterfaceDescriptor {
+class StoreGlobalBaselineDescriptor
+ : public StaticCallInterfaceDescriptor<StoreGlobalBaselineDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kName, kValue, kSlot)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kName
MachineType::AnyTagged(), // kValue
MachineType::TaggedSigned()) // kSlot
- DECLARE_DESCRIPTOR(StoreGlobalBaselineDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(StoreGlobalBaselineDescriptor)
- static const bool kPassLastArgsOnStack =
- StoreDescriptor::kPassLastArgsOnStack;
- // Pass value and slot through the stack.
- static const int kStackArgumentsCount = kPassLastArgsOnStack ? 2 : 0;
+ static constexpr auto registers();
};
-class StoreGlobalWithVectorDescriptor : public StoreGlobalDescriptor {
+class StoreGlobalWithVectorDescriptor
+ : public StaticCallInterfaceDescriptor<StoreGlobalWithVectorDescriptor> {
public:
DEFINE_PARAMETERS(kName, kValue, kSlot, kVector)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kName
MachineType::AnyTagged(), // kValue
MachineType::TaggedSigned(), // kSlot
MachineType::AnyTagged()) // kVector
- DECLARE_DESCRIPTOR(StoreGlobalWithVectorDescriptor, StoreGlobalDescriptor)
-
- static const Register VectorRegister() {
- return StoreWithVectorDescriptor::VectorRegister();
- }
+ DECLARE_DESCRIPTOR(StoreGlobalWithVectorDescriptor)
- // Pass value, slot and vector through the stack.
- static const int kStackArgumentsCount = kPassLastArgsOnStack ? 3 : 0;
+ static constexpr auto registers();
};
-class LoadWithVectorDescriptor : public LoadDescriptor {
+class LoadWithVectorDescriptor
+ : public StaticCallInterfaceDescriptor<LoadWithVectorDescriptor> {
public:
// TODO(v8:9497): Revert the Machine type for kSlot to the
// TaggedSigned once Torque can emit better call descriptors
@@ -920,24 +905,19 @@ class LoadWithVectorDescriptor : public LoadDescriptor {
MachineType::AnyTagged(), // kName
MachineType::AnyTagged(), // kSlot
MachineType::AnyTagged()) // kVector
- DECLARE_DESCRIPTOR(LoadWithVectorDescriptor, LoadDescriptor)
+ DECLARE_DESCRIPTOR(LoadWithVectorDescriptor)
- static const Register VectorRegister();
-
-#if V8_TARGET_ARCH_IA32
- static const bool kPassLastArgsOnStack = true;
-#else
- static const bool kPassLastArgsOnStack = false;
-#endif
+ static constexpr inline Register VectorRegister();
- // Pass vector through the stack.
- static const int kStackArgumentsCount = kPassLastArgsOnStack ? 1 : 0;
+ static constexpr auto registers();
};
// Like LoadWithVectorDescriptor, except we pass the receiver (the object which
// should be used as the receiver for accessor function calls) and the lookup
// start object separately.
-class LoadWithReceiverAndVectorDescriptor : public LoadWithVectorDescriptor {
+class LoadWithReceiverAndVectorDescriptor
+ : public StaticCallInterfaceDescriptor<
+ LoadWithReceiverAndVectorDescriptor> {
public:
// TODO(v8:9497): Revert the Machine type for kSlot to the
// TaggedSigned once Torque can emit better call descriptors
@@ -947,22 +927,15 @@ class LoadWithReceiverAndVectorDescriptor : public LoadWithVectorDescriptor {
MachineType::AnyTagged(), // kName
MachineType::AnyTagged(), // kSlot
MachineType::AnyTagged()) // kVector
- DECLARE_DESCRIPTOR(LoadWithReceiverAndVectorDescriptor,
- LoadWithVectorDescriptor)
+ DECLARE_DESCRIPTOR(LoadWithReceiverAndVectorDescriptor)
- static const Register LookupStartObjectRegister();
+ static constexpr inline Register LookupStartObjectRegister();
-#if V8_TARGET_ARCH_IA32
- static const bool kPassLastArgsOnStack = true;
-#else
- static const bool kPassLastArgsOnStack = false;
-#endif
-
- // Pass vector through the stack.
- static const int kStackArgumentsCount = kPassLastArgsOnStack ? 1 : 0;
+ static constexpr auto registers();
};
-class LoadWithReceiverBaselineDescriptor : public LoadBaselineDescriptor {
+class LoadWithReceiverBaselineDescriptor
+ : public StaticCallInterfaceDescriptor<LoadWithReceiverBaselineDescriptor> {
public:
// TODO(v8:9497): Revert the Machine type for kSlot to the
// TaggedSigned once Torque can emit better call descriptors
@@ -971,29 +944,27 @@ class LoadWithReceiverBaselineDescriptor : public LoadBaselineDescriptor {
MachineType::AnyTagged(), // kLookupStartObject
MachineType::AnyTagged(), // kName
MachineType::AnyTagged()) // kSlot
- DECLARE_DESCRIPTOR(LoadWithReceiverBaselineDescriptor, LoadBaselineDescriptor)
+ DECLARE_DESCRIPTOR(LoadWithReceiverBaselineDescriptor)
+
+ static constexpr auto registers();
};
-class LoadGlobalWithVectorDescriptor : public LoadGlobalDescriptor {
+class LoadGlobalWithVectorDescriptor
+ : public StaticCallInterfaceDescriptor<LoadGlobalWithVectorDescriptor> {
public:
DEFINE_PARAMETERS(kName, kSlot, kVector)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kName
MachineType::TaggedSigned(), // kSlot
MachineType::AnyTagged()) // kVector
- DECLARE_DESCRIPTOR(LoadGlobalWithVectorDescriptor, LoadGlobalDescriptor)
+ DECLARE_DESCRIPTOR(LoadGlobalWithVectorDescriptor)
-#if V8_TARGET_ARCH_IA32
- // On ia32, LoadWithVectorDescriptor passes vector on the stack and thus we
- // need to choose a new register here.
- static const Register VectorRegister() { return edx; }
-#else
- static const Register VectorRegister() {
- return LoadWithVectorDescriptor::VectorRegister();
- }
-#endif
+ static constexpr inline Register VectorRegister();
+
+ static constexpr auto registers();
};
-class DynamicCheckMapsDescriptor final : public CallInterfaceDescriptor {
+class DynamicCheckMapsDescriptor final
+ : public StaticCallInterfaceDescriptor<DynamicCheckMapsDescriptor> {
public:
DEFINE_PARAMETERS(kMap, kSlot, kHandler)
DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::Int32(), // return val
@@ -1001,20 +972,28 @@ class DynamicCheckMapsDescriptor final : public CallInterfaceDescriptor {
MachineType::IntPtr(), // kSlot
MachineType::TaggedSigned()) // kHandler
- DECLARE_DESCRIPTOR(DynamicCheckMapsDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(DynamicCheckMapsDescriptor)
+
+ static constexpr auto registers();
+ static constexpr bool kRestrictAllocatableRegisters = true;
};
-class FastNewObjectDescriptor : public CallInterfaceDescriptor {
+class FastNewObjectDescriptor
+ : public StaticCallInterfaceDescriptor<FastNewObjectDescriptor> {
public:
DEFINE_PARAMETERS(kTarget, kNewTarget)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget
MachineType::AnyTagged()) // kNewTarget
- DECLARE_DESCRIPTOR(FastNewObjectDescriptor, CallInterfaceDescriptor)
- static const Register TargetRegister();
- static const Register NewTargetRegister();
+ DECLARE_DESCRIPTOR(FastNewObjectDescriptor)
+
+ static constexpr inline Register TargetRegister();
+ static constexpr inline Register NewTargetRegister();
+
+ static constexpr auto registers();
};
-class RecordWriteDescriptor final : public CallInterfaceDescriptor {
+class RecordWriteDescriptor final
+ : public StaticCallInterfaceDescriptor<RecordWriteDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kObject, kSlot, kRememberedSet, kFPMode)
DEFINE_PARAMETER_TYPES(MachineType::TaggedPointer(), // kObject
@@ -1022,90 +1001,119 @@ class RecordWriteDescriptor final : public CallInterfaceDescriptor {
MachineType::TaggedSigned(), // kRememberedSet
MachineType::TaggedSigned()) // kFPMode
- DECLARE_DESCRIPTOR(RecordWriteDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(RecordWriteDescriptor)
+
+ static constexpr auto registers();
+ static constexpr bool kRestrictAllocatableRegisters = true;
};
-class EphemeronKeyBarrierDescriptor final : public CallInterfaceDescriptor {
+class EphemeronKeyBarrierDescriptor final
+ : public StaticCallInterfaceDescriptor<EphemeronKeyBarrierDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kObject, kSlotAddress, kFPMode)
DEFINE_PARAMETER_TYPES(MachineType::TaggedPointer(), // kObject
MachineType::Pointer(), // kSlotAddress
MachineType::TaggedSigned()) // kFPMode
- DECLARE_DESCRIPTOR(EphemeronKeyBarrierDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(EphemeronKeyBarrierDescriptor)
+
+ static constexpr auto registers();
+ static constexpr bool kRestrictAllocatableRegisters = true;
};
-class TypeConversionDescriptor final : public CallInterfaceDescriptor {
+class TypeConversionDescriptor final
+ : public StaticCallInterfaceDescriptor<TypeConversionDescriptor> {
public:
DEFINE_PARAMETERS(kArgument)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged())
- DECLARE_DESCRIPTOR(TypeConversionDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(TypeConversionDescriptor)
+
+ static constexpr inline Register ArgumentRegister();
- static const Register ArgumentRegister();
+ static constexpr auto registers();
};
-class TypeConversionNoContextDescriptor final : public CallInterfaceDescriptor {
+class TypeConversionNoContextDescriptor final
+ : public StaticCallInterfaceDescriptor<TypeConversionNoContextDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kArgument)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged())
- DECLARE_DESCRIPTOR(TypeConversionNoContextDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(TypeConversionNoContextDescriptor)
+
+ static constexpr auto registers();
};
-class TypeConversion_BaselineDescriptor final : public CallInterfaceDescriptor {
+class TypeConversion_BaselineDescriptor final
+ : public StaticCallInterfaceDescriptor<TypeConversion_BaselineDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kArgument, kSlot)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), MachineType::UintPtr())
- DECLARE_DESCRIPTOR(TypeConversion_BaselineDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(TypeConversion_BaselineDescriptor)
};
-class SingleParameterOnStackDescriptor final : public CallInterfaceDescriptor {
+class SingleParameterOnStackDescriptor final
+ : public StaticCallInterfaceDescriptor<SingleParameterOnStackDescriptor> {
public:
DEFINE_PARAMETERS(kArgument)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged())
- DECLARE_DESCRIPTOR(SingleParameterOnStackDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(SingleParameterOnStackDescriptor)
+
+ static constexpr auto registers();
};
class AsyncFunctionStackParameterDescriptor final
- : public CallInterfaceDescriptor {
+ : public StaticCallInterfaceDescriptor<
+ AsyncFunctionStackParameterDescriptor> {
public:
DEFINE_PARAMETERS(kPromise, kResult)
DEFINE_PARAMETER_TYPES(MachineType::TaggedPointer(), MachineType::AnyTagged())
- DECLARE_DESCRIPTOR(AsyncFunctionStackParameterDescriptor,
- CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(AsyncFunctionStackParameterDescriptor)
+
+ static constexpr auto registers();
};
class GetIteratorStackParameterDescriptor final
- : public CallInterfaceDescriptor {
+ : public StaticCallInterfaceDescriptor<
+ GetIteratorStackParameterDescriptor> {
public:
DEFINE_PARAMETERS(kReceiver, kCallSlot, kFeedback, kResult)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), MachineType::AnyTagged(),
MachineType::AnyTagged(), MachineType::AnyTagged())
- DECLARE_DESCRIPTOR(GetIteratorStackParameterDescriptor,
- CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(GetIteratorStackParameterDescriptor)
+
+ static constexpr auto registers();
};
-class GetPropertyDescriptor final : public CallInterfaceDescriptor {
+class GetPropertyDescriptor final
+ : public StaticCallInterfaceDescriptor<GetPropertyDescriptor> {
public:
DEFINE_PARAMETERS(kObject, kKey)
- DECLARE_DEFAULT_DESCRIPTOR(GetPropertyDescriptor, CallInterfaceDescriptor)
+ DECLARE_DEFAULT_DESCRIPTOR(GetPropertyDescriptor)
};
-class TypeofDescriptor : public CallInterfaceDescriptor {
+class TypeofDescriptor
+ : public StaticCallInterfaceDescriptor<TypeofDescriptor> {
public:
DEFINE_PARAMETERS(kObject)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged())
- DECLARE_DESCRIPTOR(TypeofDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(TypeofDescriptor)
+
+ static constexpr inline auto registers();
};
-class CallTrampolineDescriptor : public CallInterfaceDescriptor {
+class CallTrampolineDescriptor
+ : public StaticCallInterfaceDescriptor<CallTrampolineDescriptor> {
public:
DEFINE_PARAMETERS_VARARGS(kFunction, kActualArgumentsCount)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kFunction
MachineType::Int32()) // kActualArgumentsCount
- DECLARE_DESCRIPTOR(CallTrampolineDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(CallTrampolineDescriptor)
+
+ static constexpr inline auto registers();
};
-class CallVarargsDescriptor : public CallInterfaceDescriptor {
+class CallVarargsDescriptor
+ : public StaticCallInterfaceDescriptor<CallVarargsDescriptor> {
public:
DEFINE_PARAMETERS_VARARGS(kTarget, kActualArgumentsCount, kArgumentsLength,
kArgumentsList)
@@ -1113,123 +1121,143 @@ class CallVarargsDescriptor : public CallInterfaceDescriptor {
MachineType::Int32(), // kActualArgumentsCount
MachineType::Int32(), // kArgumentsLength
MachineType::AnyTagged()) // kArgumentsList
- DECLARE_DESCRIPTOR(CallVarargsDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(CallVarargsDescriptor)
+
+ static constexpr inline auto registers();
};
-class CallForwardVarargsDescriptor : public CallInterfaceDescriptor {
+class CallForwardVarargsDescriptor
+ : public StaticCallInterfaceDescriptor<CallForwardVarargsDescriptor> {
public:
DEFINE_PARAMETERS_VARARGS(kTarget, kActualArgumentsCount, kStartIndex)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget
MachineType::Int32(), // kActualArgumentsCount
MachineType::Int32()) // kStartIndex
- DECLARE_DESCRIPTOR(CallForwardVarargsDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(CallForwardVarargsDescriptor)
+
+ static constexpr inline auto registers();
};
-class CallFunctionTemplateDescriptor : public CallInterfaceDescriptor {
+class CallFunctionTemplateDescriptor
+ : public StaticCallInterfaceDescriptor<CallFunctionTemplateDescriptor> {
public:
DEFINE_PARAMETERS_VARARGS(kFunctionTemplateInfo, kArgumentsCount)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kFunctionTemplateInfo
MachineType::IntPtr()) // kArgumentsCount
- DECLARE_DESCRIPTOR(CallFunctionTemplateDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(CallFunctionTemplateDescriptor)
+
+ static constexpr inline auto registers();
};
-class CallWithSpreadDescriptor : public CallInterfaceDescriptor {
+class CallWithSpreadDescriptor
+ : public StaticCallInterfaceDescriptor<CallWithSpreadDescriptor> {
public:
DEFINE_PARAMETERS_VARARGS(kTarget, kArgumentsCount, kSpread)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget
MachineType::Int32(), // kArgumentsCount
MachineType::AnyTagged()) // kSpread
- DECLARE_DESCRIPTOR(CallWithSpreadDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(CallWithSpreadDescriptor)
+
+ static constexpr inline auto registers();
};
-class CallWithSpread_BaselineDescriptor : public CallInterfaceDescriptor {
+class CallWithSpread_BaselineDescriptor
+ : public StaticCallInterfaceDescriptor<CallWithSpread_BaselineDescriptor> {
public:
- DEFINE_PARAMETERS_VARARGS(kTarget, kArgumentsCount, kSpread, kSlot)
+ DEFINE_PARAMETERS_NO_CONTEXT_VARARGS(kTarget, kArgumentsCount, kSpread, kSlot)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget
MachineType::Int32(), // kArgumentsCount
MachineType::AnyTagged(), // kSpread
MachineType::UintPtr()) // kSlot
- DECLARE_DESCRIPTOR(CallWithSpread_BaselineDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(CallWithSpread_BaselineDescriptor)
};
-class CallWithSpread_WithFeedbackDescriptor : public CallInterfaceDescriptor {
+class CallWithSpread_WithFeedbackDescriptor
+ : public StaticCallInterfaceDescriptor<
+ CallWithSpread_WithFeedbackDescriptor> {
public:
DEFINE_PARAMETERS_VARARGS(kTarget, kArgumentsCount, kSpread, kSlot,
- kFeedbackVector)
+ kFeedbackVector, kReceiver)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget
MachineType::Int32(), // kArgumentsCount
MachineType::AnyTagged(), // kSpread
MachineType::UintPtr(), // kSlot
- MachineType::AnyTagged()) // kFeedbackVector
- DECLARE_DESCRIPTOR(CallWithSpread_WithFeedbackDescriptor,
- CallInterfaceDescriptor)
+ MachineType::AnyTagged(), // kFeedbackVector
+ MachineType::AnyTagged()) // kReceiver
+ DECLARE_DESCRIPTOR(CallWithSpread_WithFeedbackDescriptor)
};
-class CallWithArrayLikeDescriptor : public CallInterfaceDescriptor {
+class CallWithArrayLikeDescriptor
+ : public StaticCallInterfaceDescriptor<CallWithArrayLikeDescriptor> {
public:
DEFINE_PARAMETERS(kTarget, kArgumentsList)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget
MachineType::AnyTagged()) // kArgumentsList
- DECLARE_DESCRIPTOR(CallWithArrayLikeDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(CallWithArrayLikeDescriptor)
+
+ static constexpr inline auto registers();
};
class CallWithArrayLike_WithFeedbackDescriptor
- : public CallInterfaceDescriptor {
+ : public StaticCallInterfaceDescriptor<
+ CallWithArrayLike_WithFeedbackDescriptor> {
public:
- DEFINE_PARAMETERS(kTarget, kArgumentsList, kSlot, kFeedbackVector)
+ DEFINE_PARAMETERS(kTarget, kArgumentsList, kSlot, kFeedbackVector, kReceiver)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget
MachineType::AnyTagged(), // kArgumentsList
MachineType::UintPtr(), // kSlot
- MachineType::AnyTagged()) // kFeedbackVector
- DECLARE_DESCRIPTOR(CallWithArrayLike_WithFeedbackDescriptor,
- CallInterfaceDescriptor)
+ MachineType::AnyTagged(), // kFeedbackVector
+ MachineType::AnyTagged()) // kReceiver
+ DECLARE_DESCRIPTOR(CallWithArrayLike_WithFeedbackDescriptor)
};
-class ConstructVarargsDescriptor : public CallInterfaceDescriptor {
+class ConstructVarargsDescriptor
+ : public StaticCallInterfaceDescriptor<ConstructVarargsDescriptor> {
public:
DEFINE_JS_PARAMETERS(kArgumentsLength, kArgumentsList)
DEFINE_JS_PARAMETER_TYPES(MachineType::Int32(), // kArgumentsLength
MachineType::AnyTagged()) // kArgumentsList
- DECLARE_DESCRIPTOR(ConstructVarargsDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(ConstructVarargsDescriptor)
+
+ static constexpr inline auto registers();
};
-class ConstructForwardVarargsDescriptor : public CallInterfaceDescriptor {
+class ConstructForwardVarargsDescriptor
+ : public StaticCallInterfaceDescriptor<ConstructForwardVarargsDescriptor> {
public:
DEFINE_JS_PARAMETERS(kStartIndex)
DEFINE_JS_PARAMETER_TYPES(MachineType::Int32())
- DECLARE_DESCRIPTOR(ConstructForwardVarargsDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(ConstructForwardVarargsDescriptor)
+
+ static constexpr inline auto registers();
};
-class ConstructWithSpreadDescriptor : public CallInterfaceDescriptor {
+class ConstructWithSpreadDescriptor
+ : public StaticCallInterfaceDescriptor<ConstructWithSpreadDescriptor> {
public:
DEFINE_JS_PARAMETERS(kSpread)
DEFINE_JS_PARAMETER_TYPES(MachineType::AnyTagged())
- DECLARE_DESCRIPTOR(ConstructWithSpreadDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(ConstructWithSpreadDescriptor)
+
+ static constexpr inline auto registers();
};
-class ConstructWithSpread_BaselineDescriptor : public CallInterfaceDescriptor {
+class ConstructWithSpread_BaselineDescriptor
+ : public StaticCallInterfaceDescriptor<
+ ConstructWithSpread_BaselineDescriptor> {
public:
  // Note: kSlot comes before kSpread since, as an untagged value, it must be
  // passed in a register.
- DEFINE_JS_PARAMETERS(kSlot, kSpread)
+ DEFINE_JS_PARAMETERS_NO_CONTEXT(kSlot, kSpread)
DEFINE_JS_PARAMETER_TYPES(MachineType::UintPtr(), // kSlot
MachineType::AnyTagged()) // kSpread
- DECLARE_DESCRIPTOR(ConstructWithSpread_BaselineDescriptor,
- CallInterfaceDescriptor)
-
-#if V8_TARGET_ARCH_IA32
- static const bool kPassLastArgsOnStack = true;
-#else
- static const bool kPassLastArgsOnStack = false;
-#endif
-
- // Pass spread through the stack.
- static const int kStackArgumentsCount = kPassLastArgsOnStack ? 1 : 0;
+ DECLARE_DESCRIPTOR(ConstructWithSpread_BaselineDescriptor)
};
class ConstructWithSpread_WithFeedbackDescriptor
- : public CallInterfaceDescriptor {
+ : public StaticCallInterfaceDescriptor<
+ ConstructWithSpread_WithFeedbackDescriptor> {
public:
  // Note: kSlot comes before kSpread since, as an untagged value, it must be
  // passed in a register.
@@ -1237,21 +1265,24 @@ class ConstructWithSpread_WithFeedbackDescriptor
DEFINE_JS_PARAMETER_TYPES(MachineType::UintPtr(), // kSlot
MachineType::AnyTagged(), // kSpread
MachineType::AnyTagged()) // kFeedbackVector
- DECLARE_DESCRIPTOR(ConstructWithSpread_WithFeedbackDescriptor,
- CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(ConstructWithSpread_WithFeedbackDescriptor)
};
-class ConstructWithArrayLikeDescriptor : public CallInterfaceDescriptor {
+class ConstructWithArrayLikeDescriptor
+ : public StaticCallInterfaceDescriptor<ConstructWithArrayLikeDescriptor> {
public:
DEFINE_PARAMETERS(kTarget, kNewTarget, kArgumentsList)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget
MachineType::AnyTagged(), // kNewTarget
MachineType::AnyTagged()) // kArgumentsList
- DECLARE_DESCRIPTOR(ConstructWithArrayLikeDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(ConstructWithArrayLikeDescriptor)
+
+ static constexpr inline auto registers();
};
class ConstructWithArrayLike_WithFeedbackDescriptor
- : public CallInterfaceDescriptor {
+ : public StaticCallInterfaceDescriptor<
+ ConstructWithArrayLike_WithFeedbackDescriptor> {
public:
DEFINE_PARAMETERS(kTarget, kNewTarget, kArgumentsList, kSlot, kFeedbackVector)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget
@@ -1259,38 +1290,44 @@ class ConstructWithArrayLike_WithFeedbackDescriptor
MachineType::AnyTagged(), // kArgumentsList
MachineType::UintPtr(), // kSlot
MachineType::AnyTagged()) // kFeedbackVector
- DECLARE_DESCRIPTOR(ConstructWithArrayLike_WithFeedbackDescriptor,
- CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(ConstructWithArrayLike_WithFeedbackDescriptor)
};
// TODO(ishell): consider merging this with ArrayConstructorDescriptor
-class ConstructStubDescriptor : public CallInterfaceDescriptor {
+class ConstructStubDescriptor
+ : public StaticCallInterfaceDescriptor<ConstructStubDescriptor> {
public:
// TODO(jgruber): Remove the unused allocation site parameter.
DEFINE_JS_PARAMETERS(kAllocationSite)
DEFINE_JS_PARAMETER_TYPES(MachineType::AnyTagged())
// TODO(ishell): Use DECLARE_JS_COMPATIBLE_DESCRIPTOR if registers match
- DECLARE_DESCRIPTOR(ConstructStubDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(ConstructStubDescriptor)
+
+ static constexpr inline auto registers();
};
-class AbortDescriptor : public CallInterfaceDescriptor {
+class AbortDescriptor : public StaticCallInterfaceDescriptor<AbortDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kMessageOrMessageId)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged())
- DECLARE_DESCRIPTOR(AbortDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(AbortDescriptor)
+
+ static constexpr inline auto registers();
};
-class ArrayConstructorDescriptor : public CallInterfaceDescriptor {
+class ArrayConstructorDescriptor
+ : public StaticJSCallInterfaceDescriptor<ArrayConstructorDescriptor> {
public:
DEFINE_JS_PARAMETERS(kAllocationSite)
DEFINE_JS_PARAMETER_TYPES(MachineType::AnyTagged())
- DECLARE_JS_COMPATIBLE_DESCRIPTOR(ArrayConstructorDescriptor,
- CallInterfaceDescriptor, 1)
+ DECLARE_JS_COMPATIBLE_DESCRIPTOR(ArrayConstructorDescriptor)
};
-class ArrayNArgumentsConstructorDescriptor : public CallInterfaceDescriptor {
+class ArrayNArgumentsConstructorDescriptor
+ : public StaticCallInterfaceDescriptor<
+ ArrayNArgumentsConstructorDescriptor> {
public:
  // This descriptor declares only register arguments, while the respective
  // number of JS arguments stays on the expression stack.
@@ -1300,12 +1337,14 @@ class ArrayNArgumentsConstructorDescriptor : public CallInterfaceDescriptor {
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kFunction,
MachineType::AnyTagged(), // kAllocationSite
MachineType::Int32()) // kActualArgumentsCount
- DECLARE_DESCRIPTOR(ArrayNArgumentsConstructorDescriptor,
- CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(ArrayNArgumentsConstructorDescriptor)
+
+ static constexpr auto registers();
};
class ArrayNoArgumentConstructorDescriptor
- : public ArrayNArgumentsConstructorDescriptor {
+ : public StaticCallInterfaceDescriptor<
+ ArrayNoArgumentConstructorDescriptor> {
public:
  // This descriptor declares the same register arguments as the parent
// ArrayNArgumentsConstructorDescriptor and it declares indices for
@@ -1316,12 +1355,14 @@ class ArrayNoArgumentConstructorDescriptor
MachineType::AnyTagged(), // kAllocationSite
MachineType::Int32(), // kActualArgumentsCount
MachineType::AnyTagged()) // kFunctionParameter
- DECLARE_DESCRIPTOR(ArrayNoArgumentConstructorDescriptor,
- ArrayNArgumentsConstructorDescriptor)
+ DECLARE_DESCRIPTOR(ArrayNoArgumentConstructorDescriptor)
+
+ static constexpr auto registers();
};
class ArraySingleArgumentConstructorDescriptor
- : public ArrayNArgumentsConstructorDescriptor {
+ : public StaticCallInterfaceDescriptor<
+ ArraySingleArgumentConstructorDescriptor> {
public:
  // This descriptor declares the same register arguments as the parent
// ArrayNArgumentsConstructorDescriptor and it declares indices for
@@ -1334,44 +1375,56 @@ class ArraySingleArgumentConstructorDescriptor
// JS arguments on the stack
MachineType::AnyTagged(), // kArraySizeSmiParameter
MachineType::AnyTagged()) // kReceiverParameter
- DECLARE_DESCRIPTOR(ArraySingleArgumentConstructorDescriptor,
- ArrayNArgumentsConstructorDescriptor)
+ DECLARE_DESCRIPTOR(ArraySingleArgumentConstructorDescriptor)
+
+ static constexpr auto registers();
};
-class CompareDescriptor : public CallInterfaceDescriptor {
+class CompareDescriptor
+ : public StaticCallInterfaceDescriptor<CompareDescriptor> {
public:
DEFINE_PARAMETERS(kLeft, kRight)
- DECLARE_DESCRIPTOR(CompareDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(CompareDescriptor)
+
+ static constexpr inline auto registers();
};
-class BinaryOpDescriptor : public CallInterfaceDescriptor {
+class BinaryOpDescriptor
+ : public StaticCallInterfaceDescriptor<BinaryOpDescriptor> {
public:
DEFINE_PARAMETERS(kLeft, kRight)
- DECLARE_DESCRIPTOR(BinaryOpDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(BinaryOpDescriptor)
+
+ static constexpr inline auto registers();
};
-class BinaryOp_BaselineDescriptor : public CallInterfaceDescriptor {
+class BinaryOp_BaselineDescriptor
+ : public StaticCallInterfaceDescriptor<BinaryOp_BaselineDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kLeft, kRight, kSlot)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kLeft
MachineType::AnyTagged(), // kRight
MachineType::UintPtr()) // kSlot
- DECLARE_DESCRIPTOR(BinaryOp_BaselineDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(BinaryOp_BaselineDescriptor)
+
+ static constexpr inline auto registers();
};
// This descriptor is shared among String.p.charAt/charCodeAt/codePointAt
// as they all have the same interface.
-class StringAtDescriptor final : public CallInterfaceDescriptor {
+class StringAtDescriptor final
+ : public StaticCallInterfaceDescriptor<StringAtDescriptor> {
public:
DEFINE_PARAMETERS(kReceiver, kPosition)
// TODO(turbofan): Return untagged value here.
DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::TaggedSigned(), // result 1
MachineType::AnyTagged(), // kReceiver
MachineType::IntPtr()) // kPosition
- DECLARE_DESCRIPTOR(StringAtDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(StringAtDescriptor)
};
-class StringAtAsStringDescriptor final : public CallInterfaceDescriptor {
+class StringAtAsStringDescriptor final
+ : public StaticCallInterfaceDescriptor<StringAtAsStringDescriptor> {
public:
DEFINE_PARAMETERS(kReceiver, kPosition)
// TODO(turbofan): Return untagged value here.
@@ -1379,10 +1432,11 @@ class StringAtAsStringDescriptor final : public CallInterfaceDescriptor {
MachineType::TaggedPointer(), // result string
MachineType::AnyTagged(), // kReceiver
MachineType::IntPtr()) // kPosition
- DECLARE_DESCRIPTOR(StringAtAsStringDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(StringAtAsStringDescriptor)
};
-class StringSubstringDescriptor final : public CallInterfaceDescriptor {
+class StringSubstringDescriptor final
+ : public StaticCallInterfaceDescriptor<StringSubstringDescriptor> {
public:
DEFINE_PARAMETERS(kString, kFrom, kTo)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kString
@@ -1390,18 +1444,19 @@ class StringSubstringDescriptor final : public CallInterfaceDescriptor {
MachineType::IntPtr()) // kTo
// TODO(turbofan): Allow builtins to return untagged values.
- DECLARE_DESCRIPTOR(StringSubstringDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(StringSubstringDescriptor)
};
-class CppBuiltinAdaptorDescriptor : public CallInterfaceDescriptor {
+class CppBuiltinAdaptorDescriptor
+ : public StaticJSCallInterfaceDescriptor<CppBuiltinAdaptorDescriptor> {
public:
DEFINE_JS_PARAMETERS(kCFunction)
DEFINE_JS_PARAMETER_TYPES(MachineType::Pointer())
- DECLARE_JS_COMPATIBLE_DESCRIPTOR(CppBuiltinAdaptorDescriptor,
- CallInterfaceDescriptor, 1)
+ DECLARE_JS_COMPATIBLE_DESCRIPTOR(CppBuiltinAdaptorDescriptor)
};
-class CEntry1ArgvOnStackDescriptor : public CallInterfaceDescriptor {
+class CEntry1ArgvOnStackDescriptor
+ : public StaticCallInterfaceDescriptor<CEntry1ArgvOnStackDescriptor> {
public:
DEFINE_PARAMETERS(kArity, // register argument
kCFunction, // register argument
@@ -1415,10 +1470,13 @@ class CEntry1ArgvOnStackDescriptor : public CallInterfaceDescriptor {
MachineType::AnyTagged(), // kArgcSmi
MachineType::AnyTagged(), // kTargetCopy
MachineType::AnyTagged()) // kNewTargetCopy
- DECLARE_DESCRIPTOR(CEntry1ArgvOnStackDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(CEntry1ArgvOnStackDescriptor)
+
+ static constexpr auto registers();
};
-class ApiCallbackDescriptor : public CallInterfaceDescriptor {
+class ApiCallbackDescriptor
+ : public StaticCallInterfaceDescriptor<ApiCallbackDescriptor> {
public:
DEFINE_PARAMETERS_VARARGS(kApiFunctionAddress, kActualArgumentsCount,
kCallData, kHolder)
@@ -1428,44 +1486,56 @@ class ApiCallbackDescriptor : public CallInterfaceDescriptor {
MachineType::IntPtr(), // kActualArgumentsCount
MachineType::AnyTagged(), // kCallData
MachineType::AnyTagged()) // kHolder
- DECLARE_DESCRIPTOR(ApiCallbackDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(ApiCallbackDescriptor)
+
+ static constexpr inline auto registers();
};
-class ApiGetterDescriptor : public CallInterfaceDescriptor {
+class ApiGetterDescriptor
+ : public StaticCallInterfaceDescriptor<ApiGetterDescriptor> {
public:
DEFINE_PARAMETERS(kReceiver, kHolder, kCallback)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver
MachineType::AnyTagged(), // kHolder
MachineType::AnyTagged()) // kCallback
- DECLARE_DESCRIPTOR(ApiGetterDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(ApiGetterDescriptor)
- static const Register ReceiverRegister();
- static const Register HolderRegister();
- static const Register CallbackRegister();
+ static constexpr inline Register ReceiverRegister();
+ static constexpr inline Register HolderRegister();
+ static constexpr inline Register CallbackRegister();
+
+ static constexpr auto registers();
};
// TODO(turbofan): We should probably rename this to GrowFastElementsDescriptor.
-class GrowArrayElementsDescriptor : public CallInterfaceDescriptor {
+class GrowArrayElementsDescriptor
+ : public StaticCallInterfaceDescriptor<GrowArrayElementsDescriptor> {
public:
DEFINE_PARAMETERS(kObject, kKey)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kObject
MachineType::AnyTagged()) // kKey
- DECLARE_DESCRIPTOR(GrowArrayElementsDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(GrowArrayElementsDescriptor)
+
+ static constexpr inline Register ObjectRegister();
+ static constexpr inline Register KeyRegister();
- static const Register ObjectRegister();
- static const Register KeyRegister();
+ static constexpr auto registers();
};
class V8_EXPORT_PRIVATE TailCallOptimizedCodeSlotDescriptor
- : public CallInterfaceDescriptor {
+ : public StaticCallInterfaceDescriptor<
+ TailCallOptimizedCodeSlotDescriptor> {
public:
DEFINE_PARAMETERS(kOptimizedCodeEntry)
  DEFINE_PARAMETER_TYPES(MachineType::AnyTagged())  // kOptimizedCodeEntry
- DECLARE_DESCRIPTOR(TailCallOptimizedCodeSlotDescriptor,
- CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(TailCallOptimizedCodeSlotDescriptor)
+
+ static constexpr auto registers();
};
-class BaselineOutOfLinePrologueDescriptor : public CallInterfaceDescriptor {
+class BaselineOutOfLinePrologueDescriptor
+ : public StaticCallInterfaceDescriptor<
+ BaselineOutOfLinePrologueDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kCalleeContext, kClosure,
kJavaScriptCallArgCount, kStackFrameSize,
@@ -1477,32 +1547,31 @@ class BaselineOutOfLinePrologueDescriptor : public CallInterfaceDescriptor {
MachineType::Int32(), // kStackFrameSize
MachineType::AnyTagged(), // kJavaScriptCallNewTarget
MachineType::AnyTagged()) // kInterpreterBytecodeArray
- DECLARE_DESCRIPTOR(BaselineOutOfLinePrologueDescriptor,
- CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(BaselineOutOfLinePrologueDescriptor)
-#if V8_TARGET_ARCH_IA32
- static const bool kPassLastArgsOnStack = true;
-#else
- static const bool kPassLastArgsOnStack = false;
-#endif
+ static constexpr inline auto registers();
- // Pass bytecode array through the stack.
- static const int kStackArgumentsCount = kPassLastArgsOnStack ? 1 : 0;
+ // We pass the context manually, so we have one extra register.
+ static constexpr int kMaxRegisterParams =
+ StaticCallInterfaceDescriptor::kMaxRegisterParams + 1;
};
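
The kMaxRegisterParams override above is ordinary C++ member shadowing: the derived descriptor redeclares the base's constexpr limit in terms of the base value. A minimal standalone sketch of the pattern, with illustrative names rather than V8's:

```cpp
// Toy model of bumping a base class constexpr limit in a derived class.
struct Base {
  static constexpr int kMaxRegisterParams = 5;
};
struct Derived : Base {
  // Shadows Base::kMaxRegisterParams; lookups via Derived see 6.
  static constexpr int kMaxRegisterParams = Base::kMaxRegisterParams + 1;
};
static_assert(Derived::kMaxRegisterParams == 6, "one extra register param");
```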
-class BaselineLeaveFrameDescriptor : public CallInterfaceDescriptor {
+class BaselineLeaveFrameDescriptor
+ : public StaticCallInterfaceDescriptor<BaselineLeaveFrameDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kParamsSize, kWeight)
DEFINE_PARAMETER_TYPES(MachineType::Int32(), // kParamsSize
MachineType::Int32()) // kWeight
- DECLARE_DESCRIPTOR(BaselineLeaveFrameDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(BaselineLeaveFrameDescriptor)
+
+ static constexpr inline Register ParamsSizeRegister();
+ static constexpr inline Register WeightRegister();
- static const Register ParamsSizeRegister();
- static const Register WeightRegister();
+ static constexpr inline auto registers();
};
class V8_EXPORT_PRIVATE InterpreterDispatchDescriptor
- : public CallInterfaceDescriptor {
+ : public StaticCallInterfaceDescriptor<InterpreterDispatchDescriptor> {
public:
DEFINE_PARAMETERS(kAccumulator, kBytecodeOffset, kBytecodeArray,
kDispatchTable)
@@ -1510,21 +1579,27 @@ class V8_EXPORT_PRIVATE InterpreterDispatchDescriptor
MachineType::IntPtr(), // kBytecodeOffset
MachineType::AnyTagged(), // kBytecodeArray
MachineType::IntPtr()) // kDispatchTable
- DECLARE_DESCRIPTOR(InterpreterDispatchDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(InterpreterDispatchDescriptor)
+
+ static constexpr inline auto registers();
};
-class InterpreterPushArgsThenCallDescriptor : public CallInterfaceDescriptor {
+class InterpreterPushArgsThenCallDescriptor
+ : public StaticCallInterfaceDescriptor<
+ InterpreterPushArgsThenCallDescriptor> {
public:
DEFINE_PARAMETERS(kNumberOfArguments, kFirstArgument, kFunction)
DEFINE_PARAMETER_TYPES(MachineType::Int32(), // kNumberOfArguments
MachineType::Pointer(), // kFirstArgument
MachineType::AnyTagged()) // kFunction
- DECLARE_DESCRIPTOR(InterpreterPushArgsThenCallDescriptor,
- CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(InterpreterPushArgsThenCallDescriptor)
+
+ static constexpr inline auto registers();
};
class InterpreterPushArgsThenConstructDescriptor
- : public CallInterfaceDescriptor {
+ : public StaticCallInterfaceDescriptor<
+ InterpreterPushArgsThenConstructDescriptor> {
public:
DEFINE_PARAMETERS(kNumberOfArguments, kFirstArgument, kConstructor,
kNewTarget, kFeedbackElement)
@@ -1533,20 +1608,13 @@ class InterpreterPushArgsThenConstructDescriptor
MachineType::AnyTagged(), // kConstructor
MachineType::AnyTagged(), // kNewTarget
MachineType::AnyTagged()) // kFeedbackElement
- DECLARE_DESCRIPTOR(InterpreterPushArgsThenConstructDescriptor,
- CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(InterpreterPushArgsThenConstructDescriptor)
-#if V8_TARGET_ARCH_IA32
- static const bool kPassLastArgsOnStack = true;
-#else
- static const bool kPassLastArgsOnStack = false;
-#endif
-
- // Pass constructor, new target and feedback element through the stack.
- static const int kStackArgumentsCount = kPassLastArgsOnStack ? 3 : 0;
+ static constexpr inline auto registers();
};
-class InterpreterCEntry1Descriptor : public CallInterfaceDescriptor {
+class InterpreterCEntry1Descriptor
+ : public StaticCallInterfaceDescriptor<InterpreterCEntry1Descriptor> {
public:
DEFINE_RESULT_AND_PARAMETERS(1, kNumberOfArguments, kFirstArgument,
kFunctionEntry)
@@ -1554,10 +1622,13 @@ class InterpreterCEntry1Descriptor : public CallInterfaceDescriptor {
MachineType::Int32(), // kNumberOfArguments
MachineType::Pointer(), // kFirstArgument
MachineType::Pointer()) // kFunctionEntry
- DECLARE_DESCRIPTOR(InterpreterCEntry1Descriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(InterpreterCEntry1Descriptor)
+
+ static constexpr auto registers();
};
-class InterpreterCEntry2Descriptor : public CallInterfaceDescriptor {
+class InterpreterCEntry2Descriptor
+ : public StaticCallInterfaceDescriptor<InterpreterCEntry2Descriptor> {
public:
DEFINE_RESULT_AND_PARAMETERS(2, kNumberOfArguments, kFirstArgument,
kFunctionEntry)
@@ -1566,10 +1637,13 @@ class InterpreterCEntry2Descriptor : public CallInterfaceDescriptor {
MachineType::Int32(), // kNumberOfArguments
MachineType::Pointer(), // kFirstArgument
MachineType::Pointer()) // kFunctionEntry
- DECLARE_DESCRIPTOR(InterpreterCEntry2Descriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(InterpreterCEntry2Descriptor)
+
+ static constexpr auto registers();
};
-class ForInPrepareDescriptor : public CallInterfaceDescriptor {
+class ForInPrepareDescriptor
+ : public StaticCallInterfaceDescriptor<ForInPrepareDescriptor> {
public:
DEFINE_RESULT_AND_PARAMETERS(2, kEnumerator, kVectorIndex, kFeedbackVector)
DEFINE_RESULT_AND_PARAMETER_TYPES(
@@ -1578,120 +1652,133 @@ class ForInPrepareDescriptor : public CallInterfaceDescriptor {
MachineType::AnyTagged(), // kEnumerator
MachineType::TaggedSigned(), // kVectorIndex
MachineType::AnyTagged()) // kFeedbackVector
- DECLARE_DESCRIPTOR(ForInPrepareDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(ForInPrepareDescriptor)
};
-class ResumeGeneratorDescriptor final : public CallInterfaceDescriptor {
+class ResumeGeneratorDescriptor final
+ : public StaticCallInterfaceDescriptor<ResumeGeneratorDescriptor> {
public:
DEFINE_PARAMETERS(kValue, kGenerator)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kValue
MachineType::AnyTagged()) // kGenerator
- DECLARE_DESCRIPTOR(ResumeGeneratorDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(ResumeGeneratorDescriptor)
+
+ static constexpr inline auto registers();
};
-class ResumeGeneratorBaselineDescriptor final : public CallInterfaceDescriptor {
+class ResumeGeneratorBaselineDescriptor final
+ : public StaticCallInterfaceDescriptor<ResumeGeneratorBaselineDescriptor> {
public:
- DEFINE_PARAMETERS(kGeneratorObject, kRegisterCount)
+ DEFINE_PARAMETERS_NO_CONTEXT(kGeneratorObject, kRegisterCount)
DEFINE_RESULT_AND_PARAMETER_TYPES(
MachineType::TaggedSigned(), // return type
MachineType::AnyTagged(), // kGeneratorObject
MachineType::IntPtr(), // kRegisterCount
)
- DECLARE_DESCRIPTOR(ResumeGeneratorBaselineDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(ResumeGeneratorBaselineDescriptor)
};
class SuspendGeneratorBaselineDescriptor final
- : public CallInterfaceDescriptor {
+ : public StaticCallInterfaceDescriptor<SuspendGeneratorBaselineDescriptor> {
public:
- DEFINE_PARAMETERS(kGeneratorObject, kSuspendId, kBytecodeOffset,
- kRegisterCount)
+ DEFINE_PARAMETERS_NO_CONTEXT(kGeneratorObject, kSuspendId, kBytecodeOffset,
+ kRegisterCount)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kGeneratorObject
MachineType::IntPtr(), // kSuspendId
MachineType::IntPtr(), // kBytecodeOffset
MachineType::IntPtr(), // kRegisterCount
)
- DECLARE_DESCRIPTOR(SuspendGeneratorBaselineDescriptor,
- CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(SuspendGeneratorBaselineDescriptor)
};
-class FrameDropperTrampolineDescriptor final : public CallInterfaceDescriptor {
- public:
- DEFINE_PARAMETERS(kRestartFp)
- DEFINE_PARAMETER_TYPES(MachineType::Pointer())
- DECLARE_DESCRIPTOR(FrameDropperTrampolineDescriptor, CallInterfaceDescriptor)
-};
-
-class RunMicrotasksEntryDescriptor final : public CallInterfaceDescriptor {
+class RunMicrotasksEntryDescriptor final
+ : public StaticCallInterfaceDescriptor<RunMicrotasksEntryDescriptor> {
public:
DEFINE_PARAMETERS_ENTRY(kRootRegisterValue, kMicrotaskQueue)
DEFINE_PARAMETER_TYPES(MachineType::Pointer(), // kRootRegisterValue
MachineType::Pointer()) // kMicrotaskQueue
- DECLARE_DESCRIPTOR(RunMicrotasksEntryDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(RunMicrotasksEntryDescriptor)
+
+ static constexpr inline auto registers();
};
-class RunMicrotasksDescriptor final : public CallInterfaceDescriptor {
+class RunMicrotasksDescriptor final
+ : public StaticCallInterfaceDescriptor<RunMicrotasksDescriptor> {
public:
DEFINE_PARAMETERS(kMicrotaskQueue)
DEFINE_PARAMETER_TYPES(MachineType::Pointer())
- DECLARE_DESCRIPTOR(RunMicrotasksDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(RunMicrotasksDescriptor)
- static Register MicrotaskQueueRegister();
+ static constexpr inline Register MicrotaskQueueRegister();
};
-class WasmFloat32ToNumberDescriptor final : public CallInterfaceDescriptor {
+class WasmFloat32ToNumberDescriptor final
+ : public StaticCallInterfaceDescriptor<WasmFloat32ToNumberDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kValue)
DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::AnyTagged(), // result
MachineType::Float32()) // value
- DECLARE_DESCRIPTOR(WasmFloat32ToNumberDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(WasmFloat32ToNumberDescriptor)
+
+#if V8_TARGET_ARCH_IA32
+ // We need a custom descriptor on ia32 to avoid using xmm0.
+ static constexpr inline auto registers();
+#endif
};
-class WasmFloat64ToNumberDescriptor final : public CallInterfaceDescriptor {
+class WasmFloat64ToNumberDescriptor final
+ : public StaticCallInterfaceDescriptor<WasmFloat64ToNumberDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kValue)
DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::AnyTagged(), // result
MachineType::Float64()) // value
- DECLARE_DESCRIPTOR(WasmFloat64ToNumberDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(WasmFloat64ToNumberDescriptor)
+
+#if V8_TARGET_ARCH_IA32
+ // We need a custom descriptor on ia32 to avoid using xmm0.
+ static constexpr inline auto registers();
+#endif
};
class V8_EXPORT_PRIVATE I64ToBigIntDescriptor final
- : public CallInterfaceDescriptor {
+ : public StaticCallInterfaceDescriptor<I64ToBigIntDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kArgument)
DEFINE_PARAMETER_TYPES(MachineType::Int64()) // kArgument
- DECLARE_DESCRIPTOR(I64ToBigIntDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(I64ToBigIntDescriptor)
};
// 32-bit version of the I64ToBigIntDescriptor call interface descriptor
class V8_EXPORT_PRIVATE I32PairToBigIntDescriptor final
- : public CallInterfaceDescriptor {
+ : public StaticCallInterfaceDescriptor<I32PairToBigIntDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kLow, kHigh)
DEFINE_PARAMETER_TYPES(MachineType::Uint32(), // kLow
MachineType::Uint32()) // kHigh
- DECLARE_DESCRIPTOR(I32PairToBigIntDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(I32PairToBigIntDescriptor)
};
class V8_EXPORT_PRIVATE BigIntToI64Descriptor final
- : public CallInterfaceDescriptor {
+ : public StaticCallInterfaceDescriptor<BigIntToI64Descriptor> {
public:
DEFINE_PARAMETERS(kArgument)
DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::Int64(), // result 1
MachineType::AnyTagged()) // kArgument
- DECLARE_DESCRIPTOR(BigIntToI64Descriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(BigIntToI64Descriptor)
};
class V8_EXPORT_PRIVATE BigIntToI32PairDescriptor final
- : public CallInterfaceDescriptor {
+ : public StaticCallInterfaceDescriptor<BigIntToI32PairDescriptor> {
public:
DEFINE_RESULT_AND_PARAMETERS(2, kArgument)
DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::Uint32(), // result 1
MachineType::Uint32(), // result 2
MachineType::AnyTagged()) // kArgument
- DECLARE_DESCRIPTOR(BigIntToI32PairDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(BigIntToI32PairDescriptor)
};
-class WasmI32AtomicWait32Descriptor final : public CallInterfaceDescriptor {
+class WasmI32AtomicWait32Descriptor final
+ : public StaticCallInterfaceDescriptor<WasmI32AtomicWait32Descriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kExpectedValue, kTimeoutLow,
kTimeoutHigh)
@@ -1700,36 +1787,30 @@ class WasmI32AtomicWait32Descriptor final : public CallInterfaceDescriptor {
MachineType::Int32(), // kExpectedValue
MachineType::Uint32(), // kTimeoutLow
MachineType::Uint32()) // kTimeoutHigh
- DECLARE_DESCRIPTOR(WasmI32AtomicWait32Descriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(WasmI32AtomicWait32Descriptor)
};
-class WasmI64AtomicWait32Descriptor final : public CallInterfaceDescriptor {
+class WasmI64AtomicWait32Descriptor final
+ : public StaticCallInterfaceDescriptor<WasmI64AtomicWait32Descriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kExpectedValueLow, kExpectedValueHigh,
kTimeoutLow, kTimeoutHigh)
- DEFINE_RESULT_AND_PARAMETER_TYPES_WITH_FLAG(
- CallInterfaceDescriptorData::kNoStackScan, // allow untagged stack params
- MachineType::Uint32(), // result 1
- MachineType::Uint32(), // kAddress
- MachineType::Uint32(), // kExpectedValueLow
- MachineType::Uint32(), // kExpectedValueHigh
- MachineType::Uint32(), // kTimeoutLow
- MachineType::Uint32()) // kTimeoutHigh
+ static constexpr bool kNoStackScan = true;
-#if V8_TARGET_ARCH_IA32
- static constexpr bool kPassLastArgOnStack = true;
-#else
- static constexpr bool kPassLastArgOnStack = false;
-#endif
-
- // Pass the last parameter through the stack.
- static constexpr int kStackArgumentsCount = kPassLastArgOnStack ? 1 : 0;
+ DEFINE_RESULT_AND_PARAMETER_TYPES(
+ MachineType::Uint32(), // result 1
+ MachineType::Uint32(), // kAddress
+ MachineType::Uint32(), // kExpectedValueLow
+ MachineType::Uint32(), // kExpectedValueHigh
+ MachineType::Uint32(), // kTimeoutLow
+ MachineType::Uint32()) // kTimeoutHigh
- DECLARE_DESCRIPTOR(WasmI64AtomicWait32Descriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(WasmI64AtomicWait32Descriptor)
};
-class CloneObjectWithVectorDescriptor final : public CallInterfaceDescriptor {
+class CloneObjectWithVectorDescriptor final
+ : public StaticCallInterfaceDescriptor<CloneObjectWithVectorDescriptor> {
public:
DEFINE_PARAMETERS(kSource, kFlags, kSlot, kVector)
DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::TaggedPointer(), // result 1
@@ -1737,108 +1818,142 @@ class CloneObjectWithVectorDescriptor final : public CallInterfaceDescriptor {
MachineType::TaggedSigned(), // kFlags
MachineType::TaggedSigned(), // kSlot
MachineType::AnyTagged()) // kVector
- DECLARE_DESCRIPTOR(CloneObjectWithVectorDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(CloneObjectWithVectorDescriptor)
};
-class CloneObjectBaselineDescriptor final : public CallInterfaceDescriptor {
+class CloneObjectBaselineDescriptor final
+ : public StaticCallInterfaceDescriptor<CloneObjectBaselineDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kSource, kFlags, kSlot)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kSource
MachineType::TaggedSigned(), // kFlags
MachineType::TaggedSigned()) // kSlot
- DECLARE_DESCRIPTOR(CloneObjectBaselineDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(CloneObjectBaselineDescriptor)
};
-class BinaryOp_WithFeedbackDescriptor : public CallInterfaceDescriptor {
+class BinaryOp_WithFeedbackDescriptor
+ : public StaticCallInterfaceDescriptor<BinaryOp_WithFeedbackDescriptor> {
public:
DEFINE_PARAMETERS(kLeft, kRight, kSlot, kFeedbackVector)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kLeft
MachineType::AnyTagged(), // kRight
MachineType::UintPtr(), // kSlot
MachineType::AnyTagged()) // kFeedbackVector
- DECLARE_DESCRIPTOR(BinaryOp_WithFeedbackDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(BinaryOp_WithFeedbackDescriptor)
+};
+
+class CallTrampoline_Baseline_CompactDescriptor
+ : public StaticCallInterfaceDescriptor<
+ CallTrampoline_Baseline_CompactDescriptor> {
+ public:
+ using ArgumentCountField = base::BitField<uint32_t, 0, 8>;
+ using SlotField = base::BitField<uintptr_t, 8, 24>;
+
+ static bool EncodeBitField(uint32_t argc, uintptr_t slot, uint32_t* out) {
+ if (ArgumentCountField::is_valid(argc) && SlotField::is_valid(slot)) {
+ *out = ArgumentCountField::encode(argc) | SlotField::encode(slot);
+ return true;
+ }
+ return false;
+ }
+
+ DEFINE_PARAMETERS_NO_CONTEXT_VARARGS(kFunction, kBitField)
+ DEFINE_PARAMETER_TYPES(
+ MachineType::AnyTagged(), // kFunction
+ MachineType::Uint32()) // kBitField = ArgumentCountField | SlotField
+ DECLARE_DESCRIPTOR(CallTrampoline_Baseline_CompactDescriptor)
};
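
For illustration, the round-trip that ArgumentCountField and SlotField perform can be reproduced with plain shifts and masks. This standalone sketch assumes the same layout as above (an 8-bit count at bit 0, a 24-bit slot at bit 8) and stands in for v8::base::BitField:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  uint32_t argc = 3;       // must fit in ArgumentCountField's 8 bits
  uint32_t slot = 0x1234;  // must fit in SlotField's 24 bits
  // What EncodeBitField computes when both fields are in range.
  uint32_t bit_field = (argc & 0xFFu) | (slot << 8);
  assert((bit_field & 0xFFu) == argc);  // ArgumentCountField::decode
  assert((bit_field >> 8) == slot);     // SlotField::decode
  return 0;
}
```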
-class CallTrampoline_BaselineDescriptor : public CallInterfaceDescriptor {
+class CallTrampoline_BaselineDescriptor
+ : public StaticCallInterfaceDescriptor<CallTrampoline_BaselineDescriptor> {
public:
- DEFINE_PARAMETERS_VARARGS(kFunction, kActualArgumentsCount, kSlot)
+ DEFINE_PARAMETERS_NO_CONTEXT_VARARGS(kFunction, kActualArgumentsCount, kSlot)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kFunction
MachineType::Int32(), // kActualArgumentsCount
MachineType::UintPtr()) // kSlot
- DECLARE_DESCRIPTOR(CallTrampoline_BaselineDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(CallTrampoline_BaselineDescriptor)
};
-class CallTrampoline_WithFeedbackDescriptor : public CallInterfaceDescriptor {
+class CallTrampoline_WithFeedbackDescriptor
+ : public StaticCallInterfaceDescriptor<
+ CallTrampoline_WithFeedbackDescriptor> {
public:
DEFINE_PARAMETERS_VARARGS(kFunction, kActualArgumentsCount, kSlot,
- kFeedbackVector)
+ kFeedbackVector, kReceiver)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kFunction
MachineType::Int32(), // kActualArgumentsCount
MachineType::UintPtr(), // kSlot
- MachineType::AnyTagged()) // kFeedbackVector
- DECLARE_DESCRIPTOR(CallTrampoline_WithFeedbackDescriptor,
- CallInterfaceDescriptor)
+ MachineType::AnyTagged(), // kFeedbackVector
+ MachineType::AnyTagged()) // kReceiver
+ DECLARE_DESCRIPTOR(CallTrampoline_WithFeedbackDescriptor)
};
-class Compare_WithFeedbackDescriptor : public CallInterfaceDescriptor {
+class Compare_WithFeedbackDescriptor
+ : public StaticCallInterfaceDescriptor<Compare_WithFeedbackDescriptor> {
public:
DEFINE_PARAMETERS(kLeft, kRight, kSlot, kFeedbackVector)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kLeft
MachineType::AnyTagged(), // kRight
MachineType::UintPtr(), // kSlot
MachineType::AnyTagged()) // kFeedbackVector
- DECLARE_DESCRIPTOR(Compare_WithFeedbackDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(Compare_WithFeedbackDescriptor)
};
-class Compare_BaselineDescriptor : public CallInterfaceDescriptor {
+class Compare_BaselineDescriptor
+ : public StaticCallInterfaceDescriptor<Compare_BaselineDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kLeft, kRight, kSlot)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kLeft
MachineType::AnyTagged(), // kRight
MachineType::UintPtr()) // kSlot
- DECLARE_DESCRIPTOR(Compare_BaselineDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(Compare_BaselineDescriptor)
+
+ static constexpr inline auto registers();
};
-class Construct_BaselineDescriptor : public CallInterfaceDescriptor {
+class Construct_BaselineDescriptor
+ : public StaticJSCallInterfaceDescriptor<Construct_BaselineDescriptor> {
public:
DEFINE_JS_PARAMETERS_NO_CONTEXT(kSlot)
DEFINE_JS_PARAMETER_TYPES(MachineType::UintPtr()) // kSlot
- DECLARE_JS_COMPATIBLE_DESCRIPTOR(Construct_BaselineDescriptor,
- CallInterfaceDescriptor, 1)
+ DECLARE_JS_COMPATIBLE_DESCRIPTOR(Construct_BaselineDescriptor)
};
-class Construct_WithFeedbackDescriptor : public CallInterfaceDescriptor {
+class Construct_WithFeedbackDescriptor
+ : public StaticJSCallInterfaceDescriptor<Construct_WithFeedbackDescriptor> {
public:
// kSlot is passed in a register, kFeedbackVector on the stack.
DEFINE_JS_PARAMETERS(kSlot, kFeedbackVector)
DEFINE_JS_PARAMETER_TYPES(MachineType::UintPtr(), // kSlot
MachineType::AnyTagged()) // kFeedbackVector
- DECLARE_JS_COMPATIBLE_DESCRIPTOR(Construct_WithFeedbackDescriptor,
- CallInterfaceDescriptor, 1)
+ DECLARE_JS_COMPATIBLE_DESCRIPTOR(Construct_WithFeedbackDescriptor)
};
-class UnaryOp_WithFeedbackDescriptor : public CallInterfaceDescriptor {
+class UnaryOp_WithFeedbackDescriptor
+ : public StaticCallInterfaceDescriptor<UnaryOp_WithFeedbackDescriptor> {
public:
DEFINE_PARAMETERS(kValue, kSlot, kFeedbackVector)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kValue
MachineType::UintPtr(), // kSlot
MachineType::AnyTagged()) // kFeedbackVector
- DECLARE_DESCRIPTOR(UnaryOp_WithFeedbackDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(UnaryOp_WithFeedbackDescriptor)
};
-class UnaryOp_BaselineDescriptor : public CallInterfaceDescriptor {
+class UnaryOp_BaselineDescriptor
+ : public StaticCallInterfaceDescriptor<UnaryOp_BaselineDescriptor> {
public:
- DEFINE_PARAMETERS(kValue, kSlot)
+ DEFINE_PARAMETERS_NO_CONTEXT(kValue, kSlot)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kValue
MachineType::UintPtr()) // kSlot
- DECLARE_DESCRIPTOR(UnaryOp_BaselineDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(UnaryOp_BaselineDescriptor)
};
-#define DEFINE_TFS_BUILTIN_DESCRIPTOR(Name, ...) \
- class Name##Descriptor : public CallInterfaceDescriptor { \
- public: \
- DEFINE_PARAMETERS(__VA_ARGS__) \
- DECLARE_DEFAULT_DESCRIPTOR(Name##Descriptor, CallInterfaceDescriptor) \
+#define DEFINE_TFS_BUILTIN_DESCRIPTOR(Name, ...) \
+ class Name##Descriptor \
+ : public StaticCallInterfaceDescriptor<Name##Descriptor> { \
+ public: \
+ DEFINE_PARAMETERS(__VA_ARGS__) \
+ DECLARE_DEFAULT_DESCRIPTOR(Name##Descriptor) \
};
BUILTIN_LIST_TFS(DEFINE_TFS_BUILTIN_DESCRIPTOR)
#undef DEFINE_TFS_BUILTIN_DESCRIPTOR
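
DEFINE_TFS_BUILTIN_DESCRIPTOR is an X-macro: BUILTIN_LIST_TFS applies it once per TFS builtin, stamping out one descriptor class each. A stripped-down, self-contained sketch of the same pattern (the list contents here are made up for the demo):

```cpp
#include <cstdio>

// The list macro applies V to each entry; V receives a name plus parameters.
#define DEMO_LIST(V)    \
  V(Add, kLeft, kRight) \
  V(Negate, kValue)

// Stamp out one tiny struct per list entry.
#define DEFINE_DEMO(Name, ...)                               \
  struct Name##Descriptor {                                  \
    static const char* name() { return #Name "Descriptor"; } \
  };
DEMO_LIST(DEFINE_DEMO)
#undef DEFINE_DEMO

int main() {
  std::printf("%s\n", AddDescriptor::name());     // prints AddDescriptor
  std::printf("%s\n", NegateDescriptor::name());  // prints NegateDescriptor
  return 0;
}
```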
@@ -1852,11 +1967,12 @@ BUILTIN_LIST_TFS(DEFINE_TFS_BUILTIN_DESCRIPTOR)
#undef DECLARE_DESCRIPTOR_WITH_BASE
#undef DECLARE_DESCRIPTOR
#undef DECLARE_JS_COMPATIBLE_DESCRIPTOR
-#undef DEFINE_FLAGS_AND_RESULT_AND_PARAMETERS
#undef DEFINE_RESULT_AND_PARAMETERS
+#undef DEFINE_PARAMETERS_ENTRY
#undef DEFINE_PARAMETERS
#undef DEFINE_PARAMETERS_VARARGS
#undef DEFINE_PARAMETERS_NO_CONTEXT
+#undef DEFINE_RESULT_AND_PARAMETERS_NO_CONTEXT
#undef DEFINE_RESULT_AND_PARAMETER_TYPES
#undef DEFINE_PARAMETER_TYPES
#undef DEFINE_JS_PARAMETERS
diff --git a/deps/v8/src/codegen/machine-type.cc b/deps/v8/src/codegen/machine-type.cc
index 86fc480ea5..1972c41b24 100644
--- a/deps/v8/src/codegen/machine-type.cc
+++ b/deps/v8/src/codegen/machine-type.cc
@@ -55,6 +55,8 @@ const char* MachineReprToString(MachineRepresentation rep) {
return "kRepCompressedPointer";
case MachineRepresentation::kCompressed:
return "kRepCompressed";
+ case MachineRepresentation::kMapWord:
+ return "kRepMapWord";
}
UNREACHABLE();
}
diff --git a/deps/v8/src/codegen/machine-type.h b/deps/v8/src/codegen/machine-type.h
index ac21d3c3e6..7a00608459 100644
--- a/deps/v8/src/codegen/machine-type.h
+++ b/deps/v8/src/codegen/machine-type.h
@@ -22,6 +22,19 @@ enum class MachineRepresentation : uint8_t {
kWord16,
kWord32,
kWord64,
+ // (uncompressed) MapWord
+ // kMapWord is the representation of a map word, i.e. a map in the header
+ // of a HeapObject.
+ // If V8_MAP_PACKING is disabled, a map word is just the map itself. Hence
+ // kMapWord is equivalent to kTaggedPointer -- in fact it will be
+ // translated to kTaggedPointer during memory lowering.
+ // If V8_MAP_PACKING is enabled, a map word is a Smi-like encoding of a map
+ // and some metadata. Memory lowering of kMapWord loads/stores
+ // produces low-level kTagged loads/stores plus the necessary
+ // decode/encode operations.
+ // In either case, the kMapWord representation is not used after memory
+ // lowering.
+ kMapWord,
kTaggedSigned, // (uncompressed) Smi
kTaggedPointer, // (uncompressed) HeapObject
kTagged, // (uncompressed) Object (Smi or HeapObject)
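
The following toy sketch illustrates the idea described in the kMapWord comment; the XOR mask and helper names are assumptions for the demo, not V8's actual V8_MAP_PACKING encoding:

```cpp
#include <cstdint>

// With packing disabled a map word is the map pointer itself; with packing
// enabled it is an encoded value that must be decoded before use.
constexpr std::uintptr_t kDemoPackMask = 0xF00Du;  // assumed mask, demo only

constexpr std::uintptr_t PackMapWord(std::uintptr_t map) {
  return map ^ kDemoPackMask;
}
constexpr std::uintptr_t UnpackMapWord(std::uintptr_t word) {
  return word ^ kDemoPackMask;
}

static_assert(UnpackMapWord(PackMapWord(0x4000u)) == 0x4000u,
              "pack/unpack must round-trip");
```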
@@ -102,6 +115,10 @@ class MachineType {
return representation() == MachineRepresentation::kNone;
}
+ constexpr bool IsMapWord() const {
+ return representation() == MachineRepresentation::kMapWord;
+ }
+
constexpr bool IsSigned() const {
return semantic() == MachineSemantic::kInt32 ||
semantic() == MachineSemantic::kInt64;
@@ -187,6 +204,9 @@ class MachineType {
return MachineType(MachineRepresentation::kTaggedPointer,
MachineSemantic::kAny);
}
+ constexpr static MachineType MapInHeader() {
+ return MachineType(MachineRepresentation::kMapWord, MachineSemantic::kAny);
+ }
constexpr static MachineType TaggedSigned() {
return MachineType(MachineRepresentation::kTaggedSigned,
MachineSemantic::kInt32);
@@ -283,7 +303,8 @@ inline bool IsFloatingPoint(MachineRepresentation rep) {
inline bool CanBeTaggedPointer(MachineRepresentation rep) {
return rep == MachineRepresentation::kTagged ||
- rep == MachineRepresentation::kTaggedPointer;
+ rep == MachineRepresentation::kTaggedPointer ||
+ rep == MachineRepresentation::kMapWord;
}
inline bool CanBeTaggedSigned(MachineRepresentation rep) {
@@ -328,16 +349,12 @@ V8_EXPORT_PRIVATE inline constexpr int ElementSizeLog2Of(
case MachineRepresentation::kTaggedSigned:
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTagged:
+ case MachineRepresentation::kMapWord:
case MachineRepresentation::kCompressedPointer:
case MachineRepresentation::kCompressed:
return kTaggedSizeLog2;
default:
-#if V8_HAS_CXX14_CONSTEXPR
UNREACHABLE();
-#else
- // Return something for older compilers.
- return -1;
-#endif
}
}
diff --git a/deps/v8/src/codegen/macro-assembler.h b/deps/v8/src/codegen/macro-assembler.h
index ce3ccbf332..484ec9e4b2 100644
--- a/deps/v8/src/codegen/macro-assembler.h
+++ b/deps/v8/src/codegen/macro-assembler.h
@@ -10,7 +10,7 @@
#include "src/heap/heap.h"
// Helper types to make boolean flags easier to read at call sites.
-enum InvokeFlag { CALL_FUNCTION, JUMP_FUNCTION };
+enum class InvokeType { kCall, kJump };
// Flags used for the AllocateInNewSpace functions.
enum AllocationFlags {
@@ -28,6 +28,10 @@ enum AllocationFlags {
PRETENURE = 1 << 3,
};
+enum class RememberedSetAction { kOmit, kEmit };
+
+enum class SmiCheck { kOmit, kInline };
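
A small self-contained example of why these scoped enums read better at call sites than the old bare constants; the function signatures are illustrative, not V8's:

```cpp
enum class InvokeType { kCall, kJump };
enum class SmiCheck { kOmit, kInline };

// Illustrative helpers, not V8's actual macro-assembler API.
void InvokeFunction(InvokeType type) { (void)type; }
void RecordWriteField(SmiCheck check) { (void)check; }

int main() {
  // Old style passed bare, unscoped constants like CALL_FUNCTION; the
  // scoped versions name both the flag and its meaning at the call site.
  InvokeFunction(InvokeType::kCall);
  RecordWriteField(SmiCheck::kInline);
  return 0;
}
```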
+
// This is the only place allowed to include the platform-specific headers.
#define INCLUDED_FROM_MACRO_ASSEMBLER_H
#if V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/codegen/mips/assembler-mips.cc b/deps/v8/src/codegen/mips/assembler-mips.cc
index 2ef08ae87c..c254860b14 100644
--- a/deps/v8/src/codegen/mips/assembler-mips.cc
+++ b/deps/v8/src/codegen/mips/assembler-mips.cc
@@ -261,29 +261,27 @@ static const int kNegOffset = 0x00008000;
// operations as post-increment of sp.
const Instr kPopInstruction = ADDIU | (sp.code() << kRsShift) |
(sp.code() << kRtShift) |
- (kPointerSize & kImm16Mask); // NOLINT
+ (kPointerSize & kImm16Mask);
// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
const Instr kPushInstruction = ADDIU | (sp.code() << kRsShift) |
(sp.code() << kRtShift) |
- (-kPointerSize & kImm16Mask); // NOLINT
+ (-kPointerSize & kImm16Mask);
// sw(r, MemOperand(sp, 0))
-const Instr kPushRegPattern =
- SW | (sp.code() << kRsShift) | (0 & kImm16Mask); // NOLINT
+const Instr kPushRegPattern = SW | (sp.code() << kRsShift) | (0 & kImm16Mask);
// lw(r, MemOperand(sp, 0))
-const Instr kPopRegPattern =
- LW | (sp.code() << kRsShift) | (0 & kImm16Mask); // NOLINT
+const Instr kPopRegPattern = LW | (sp.code() << kRsShift) | (0 & kImm16Mask);
const Instr kLwRegFpOffsetPattern =
- LW | (fp.code() << kRsShift) | (0 & kImm16Mask); // NOLINT
+ LW | (fp.code() << kRsShift) | (0 & kImm16Mask);
const Instr kSwRegFpOffsetPattern =
- SW | (fp.code() << kRsShift) | (0 & kImm16Mask); // NOLINT
+ SW | (fp.code() << kRsShift) | (0 & kImm16Mask);
const Instr kLwRegFpNegOffsetPattern =
- LW | (fp.code() << kRsShift) | (kNegOffset & kImm16Mask); // NOLINT
+ LW | (fp.code() << kRsShift) | (kNegOffset & kImm16Mask);
const Instr kSwRegFpNegOffsetPattern =
- SW | (fp.code() << kRsShift) | (kNegOffset & kImm16Mask); // NOLINT
+ SW | (fp.code() << kRsShift) | (kNegOffset & kImm16Mask);
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
const Instr kLwSwInstrTypeMask = 0xFFE00000;
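
As a sanity check on the patterns above, the push instruction can be reassembled from the standard MIPS I-type layout (opcode[31:26], rs[25:21], rt[20:16], imm[15:0]). The shift values and register number below are assumptions of this standalone demo that mirror kRsShift, kRtShift, and $sp:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  constexpr uint32_t ADDIU = 9u << 26;  // ADDIU opcode is 0b001001
  constexpr int kRsShift = 21;          // rs field at bits [25:21]
  constexpr int kRtShift = 16;          // rt field at bits [20:16]
  constexpr uint32_t kImm16Mask = 0xFFFFu;
  constexpr uint32_t sp = 29;           // $sp is register 29
  constexpr int kPointerSize = 4;
  // addiu sp, sp, -4: the pre-decrement half of a Push(r) sequence.
  const uint32_t push = ADDIU | (sp << kRsShift) | (sp << kRtShift) |
                        (static_cast<uint32_t>(-kPointerSize) & kImm16Mask);
  assert((push & kImm16Mask) == 0xFFFCu);  // -4 encoded as 16-bit immediate
  assert((push >> 26) == 9u);              // opcode field still reads ADDIU
  return 0;
}
```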
diff --git a/deps/v8/src/codegen/mips/assembler-mips.h b/deps/v8/src/codegen/mips/assembler-mips.h
index ccdea03a79..47bdf26d55 100644
--- a/deps/v8/src/codegen/mips/assembler-mips.h
+++ b/deps/v8/src/codegen/mips/assembler-mips.h
@@ -1907,7 +1907,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
class EnsureSpace {
public:
- explicit inline EnsureSpace(Assembler* assembler);
+ explicit V8_INLINE EnsureSpace(Assembler* assembler);
};
class V8_EXPORT_PRIVATE V8_NODISCARD UseScratchRegisterScope {
diff --git a/deps/v8/src/codegen/mips/interface-descriptors-mips-inl.h b/deps/v8/src/codegen/mips/interface-descriptors-mips-inl.h
new file mode 100644
index 0000000000..edea1b3844
--- /dev/null
+++ b/deps/v8/src/codegen/mips/interface-descriptors-mips-inl.h
@@ -0,0 +1,258 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_MIPS_INTERFACE_DESCRIPTORS_MIPS_INL_H_
+#define V8_CODEGEN_MIPS_INTERFACE_DESCRIPTORS_MIPS_INL_H_
+
+#if V8_TARGET_ARCH_MIPS
+
+#include "src/codegen/interface-descriptors.h"
+#include "src/execution/frames.h"
+
+namespace v8 {
+namespace internal {
+
+constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() {
+ auto registers = RegisterArray(a0, a1, a2, a3, t0);
+ STATIC_ASSERT(registers.size() == kMaxBuiltinRegisterParams);
+ return registers;
+}
+
+// static
+constexpr auto RecordWriteDescriptor::registers() {
+ return RegisterArray(a0, a1, a2, a3, kReturnRegister0);
+}
+
+// static
+constexpr auto DynamicCheckMapsDescriptor::registers() {
+ return RegisterArray(kReturnRegister0, a0, a1, a2, cp);
+}
+
+// static
+constexpr auto EphemeronKeyBarrierDescriptor::registers() {
+ return RegisterArray(a0, a1, a2, a3, kReturnRegister0);
+}
+
+// static
+constexpr Register LoadDescriptor::ReceiverRegister() { return a1; }
+// static
+constexpr Register LoadDescriptor::NameRegister() { return a2; }
+// static
+constexpr Register LoadDescriptor::SlotRegister() { return a0; }
+
+// static
+constexpr Register LoadWithVectorDescriptor::VectorRegister() { return a3; }
+
+// static
+constexpr Register
+LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
+ return t0;
+}
+
+// static
+constexpr Register StoreDescriptor::ReceiverRegister() { return a1; }
+// static
+constexpr Register StoreDescriptor::NameRegister() { return a2; }
+// static
+constexpr Register StoreDescriptor::ValueRegister() { return a0; }
+// static
+constexpr Register StoreDescriptor::SlotRegister() { return t0; }
+
+// static
+constexpr Register StoreWithVectorDescriptor::VectorRegister() { return a3; }
+
+// static
+constexpr Register StoreTransitionDescriptor::MapRegister() { return t1; }
+
+// static
+constexpr Register ApiGetterDescriptor::HolderRegister() { return a0; }
+// static
+constexpr Register ApiGetterDescriptor::CallbackRegister() { return a3; }
+
+// static
+constexpr Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
+// static
+constexpr Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
+
+// static
+constexpr Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
+ // TODO(v8:11421): Implement on this platform.
+ return a3;
+}
+
+// static
+constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() {
+ // TODO(v8:11421): Implement on this platform.
+ return t0;
+}
+
+// static
+constexpr Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
+
+// static
+constexpr auto TypeofDescriptor::registers() { return RegisterArray(a3); }
+
+// static
+constexpr auto CallTrampolineDescriptor::registers() {
+ // a1: target
+ // a0: number of arguments
+ return RegisterArray(a1, a0);
+}
+
+// static
+constexpr auto CallVarargsDescriptor::registers() {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // t0 : arguments list length (untagged)
+ // a2 : arguments list (FixedArray)
+ return RegisterArray(a1, a0, t0, a2);
+}
+
+// static
+constexpr auto CallForwardVarargsDescriptor::registers() {
+ // a1: the target to call
+ // a0: number of arguments
+ // a2: start index (to support rest parameters)
+ return RegisterArray(a1, a0, a2);
+}
+
+// static
+constexpr auto CallFunctionTemplateDescriptor::registers() {
+ // a1 : function template info
+ // a0 : number of arguments (on the stack, not including receiver)
+ return RegisterArray(a1, a0);
+}
+
+// static
+constexpr auto CallWithSpreadDescriptor::registers() {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a2 : the object to spread
+ return RegisterArray(a1, a0, a2);
+}
+
+// static
+constexpr auto CallWithArrayLikeDescriptor::registers() {
+ // a1 : the target to call
+ // a2 : the arguments list
+ return RegisterArray(a1, a2);
+}
+
+// static
+constexpr auto ConstructVarargsDescriptor::registers() {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a3 : the new target
+ // t0 : arguments list length (untagged)
+ // a2 : arguments list (FixedArray)
+ return RegisterArray(a1, a3, a0, t0, a2);
+}
+
+// static
+constexpr auto ConstructForwardVarargsDescriptor::registers() {
+ // a1: the target to call
+ // a3: new target
+ // a0: number of arguments
+ // a2: start index (to support rest parameters)
+ return RegisterArray(a1, a3, a0, a2);
+}
+
+// static
+constexpr auto ConstructWithSpreadDescriptor::registers() {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a3 : the new target
+ // a2 : the object to spread
+ return RegisterArray(a1, a3, a0, a2);
+}
+
+// static
+constexpr auto ConstructWithArrayLikeDescriptor::registers() {
+ // a1 : the target to call
+ // a3 : the new target
+ // a2 : the arguments list
+ return RegisterArray(a1, a3, a2);
+}
+
+// static
+constexpr auto ConstructStubDescriptor::registers() {
+ // a1: target
+ // a3: new target
+ // a0: number of arguments
+ // a2: allocation site or undefined
+ return RegisterArray(a1, a3, a0, a2);
+}
+
+// static
+constexpr auto AbortDescriptor::registers() { return RegisterArray(a0); }
+
+// static
+constexpr auto CompareDescriptor::registers() { return RegisterArray(a1, a0); }
+
+// static
+constexpr auto Compare_BaselineDescriptor::registers() {
+ return RegisterArray(a1, a0, a2);
+}
+
+// static
+constexpr auto BinaryOpDescriptor::registers() { return RegisterArray(a1, a0); }
+
+// static
+constexpr auto BinaryOp_BaselineDescriptor::registers() {
+ // TODO(v8:11421): Implement on this platform.
+ return RegisterArray(a1, a0, a2);
+}
+
+// static
+constexpr auto ApiCallbackDescriptor::registers() {
+ // a1 : kApiFunctionAddress
+ // a2 : kArgc
+ // a3 : kCallData
+ // a0 : kHolder
+ return RegisterArray(a1, a2, a3, a0);
+}
+
+// static
+constexpr auto InterpreterDispatchDescriptor::registers() {
+ return RegisterArray(
+ kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister);
+}
+
+// static
+constexpr auto InterpreterPushArgsThenCallDescriptor::registers() {
+ // a0 : argument count (not including receiver)
+ // a2 : address of the first argument
+ // a1 : the target callable to be called
+ return RegisterArray(a0, a2, a1);
+}
+
+// static
+constexpr auto InterpreterPushArgsThenConstructDescriptor::registers() {
+ // a0 : argument count (not including receiver)
+ // t4 : address of the first argument
+ // a1 : constructor to call
+ // a3 : new target
+ // a2 : allocation site feedback if available, undefined otherwise
+ return RegisterArray(a0, t4, a1, a3, a2);
+}
+
+// static
+constexpr auto ResumeGeneratorDescriptor::registers() {
+ // v0 : the value to pass to the generator
+ // a1 : the JSGeneratorObject to resume
+ return RegisterArray(v0, a1);
+}
+
+// static
+constexpr auto RunMicrotasksEntryDescriptor::registers() {
+ return RegisterArray(a0, a1);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_MIPS
+
+#endif // V8_CODEGEN_MIPS_INTERFACE_DESCRIPTORS_MIPS_INL_H_
diff --git a/deps/v8/src/codegen/mips/interface-descriptors-mips.cc b/deps/v8/src/codegen/mips/interface-descriptors-mips.cc
deleted file mode 100644
index f41a0e14ca..0000000000
--- a/deps/v8/src/codegen/mips/interface-descriptors-mips.cc
+++ /dev/null
@@ -1,332 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_MIPS
-
-#include "src/codegen/interface-descriptors.h"
-
-#include "src/execution/frames.h"
-
-namespace v8 {
-namespace internal {
-
-const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
-
-void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
- CallInterfaceDescriptorData* data, int register_parameter_count) {
- const Register default_stub_registers[] = {a0, a1, a2, a3, t0};
- CHECK_LE(static_cast<size_t>(register_parameter_count),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(register_parameter_count,
- default_stub_registers);
-}
-
-// On MIPS it is not allowed to use odd numbered floating point registers
-// (e.g. f1, f3, etc.) for parameters. This can happen if we use
-// DefaultInitializePlatformSpecific to assign float registers for parameters.
-// E.g if fourth parameter goes to float register, f7 would be assigned for
-// parameter (a3 casted to int is 7).
-bool CallInterfaceDescriptor::IsValidFloatParameterRegister(Register reg) {
- return reg.code() % 2 == 0;
-}
-
-void WasmI32AtomicWait32Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {a0, a1, a2, a3};
- CHECK_EQ(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void WasmI64AtomicWait32Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {a0, a1, a2, a3, t0};
- CHECK_EQ(static_cast<size_t>(kParameterCount - kStackArgumentsCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount - kStackArgumentsCount,
- default_stub_registers);
-}
-
-void RecordWriteDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {a0, a1, a2, a3, kReturnRegister0};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void DynamicCheckMapsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register default_stub_registers[] = {kReturnRegister0, a0, a1, a2, cp};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void EphemeronKeyBarrierDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {a0, a1, a2, a3, kReturnRegister0};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-const Register LoadDescriptor::ReceiverRegister() { return a1; }
-const Register LoadDescriptor::NameRegister() { return a2; }
-const Register LoadDescriptor::SlotRegister() { return a0; }
-
-const Register LoadWithVectorDescriptor::VectorRegister() { return a3; }
-
-const Register
-LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
- return t0;
-}
-
-const Register StoreDescriptor::ReceiverRegister() { return a1; }
-const Register StoreDescriptor::NameRegister() { return a2; }
-const Register StoreDescriptor::ValueRegister() { return a0; }
-const Register StoreDescriptor::SlotRegister() { return t0; }
-
-const Register StoreWithVectorDescriptor::VectorRegister() { return a3; }
-
-const Register StoreTransitionDescriptor::SlotRegister() { return t0; }
-const Register StoreTransitionDescriptor::VectorRegister() { return a3; }
-const Register StoreTransitionDescriptor::MapRegister() { return t1; }
-
-const Register ApiGetterDescriptor::HolderRegister() { return a0; }
-const Register ApiGetterDescriptor::CallbackRegister() { return a3; }
-
-const Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
-const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
-
-const Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
- // TODO(v8:11421): Implement on this platform.
- UNREACHABLE();
-}
-const Register BaselineLeaveFrameDescriptor::WeightRegister() {
- // TODO(v8:11421): Implement on this platform.
- UNREACHABLE();
-}
-
-// static
-const Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
-
-void TypeofDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1: target
- // a0: number of arguments
- Register registers[] = {a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a0 : number of arguments (on the stack, not including receiver)
- // a1 : the target to call
- // t0 : arguments list length (untagged)
- // a2 : arguments list (FixedArray)
- Register registers[] = {a1, a0, t0, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallForwardVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1: the target to call
- // a0: number of arguments
- // a2: start index (to support rest parameters)
- Register registers[] = {a1, a0, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallFunctionTemplateDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1 : function template info
- // a0 : number of arguments (on the stack, not including receiver)
- Register registers[] = {a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallWithSpreadDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a0 : number of arguments (on the stack, not including receiver)
- // a1 : the target to call
- // a2 : the object to spread
- Register registers[] = {a1, a0, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1 : the target to call
- // a2 : the arguments list
- Register registers[] = {a1, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a0 : number of arguments (on the stack, not including receiver)
- // a1 : the target to call
- // a3 : the new target
- // t0 : arguments list length (untagged)
- // a2 : arguments list (FixedArray)
- Register registers[] = {a1, a3, a0, t0, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1: the target to call
- // a3: new target
- // a0: number of arguments
- // a2: start index (to support rest parameters)
- Register registers[] = {a1, a3, a0, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructWithSpreadDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a0 : number of arguments (on the stack, not including receiver)
- // a1 : the target to call
- // a3 : the new target
- // a2 : the object to spread
- Register registers[] = {a1, a3, a0, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1 : the target to call
- // a3 : the new target
- // a2 : the arguments list
- Register registers[] = {a1, a3, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructStubDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1: target
- // a3: new target
- // a0: number of arguments
- // a2: allocation site or undefined
- Register registers[] = {a1, a3, a0, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void AbortDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CompareDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void Compare_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:11421): Implement on this platform.
- InitializePlatformUnimplemented(data, kParameterCount);
-}
-
-void BinaryOpDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void BinaryOp_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:11421): Implement on this platform.
- InitializePlatformUnimplemented(data, kParameterCount);
-}
-
-void ApiCallbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- a1, // kApiFunctionAddress
- a2, // kArgc
- a3, // kCallData
- a0, // kHolder
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterDispatchDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
- kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterPushArgsThenCallDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- a0, // argument count (not including receiver)
- a2, // address of first argument
- a1 // the target callable to be call
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- a0, // argument count (not including receiver)
- t4, // address of the first argument
- a1, // constructor to call
- a3, // new target
- a2, // allocation site feedback if available, undefined otherwise
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ResumeGeneratorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- v0, // the value to pass to the generator
- a1 // the JSGeneratorObject to resume
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- a1, // loaded new FP
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a0, a1};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_MIPS
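
Among the deleted runtime hooks, IsValidFloatParameterRegister encoded the MIPS O32 constraint that parameters may only use even-numbered FPU registers: in the FR=0 register model, each odd register is the upper half of an even/odd pair holding a double. A self-contained sketch of that parity check, with code() standing in for V8's numeric register index:

#include <cassert>

struct FPURegister {
  int index;
  int code() const { return index; }  // numeric register number
};

// Even FPU registers only: f1, f3, ... serve as the upper halves of the
// f0/f1, f2/f3, ... pairs used for doubles, so a parameter must land on
// the even member of the pair.
bool IsValidFloatParameterRegister(FPURegister reg) {
  return reg.code() % 2 == 0;
}

int main() {
  assert(IsValidFloatParameterRegister({6}));   // f6 is usable
  assert(!IsValidFloatParameterRegister({7}));  // f7 is not
  return 0;
}
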
diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.cc b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
index 8bbdbca662..d48b441c7b 100644
--- a/deps/v8/src/codegen/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
@@ -12,6 +12,7 @@
#include "src/codegen/callable.h"
#include "src/codegen/code-factory.h"
#include "src/codegen/external-reference-table.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
@@ -64,7 +65,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
RegList list = kJSCallerSaved & ~exclusions;
bytes += NumRegs(list) * kPointerSize;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
}
@@ -89,7 +90,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
MultiPush(list);
bytes += NumRegs(list) * kPointerSize;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
MultiPushFPU(kCallerSavedFPU);
bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
}
@@ -100,7 +101,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
int bytes = 0;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
MultiPopFPU(kCallerSavedFPU);
bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
}
@@ -179,7 +180,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
Label done;
// Skip barrier if writing a smi.
- if (smi_check == INLINE_SMI_CHECK) {
+ if (smi_check == SmiCheck::kInline) {
JumpIfSmi(value, &done);
}
@@ -188,7 +189,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
DCHECK(IsAligned(offset, kPointerSize));
Addu(dst, object, Operand(offset - kHeapObjectTag));
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Label ok;
And(t8, dst, Operand(kPointerSize - 1));
@@ -198,13 +199,13 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
RecordWrite(object, dst, value, ra_status, save_fp, remembered_set_action,
- OMIT_SMI_CHECK);
+ SmiCheck::kOmit);
bind(&done);
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
li(value, Operand(bit_cast<int32_t>(kZapValue + 4)));
li(dst, Operand(bit_cast<int32_t>(kZapValue + 8)));
}
@@ -336,7 +337,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
DCHECK(!AreAliased(object, address, value, t8));
DCHECK(!AreAliased(object, address, value, t9));
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
lw(scratch, MemOperand(address));
@@ -344,7 +345,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
Operand(value));
}
- if ((remembered_set_action == OMIT_REMEMBERED_SET &&
+ if ((remembered_set_action == RememberedSetAction::kOmit &&
!FLAG_incremental_marking) ||
FLAG_disable_write_barriers) {
return;
@@ -354,7 +355,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
// catch stores of smis and stores into the young generation.
Label done;
- if (smi_check == INLINE_SMI_CHECK) {
+ if (smi_check == SmiCheck::kInline) {
DCHECK_EQ(0, kSmiTag);
JumpIfSmi(value, &done);
}
@@ -379,7 +380,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
li(address, Operand(bit_cast<int32_t>(kZapValue + 12)));
li(value, Operand(bit_cast<int32_t>(kZapValue + 16)));
}
@@ -4176,14 +4177,6 @@ void TurboAssembler::PushArray(Register array, Register size, Register scratch,
}
}
-void MacroAssembler::MaybeDropFrames() {
- // Check whether we need to drop frames to restart a function on the stack.
- li(a1, ExternalReference::debug_restart_fp_address(isolate()));
- lw(a1, MemOperand(a1));
- Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET,
- ne, a1, Operand(zero_reg));
-}
-
// ---------------------------------------------------------------------------
// Exception handling.
@@ -4373,7 +4366,7 @@ void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch1,
void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count,
- Label* done, InvokeFlag flag) {
+ Label* done, InvokeType type) {
Label regular_invoke;
// a0: actual arguments count
@@ -4484,9 +4477,9 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count,
Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
DCHECK_EQ(function, a1);
DCHECK_IMPLIES(new_target.is_valid(), new_target == a3);
@@ -4500,19 +4493,21 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
}
Label done;
- InvokePrologue(expected_parameter_count, actual_parameter_count, &done, flag);
+ InvokePrologue(expected_parameter_count, actual_parameter_count, &done, type);
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
Register code = kJavaScriptCallCodeStartRegister;
lw(code, FieldMemOperand(function, JSFunction::kCodeOffset));
- if (flag == CALL_FUNCTION) {
- Addu(code, code, Code::kHeaderSize - kHeapObjectTag);
- Call(code);
- } else {
- DCHECK(flag == JUMP_FUNCTION);
- Addu(code, code, Code::kHeaderSize - kHeapObjectTag);
- Jump(code);
+ switch (type) {
+ case InvokeType::kCall:
+ Addu(code, code, Code::kHeaderSize - kHeapObjectTag);
+ Call(code);
+ break;
+ case InvokeType::kJump:
+ Addu(code, code, Code::kHeaderSize - kHeapObjectTag);
+ Jump(code);
+ break;
}
// Continue here if InvokePrologue does handle the invocation due to
@@ -4522,9 +4517,9 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
void MacroAssembler::InvokeFunctionWithNewTarget(
Register function, Register new_target, Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
// Contract with called JS functions requires that function is passed in a1.
DCHECK_EQ(function, a1);
@@ -4538,15 +4533,15 @@ void MacroAssembler::InvokeFunctionWithNewTarget(
SharedFunctionInfo::kFormalParameterCountOffset));
InvokeFunctionCode(function, new_target, expected_reg, actual_parameter_count,
- flag);
+ type);
}
void MacroAssembler::InvokeFunction(Register function,
Register expected_parameter_count,
Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
// Contract with called JS functions requires that function is passed in a1.
DCHECK_EQ(function, a1);
@@ -4555,7 +4550,7 @@ void MacroAssembler::InvokeFunction(Register function,
lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
InvokeFunctionCode(a1, no_reg, expected_parameter_count,
- actual_parameter_count, flag);
+ actual_parameter_count, type);
}
// ---------------------------------------------------------------------------
@@ -4699,8 +4694,8 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
BranchDelaySlot bd,
bool builtin_exit_frame) {
PrepareCEntryFunction(builtin);
- Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
- kArgvOnStack, builtin_exit_frame);
+ Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
+ ArgvMode::kStack, builtin_exit_frame);
Jump(code, RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg), bd);
}
@@ -4746,7 +4741,7 @@ void TurboAssembler::DebugBreak() { stop(); }
void TurboAssembler::Assert(Condition cc, AbortReason reason, Register rs,
Operand rt) {
- if (emit_debug_code()) Check(cc, reason, rs, rt);
+ if (FLAG_debug_code) Check(cc, reason, rs, rt);
}
void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs,
@@ -4761,11 +4756,11 @@ void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs,
void TurboAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
-#ifdef DEBUG
- const char* msg = GetAbortReason(reason);
- RecordComment("Abort message: ");
- RecordComment(msg);
-#endif
+ if (FLAG_code_comments) {
+ const char* msg = GetAbortReason(reason);
+ RecordComment("Abort message: ");
+ RecordComment(msg);
+ }
// Avoid emitting call to builtin if requested.
if (trap_on_abort()) {
@@ -4885,7 +4880,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
// Set up new frame pointer.
addiu(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp);
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
@@ -4999,7 +4994,7 @@ int TurboAssembler::ActivationFrameAlignment() {
}
void MacroAssembler::AssertStackIsAligned() {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
const int frame_alignment = ActivationFrameAlignment();
const int frame_alignment_mask = frame_alignment - 1;
@@ -5032,7 +5027,7 @@ void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label,
}
void MacroAssembler::AssertNotSmi(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -5042,7 +5037,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
void MacroAssembler::AssertSmi(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -5052,7 +5047,7 @@ void MacroAssembler::AssertSmi(Register object) {
}
void MacroAssembler::AssertConstructor(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
@@ -5067,7 +5062,7 @@ void MacroAssembler::AssertConstructor(Register object) {
}
void MacroAssembler::AssertFunction(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
@@ -5083,7 +5078,7 @@ void MacroAssembler::AssertFunction(Register object) {
}
void MacroAssembler::AssertBoundFunction(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
@@ -5096,7 +5091,7 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
void MacroAssembler::AssertGeneratorObject(Register object) {
- if (!emit_debug_code()) return;
+ if (!FLAG_debug_code) return;
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
@@ -5123,7 +5118,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Label done_checking;
AssertNotSmi(object);
LoadRoot(scratch, RootIndex::kUndefinedValue);
@@ -5416,7 +5411,7 @@ void TurboAssembler::CallCFunctionHelper(Register function_base,
// PrepareCallCFunction. The C function must be called via t9, for mips ABI.
#if V8_HOST_ARCH_MIPS
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
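
The churn through this file is one mechanical migration applied many times: the unscoped enumerators kSaveFPRegs, INLINE_SMI_CHECK, EMIT_REMEMBERED_SET and CALL_FUNCTION/JUMP_FUNCTION become the scoped enums SaveFPRegsMode, SmiCheck, RememberedSetAction and InvokeType, and the if/else over InvokeFlag becomes an exhaustive switch. A reduced sketch of the pattern (the real enums live in V8's shared codegen headers):

enum class InvokeType { kCall, kJump };

// Scoped enumerators no longer leak into the surrounding namespace, and a
// switch with no default case lets the compiler diagnose a newly added
// enumerator that is not handled; the old if/else needed a DCHECK to
// assert what the remaining branch had to be.
void InvokeCode(InvokeType type) {
  switch (type) {
    case InvokeType::kCall:
      // ... emit a call sequence ...
      break;
    case InvokeType::kJump:
      // ... emit a tail-jump sequence ...
      break;
  }
}
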
diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.h b/deps/v8/src/codegen/mips/macro-assembler-mips.h
index 8d54e0b737..8a82eea6fa 100644
--- a/deps/v8/src/codegen/mips/macro-assembler-mips.h
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.h
@@ -45,8 +45,6 @@ enum LiFlags {
CONSTANT_SIZE = 1
};
-enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
-enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
@@ -974,8 +972,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void RecordWriteField(
Register object, int offset, Register value, Register scratch,
RAStatus ra_status, SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
// For a given |object| notify the garbage collector that the slot |address|
// has been written. |value| is the object being stored. The value and
@@ -983,8 +981,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void RecordWrite(
Register object, Register address, Register value, RAStatus ra_status,
SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
void Pref(int32_t hint, const MemOperand& rs);
@@ -1016,7 +1014,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Invoke the JavaScript function code by either calling or jumping.
void InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count,
- Register actual_parameter_count, InvokeFlag flag);
+ Register actual_parameter_count, InvokeType type);
// On function call, call into the debugger if necessary.
void CheckDebugHook(Register fun, Register new_target,
@@ -1027,13 +1025,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// current context to the context in the function before invoking.
void InvokeFunctionWithNewTarget(Register function, Register new_target,
Register actual_parameter_count,
- InvokeFlag flag);
+ InvokeType type);
void InvokeFunction(Register function, Register expected_parameter_count,
- Register actual_parameter_count, InvokeFlag flag);
-
- // Frame restart support.
- void MaybeDropFrames();
+ Register actual_parameter_count, InvokeType type);
// Exception handling.
@@ -1057,18 +1052,18 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Call a runtime routine.
void CallRuntime(const Runtime::Function* f, int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore);
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, save_doubles);
}
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId id, int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
}
@@ -1155,7 +1150,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Helper functions for generating invokes.
void InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count, Label* done,
- InvokeFlag flag);
+ InvokeType type);
// Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code);
diff --git a/deps/v8/src/codegen/mips64/assembler-mips64.cc b/deps/v8/src/codegen/mips64/assembler-mips64.cc
index 7f7ebd2c73..70a02ddb6f 100644
--- a/deps/v8/src/codegen/mips64/assembler-mips64.cc
+++ b/deps/v8/src/codegen/mips64/assembler-mips64.cc
@@ -235,29 +235,27 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
// operations as post-increment of sp.
const Instr kPopInstruction = DADDIU | (sp.code() << kRsShift) |
(sp.code() << kRtShift) |
- (kPointerSize & kImm16Mask); // NOLINT
+ (kPointerSize & kImm16Mask);
// daddiu(sp, sp, -8) part of Push(r) operation as pre-decrement of sp.
const Instr kPushInstruction = DADDIU | (sp.code() << kRsShift) |
(sp.code() << kRtShift) |
- (-kPointerSize & kImm16Mask); // NOLINT
+ (-kPointerSize & kImm16Mask);
// Sd(r, MemOperand(sp, 0))
-const Instr kPushRegPattern =
- SD | (sp.code() << kRsShift) | (0 & kImm16Mask); // NOLINT
+const Instr kPushRegPattern = SD | (sp.code() << kRsShift) | (0 & kImm16Mask);
// Ld(r, MemOperand(sp, 0))
-const Instr kPopRegPattern =
- LD | (sp.code() << kRsShift) | (0 & kImm16Mask); // NOLINT
+const Instr kPopRegPattern = LD | (sp.code() << kRsShift) | (0 & kImm16Mask);
const Instr kLwRegFpOffsetPattern =
- LW | (fp.code() << kRsShift) | (0 & kImm16Mask); // NOLINT
+ LW | (fp.code() << kRsShift) | (0 & kImm16Mask);
const Instr kSwRegFpOffsetPattern =
- SW | (fp.code() << kRsShift) | (0 & kImm16Mask); // NOLINT
+ SW | (fp.code() << kRsShift) | (0 & kImm16Mask);
const Instr kLwRegFpNegOffsetPattern =
- LW | (fp.code() << kRsShift) | (kNegOffset & kImm16Mask); // NOLINT
+ LW | (fp.code() << kRsShift) | (kNegOffset & kImm16Mask);
const Instr kSwRegFpNegOffsetPattern =
- SW | (fp.code() << kRsShift) | (kNegOffset & kImm16Mask); // NOLINT
+ SW | (fp.code() << kRsShift) | (kNegOffset & kImm16Mask);
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
const Instr kLwSwInstrTypeMask = 0xFFE00000;
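
These constants are complete 32-bit MIPS instruction words built by OR-ing an opcode with register and immediate fields shifted into their slots; the change above only drops the // NOLINT markers that silenced a style check. A standalone sketch of the same I-type encoding scheme (field positions follow the standard MIPS layout; the constant names mirror the ones used above):

#include <cstdint>

// MIPS I-type layout: opcode[31:26] rs[25:21] rt[20:16] imm[15:0].
constexpr uint32_t kRsShift = 21;
constexpr uint32_t kRtShift = 16;
constexpr uint32_t kImm16Mask = 0xFFFF;

constexpr uint32_t Encode(uint32_t opcode, uint32_t rs, uint32_t rt,
                          int32_t imm16) {
  return opcode | (rs << kRsShift) | (rt << kRtShift) |
         (static_cast<uint32_t>(imm16) & kImm16Mask);
}

constexpr uint32_t DADDIU = 25u << 26;  // opcode 0b011001 in bits 31..26
constexpr uint32_t kSpCode = 29;        // $sp is general register 29

// daddiu sp, sp, -8: the pre-decrement half of a Push on a 64-bit target,
// matching kPushInstruction above with kPointerSize == 8.
constexpr uint32_t kPush = Encode(DADDIU, kSpCode, kSpCode, -8);
static_assert((kPush & kImm16Mask) == 0xFFF8, "immediate is sign-truncated");
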
diff --git a/deps/v8/src/codegen/mips64/interface-descriptors-mips64-inl.h b/deps/v8/src/codegen/mips64/interface-descriptors-mips64-inl.h
new file mode 100644
index 0000000000..62e32776ef
--- /dev/null
+++ b/deps/v8/src/codegen/mips64/interface-descriptors-mips64-inl.h
@@ -0,0 +1,258 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_MIPS64_INTERFACE_DESCRIPTORS_MIPS64_INL_H_
+#define V8_CODEGEN_MIPS64_INTERFACE_DESCRIPTORS_MIPS64_INL_H_
+
+#if V8_TARGET_ARCH_MIPS64
+
+#include "src/codegen/interface-descriptors.h"
+#include "src/execution/frames.h"
+
+namespace v8 {
+namespace internal {
+
+constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() {
+ auto registers = RegisterArray(a0, a1, a2, a3, a4);
+ STATIC_ASSERT(registers.size() == kMaxBuiltinRegisterParams);
+ return registers;
+}
+
+// static
+constexpr auto RecordWriteDescriptor::registers() {
+ return RegisterArray(a0, a1, a2, a3, kReturnRegister0);
+}
+
+// static
+constexpr auto DynamicCheckMapsDescriptor::registers() {
+ return RegisterArray(kReturnRegister0, a0, a1, a2, cp);
+}
+
+// static
+constexpr auto EphemeronKeyBarrierDescriptor::registers() {
+ return RegisterArray(a0, a1, a2, a3, kReturnRegister0);
+}
+
+// static
+constexpr Register LoadDescriptor::ReceiverRegister() { return a1; }
+// static
+constexpr Register LoadDescriptor::NameRegister() { return a2; }
+// static
+constexpr Register LoadDescriptor::SlotRegister() { return a0; }
+
+// static
+constexpr Register LoadWithVectorDescriptor::VectorRegister() { return a3; }
+
+// static
+constexpr Register
+LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
+ return a4;
+}
+
+// static
+constexpr Register StoreDescriptor::ReceiverRegister() { return a1; }
+// static
+constexpr Register StoreDescriptor::NameRegister() { return a2; }
+// static
+constexpr Register StoreDescriptor::ValueRegister() { return a0; }
+// static
+constexpr Register StoreDescriptor::SlotRegister() { return a4; }
+
+// static
+constexpr Register StoreWithVectorDescriptor::VectorRegister() { return a3; }
+
+// static
+constexpr Register StoreTransitionDescriptor::MapRegister() { return a5; }
+
+// static
+constexpr Register ApiGetterDescriptor::HolderRegister() { return a0; }
+// static
+constexpr Register ApiGetterDescriptor::CallbackRegister() { return a3; }
+
+// static
+constexpr Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
+// static
+constexpr Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
+
+// static
+constexpr Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
+ // TODO(v8:11421): Implement on this platform.
+ return a3;
+}
+
+// static
+constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() {
+ // TODO(v8:11421): Implement on this platform.
+ return a4;
+}
+
+// static
+constexpr Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
+
+// static
+constexpr auto TypeofDescriptor::registers() { return RegisterArray(a3); }
+
+// static
+constexpr auto CallTrampolineDescriptor::registers() {
+ // a1: target
+ // a0: number of arguments
+ return RegisterArray(a1, a0);
+}
+
+// static
+constexpr auto CallVarargsDescriptor::registers() {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a4 : arguments list length (untagged)
+ // a2 : arguments list (FixedArray)
+ return RegisterArray(a1, a0, a4, a2);
+}
+
+// static
+constexpr auto CallForwardVarargsDescriptor::registers() {
+ // a1: the target to call
+ // a0: number of arguments
+ // a2: start index (to support rest parameters)
+ return RegisterArray(a1, a0, a2);
+}
+
+// static
+constexpr auto CallFunctionTemplateDescriptor::registers() {
+ // a1 : function template info
+ // a0 : number of arguments (on the stack, not including receiver)
+ return RegisterArray(a1, a0);
+}
+
+// static
+constexpr auto CallWithSpreadDescriptor::registers() {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a2 : the object to spread
+ return RegisterArray(a1, a0, a2);
+}
+
+// static
+constexpr auto CallWithArrayLikeDescriptor::registers() {
+ // a1 : the target to call
+ // a2 : the arguments list
+ return RegisterArray(a1, a2);
+}
+
+// static
+constexpr auto ConstructVarargsDescriptor::registers() {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a3 : the new target
+ // a4 : arguments list length (untagged)
+ // a2 : arguments list (FixedArray)
+ return RegisterArray(a1, a3, a0, a4, a2);
+}
+
+// static
+constexpr auto ConstructForwardVarargsDescriptor::registers() {
+ // a1: the target to call
+ // a3: new target
+ // a0: number of arguments
+ // a2: start index (to support rest parameters)
+ return RegisterArray(a1, a3, a0, a2);
+}
+
+// static
+constexpr auto ConstructWithSpreadDescriptor::registers() {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a3 : the new target
+ // a2 : the object to spread
+ return RegisterArray(a1, a3, a0, a2);
+}
+
+// static
+constexpr auto ConstructWithArrayLikeDescriptor::registers() {
+ // a1 : the target to call
+ // a3 : the new target
+ // a2 : the arguments list
+ return RegisterArray(a1, a3, a2);
+}
+
+// static
+constexpr auto ConstructStubDescriptor::registers() {
+ // a1: target
+ // a3: new target
+ // a0: number of arguments
+ // a2: allocation site or undefined
+ return RegisterArray(a1, a3, a0, a2);
+}
+
+// static
+constexpr auto AbortDescriptor::registers() { return RegisterArray(a0); }
+
+// static
+constexpr auto CompareDescriptor::registers() { return RegisterArray(a1, a0); }
+
+// static
+constexpr auto Compare_BaselineDescriptor::registers() {
+ // TODO(v8:11421): Implement on this platform.
+ return RegisterArray(a1, a0, a2);
+}
+
+// static
+constexpr auto BinaryOpDescriptor::registers() { return RegisterArray(a1, a0); }
+
+// static
+constexpr auto BinaryOp_BaselineDescriptor::registers() {
+ return RegisterArray(a1, a0, a2);
+}
+
+// static
+constexpr auto ApiCallbackDescriptor::registers() {
+ // a1 : kApiFunctionAddress
+ // a2 : kArgc
+ // a3 : kCallData
+ // a0 : kHolder
+ return RegisterArray(a1, a2, a3, a0);
+}
+
+// static
+constexpr auto InterpreterDispatchDescriptor::registers() {
+ return RegisterArray(
+ kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister);
+}
+
+// static
+constexpr auto InterpreterPushArgsThenCallDescriptor::registers() {
+ // a0 : argument count (not including receiver)
+ // a2 : address of first argument
+ // a1 : the target callable to be called

+ return RegisterArray(a0, a2, a1);
+}
+
+// static
+constexpr auto InterpreterPushArgsThenConstructDescriptor::registers() {
+ // a0 : argument count (not including receiver)
+ // a4 : address of the first argument
+ // a1 : constructor to call
+ // a3 : new target
+ // a2 : allocation site feedback if available, undefined otherwise
+ return RegisterArray(a0, a4, a1, a3, a2);
+}
+
+// static
+constexpr auto ResumeGeneratorDescriptor::registers() {
+ // v0 : the value to pass to the generator
+ // a1 : the JSGeneratorObject to resume
+ return RegisterArray(v0, a1);
+}
+
+// static
+constexpr auto RunMicrotasksEntryDescriptor::registers() {
+ return RegisterArray(a0, a1);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_MIPS64
+
+#endif // V8_CODEGEN_MIPS64_INTERFACE_DESCRIPTORS_MIPS64_INL_H_
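
Relative to the 32-bit file, the mips64 header moves the overflow parameters from t0/t4 to a4, reflecting the n64 ABI's larger set of integer argument registers. Since registers() is now constexpr, invariants the deleted .cc files could only CHECK at startup can be proved at compile time; a self-contained sketch of that style of verification, again with stand-in types rather than V8's real ones:

#include <array>

struct Register { int code; };
constexpr bool operator==(Register lhs, Register rhs) {
  return lhs.code == rhs.code;
}

template <typename... Regs>
constexpr std::array<Register, sizeof...(Regs)> RegisterArray(Regs... regs) {
  return {{regs...}};
}

constexpr Register a0{4}, a1{5};  // illustrative codes

struct CallTrampolineDescriptor {
  // a1: target, a0: number of arguments, as in the header above.
  static constexpr auto registers() { return RegisterArray(a1, a0); }
};

// Startup-time CHECKs become compile-time proofs of the convention.
static_assert(CallTrampolineDescriptor::registers().size() == 2,
              "two register parameters");
static_assert(CallTrampolineDescriptor::registers()[0] == a1,
              "the call target travels in a1");
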
diff --git a/deps/v8/src/codegen/mips64/interface-descriptors-mips64.cc b/deps/v8/src/codegen/mips64/interface-descriptors-mips64.cc
deleted file mode 100644
index f34d16e15b..0000000000
--- a/deps/v8/src/codegen/mips64/interface-descriptors-mips64.cc
+++ /dev/null
@@ -1,332 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_MIPS64
-
-#include "src/codegen/interface-descriptors.h"
-
-#include "src/execution/frames.h"
-
-namespace v8 {
-namespace internal {
-
-const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
-
-void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
- CallInterfaceDescriptorData* data, int register_parameter_count) {
- const Register default_stub_registers[] = {a0, a1, a2, a3, a4};
- CHECK_LE(static_cast<size_t>(register_parameter_count),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(register_parameter_count,
- default_stub_registers);
-}
-
-// On MIPS it is not allowed to use odd numbered floating point registers
-// (e.g. f1, f3, etc.) for parameters. This can happen if we use
-// DefaultInitializePlatformSpecific to assign float registers for parameters.
-// E.g if fourth parameter goes to float register, f7 would be assigned for
-// parameter (a3 casted to int is 7).
-bool CallInterfaceDescriptor::IsValidFloatParameterRegister(Register reg) {
- return reg.code() % 2 == 0;
-}
-
-void WasmI32AtomicWait32Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {a0, a1, a2, a3};
- CHECK_EQ(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void WasmI64AtomicWait32Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {a0, a1, a2, a3, a4};
- CHECK_EQ(static_cast<size_t>(kParameterCount - kStackArgumentsCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount - kStackArgumentsCount,
- default_stub_registers);
-}
-
-void RecordWriteDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {a0, a1, a2, a3, kReturnRegister0};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void DynamicCheckMapsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register default_stub_registers[] = {kReturnRegister0, a0, a1, a2, cp};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void EphemeronKeyBarrierDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {a0, a1, a2, a3, kReturnRegister0};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-const Register LoadDescriptor::ReceiverRegister() { return a1; }
-const Register LoadDescriptor::NameRegister() { return a2; }
-const Register LoadDescriptor::SlotRegister() { return a0; }
-
-const Register LoadWithVectorDescriptor::VectorRegister() { return a3; }
-
-const Register
-LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
- return a4;
-}
-
-const Register StoreDescriptor::ReceiverRegister() { return a1; }
-const Register StoreDescriptor::NameRegister() { return a2; }
-const Register StoreDescriptor::ValueRegister() { return a0; }
-const Register StoreDescriptor::SlotRegister() { return a4; }
-
-const Register StoreWithVectorDescriptor::VectorRegister() { return a3; }
-
-const Register StoreTransitionDescriptor::SlotRegister() { return a4; }
-const Register StoreTransitionDescriptor::VectorRegister() { return a3; }
-const Register StoreTransitionDescriptor::MapRegister() { return a5; }
-
-const Register ApiGetterDescriptor::HolderRegister() { return a0; }
-const Register ApiGetterDescriptor::CallbackRegister() { return a3; }
-
-const Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
-const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
-
-const Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
- // TODO(v8:11421): Implement on this platform.
- UNREACHABLE();
-}
-const Register BaselineLeaveFrameDescriptor::WeightRegister() {
- // TODO(v8:11421): Implement on this platform.
- UNREACHABLE();
-}
-
-// static
-const Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
-
-void TypeofDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1: target
- // a0: number of arguments
- Register registers[] = {a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a0 : number of arguments (on the stack, not including receiver)
- // a1 : the target to call
- // a4 : arguments list length (untagged)
- // a2 : arguments list (FixedArray)
- Register registers[] = {a1, a0, a4, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallForwardVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1: the target to call
- // a0: number of arguments
- // a2: start index (to support rest parameters)
- Register registers[] = {a1, a0, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallFunctionTemplateDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1 : function template info
- // a0 : number of arguments (on the stack, not including receiver)
- Register registers[] = {a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallWithSpreadDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a0 : number of arguments (on the stack, not including receiver)
- // a1 : the target to call
- // a2 : the object to spread
- Register registers[] = {a1, a0, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1 : the target to call
- // a2 : the arguments list
- Register registers[] = {a1, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a0 : number of arguments (on the stack, not including receiver)
- // a1 : the target to call
- // a3 : the new target
- // a4 : arguments list length (untagged)
- // a2 : arguments list (FixedArray)
- Register registers[] = {a1, a3, a0, a4, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1: the target to call
- // a3: new target
- // a0: number of arguments
- // a2: start index (to support rest parameters)
- Register registers[] = {a1, a3, a0, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructWithSpreadDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a0 : number of arguments (on the stack, not including receiver)
- // a1 : the target to call
- // a3 : the new target
- // a2 : the object to spread
- Register registers[] = {a1, a3, a0, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1 : the target to call
- // a3 : the new target
- // a2 : the arguments list
- Register registers[] = {a1, a3, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructStubDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1: target
- // a3: new target
- // a0: number of arguments
- // a2: allocation site or undefined
- Register registers[] = {a1, a3, a0, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void AbortDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CompareDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void Compare_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:11421): Implement on this platform.
- InitializePlatformUnimplemented(data, kParameterCount);
-}
-
-void BinaryOpDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void BinaryOp_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:11421): Implement on this platform.
- InitializePlatformUnimplemented(data, kParameterCount);
-}
-
-void ApiCallbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- a1, // kApiFunctionAddress
- a2, // kArgc
- a3, // kCallData
- a0, // kHolder
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterDispatchDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
- kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterPushArgsThenCallDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- a0, // argument count (not including receiver)
- a2, // address of first argument
- a1 // the target callable to be call
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- a0, // argument count (not including receiver)
- a4, // address of the first argument
- a1, // constructor to call
- a3, // new target
- a2, // allocation site feedback if available, undefined otherwise
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ResumeGeneratorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- v0, // the value to pass to the generator
- a1 // the JSGeneratorObject to resume
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- a1, // loaded new FP
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a0, a1};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_MIPS64
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
index 29443a2e58..a1896624e5 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
@@ -12,6 +12,7 @@
#include "src/codegen/callable.h"
#include "src/codegen/code-factory.h"
#include "src/codegen/external-reference-table.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
@@ -64,7 +65,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
RegList list = kJSCallerSaved & ~exclusions;
bytes += NumRegs(list) * kPointerSize;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
}
@@ -89,7 +90,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
MultiPush(list);
bytes += NumRegs(list) * kPointerSize;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
MultiPushFPU(kCallerSavedFPU);
bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
}
@@ -100,7 +101,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
int bytes = 0;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
MultiPopFPU(kCallerSavedFPU);
bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
}
@@ -177,7 +178,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
Label done;
// Skip barrier if writing a smi.
- if (smi_check == INLINE_SMI_CHECK) {
+ if (smi_check == SmiCheck::kInline) {
JumpIfSmi(value, &done);
}
@@ -186,7 +187,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
DCHECK(IsAligned(offset, kPointerSize));
Daddu(dst, object, Operand(offset - kHeapObjectTag));
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Label ok;
And(t8, dst, Operand(kPointerSize - 1));
@@ -196,13 +197,13 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
RecordWrite(object, dst, value, ra_status, save_fp, remembered_set_action,
- OMIT_SMI_CHECK);
+ SmiCheck::kOmit);
bind(&done);
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
li(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
li(dst, Operand(bit_cast<int64_t>(kZapValue + 8)));
}
@@ -334,7 +335,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
DCHECK(!AreAliased(object, address, value, t8));
DCHECK(!AreAliased(object, address, value, t9));
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
Ld(scratch, MemOperand(address));
@@ -342,7 +343,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
Operand(value));
}
- if ((remembered_set_action == OMIT_REMEMBERED_SET &&
+ if ((remembered_set_action == RememberedSetAction::kOmit &&
!FLAG_incremental_marking) ||
FLAG_disable_write_barriers) {
return;
@@ -352,7 +353,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
// catch stores of smis and stores into the young generation.
Label done;
- if (smi_check == INLINE_SMI_CHECK) {
+ if (smi_check == SmiCheck::kInline) {
DCHECK_EQ(0, kSmiTag);
JumpIfSmi(value, &done);
}
@@ -377,7 +378,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
li(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
li(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
}
@@ -4687,14 +4688,6 @@ void TurboAssembler::PushArray(Register array, Register size, Register scratch,
}
}
-void MacroAssembler::MaybeDropFrames() {
- // Check whether we need to drop frames to restart a function on the stack.
- li(a1, ExternalReference::debug_restart_fp_address(isolate()));
- Ld(a1, MemOperand(a1));
- Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET,
- ne, a1, Operand(zero_reg));
-}
-
// ---------------------------------------------------------------------------
// Exception handling.
@@ -4887,7 +4880,7 @@ void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch1,
void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count,
- Label* done, InvokeFlag flag) {
+ Label* done, InvokeType type) {
Label regular_invoke;
// a0: actual arguments count
@@ -4999,9 +4992,9 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count,
Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
DCHECK_EQ(function, a1);
DCHECK_IMPLIES(new_target.is_valid(), new_target == a3);
@@ -5015,19 +5008,21 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
}
Label done;
- InvokePrologue(expected_parameter_count, actual_parameter_count, &done, flag);
+ InvokePrologue(expected_parameter_count, actual_parameter_count, &done, type);
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
Register code = kJavaScriptCallCodeStartRegister;
Ld(code, FieldMemOperand(function, JSFunction::kCodeOffset));
- if (flag == CALL_FUNCTION) {
- Daddu(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
- Call(code);
- } else {
- DCHECK(flag == JUMP_FUNCTION);
- Daddu(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
- Jump(code);
+ switch (type) {
+ case InvokeType::kCall:
+ Daddu(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
+ Call(code);
+ break;
+ case InvokeType::kJump:
+ Daddu(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
+ Jump(code);
+ break;
}
// Continue here if InvokePrologue does handle the invocation due to
@@ -5037,9 +5032,9 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
void MacroAssembler::InvokeFunctionWithNewTarget(
Register function, Register new_target, Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
// Contract with called JS functions requires that function is passed in a1.
DCHECK_EQ(function, a1);
@@ -5053,15 +5048,15 @@ void MacroAssembler::InvokeFunctionWithNewTarget(
SharedFunctionInfo::kFormalParameterCountOffset));
InvokeFunctionCode(a1, new_target, expected_parameter_count,
- actual_parameter_count, flag);
+ actual_parameter_count, type);
}
void MacroAssembler::InvokeFunction(Register function,
Register expected_parameter_count,
Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
// Contract with called JS functions requires that function is passed in a1.
DCHECK_EQ(function, a1);
@@ -5070,7 +5065,7 @@ void MacroAssembler::InvokeFunction(Register function,
Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
InvokeFunctionCode(a1, no_reg, expected_parameter_count,
- actual_parameter_count, flag);
+ actual_parameter_count, type);
}
// ---------------------------------------------------------------------------
@@ -5214,8 +5209,8 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
BranchDelaySlot bd,
bool builtin_exit_frame) {
PrepareCEntryFunction(builtin);
- Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
- kArgvOnStack, builtin_exit_frame);
+ Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
+ ArgvMode::kStack, builtin_exit_frame);
Jump(code, RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg), bd);
}
@@ -5267,7 +5262,7 @@ void TurboAssembler::DebugBreak() { stop(); }
void TurboAssembler::Assert(Condition cc, AbortReason reason, Register rs,
Operand rt) {
- if (emit_debug_code()) Check(cc, reason, rs, rt);
+ if (FLAG_debug_code) Check(cc, reason, rs, rt);
}
void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs,
@@ -5282,11 +5277,11 @@ void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs,
void TurboAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
-#ifdef DEBUG
- const char* msg = GetAbortReason(reason);
- RecordComment("Abort message: ");
- RecordComment(msg);
-#endif
+ if (FLAG_code_comments) {
+ const char* msg = GetAbortReason(reason);
+ RecordComment("Abort message: ");
+ RecordComment(msg);
+ }
// Avoid emitting call to builtin if requested.
if (trap_on_abort()) {
@@ -5405,7 +5400,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
// Set up new frame pointer.
daddiu(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp);
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Sd(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
@@ -5521,7 +5516,7 @@ int TurboAssembler::ActivationFrameAlignment() {
}
void MacroAssembler::AssertStackIsAligned() {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
const int frame_alignment = ActivationFrameAlignment();
const int frame_alignment_mask = frame_alignment - 1;
@@ -5566,7 +5561,7 @@ void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label,
}
void MacroAssembler::AssertNotSmi(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -5576,7 +5571,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
void MacroAssembler::AssertSmi(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -5586,7 +5581,7 @@ void MacroAssembler::AssertSmi(Register object) {
}
void MacroAssembler::AssertConstructor(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
@@ -5601,7 +5596,7 @@ void MacroAssembler::AssertConstructor(Register object) {
}
void MacroAssembler::AssertFunction(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
@@ -5617,7 +5612,7 @@ void MacroAssembler::AssertFunction(Register object) {
}
void MacroAssembler::AssertBoundFunction(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
@@ -5630,7 +5625,7 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
void MacroAssembler::AssertGeneratorObject(Register object) {
- if (!emit_debug_code()) return;
+ if (!FLAG_debug_code) return;
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
@@ -5657,7 +5652,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Label done_checking;
AssertNotSmi(object);
LoadRoot(scratch, RootIndex::kUndefinedValue);
@@ -5949,7 +5944,7 @@ void TurboAssembler::CallCFunctionHelper(Register function,
// PrepareCallCFunction. The C function must be called via t9, per the MIPS ABI.
#if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
index 756b594edb..054f3345d1 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
@@ -55,8 +55,6 @@ enum LiFlags {
ADDRESS_LOAD = 2
};
-enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
-enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
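The unscoped enums deleted above are superseded by scoped equivalents used throughout this patch. A sketch of the assumed definitions, reconstructed from the call sites (enumerator order, and the ArgvMode::kRegister name, are not shown in this diff):

    enum class InvokeType { kCall, kJump };           // replaces InvokeFlag
    enum class SmiCheck { kInline, kOmit };           // replaces *_SMI_CHECK
    enum class RememberedSetAction { kEmit, kOmit };  // replaces *_REMEMBERED_SET
    enum class SaveFPRegsMode { kIgnore, kSave };     // replaces k(Dont)SaveFPRegs
    enum class ArgvMode { kStack, kRegister };        // kRegister is assumed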
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
@@ -994,8 +992,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void RecordWriteField(
Register object, int offset, Register value, Register scratch,
RAStatus ra_status, SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
// For a given |object| notify the garbage collector that the slot |address|
// has been written. |value| is the object being stored. The value and
@@ -1003,8 +1001,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void RecordWrite(
Register object, Register address, Register value, RAStatus ra_status,
SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
void Pref(int32_t hint, const MemOperand& rs);
@@ -1072,7 +1070,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Invoke the JavaScript function code by either calling or jumping.
void InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count,
- Register actual_parameter_count, InvokeFlag flag);
+ Register actual_parameter_count, InvokeType type);
// On function call, call into the debugger if necessary.
void CheckDebugHook(Register fun, Register new_target,
@@ -1083,12 +1081,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// current context to the context in the function before invoking.
void InvokeFunctionWithNewTarget(Register function, Register new_target,
Register actual_parameter_count,
- InvokeFlag flag);
+ InvokeType type);
void InvokeFunction(Register function, Register expected_parameter_count,
- Register actual_parameter_count, InvokeFlag flag);
-
- // Frame restart support.
- void MaybeDropFrames();
+ Register actual_parameter_count, InvokeType type);
// Exception handling.
@@ -1112,18 +1107,18 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Call a runtime routine.
void CallRuntime(const Runtime::Function* f, int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore);
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, save_doubles);
}
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid, int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
}
@@ -1230,7 +1225,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Helper functions for generating invokes.
void InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count, Label* done,
- InvokeFlag flag);
+ InvokeType type);
// Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code);
diff --git a/deps/v8/src/codegen/optimized-compilation-info.cc b/deps/v8/src/codegen/optimized-compilation-info.cc
index f6fd5862fd..4d5c7a1d57 100644
--- a/deps/v8/src/codegen/optimized-compilation-info.cc
+++ b/deps/v8/src/codegen/optimized-compilation-info.cc
@@ -23,12 +23,16 @@ namespace internal {
OptimizedCompilationInfo::OptimizedCompilationInfo(
Zone* zone, Isolate* isolate, Handle<SharedFunctionInfo> shared,
- Handle<JSFunction> closure, CodeKind code_kind)
+ Handle<JSFunction> closure, CodeKind code_kind, BytecodeOffset osr_offset,
+ JavaScriptFrame* osr_frame)
: code_kind_(code_kind),
+ osr_offset_(osr_offset),
+ osr_frame_(osr_frame),
zone_(zone),
optimization_id_(isolate->NextOptimizationId()) {
DCHECK_EQ(*shared, closure->shared());
DCHECK(shared->is_compiled());
+ DCHECK_IMPLIES(is_osr(), IsOptimizing());
bytecode_array_ = handle(shared->GetBytecodeArray(isolate), isolate);
shared_info_ = shared;
closure_ = closure;
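Since osr_offset_ and osr_frame_ become constructor parameters (and const members, per the header change below), an OSR request must now be expressed at construction time. Hypothetical call sites (a sketch; osr_offset and osr_frame would come from the running frame in a real OSR request):

    OptimizedCompilationInfo info(zone, isolate, shared, closure,
                                  CodeKind::TURBOFAN, BytecodeOffset::None(),
                                  nullptr);                    // regular job
    OptimizedCompilationInfo osr_info(zone, isolate, shared, closure,
                                      CodeKind::TURBOFAN, osr_offset,
                                      osr_frame);              // OSR job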
@@ -64,8 +68,6 @@ bool OptimizedCompilationInfo::FlagSetIsValid(Flag flag) const {
switch (flag) {
case kPoisonRegisterArguments:
return untrusted_code_mitigations();
- case kFunctionContextSpecializing:
- return !IsNativeContextIndependent();
default:
return true;
}
@@ -86,18 +88,22 @@ bool OptimizedCompilationInfo::FlagGetIsValid(Flag flag) const {
void OptimizedCompilationInfo::ConfigureFlags() {
if (FLAG_untrusted_code_mitigations) set_untrusted_code_mitigations();
+ if (FLAG_turbo_inline_js_wasm_calls) set_inline_js_wasm_calls();
+
+ if (!is_osr() && (IsTurboprop() || FLAG_concurrent_inlining)) {
+ set_concurrent_inlining();
+ }
switch (code_kind_) {
case CodeKind::TURBOFAN:
if (FLAG_function_context_specialization) {
set_function_context_specializing();
}
+ if (FLAG_turbo_splitting) set_splitting();
V8_FALLTHROUGH;
case CodeKind::TURBOPROP:
- case CodeKind::NATIVE_CONTEXT_INDEPENDENT:
set_called_with_code_start_register();
set_switch_jump_table();
- if (FLAG_turbo_splitting) set_splitting();
if (FLAG_untrusted_code_mitigations) set_poison_register_arguments();
// TODO(yangguo): Disable this in case of debugging for crbug.com/826613
if (FLAG_analyze_environment_liveness) set_analyze_environment_liveness();
diff --git a/deps/v8/src/codegen/optimized-compilation-info.h b/deps/v8/src/codegen/optimized-compilation-info.h
index 20386cbbee..b5ad1c9816 100644
--- a/deps/v8/src/codegen/optimized-compilation-info.h
+++ b/deps/v8/src/codegen/optimized-compilation-info.h
@@ -70,7 +70,9 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
V(TraceTurboAllocation, trace_turbo_allocation, 16) \
V(TraceHeapBroker, trace_heap_broker, 17) \
V(WasmRuntimeExceptionSupport, wasm_runtime_exception_support, 18) \
- V(ConcurrentInlining, concurrent_inlining, 19)
+ V(ConcurrentInlining, concurrent_inlining, 19) \
+ V(DiscardResultForTesting, discard_result_for_testing, 20) \
+ V(InlineJSWasmCalls, inline_js_wasm_calls, 21)
enum Flag {
#define DEF_ENUM(Camel, Lower, Bit) k##Camel = 1 << Bit,
@@ -102,7 +104,15 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
// Construct a compilation info for optimized compilation.
OptimizedCompilationInfo(Zone* zone, Isolate* isolate,
Handle<SharedFunctionInfo> shared,
- Handle<JSFunction> closure, CodeKind code_kind);
+ Handle<JSFunction> closure, CodeKind code_kind,
+ BytecodeOffset osr_offset,
+ JavaScriptFrame* osr_frame);
+ // For testing.
+ OptimizedCompilationInfo(Zone* zone, Isolate* isolate,
+ Handle<SharedFunctionInfo> shared,
+ Handle<JSFunction> closure, CodeKind code_kind)
+ : OptimizedCompilationInfo(zone, isolate, shared, closure, code_kind,
+ BytecodeOffset::None(), nullptr) {}
// Construct a compilation info for stub compilation, Wasm, and testing.
OptimizedCompilationInfo(Vector<const char> debug_name, Zone* zone,
CodeKind code_kind);
@@ -160,21 +170,11 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
bool IsOptimizing() const {
return CodeKindIsOptimizedJSFunction(code_kind());
}
- bool IsNativeContextIndependent() const {
- return code_kind() == CodeKind::NATIVE_CONTEXT_INDEPENDENT;
- }
bool IsTurboprop() const { return code_kind() == CodeKind::TURBOPROP; }
#if V8_ENABLE_WEBASSEMBLY
bool IsWasm() const { return code_kind() == CodeKind::WASM_FUNCTION; }
#endif // V8_ENABLE_WEBASSEMBLY
- void SetOptimizingForOsr(BytecodeOffset osr_offset,
- JavaScriptFrame* osr_frame) {
- DCHECK(IsOptimizing());
- osr_offset_ = osr_offset;
- osr_frame_ = osr_frame;
- }
-
void set_persistent_handles(
std::unique_ptr<PersistentHandles> persistent_handles) {
DCHECK_NULL(ph_);
@@ -293,7 +293,9 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
#endif // V8_ENABLE_WEBASSEMBLY
// Entry point when compiling for OSR, {BytecodeOffset::None} otherwise.
- BytecodeOffset osr_offset_ = BytecodeOffset::None();
+ const BytecodeOffset osr_offset_ = BytecodeOffset::None();
+ // The current OSR frame for specialization or {nullptr}.
+ JavaScriptFrame* const osr_frame_ = nullptr;
// The zone from which the compilation pipeline working on this
// OptimizedCompilationInfo allocates.
@@ -309,9 +311,6 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
const int optimization_id_;
unsigned inlined_bytecode_size_ = 0;
- // The current OSR frame for specialization or {nullptr}.
- JavaScriptFrame* osr_frame_ = nullptr;
-
Vector<const char> debug_name_;
std::unique_ptr<char[]> trace_turbo_filename_;
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.cc b/deps/v8/src/codegen/ppc/assembler-ppc.cc
index 7da9484cce..437f5f96c6 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.cc
@@ -55,7 +55,11 @@ static unsigned CpuFeaturesImpliedByCompiler() {
}
bool CpuFeatures::SupportsWasmSimd128() {
+#if V8_ENABLE_WEBASSEMBLY
return CpuFeatures::IsSupported(SIMD);
+#else
+ return false;
+#endif // V8_ENABLE_WEBASSEMBLY
}
void CpuFeatures::ProbeImpl(bool cross_compile) {
@@ -1824,6 +1828,12 @@ void Assembler::lxvd(const Simd128Register rt, const MemOperand& src) {
TX);
}
+void Assembler::lxvx(const Simd128Register rt, const MemOperand& src) {
+ int TX = 1;
+ emit(LXVX | rt.code() * B21 | src.ra().code() * B16 | src.rb().code() * B11 |
+ TX);
+}
+
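In these XX1-form emitters the trailing TX/SX bit is, per the Power ISA, the high bit of the 6-bit VSR number: setting it selects VSRs 32..63, which alias the vector registers backing Simd128Register. A minimal sketch of that split (an assumption; the bit-layout constants live elsewhere):

    constexpr int VsrNumber(int tx, int t) { return (tx << 5) | t; }
    static_assert(VsrNumber(1, 0) == 32, "TX = 1 addresses VSRs 32..63");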
void Assembler::lxsdx(const Simd128Register rt, const MemOperand& src) {
int TX = 1;
emit(LXSDX | rt.code() * B21 | src.ra().code() * B16 | src.rb().code() * B11 |
@@ -1878,18 +1888,18 @@ void Assembler::stxvd(const Simd128Register rt, const MemOperand& dst) {
SX);
}
+void Assembler::stxvx(const Simd128Register rt, const MemOperand& dst) {
+ int SX = 1;
+ emit(STXVX | rt.code() * B21 | dst.ra().code() * B16 | dst.rb().code() * B11 |
+ SX);
+}
+
void Assembler::xxspltib(const Simd128Register rt, const Operand& imm) {
int TX = 1;
CHECK(is_uint8(imm.immediate()));
emit(XXSPLTIB | rt.code() * B21 | imm.immediate() * B11 | TX);
}
-void Assembler::xxbrq(const Simd128Register rt, const Simd128Register rb) {
- int BX = 1;
- int TX = 1;
- emit(XXBRQ | rt.code() * B21 | 31 * B16 | rb.code() * B11 | BX * B1 | TX);
-}
-
// Pseudo instructions.
void Assembler::nop(int type) {
Register reg = r0;
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.h b/deps/v8/src/codegen/ppc/assembler-ppc.h
index d5b37fe59f..1d7ecf76d7 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.h
@@ -448,6 +448,7 @@ class Assembler : public AssemblerBase {
}
PPC_XX2_OPCODE_A_FORM_LIST(DECLARE_PPC_XX2_INSTRUCTIONS)
+ PPC_XX2_OPCODE_B_FORM_LIST(DECLARE_PPC_XX2_INSTRUCTIONS)
#undef DECLARE_PPC_XX2_INSTRUCTIONS
#define DECLARE_PPC_XX3_INSTRUCTIONS(name, instr_name, instr_value) \
@@ -500,6 +501,9 @@ class Assembler : public AssemblerBase {
PPC_VX_OPCODE_A_FORM_LIST(DECLARE_PPC_VX_INSTRUCTIONS_A_FORM)
PPC_VX_OPCODE_B_FORM_LIST(DECLARE_PPC_VX_INSTRUCTIONS_B_FORM)
PPC_VX_OPCODE_C_FORM_LIST(DECLARE_PPC_VX_INSTRUCTIONS_C_FORM)
+ PPC_VX_OPCODE_D_FORM_LIST(
+ DECLARE_PPC_VX_INSTRUCTIONS_C_FORM) /* OPCODE_D_FORM can use
+ INSTRUCTIONS_C_FORM */
#undef DECLARE_PPC_VX_INSTRUCTIONS_A_FORM
#undef DECLARE_PPC_VX_INSTRUCTIONS_B_FORM
#undef DECLARE_PPC_VX_INSTRUCTIONS_C_FORM
@@ -1028,6 +1032,7 @@ class Assembler : public AssemblerBase {
void mtvsrd(const Simd128Register rt, const Register ra);
void mtvsrdd(const Simd128Register rt, const Register ra, const Register rb);
void lxvd(const Simd128Register rt, const MemOperand& src);
+ void lxvx(const Simd128Register rt, const MemOperand& src);
void lxsdx(const Simd128Register rt, const MemOperand& src);
void lxsibzx(const Simd128Register rt, const MemOperand& src);
void lxsihzx(const Simd128Register rt, const MemOperand& src);
@@ -1037,8 +1042,8 @@ class Assembler : public AssemblerBase {
void stxsihx(const Simd128Register rs, const MemOperand& src);
void stxsiwx(const Simd128Register rs, const MemOperand& src);
void stxvd(const Simd128Register rt, const MemOperand& src);
+ void stxvx(const Simd128Register rt, const MemOperand& src);
void xxspltib(const Simd128Register rt, const Operand& imm);
- void xxbrq(const Simd128Register rt, const Simd128Register rb);
// Pseudo instructions
diff --git a/deps/v8/src/codegen/ppc/constants-ppc.h b/deps/v8/src/codegen/ppc/constants-ppc.h
index 5b37a2ee11..56732b7f8b 100644
--- a/deps/v8/src/codegen/ppc/constants-ppc.h
+++ b/deps/v8/src/codegen/ppc/constants-ppc.h
@@ -418,6 +418,10 @@ using Instr = uint32_t;
/* Saturate */ \
V(xvcvdpuxws, XVCVDPUXWS, 0xF0000320)
+#define PPC_XX2_OPCODE_B_FORM_LIST(V) \
+ /* Vector Byte-Reverse Quadword */ \
+ V(xxbrq, XXBRQ, 0xF01F076C)
+
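The B-form constant bakes the hard-coded 31 from the removed xxbrq() emitter (its 31 * B16 term) into the opcode value, so the generic XX2 emit path needs no special case. The arithmetic can be checked directly:

    static_assert(0xF01F076C == (0xF000076Cu | (31u << 16)),
                  "XXBRQ B form = old opcode | hard-coded RA field");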
#define PPC_XX2_OPCODE_UNUSED_LIST(V) \
/* VSX Scalar Square Root Double-Precision */ \
V(xssqrtdp, XSSQRTDP, 0xF000012C) \
@@ -520,12 +524,11 @@ using Instr = uint32_t;
/* VSX Vector Test for software Square Root Single-Precision */ \
V(xvtsqrtsp, XVTSQRTSP, 0xF00002A8) \
/* Vector Splat Immediate Byte */ \
- V(xxspltib, XXSPLTIB, 0xF00002D0) \
- /* Vector Byte-Reverse Quadword */ \
- V(xxbrq, XXBRQ, 0xF000076C)
+ V(xxspltib, XXSPLTIB, 0xF00002D0)
#define PPC_XX2_OPCODE_LIST(V) \
PPC_XX2_OPCODE_A_FORM_LIST(V) \
+ PPC_XX2_OPCODE_B_FORM_LIST(V) \
PPC_XX2_OPCODE_UNUSED_LIST(V)
#define PPC_EVX_OPCODE_LIST(V) \
@@ -1983,6 +1986,8 @@ using Instr = uint32_t;
V(lxsspx, LXSSPX, 0x7C000418) \
/* Load VSR Vector Doubleword*2 Indexed */ \
V(lxvd, LXVD, 0x7C000698) \
+ /* Load VSX Vector Indexed */ \
+ V(lxvx, LXVX, 0x7C000218) \
/* Load VSR Vector Doubleword & Splat Indexed */ \
V(lxvdsx, LXVDSX, 0x7C000298) \
/* Load VSR Vector Word*4 Indexed */ \
@@ -2011,6 +2016,8 @@ using Instr = uint32_t;
V(stxsspx, STXSSPX, 0x7C000518) \
/* Store VSR Vector Doubleword*2 Indexed */ \
V(stxvd, STXVD, 0x7C000798) \
+ /* Store VSX Vector Indexed */ \
+ V(stxvx, STXVX, 0x7C000318) \
/* Store VSR Vector Word*4 Indexed */ \
V(stxvw, STXVW, 0x7C000718)
@@ -2430,6 +2437,12 @@ using Instr = uint32_t;
/* Vector Population Count Byte */ \
V(vpopcntb, VPOPCNTB, 0x10000703)
+#define PPC_VX_OPCODE_D_FORM_LIST(V) \
+ /* Vector Negate Word */ \
+ V(vnegw, VNEGW, 0x10060602) \
+ /* Vector Negate Doubleword */ \
+ V(vnegd, VNEGD, 0x10070602)
+
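These D-form constants carry a hard-coded value in bits 20..16 (the RA slot), which is exactly what the new BitField(20, 16) mask in the decoder further below recovers:

    static_assert(0x10060602 == (0x10000000u | (6u << 16) | 0x602u), "VNEGW");
    static_assert(0x10070602 == (0x10000000u | (7u << 16) | 0x602u), "VNEGD");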
#define PPC_VX_OPCODE_UNUSED_LIST(V) \
/* Decimal Add Modulo */ \
V(bcdadd, BCDADD, 0xF0000400) \
@@ -2586,6 +2599,7 @@ using Instr = uint32_t;
PPC_VX_OPCODE_A_FORM_LIST(V) \
PPC_VX_OPCODE_B_FORM_LIST(V) \
PPC_VX_OPCODE_C_FORM_LIST(V) \
+ PPC_VX_OPCODE_D_FORM_LIST(V) \
PPC_VX_OPCODE_UNUSED_LIST(V)
#define PPC_XS_OPCODE_LIST(V) \
@@ -2919,9 +2933,19 @@ class Instruction {
PPC_VA_OPCODE_LIST(OPCODE_CASES)
return static_cast<Opcode>(opcode);
}
+ // Some VX opcodes have integers hard-coded in the middle; handle those
+ // first.
+ opcode = extcode | BitField(20, 16) | BitField(10, 0);
+ switch (opcode) {
+ PPC_VX_OPCODE_D_FORM_LIST(OPCODE_CASES)
+ return static_cast<Opcode>(opcode);
+ }
opcode = extcode | BitField(10, 0);
switch (opcode) {
- PPC_VX_OPCODE_LIST(OPCODE_CASES)
+ PPC_VX_OPCODE_A_FORM_LIST(OPCODE_CASES)
+ PPC_VX_OPCODE_B_FORM_LIST(OPCODE_CASES)
+ PPC_VX_OPCODE_C_FORM_LIST(OPCODE_CASES)
+ PPC_VX_OPCODE_UNUSED_LIST(OPCODE_CASES)
PPC_X_OPCODE_EH_S_FORM_LIST(OPCODE_CASES)
return static_cast<Opcode>(opcode);
}
@@ -2935,9 +2959,17 @@ class Instruction {
PPC_XFX_OPCODE_LIST(OPCODE_CASES)
return static_cast<Opcode>(opcode);
}
+ // Some XX2 opcodes have integers hard-coded in the middle; handle those
+ // first.
+ opcode = extcode | BitField(20, 16) | BitField(10, 2);
+ switch (opcode) {
+ PPC_XX2_OPCODE_B_FORM_LIST(OPCODE_CASES)
+ return static_cast<Opcode>(opcode);
+ }
opcode = extcode | BitField(10, 2);
switch (opcode) {
- PPC_XX2_OPCODE_LIST(OPCODE_CASES)
+ PPC_XX2_OPCODE_A_FORM_LIST(OPCODE_CASES)
+ PPC_XX2_OPCODE_UNUSED_LIST(OPCODE_CASES)
return static_cast<Opcode>(opcode);
}
opcode = extcode | BitField(10, 1);
diff --git a/deps/v8/src/codegen/ppc/interface-descriptors-ppc-inl.h b/deps/v8/src/codegen/ppc/interface-descriptors-ppc-inl.h
new file mode 100644
index 0000000000..69529a3ce6
--- /dev/null
+++ b/deps/v8/src/codegen/ppc/interface-descriptors-ppc-inl.h
@@ -0,0 +1,256 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_PPC_INTERFACE_DESCRIPTORS_PPC_INL_H_
+#define V8_CODEGEN_PPC_INTERFACE_DESCRIPTORS_PPC_INL_H_
+
+#if V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
+
+#include "src/codegen/interface-descriptors.h"
+#include "src/execution/frames.h"
+
+namespace v8 {
+namespace internal {
+
+constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() {
+ auto registers = RegisterArray(r3, r4, r5, r6, r7);
+ STATIC_ASSERT(registers.size() == kMaxBuiltinRegisterParams);
+ return registers;
+}
+
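This header leans on a RegisterArray helper; a sketch of its assumed shape (the real definition, presumably in interface-descriptors-inl.h, is not part of this diff; requires <array>):

    template <typename... Registers>
    constexpr std::array<Register, sizeof...(Registers)> RegisterArray(
        Registers... regs) {
      return {regs...};
    }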
+// static
+constexpr auto RecordWriteDescriptor::registers() {
+ return RegisterArray(r3, r4, r5, r6, r7, kReturnRegister0);
+}
+
+// static
+constexpr auto DynamicCheckMapsDescriptor::registers() {
+ return RegisterArray(r3, r4, r5, r6, cp);
+}
+
+// static
+constexpr auto EphemeronKeyBarrierDescriptor::registers() {
+ return RegisterArray(r3, r4, r5, r6, r7, kReturnRegister0);
+}
+
+// static
+constexpr Register LoadDescriptor::ReceiverRegister() { return r4; }
+// static
+constexpr Register LoadDescriptor::NameRegister() { return r5; }
+// static
+constexpr Register LoadDescriptor::SlotRegister() { return r3; }
+
+// static
+constexpr Register LoadWithVectorDescriptor::VectorRegister() { return r6; }
+
+// static
+constexpr Register
+LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
+ return r7;
+}
+
+// static
+constexpr Register StoreDescriptor::ReceiverRegister() { return r4; }
+// static
+constexpr Register StoreDescriptor::NameRegister() { return r5; }
+// static
+constexpr Register StoreDescriptor::ValueRegister() { return r3; }
+// static
+constexpr Register StoreDescriptor::SlotRegister() { return r7; }
+
+// static
+constexpr Register StoreWithVectorDescriptor::VectorRegister() { return r6; }
+
+// static
+constexpr Register StoreTransitionDescriptor::MapRegister() { return r8; }
+
+// static
+constexpr Register ApiGetterDescriptor::HolderRegister() { return r3; }
+// static
+constexpr Register ApiGetterDescriptor::CallbackRegister() { return r6; }
+
+// static
+constexpr Register GrowArrayElementsDescriptor::ObjectRegister() { return r3; }
+// static
+constexpr Register GrowArrayElementsDescriptor::KeyRegister() { return r6; }
+
+// static
+constexpr Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
+ // TODO(v8:11421): Implement on this platform.
+ return r6;
+}
+// static
+constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() {
+ // TODO(v8:11421): Implement on this platform.
+ return r7;
+}
+
+// static
+constexpr Register TypeConversionDescriptor::ArgumentRegister() { return r3; }
+
+// static
+constexpr auto TypeofDescriptor::registers() { return RegisterArray(r6); }
+
+// static
+constexpr auto CallTrampolineDescriptor::registers() {
+ // r3 : number of arguments
+ // r4 : the target to call
+ return RegisterArray(r4, r3);
+}
+
+// static
+constexpr auto CallVarargsDescriptor::registers() {
+ // r3 : number of arguments (on the stack, not including receiver)
+ // r4 : the target to call
+ // r7 : arguments list length (untagged)
+ // r5 : arguments list (FixedArray)
+ return RegisterArray(r4, r3, r7, r5);
+}
+
+// static
+constexpr auto CallForwardVarargsDescriptor::registers() {
+ // r3 : number of arguments
+ // r5 : start index (to support rest parameters)
+ // r4 : the target to call
+ return RegisterArray(r4, r3, r5);
+}
+
+// static
+constexpr auto CallFunctionTemplateDescriptor::registers() {
+ // r4 : function template info
+ // r5 : number of arguments (on the stack, not including receiver)
+ return RegisterArray(r4, r5);
+}
+
+// static
+constexpr auto CallWithSpreadDescriptor::registers() {
+ // r3 : number of arguments (on the stack, not including receiver)
+ // r4 : the target to call
+ // r5 : the object to spread
+ return RegisterArray(r4, r3, r5);
+}
+
+// static
+constexpr auto CallWithArrayLikeDescriptor::registers() {
+ // r4 : the target to call
+ // r5 : the arguments list
+ return RegisterArray(r4, r5);
+}
+
+// static
+constexpr auto ConstructVarargsDescriptor::registers() {
+ // r3 : number of arguments (on the stack, not including receiver)
+ // r4 : the target to call
+ // r6 : the new target
+ // r7 : arguments list length (untagged)
+ // r5 : arguments list (FixedArray)
+ return RegisterArray(r4, r6, r3, r7, r5);
+}
+
+// static
+constexpr auto ConstructForwardVarargsDescriptor::registers() {
+ // r3 : number of arguments
+ // r6 : the new target
+ // r5 : start index (to support rest parameters)
+ // r4 : the target to call
+ return RegisterArray(r4, r6, r3, r5);
+}
+
+// static
+constexpr auto ConstructWithSpreadDescriptor::registers() {
+ // r3 : number of arguments (on the stack, not including receiver)
+ // r4 : the target to call
+ // r6 : the new target
+ // r5 : the object to spread
+ return RegisterArray(r4, r6, r3, r5);
+}
+
+// static
+constexpr auto ConstructWithArrayLikeDescriptor::registers() {
+ // r4 : the target to call
+ // r6 : the new target
+ // r5 : the arguments list
+ return RegisterArray(r4, r6, r5);
+}
+
+// static
+constexpr auto ConstructStubDescriptor::registers() {
+ // r3 : number of arguments
+ // r4 : the target to call
+ // r6 : the new target
+ // r5 : allocation site or undefined
+ return RegisterArray(r4, r6, r3, r5);
+}
+
+// static
+constexpr auto AbortDescriptor::registers() { return RegisterArray(r4); }
+
+// static
+constexpr auto CompareDescriptor::registers() { return RegisterArray(r4, r3); }
+
+// static
+constexpr auto Compare_BaselineDescriptor::registers() {
+ // TODO(v8:11421): Implement on this platform.
+ return DefaultRegisterArray();
+}
+
+// static
+constexpr auto BinaryOpDescriptor::registers() { return RegisterArray(r4, r3); }
+
+// static
+constexpr auto BinaryOp_BaselineDescriptor::registers() {
+ // TODO(v8:11421): Implement on this platform.
+ return DefaultRegisterArray();
+}
+
+// static
+constexpr auto ApiCallbackDescriptor::registers() {
+ return RegisterArray(r4, // kApiFunctionAddress
+ r5, // kArgc
+ r6, // kCallData
+ r3); // kHolder
+}
+
+// static
+constexpr auto InterpreterDispatchDescriptor::registers() {
+ return RegisterArray(
+ kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister);
+}
+
+// static
+constexpr auto InterpreterPushArgsThenCallDescriptor::registers() {
+ return RegisterArray(r3, // argument count (not including receiver)
+ r5, // address of first argument
+ r4); // the target callable to be called
+}
+
+// static
+constexpr auto InterpreterPushArgsThenConstructDescriptor::registers() {
+ return RegisterArray(
+ r3, // argument count (not including receiver)
+ r7, // address of the first argument
+ r4, // constructor to call
+ r6, // new target
+ r5); // allocation site feedback if available, undefined otherwise
+}
+
+// static
+constexpr auto ResumeGeneratorDescriptor::registers() {
+ return RegisterArray(r3, // the value to pass to the generator
+ r4); // the JSGeneratorObject to resume
+}
+
+// static
+constexpr auto RunMicrotasksEntryDescriptor::registers() {
+ return RegisterArray(r3, r4);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
+
+#endif // V8_CODEGEN_PPC_INTERFACE_DESCRIPTORS_PPC_INL_H_
diff --git a/deps/v8/src/codegen/ppc/interface-descriptors-ppc.cc b/deps/v8/src/codegen/ppc/interface-descriptors-ppc.cc
deleted file mode 100644
index ed304e80fc..0000000000
--- a/deps/v8/src/codegen/ppc/interface-descriptors-ppc.cc
+++ /dev/null
@@ -1,306 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
-
-#include "src/codegen/interface-descriptors.h"
-
-#include "src/execution/frames.h"
-
-namespace v8 {
-namespace internal {
-
-const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
-
-void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
- CallInterfaceDescriptorData* data, int register_parameter_count) {
- const Register default_stub_registers[] = {r3, r4, r5, r6, r7};
- CHECK_LE(static_cast<size_t>(register_parameter_count),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(register_parameter_count,
- default_stub_registers);
-}
-
-void RecordWriteDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {r3, r4, r5, r6, r7};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void DynamicCheckMapsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register default_stub_registers[] = {r3, r4, r5, r6, cp};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void EphemeronKeyBarrierDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {r3, r4, r5, r6, r7};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-const Register LoadDescriptor::ReceiverRegister() { return r4; }
-const Register LoadDescriptor::NameRegister() { return r5; }
-const Register LoadDescriptor::SlotRegister() { return r3; }
-
-const Register LoadWithVectorDescriptor::VectorRegister() { return r6; }
-
-const Register
-LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
- return r7;
-}
-
-const Register StoreDescriptor::ReceiverRegister() { return r4; }
-const Register StoreDescriptor::NameRegister() { return r5; }
-const Register StoreDescriptor::ValueRegister() { return r3; }
-const Register StoreDescriptor::SlotRegister() { return r7; }
-
-const Register StoreWithVectorDescriptor::VectorRegister() { return r6; }
-
-const Register StoreTransitionDescriptor::SlotRegister() { return r7; }
-const Register StoreTransitionDescriptor::VectorRegister() { return r6; }
-const Register StoreTransitionDescriptor::MapRegister() { return r8; }
-
-const Register ApiGetterDescriptor::HolderRegister() { return r3; }
-const Register ApiGetterDescriptor::CallbackRegister() { return r6; }
-
-const Register GrowArrayElementsDescriptor::ObjectRegister() { return r3; }
-const Register GrowArrayElementsDescriptor::KeyRegister() { return r6; }
-
-const Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
- // TODO(v8:11421): Implement on this platform.
- UNREACHABLE();
-}
-const Register BaselineLeaveFrameDescriptor::WeightRegister() {
- // TODO(v8:11421): Implement on this platform.
- UNREACHABLE();
-}
-
-// static
-const Register TypeConversionDescriptor::ArgumentRegister() { return r3; }
-
-void TypeofDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r6};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r3 : number of arguments
- // r4 : the target to call
- Register registers[] = {r4, r3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r3 : number of arguments (on the stack, not including receiver)
- // r4 : the target to call
- // r7 : arguments list length (untagged)
- // r5 : arguments list (FixedArray)
- Register registers[] = {r4, r3, r7, r5};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallForwardVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r3 : number of arguments
- // r5 : start index (to support rest parameters)
- // r4 : the target to call
- Register registers[] = {r4, r3, r5};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallFunctionTemplateDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r4 : function template info
- // r5 : number of arguments (on the stack, not including receiver)
- Register registers[] = {r4, r5};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallWithSpreadDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r3 : number of arguments (on the stack, not including receiver)
- // r4 : the target to call
- // r5 : the object to spread
- Register registers[] = {r4, r3, r5};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r4 : the target to call
- // r5 : the arguments list
- Register registers[] = {r4, r5};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r3 : number of arguments (on the stack, not including receiver)
- // r4 : the target to call
- // r6 : the new target
- // r7 : arguments list length (untagged)
- // r5 : arguments list (FixedArray)
- Register registers[] = {r4, r6, r3, r7, r5};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r3 : number of arguments
- // r6 : the new target
- // r5 : start index (to support rest parameters)
- // r4 : the target to call
- Register registers[] = {r4, r6, r3, r5};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructWithSpreadDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r3 : number of arguments (on the stack, not including receiver)
- // r4 : the target to call
- // r6 : the new target
- // r5 : the object to spread
- Register registers[] = {r4, r6, r3, r5};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r4 : the target to call
- // r6 : the new target
- // r5 : the arguments list
- Register registers[] = {r4, r6, r5};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructStubDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r3 : number of arguments
- // r4 : the target to call
- // r6 : the new target
- // r5 : allocation site or undefined
- Register registers[] = {r4, r6, r3, r5};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void AbortDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r4};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CompareDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r4, r3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void Compare_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:11421): Implement on this platform.
- InitializePlatformUnimplemented(data, kParameterCount);
-}
-
-void BinaryOpDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r4, r3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void BinaryOp_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:11421): Implement on this platform.
- InitializePlatformUnimplemented(data, kParameterCount);
-}
-
-void ApiCallbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r4, // kApiFunctionAddress
- r5, // kArgc
- r6, // kCallData
- r3, // kHolder
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterDispatchDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
- kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterPushArgsThenCallDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r3, // argument count (not including receiver)
- r5, // address of first argument
- r4 // the target callable to be called
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r3, // argument count (not including receiver)
- r7, // address of the first argument
- r4, // constructor to call
- r6, // new target
- r5, // allocation site feedback if available, undefined otherwise
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ResumeGeneratorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r3, // the value to pass to the generator
- r4 // the JSGeneratorObject to resume
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r4, // loaded new FP
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r3, r4};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
index 658a41f381..e9bce8411f 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
@@ -12,6 +12,7 @@
#include "src/codegen/callable.h"
#include "src/codegen/code-factory.h"
#include "src/codegen/external-reference-table.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
@@ -55,7 +56,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
RegList list = kJSCallerSaved & ~exclusions;
bytes += NumRegs(list) * kSystemPointerSize;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
bytes += kNumCallerSavedDoubles * kDoubleSize;
}
@@ -80,7 +81,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
MultiPush(list);
bytes += NumRegs(list) * kSystemPointerSize;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
MultiPushDoubles(kCallerSavedDoubles);
bytes += kNumCallerSavedDoubles * kDoubleSize;
}
@@ -91,7 +92,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
int bytes = 0;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
MultiPopDoubles(kCallerSavedDoubles);
bytes += kNumCallerSavedDoubles * kDoubleSize;
}
@@ -133,7 +134,7 @@ void TurboAssembler::LoadFromConstantsTable(Register destination,
}
void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
- LoadP(destination, MemOperand(kRootRegister, offset), r0);
+ LoadU64(destination, MemOperand(kRootRegister, offset), r0);
}
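The LoadP -> LoadU64 and LoadWord -> LoadU32 renames applied throughout this file follow a width-and-signedness naming scheme (an assumption; the declarations are not part of these hunks):

    // Assumed convention (sketch):
    //   LoadU64(dst, mem, scratch)  // 64-bit zero-extended load, was LoadP
    //   LoadU32(dst, mem, scratch)  // 32-bit zero-extended load, was LoadWord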
void TurboAssembler::LoadRootRegisterOffset(Register destination,
@@ -184,7 +185,7 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Register scratch = ip;
int offset = code->builtin_index() * kSystemPointerSize +
IsolateData::builtin_entry_table_offset();
- LoadP(scratch, MemOperand(kRootRegister, offset), r0);
+ LoadU64(scratch, MemOperand(kRootRegister, offset), r0);
if (cond != al) b(NegateCondition(cond), &skip, cr);
Jump(scratch);
bind(&skip);
@@ -214,9 +215,9 @@ void TurboAssembler::Jump(const ExternalReference& reference) {
if (ABI_USES_FUNCTION_DESCRIPTORS) {
// AIX uses a function descriptor. When calling C code, be
// aware of this descriptor and pick up values from it.
- LoadP(ToRegister(ABI_TOC_REGISTER),
- MemOperand(scratch, kSystemPointerSize));
- LoadP(scratch, MemOperand(scratch, 0));
+ LoadU64(ToRegister(ABI_TOC_REGISTER),
+ MemOperand(scratch, kSystemPointerSize));
+ LoadU64(scratch, MemOperand(scratch, 0));
}
Jump(scratch);
}
@@ -272,7 +273,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Label skip;
int offset = code->builtin_index() * kSystemPointerSize +
IsolateData::builtin_entry_table_offset();
- LoadP(ip, MemOperand(kRootRegister, offset));
+ LoadU64(ip, MemOperand(kRootRegister, offset));
if (cond != al) b(NegateCondition(cond), &skip);
Call(ip);
bind(&skip);
@@ -411,7 +412,7 @@ void TurboAssembler::MultiPop(RegList regs, Register location) {
for (int16_t i = 0; i < Register::kNumRegisters; i++) {
if ((regs & (1 << i)) != 0) {
- LoadP(ToRegister(i), MemOperand(location, stack_offset));
+ LoadU64(ToRegister(i), MemOperand(location, stack_offset));
stack_offset += kSystemPointerSize;
}
}
@@ -442,7 +443,7 @@ void TurboAssembler::MultiPushV128(RegList dregs, Register location) {
Simd128Register dreg = Simd128Register::from_code(i);
stack_offset -= kSimd128Size;
li(ip, Operand(stack_offset));
- StoreSimd128(dreg, MemOperand(location, ip), r0, kScratchSimd128Reg);
+ StoreSimd128(dreg, MemOperand(location, ip));
}
}
}
@@ -467,7 +468,7 @@ void TurboAssembler::MultiPopV128(RegList dregs, Register location) {
if ((dregs & (1 << i)) != 0) {
Simd128Register dreg = Simd128Register::from_code(i);
li(ip, Operand(stack_offset));
- LoadSimd128(dreg, MemOperand(location, ip), r0, kScratchSimd128Reg);
+ LoadSimd128(dreg, MemOperand(location, ip));
stack_offset += kSimd128Size;
}
}
@@ -477,8 +478,8 @@ void TurboAssembler::MultiPopV128(RegList dregs, Register location) {
void TurboAssembler::LoadRoot(Register destination, RootIndex index,
Condition cond) {
DCHECK(cond == al);
- LoadP(destination,
- MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)), r0);
+ LoadU64(destination,
+ MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)), r0);
}
void TurboAssembler::LoadTaggedPointerField(const Register& destination,
@@ -487,7 +488,7 @@ void TurboAssembler::LoadTaggedPointerField(const Register& destination,
if (COMPRESS_POINTERS_BOOL) {
DecompressTaggedPointer(destination, field_operand);
} else {
- LoadP(destination, field_operand, scratch);
+ LoadU64(destination, field_operand, scratch);
}
}
@@ -497,7 +498,7 @@ void TurboAssembler::LoadAnyTaggedField(const Register& destination,
if (COMPRESS_POINTERS_BOOL) {
DecompressAnyTagged(destination, field_operand);
} else {
- LoadP(destination, field_operand, scratch);
+ LoadU64(destination, field_operand, scratch);
}
}
@@ -505,7 +506,7 @@ void TurboAssembler::SmiUntag(Register dst, const MemOperand& src, RCBit rc) {
if (SmiValuesAre31Bits()) {
lwz(dst, src);
} else {
- LoadP(dst, src);
+ LoadU64(dst, src);
}
SmiUntag(dst, rc);
@@ -550,7 +551,7 @@ void TurboAssembler::DecompressTaggedSigned(Register destination,
void TurboAssembler::DecompressTaggedSigned(Register destination,
MemOperand field_operand) {
RecordComment("[ DecompressTaggedSigned");
- LoadWord(destination, field_operand, r0);
+ LoadU32(destination, field_operand, r0);
RecordComment("]");
}
@@ -565,7 +566,7 @@ void TurboAssembler::DecompressTaggedPointer(Register destination,
void TurboAssembler::DecompressTaggedPointer(Register destination,
MemOperand field_operand) {
RecordComment("[ DecompressTaggedPointer");
- LoadWord(destination, field_operand, r0);
+ LoadU32(destination, field_operand, r0);
add(destination, destination, kRootRegister);
RecordComment("]");
}
@@ -573,7 +574,7 @@ void TurboAssembler::DecompressTaggedPointer(Register destination,
void TurboAssembler::DecompressAnyTagged(Register destination,
MemOperand field_operand) {
RecordComment("[ DecompressAnyTagged");
- LoadWord(destination, field_operand, r0);
+ LoadU32(destination, field_operand, r0);
add(destination, destination, kRootRegister);
RecordComment("]");
}
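For DecompressTaggedPointer and DecompressAnyTagged, the scheme implemented above amounts to the following (a C sketch under the assumption that compressed tagged values are 32-bit offsets from the cage base held in kRootRegister):

    uintptr_t Decompress(uintptr_t cage_base, uint32_t compressed) {
      // Zero-extending 32-bit load, then add the cage base.
      return cage_base + compressed;
    }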
@@ -597,7 +598,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
Label done;
// Skip barrier if writing a smi.
- if (smi_check == INLINE_SMI_CHECK) {
+ if (smi_check == SmiCheck::kInline) {
JumpIfSmi(value, &done);
}
@@ -606,7 +607,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
DCHECK(IsAligned(offset, kTaggedSize));
Add(dst, object, offset - kHeapObjectTag, r0);
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Label ok;
andi(r0, dst, Operand(kTaggedSize - 1));
beq(&ok, cr0);
@@ -615,13 +616,13 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
RecordWrite(object, dst, value, lr_status, save_fp, remembered_set_action,
- OMIT_SMI_CHECK);
+ SmiCheck::kOmit);
bind(&done);
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
mov(value, Operand(bit_cast<intptr_t>(kZapValue + 4)));
mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 8)));
}
@@ -752,13 +753,13 @@ void MacroAssembler::RecordWrite(Register object, Register address,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
DCHECK(object != value);
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
LoadTaggedPointerField(r0, MemOperand(address));
cmp(r0, value);
Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
}
- if ((remembered_set_action == OMIT_REMEMBERED_SET &&
+ if ((remembered_set_action == RememberedSetAction::kOmit &&
!FLAG_incremental_marking) ||
FLAG_disable_write_barriers) {
return;
@@ -768,7 +769,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
// catch stores of smis and stores into the young generation.
Label done;
- if (smi_check == INLINE_SMI_CHECK) {
+ if (smi_check == SmiCheck::kInline) {
JumpIfSmi(value, &done);
}
@@ -794,7 +795,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
mov(address, Operand(bit_cast<intptr_t>(kZapValue + 12)));
mov(value, Operand(bit_cast<intptr_t>(kZapValue + 16)));
}
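A hypothetical call site (a sketch; register names are assumed, not taken from this patch), valid because the stored value is statically known to be a heap object, so the inline smi check can be skipped:

    __ RecordWrite(object, slot_address, value, lr_status,
                   SaveFPRegsMode::kIgnore, RememberedSetAction::kEmit,
                   SmiCheck::kOmit);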
@@ -849,12 +850,12 @@ void TurboAssembler::PushStandardFrame(Register function_reg) {
void TurboAssembler::RestoreFrameStateForTailCall() {
if (FLAG_enable_embedded_constant_pool) {
- LoadP(kConstantPoolRegister,
- MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
+ LoadU64(kConstantPoolRegister,
+ MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
set_constant_pool_available(false);
}
- LoadP(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
- LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ LoadU64(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ LoadU64(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
mtlr(r0);
}
@@ -1174,11 +1175,11 @@ int TurboAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
// Drop the execution stack down to the frame pointer and restore
// the caller's state.
int frame_ends;
- LoadP(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
- LoadP(ip, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ LoadU64(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ LoadU64(ip, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
if (FLAG_enable_embedded_constant_pool) {
- LoadP(kConstantPoolRegister,
- MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
+ LoadU64(kConstantPoolRegister,
+ MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
}
mtlr(r0);
frame_ends = pc_offset();
@@ -1221,7 +1222,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
// Reserve room for saved entry sp.
subi(sp, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp));
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
li(r8, Operand::Zero());
StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
@@ -1305,7 +1306,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
// Restore current context from top and clear it in debug mode.
Move(ip,
ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
- LoadP(cp, MemOperand(ip));
+ LoadU64(cp, MemOperand(ip));
#ifdef DEBUG
mov(r6, Operand(Context::kInvalidContext));
@@ -1393,7 +1394,7 @@ void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
intptr_t offset =
TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
CHECK(is_int32(offset));
- LoadP(destination, MemOperand(kRootRegister, offset), r0);
+ LoadU64(destination, MemOperand(kRootRegister, offset), r0);
}
void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch,
@@ -1413,7 +1414,7 @@ void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch,
void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count,
- Label* done, InvokeFlag flag) {
+ Label* done, InvokeType type) {
Label regular_invoke;
// r3: actual arguments count
@@ -1528,9 +1529,9 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count,
Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
DCHECK_EQ(function, r4);
DCHECK_IMPLIES(new_target.is_valid(), new_target == r6);
@@ -1544,18 +1545,20 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
}
Label done;
- InvokePrologue(expected_parameter_count, actual_parameter_count, &done, flag);
+ InvokePrologue(expected_parameter_count, actual_parameter_count, &done, type);
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
Register code = kJavaScriptCallCodeStartRegister;
LoadTaggedPointerField(code,
FieldMemOperand(function, JSFunction::kCodeOffset));
- if (flag == CALL_FUNCTION) {
- CallCodeObject(code);
- } else {
- DCHECK(flag == JUMP_FUNCTION);
- JumpCodeObject(code);
+ switch (type) {
+ case InvokeType::kCall:
+ CallCodeObject(code);
+ break;
+ case InvokeType::kJump:
+ JumpCodeObject(code);
+ break;
}
// Continue here if InvokePrologue does handle the invocation due to
@@ -1565,9 +1568,9 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
void MacroAssembler::InvokeFunctionWithNewTarget(
Register fun, Register new_target, Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
// Contract with called JS functions requires that function is passed in r4.
DCHECK_EQ(fun, r4);
@@ -1583,15 +1586,15 @@ void MacroAssembler::InvokeFunctionWithNewTarget(
temp_reg, SharedFunctionInfo::kFormalParameterCountOffset));
InvokeFunctionCode(fun, new_target, expected_reg, actual_parameter_count,
- flag);
+ type);
}
void MacroAssembler::InvokeFunction(Register function,
Register expected_parameter_count,
Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
// Contract with called JS functions requires that function is passed in r4.
DCHECK_EQ(function, r4);
@@ -1600,18 +1603,7 @@ void MacroAssembler::InvokeFunction(Register function,
LoadTaggedPointerField(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
InvokeFunctionCode(r4, no_reg, expected_parameter_count,
- actual_parameter_count, flag);
-}
-
-void MacroAssembler::MaybeDropFrames() {
- // Check whether we need to drop frames to restart a function on the stack.
- ExternalReference restart_fp =
- ExternalReference::debug_restart_fp_address(isolate());
- Move(r4, restart_fp);
- LoadP(r4, MemOperand(r4));
- cmpi(r4, Operand::Zero());
- Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET,
- ne);
+ actual_parameter_count, type);
}
void MacroAssembler::PushStackHandler() {
@@ -1625,7 +1617,7 @@ void MacroAssembler::PushStackHandler() {
// Preserve r4-r8.
Move(r3,
ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
- LoadP(r0, MemOperand(r3));
+ LoadU64(r0, MemOperand(r3));
push(r0);
// Set this new handler as the current one.
@@ -1806,7 +1798,7 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
}
- LoadP(result, MemOperand(sp));
+ LoadU64(result, MemOperand(sp));
addi(sp, sp, Operand(kDoubleSize));
pop(r0);
mtlr(r0);
@@ -1873,8 +1865,8 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame) {
Move(r4, builtin);
- Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
- kArgvOnStack, builtin_exit_frame);
+ Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
+ ArgvMode::kStack, builtin_exit_frame);
Jump(code, RelocInfo::CODE_TARGET);
}
@@ -1921,7 +1913,7 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
}
void TurboAssembler::Assert(Condition cond, AbortReason reason, CRegister cr) {
- if (emit_debug_code()) Check(cond, reason, cr);
+ if (FLAG_debug_code) Check(cond, reason, cr);
}
void TurboAssembler::Check(Condition cond, AbortReason reason, CRegister cr) {
@@ -1935,11 +1927,11 @@ void TurboAssembler::Check(Condition cond, AbortReason reason, CRegister cr) {
void TurboAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
-#ifdef DEBUG
- const char* msg = GetAbortReason(reason);
- RecordComment("Abort message: ");
- RecordComment(msg);
-#endif
+ if (FLAG_code_comments) {
+ const char* msg = GetAbortReason(reason);
+ RecordComment("Abort message: ");
+ RecordComment(msg);
+ }
// Avoid emitting call to builtin if requested.
if (trap_on_abort()) {
@@ -1984,7 +1976,7 @@ void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
}
void MacroAssembler::AssertNotSmi(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object, r0);
Check(ne, AbortReason::kOperandIsASmi, cr0);
@@ -1992,7 +1984,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
void MacroAssembler::AssertSmi(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object, r0);
Check(eq, AbortReason::kOperandIsNotASmi, cr0);
@@ -2000,7 +1992,7 @@ void MacroAssembler::AssertSmi(Register object) {
}
void MacroAssembler::AssertConstructor(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object, r0);
Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor, cr0);
@@ -2014,7 +2006,7 @@ void MacroAssembler::AssertConstructor(Register object) {
}
void MacroAssembler::AssertFunction(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object, r0);
Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, cr0);
@@ -2028,7 +2020,7 @@ void MacroAssembler::AssertFunction(Register object) {
}
void MacroAssembler::AssertBoundFunction(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object, r0);
Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction, cr0);
@@ -2040,7 +2032,7 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
void MacroAssembler::AssertGeneratorObject(Register object) {
- if (!emit_debug_code()) return;
+ if (!FLAG_debug_code) return;
TestIfSmi(object, r0);
Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, cr0);
@@ -2070,7 +2062,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Label done_checking;
AssertNotSmi(object);
CompareRoot(object, RootIndex::kUndefinedValue);
@@ -2217,9 +2209,9 @@ void TurboAssembler::CallCFunctionHelper(Register function,
if (ABI_USES_FUNCTION_DESCRIPTORS && has_function_descriptor) {
// AIX/PPC64BE Linux uses a function descriptor. When calling C code be
// aware of this descriptor and pick up values from it
- LoadP(ToRegister(ABI_TOC_REGISTER),
- MemOperand(function, kSystemPointerSize));
- LoadP(ip, MemOperand(function, 0));
+ LoadU64(ToRegister(ABI_TOC_REGISTER),
+ MemOperand(function, kSystemPointerSize));
+ LoadU64(ip, MemOperand(function, 0));
dest = ip;
} else if (ABI_CALL_VIA_IP) {
// PPC Linux and Simulator, not AIX
@@ -2251,7 +2243,7 @@ void TurboAssembler::CallCFunctionHelper(Register function,
CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
int stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments;
if (ActivationFrameAlignment() > kSystemPointerSize) {
- LoadP(sp, MemOperand(sp, stack_space * kSystemPointerSize));
+ LoadU64(sp, MemOperand(sp, stack_space * kSystemPointerSize));
} else {
addi(sp, sp, Operand(stack_space * kSystemPointerSize));
}
@@ -2263,7 +2255,7 @@ void TurboAssembler::CheckPageFlag(
int mask, Condition cc, Label* condition_met) {
DCHECK(cc == ne || cc == eq);
ClearRightImm(scratch, object, Operand(kPageSizeBits));
- LoadP(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
+ LoadU64(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
mov(r0, Operand(mask));
and_(r0, scratch, r0, SetRC);
@@ -2703,8 +2695,8 @@ void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi smi,
}
// Load a "pointer" sized value from the memory location
-void TurboAssembler::LoadP(Register dst, const MemOperand& mem,
- Register scratch) {
+void TurboAssembler::LoadU64(Register dst, const MemOperand& mem,
+ Register scratch) {
DCHECK_EQ(mem.rb(), no_reg);
int offset = mem.offset();
int misaligned = (offset & 3);
@@ -2797,43 +2789,41 @@ void TurboAssembler::StorePU(Register src, const MemOperand& mem,
}
}
-void TurboAssembler::LoadWordArith(Register dst, const MemOperand& mem,
- Register scratch) {
+void TurboAssembler::LoadS32(Register dst, const MemOperand& mem,
+ Register scratch) {
int offset = mem.offset();
if (!is_int16(offset)) {
- DCHECK(scratch != no_reg);
+ CHECK(scratch != no_reg);
mov(scratch, Operand(offset));
lwax(dst, MemOperand(mem.ra(), scratch));
} else {
-#if V8_TARGET_ARCH_PPC64
int misaligned = (offset & 3);
if (misaligned) {
// adjust base to conform to offset alignment requirements
// TODO: enhance to use scratch if dst is unsuitable
- DCHECK(dst != r0);
+ CHECK(dst != r0);
addi(dst, mem.ra(), Operand((offset & 3) - 4));
lwa(dst, MemOperand(dst, (offset & ~3) + 4));
} else {
lwa(dst, mem);
}
-#else
- lwz(dst, mem);
-#endif
}
}
// Variable length depending on whether offset fits into immediate field
// MemOperand currently only supports d-form
-void TurboAssembler::LoadWord(Register dst, const MemOperand& mem,
- Register scratch) {
+void TurboAssembler::LoadU32(Register dst, const MemOperand& mem,
+ Register scratch) {
Register base = mem.ra();
int offset = mem.offset();
if (!is_int16(offset)) {
- LoadIntLiteral(scratch, offset);
+ CHECK(scratch != no_reg);
+ mov(scratch, Operand(offset));
lwzx(dst, MemOperand(base, scratch));
} else {
+ // lwz can handle a misaligned offset
lwz(dst, mem);
}
}
@@ -2992,22 +2982,8 @@ void TurboAssembler::LoadSingleU(DoubleRegister dst, const MemOperand& mem,
}
}
-void TurboAssembler::LoadSimd128(Simd128Register dst, const MemOperand& mem,
- Register ScratchReg,
- Simd128Register ScratchDoubleReg) {
- // lvx needs the stack to be 16 byte aligned.
- // We first use lxvd/stxvd to copy the content on an aligned address. lxvd
- // itself reverses the lanes so it cannot be used as is.
- lxvd(ScratchDoubleReg, mem);
- mr(ScratchReg, sp);
- ClearRightImm(
- sp, sp,
- Operand(base::bits::WhichPowerOfTwo(16))); // equivalent to &= -16
- addi(sp, sp, Operand(-16));
- stxvd(ScratchDoubleReg, MemOperand(r0, sp));
- // Load it with correct lane ordering.
- lvx(dst, MemOperand(r0, sp));
- mr(sp, ScratchReg);
+void TurboAssembler::LoadSimd128(Simd128Register dst, const MemOperand& mem) {
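+  // lxvx (Power ISA 3.0) loads all 16 bytes in natural lane order and has no
+  // 16-byte alignment requirement, so the aligned-stack shuffle through lvx
+  // (deleted above) is no longer needed.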
+ lxvx(dst, mem);
}
void TurboAssembler::StoreDouble(DoubleRegister src, const MemOperand& mem,
@@ -3062,21 +3038,8 @@ void TurboAssembler::StoreSingleU(DoubleRegister src, const MemOperand& mem,
}
}
-void TurboAssembler::StoreSimd128(Simd128Register src, const MemOperand& mem,
- Register ScratchReg,
- Simd128Register ScratchDoubleReg) {
- // stvx needs the stack to be 16 byte aligned.
- // We use lxvd/stxvd to store the content on an aligned address. stxvd
- // itself reverses the lanes so it cannot be used as is.
- mr(ScratchReg, sp);
- ClearRightImm(
- sp, sp,
- Operand(base::bits::WhichPowerOfTwo(16))); // equivalent to &= -16
- addi(sp, sp, Operand(-16));
- stvx(src, MemOperand(r0, sp));
- lxvd(ScratchDoubleReg, MemOperand(r0, sp));
- mr(sp, ScratchReg);
- stxvd(ScratchDoubleReg, mem);
+void TurboAssembler::StoreSimd128(Simd128Register src, const MemOperand& mem) {
+ stxvx(src, mem);
}
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
@@ -3115,7 +3078,7 @@ void TurboAssembler::SwapP(Register src, MemOperand dst, Register scratch) {
DCHECK(!AreAliased(src, dst.rb(), scratch));
DCHECK(!AreAliased(src, scratch));
mr(scratch, src);
- LoadP(src, dst, r0);
+ LoadU64(src, dst, r0);
StoreP(scratch, dst, r0);
}
@@ -3137,14 +3100,14 @@ void TurboAssembler::SwapP(MemOperand src, MemOperand dst, Register scratch_0,
src = dst;
dst = temp;
}
- LoadP(scratch_1, dst, scratch_0);
- LoadP(scratch_0, src);
+ LoadU64(scratch_1, dst, scratch_0);
+ LoadU64(scratch_0, src);
StoreP(scratch_1, src);
StoreP(scratch_0, dst, scratch_1);
} else {
- LoadP(scratch_1, dst, scratch_0);
+ LoadU64(scratch_1, dst, scratch_0);
push(scratch_1);
- LoadP(scratch_0, src, scratch_1);
+ LoadU64(scratch_0, src, scratch_1);
StoreP(scratch_0, dst, scratch_1);
pop(scratch_1);
StoreP(scratch_1, src, scratch_0);
@@ -3218,13 +3181,13 @@ void TurboAssembler::SwapSimd128(Simd128Register src, MemOperand dst,
DCHECK(src != scratch);
// push v0, to be used as scratch
addi(sp, sp, Operand(-kSimd128Size));
- StoreSimd128(v0, MemOperand(r0, sp), r0, scratch);
+ StoreSimd128(v0, MemOperand(r0, sp));
mov(ip, Operand(dst.offset()));
- LoadSimd128(v0, MemOperand(dst.ra(), ip), r0, scratch);
- StoreSimd128(src, MemOperand(dst.ra(), ip), r0, scratch);
+ LoadSimd128(v0, MemOperand(dst.ra(), ip));
+ StoreSimd128(src, MemOperand(dst.ra(), ip));
vor(src, v0, v0);
// restore v0
- LoadSimd128(v0, MemOperand(r0, sp), ip, scratch);
+ LoadSimd128(v0, MemOperand(r0, sp));
addi(sp, sp, Operand(kSimd128Size));
}
@@ -3232,23 +3195,23 @@ void TurboAssembler::SwapSimd128(MemOperand src, MemOperand dst,
Simd128Register scratch) {
// push v0 and v1, to be used as scratch
addi(sp, sp, Operand(2 * -kSimd128Size));
- StoreSimd128(v0, MemOperand(r0, sp), ip, scratch);
+ StoreSimd128(v0, MemOperand(r0, sp));
li(ip, Operand(kSimd128Size));
- StoreSimd128(v1, MemOperand(ip, sp), r0, scratch);
+ StoreSimd128(v1, MemOperand(ip, sp));
mov(ip, Operand(src.offset()));
- LoadSimd128(v0, MemOperand(src.ra(), ip), r0, scratch);
+ LoadSimd128(v0, MemOperand(src.ra(), ip));
mov(ip, Operand(dst.offset()));
- LoadSimd128(v1, MemOperand(dst.ra(), ip), r0, scratch);
+ LoadSimd128(v1, MemOperand(dst.ra(), ip));
- StoreSimd128(v0, MemOperand(dst.ra(), ip), r0, scratch);
+ StoreSimd128(v0, MemOperand(dst.ra(), ip));
mov(ip, Operand(src.offset()));
- StoreSimd128(v1, MemOperand(src.ra(), ip), r0, scratch);
+ StoreSimd128(v1, MemOperand(src.ra(), ip));
// restore v0 and v1
- LoadSimd128(v0, MemOperand(r0, sp), ip, scratch);
+ LoadSimd128(v0, MemOperand(r0, sp));
li(ip, Operand(kSimd128Size));
- LoadSimd128(v1, MemOperand(ip, sp), r0, scratch);
+ LoadSimd128(v1, MemOperand(ip, sp));
addi(sp, sp, Operand(2 * kSimd128Size));
}
@@ -3313,7 +3276,7 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
// Check whether the Code object is an off-heap trampoline. If so, call its
// (off-heap) entry point directly without going through the (on-heap)
// trampoline. Otherwise, just call the Code object as always.
- LoadWordArith(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
+ LoadS32(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
mov(r0, Operand(Code::IsOffHeapTrampoline::kMask));
and_(r0, scratch, r0, SetRC);
bne(&if_code_is_off_heap, cr0);
@@ -3326,13 +3289,12 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
// An off-heap trampoline, the entry point is loaded from the builtin entry
// table.
bind(&if_code_is_off_heap);
- LoadWordArith(scratch,
- FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
+ LoadS32(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
ShiftLeftImm(destination, scratch, Operand(kSystemPointerSizeLog2));
add(destination, destination, kRootRegister);
- LoadP(destination,
- MemOperand(destination, IsolateData::builtin_entry_table_offset()),
- r0);
+ LoadU64(destination,
+ MemOperand(destination, IsolateData::builtin_entry_table_offset()),
+ r0);
bind(&out);
} else {
@@ -3366,8 +3328,9 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
if (ABI_USES_FUNCTION_DESCRIPTORS) {
// AIX/PPC64BE Linux uses a function descriptor. When calling C code be
// aware of this descriptor and pick up values from it
- LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(target, kSystemPointerSize));
- LoadP(ip, MemOperand(target, 0));
+ LoadU64(ToRegister(ABI_TOC_REGISTER),
+ MemOperand(target, kSystemPointerSize));
+ LoadU64(ip, MemOperand(target, 0));
dest = ip;
} else if (ABI_CALL_VIA_IP && dest != ip) {
Move(ip, target);
@@ -3388,8 +3351,8 @@ void TurboAssembler::CallForDeoptimization(Builtins::Name target, int,
Label* exit, DeoptimizeKind kind,
Label* ret, Label*) {
BlockTrampolinePoolScope block_trampoline_pool(this);
- LoadP(ip, MemOperand(kRootRegister,
- IsolateData::builtin_entry_slot_offset(target)));
+ LoadU64(ip, MemOperand(kRootRegister,
+ IsolateData::builtin_entry_slot_offset(target)));
Call(ip);
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
(kind == DeoptimizeKind::kLazy)
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
index 1d8f3a388d..f657f90f76 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
@@ -28,8 +28,6 @@ inline MemOperand FieldMemOperand(Register object, int offset) {
return MemOperand(object, offset - kHeapObjectTag);
}
-enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
-enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
@@ -149,10 +147,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
}
// These exist to provide portability between 32 and 64bit
- void LoadP(Register dst, const MemOperand& mem, Register scratch = no_reg);
+ void LoadU64(Register dst, const MemOperand& mem, Register scratch = no_reg);
void LoadPU(Register dst, const MemOperand& mem, Register scratch = no_reg);
- void LoadWordArith(Register dst, const MemOperand& mem,
- Register scratch = no_reg);
+ void LoadS32(Register dst, const MemOperand& mem, Register scratch = no_reg);
void StoreP(Register src, const MemOperand& mem, Register scratch = no_reg);
void StorePU(Register src, const MemOperand& mem, Register scratch = no_reg);
@@ -161,8 +158,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadFloat32(DoubleRegister dst, const MemOperand& mem,
Register scratch = no_reg);
void LoadDoubleLiteral(DoubleRegister result, Double value, Register scratch);
- void LoadSimd128(Simd128Register dst, const MemOperand& mem,
- Register ScratchReg, Simd128Register ScratchDoubleReg);
+ void LoadSimd128(Simd128Register dst, const MemOperand& mem);
// load a literal signed int value <value> to GPR <dst>
void LoadIntLiteral(Register dst, int value);
@@ -185,8 +181,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Register scratch = no_reg);
void StoreSingleU(DoubleRegister src, const MemOperand& mem,
Register scratch = no_reg);
- void StoreSimd128(Simd128Register src, const MemOperand& mem,
- Register ScratchReg, Simd128Register ScratchDoubleReg);
+ void StoreSimd128(Simd128Register src, const MemOperand& mem);
void Cmpi(Register src1, const Operand& src2, Register scratch,
CRegister cr = cr7);
@@ -253,36 +248,36 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Pop two registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2) {
- LoadP(src2, MemOperand(sp, 0));
- LoadP(src1, MemOperand(sp, kSystemPointerSize));
+ LoadU64(src2, MemOperand(sp, 0));
+ LoadU64(src1, MemOperand(sp, kSystemPointerSize));
addi(sp, sp, Operand(2 * kSystemPointerSize));
}
// Pop three registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Register src3) {
- LoadP(src3, MemOperand(sp, 0));
- LoadP(src2, MemOperand(sp, kSystemPointerSize));
- LoadP(src1, MemOperand(sp, 2 * kSystemPointerSize));
+ LoadU64(src3, MemOperand(sp, 0));
+ LoadU64(src2, MemOperand(sp, kSystemPointerSize));
+ LoadU64(src1, MemOperand(sp, 2 * kSystemPointerSize));
addi(sp, sp, Operand(3 * kSystemPointerSize));
}
// Pop four registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Register src3, Register src4) {
- LoadP(src4, MemOperand(sp, 0));
- LoadP(src3, MemOperand(sp, kSystemPointerSize));
- LoadP(src2, MemOperand(sp, 2 * kSystemPointerSize));
- LoadP(src1, MemOperand(sp, 3 * kSystemPointerSize));
+ LoadU64(src4, MemOperand(sp, 0));
+ LoadU64(src3, MemOperand(sp, kSystemPointerSize));
+ LoadU64(src2, MemOperand(sp, 2 * kSystemPointerSize));
+ LoadU64(src1, MemOperand(sp, 3 * kSystemPointerSize));
addi(sp, sp, Operand(4 * kSystemPointerSize));
}
// Pop five registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Register src3, Register src4,
Register src5) {
- LoadP(src5, MemOperand(sp, 0));
- LoadP(src4, MemOperand(sp, kSystemPointerSize));
- LoadP(src3, MemOperand(sp, 2 * kSystemPointerSize));
- LoadP(src2, MemOperand(sp, 3 * kSystemPointerSize));
- LoadP(src1, MemOperand(sp, 4 * kSystemPointerSize));
+ LoadU64(src5, MemOperand(sp, 0));
+ LoadU64(src4, MemOperand(sp, kSystemPointerSize));
+ LoadU64(src3, MemOperand(sp, 2 * kSystemPointerSize));
+ LoadU64(src2, MemOperand(sp, 3 * kSystemPointerSize));
+ LoadU64(src1, MemOperand(sp, 4 * kSystemPointerSize));
addi(sp, sp, Operand(5 * kSystemPointerSize));
}
@@ -716,7 +711,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void DecompressAnyTagged(Register destination, MemOperand field_operand);
void DecompressAnyTagged(Register destination, Register source);
- void LoadWord(Register dst, const MemOperand& mem, Register scratch);
+ void LoadU32(Register dst, const MemOperand& mem, Register scratch);
void StoreWord(Register src, const MemOperand& mem, Register scratch);
private:
@@ -743,7 +738,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// TODO(victorgomes): Remove this function once we stick with the reversed
// arguments order.
void LoadReceiver(Register dest, Register argc) {
- LoadP(dest, MemOperand(sp, 0));
+ LoadU64(dest, MemOperand(sp, 0));
}
void StoreReceiver(Register rec, Register argc, Register scratch) {
@@ -761,8 +756,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void RecordWriteField(
Register object, int offset, Register value, Register scratch,
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
// For a given |object| notify the garbage collector that the slot |address|
// has been written. |value| is the object being stored. The value and
@@ -770,8 +765,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void RecordWrite(
Register object, Register address, Register value,
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
// Enter exit frame.
// stack_space - extra stack space, used for parameters before call to C.
@@ -837,7 +832,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Invoke the JavaScript function code by either calling or jumping.
void InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count,
- Register actual_parameter_count, InvokeFlag flag);
+ Register actual_parameter_count, InvokeType type);
// On function call, call into the debugger if necessary.
void CheckDebugHook(Register fun, Register new_target,
@@ -848,12 +843,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// current context to the context in the function before invoking.
void InvokeFunctionWithNewTarget(Register function, Register new_target,
Register actual_parameter_count,
- InvokeFlag flag);
+ InvokeType type);
void InvokeFunction(Register function, Register expected_parameter_count,
- Register actual_parameter_count, InvokeFlag flag);
-
- // Frame restart support
- void MaybeDropFrames();
+ Register actual_parameter_count, InvokeType type);
// Exception handling
@@ -926,22 +918,22 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Call a runtime routine.
void CallRuntime(const Runtime::Function* f, int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore);
void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
- CallRuntime(function, function->nargs, kSaveFPRegs);
+ CallRuntime(function, function->nargs, SaveFPRegsMode::kSave);
}
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, save_doubles);
}
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid, int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
}
@@ -1053,7 +1045,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Helper functions for generating invokes.
void InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count, Label* done,
- InvokeFlag flag);
+ InvokeType type);
DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};
diff --git a/deps/v8/src/codegen/register-arch.h b/deps/v8/src/codegen/register-arch.h
index 3936ee80cc..eb4cdb8789 100644
--- a/deps/v8/src/codegen/register-arch.h
+++ b/deps/v8/src/codegen/register-arch.h
@@ -41,6 +41,27 @@ constexpr bool ShouldPadArguments(int argument_count) {
return ArgumentPaddingSlots(argument_count) != 0;
}
+#ifdef DEBUG
+struct CountIfValidRegisterFunctor {
+ template <typename RegType>
+ constexpr int operator()(int count, RegType reg) const {
+ return count + (reg.is_valid() ? 1 : 0);
+ }
+};
+
+template <typename RegType, typename... RegTypes,
+ // All arguments must be either Register or DoubleRegister.
+ typename = typename std::enable_if<
+ base::is_same<Register, RegType, RegTypes...>::value ||
+ base::is_same<DoubleRegister, RegType, RegTypes...>::value>::type>
+inline constexpr bool AreAliased(RegType first_reg, RegTypes... regs) {
+ int num_different_regs = NumRegs(RegType::ListOf(first_reg, regs...));
+ int num_given_regs =
+ base::fold(CountIfValidRegisterFunctor{}, 0, first_reg, regs...);
+ return num_different_regs < num_given_regs;
+}
+#endif
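// Usage sketch (illustrative, not part of the patch): AreAliased reports
// whether any two valid registers in the argument list coincide; the functor
// counts only valid registers, so no_reg arguments never trigger a false
// positive. PPC register names are used here purely for illustration.
//   AreAliased(r3, r4, r5)      // false: all distinct
//   AreAliased(r3, r4, r3)      // true:  r3 appears twice
//   AreAliased(r3, no_reg, r4)  // false: no_reg is skipped by the count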
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/register.h b/deps/v8/src/codegen/register.h
index 57f3a1c62a..49f67ceb1d 100644
--- a/deps/v8/src/codegen/register.h
+++ b/deps/v8/src/codegen/register.h
@@ -33,7 +33,7 @@ class RegisterBase {
static constexpr SubType no_reg() { return SubType{kCode_no_reg}; }
static constexpr SubType from_code(int code) {
- CONSTEXPR_DCHECK(base::IsInRange(code, 0, kNumRegisters - 1));
+ DCHECK(base::IsInRange(code, 0, kNumRegisters - 1));
return SubType{code};
}
@@ -45,7 +45,7 @@ class RegisterBase {
constexpr bool is_valid() const { return reg_code_ != kCode_no_reg; }
constexpr int code() const {
- CONSTEXPR_DCHECK(is_valid());
+ DCHECK(is_valid());
return reg_code_;
}
diff --git a/deps/v8/src/codegen/reloc-info.cc b/deps/v8/src/codegen/reloc-info.cc
index 753b34bdbf..4781e7609b 100644
--- a/deps/v8/src/codegen/reloc-info.cc
+++ b/deps/v8/src/codegen/reloc-info.cc
@@ -4,6 +4,7 @@
#include "src/codegen/reloc-info.h"
+#include "src/base/vlq.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/code-reference.h"
#include "src/codegen/external-reference-encoder.h"
@@ -56,11 +57,10 @@ const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
// the following record in the usual way. The long pc jump record has variable
// length:
// pc-jump: [PC_JUMP] 11
-// [7 bits data] 0
+// 1 [7 bits data]
// ...
-// [7 bits data] 1
-// (Bits 6..31 of pc delta, with leading zeroes
-// dropped, and last non-zero chunk tagged with 1.)
+// 0 [7 bits data]
+// (Bits 6..31 of pc delta, encoded with VLQ.)
const int kTagBits = 2;
const int kTagMask = (1 << kTagBits) - 1;
@@ -75,12 +75,6 @@ const int kSmallPCDeltaBits = kBitsPerByte - kTagBits;
const int kSmallPCDeltaMask = (1 << kSmallPCDeltaBits) - 1;
const int RelocInfo::kMaxSmallPCDelta = kSmallPCDeltaMask;
-const int kChunkBits = 7;
-const int kChunkMask = (1 << kChunkBits) - 1;
-const int kLastChunkTagBits = 1;
-const int kLastChunkTagMask = 1;
-const int kLastChunkTag = 1;
-
uint32_t RelocInfoWriter::WriteLongPCJump(uint32_t pc_delta) {
// Return if the pc_delta can fit in kSmallPCDeltaBits bits.
// Otherwise write a variable length PC jump for the bits that do
@@ -89,13 +83,12 @@ uint32_t RelocInfoWriter::WriteLongPCJump(uint32_t pc_delta) {
WriteMode(RelocInfo::PC_JUMP);
uint32_t pc_jump = pc_delta >> kSmallPCDeltaBits;
DCHECK_GT(pc_jump, 0);
- // Write kChunkBits size chunks of the pc_jump.
- for (; pc_jump > 0; pc_jump = pc_jump >> kChunkBits) {
- byte b = pc_jump & kChunkMask;
- *--pos_ = b << kLastChunkTagBits;
- }
- // Tag the last chunk so it can be identified.
- *pos_ = *pos_ | kLastChunkTag;
+ base::VLQEncodeUnsigned(
+ [this](byte byte) {
+ *--pos_ = byte;
+ return pos_;
+ },
+ pc_jump);
// Return the remaining kSmallPCDeltaBits of the pc_delta.
return pc_delta & kSmallPCDeltaMask;
}
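// Illustrative sketch (not part of the patch): a minimal unsigned VLQ
// encode/decode pair matching the convention described above -- 7 payload
// bits per byte, with the high bit set on every byte except the last.
// Names here are hypothetical, not V8's base::VLQ* API.
#include <cstdint>
#include <vector>

std::vector<uint8_t> VLQEncodeUnsignedSketch(uint32_t value) {
  std::vector<uint8_t> out;
  do {
    uint8_t chunk = value & 0x7F;    // low 7 bits of the remaining value
    value >>= 7;
    if (value != 0) chunk |= 0x80;   // continuation bit on non-final bytes
    out.push_back(chunk);
  } while (value != 0);
  return out;  // e.g. 0x12345 -> {0xC5, 0xC6, 0x04}
}

uint32_t VLQDecodeUnsignedSketch(const std::vector<uint8_t>& bytes) {
  uint32_t value = 0;
  int shift = 0;
  for (uint8_t b : bytes) {
    value |= static_cast<uint32_t>(b & 0x7F) << shift;
    if ((b & 0x80) == 0) break;      // final byte has the high bit clear
    shift += 7;
  }
  return value;
}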
@@ -205,14 +198,8 @@ void RelocIterator::AdvanceReadData() {
void RelocIterator::AdvanceReadLongPCJump() {
// Read the 32-kSmallPCDeltaBits most significant bits of the
- // pc jump in kChunkBits bit chunks and shift them into place.
- // Stop when the last chunk is encountered.
- uint32_t pc_jump = 0;
- for (int i = 0; i < kIntSize; i++) {
- byte pc_jump_part = *--pos_;
- pc_jump |= (pc_jump_part >> kLastChunkTagBits) << i * kChunkBits;
- if ((pc_jump_part & kLastChunkTagMask) == 1) break;
- }
+ // pc jump as a VLQ encoded integer.
+ uint32_t pc_jump = base::VLQDecodeUnsigned([this] { return *--pos_; });
// The least significant kSmallPCDeltaBits bits will be added
// later.
rinfo_.pc_ += pc_jump << kSmallPCDeltaBits;
@@ -450,7 +437,7 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "unknown relocation type";
}
-void RelocInfo::Print(Isolate* isolate, std::ostream& os) { // NOLINT
+void RelocInfo::Print(Isolate* isolate, std::ostream& os) {
os << reinterpret_cast<const void*>(pc_) << " " << RelocModeName(rmode_);
if (rmode_ == DEOPT_SCRIPT_OFFSET || rmode_ == DEOPT_INLINING_ID) {
os << " (" << data() << ")";
diff --git a/deps/v8/src/codegen/reloc-info.h b/deps/v8/src/codegen/reloc-info.h
index bef433e10b..e8b3c0b98b 100644
--- a/deps/v8/src/codegen/reloc-info.h
+++ b/deps/v8/src/codegen/reloc-info.h
@@ -328,8 +328,8 @@ class RelocInfo {
#ifdef ENABLE_DISASSEMBLER
// Printing
static const char* RelocModeName(Mode rmode);
- void Print(Isolate* isolate, std::ostream& os); // NOLINT
-#endif // ENABLE_DISASSEMBLER
+ void Print(Isolate* isolate, std::ostream& os);
+#endif // ENABLE_DISASSEMBLER
#ifdef VERIFY_HEAP
void Verify(Isolate* isolate);
#endif
diff --git a/deps/v8/src/codegen/riscv64/assembler-riscv64-inl.h b/deps/v8/src/codegen/riscv64/assembler-riscv64-inl.h
index 40bd56d15b..d301a00bf4 100644
--- a/deps/v8/src/codegen/riscv64/assembler-riscv64-inl.h
+++ b/deps/v8/src/codegen/riscv64/assembler-riscv64-inl.h
@@ -63,11 +63,15 @@ void RelocInfo::apply(intptr_t delta) {
if (IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_)) {
// Absolute code pointer inside code object moves with the code object.
Assembler::RelocateInternalReference(rmode_, pc_, delta);
+ } else {
+ DCHECK(IsRelativeCodeTarget(rmode_));
+ Assembler::RelocateRelativeReference(rmode_, pc_, delta);
}
}
Address RelocInfo::target_address() {
- DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
+ DCHECK(IsCodeTargetMode(rmode_) || IsRuntimeEntry(rmode_) ||
+ IsWasmCall(rmode_));
return Assembler::target_address_at(pc_, constant_pool_);
}
@@ -133,9 +137,13 @@ HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
- DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
- return Handle<HeapObject>(reinterpret_cast<Address*>(
- Assembler::target_address_at(pc_, constant_pool_)));
+ if (IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_)) {
+ return Handle<HeapObject>(reinterpret_cast<Address*>(
+ Assembler::target_address_at(pc_, constant_pool_)));
+ } else {
+ DCHECK(IsRelativeCodeTarget(rmode_));
+ return origin->relative_code_target_object_handle_at(pc_);
+ }
}
void RelocInfo::set_target_object(Heap* heap, HeapObject target,
@@ -163,11 +171,11 @@ void RelocInfo::set_target_external_reference(
}
Address RelocInfo::target_internal_reference() {
- if (rmode_ == INTERNAL_REFERENCE) {
+ if (IsInternalReference(rmode_)) {
return Memory<Address>(pc_);
} else {
// Encoded internal references are j/jal instructions.
- DCHECK(rmode_ == INTERNAL_REFERENCE_ENCODED);
+ DCHECK(IsInternalReferenceEncoded(rmode_));
DCHECK(Assembler::IsLui(Assembler::instr_at(pc_ + 0 * kInstrSize)));
Address address = Assembler::target_address_at(pc_);
return address;
@@ -175,10 +183,20 @@ Address RelocInfo::target_internal_reference() {
}
Address RelocInfo::target_internal_reference_address() {
- DCHECK(rmode_ == INTERNAL_REFERENCE || rmode_ == INTERNAL_REFERENCE_ENCODED);
+ DCHECK(IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_));
return pc_;
}
+Handle<Code> Assembler::relative_code_target_object_handle_at(
+ Address pc) const {
+ Instr instr1 = Assembler::instr_at(pc);
+ Instr instr2 = Assembler::instr_at(pc + kInstrSize);
+ DCHECK(IsAuipc(instr1));
+ DCHECK(IsJalr(instr2));
+ int32_t code_target_index = BrachlongOffset(instr1, instr2);
+ return GetCodeTarget(code_target_index);
+}
+
Address RelocInfo::target_runtime_entry(Assembler* origin) {
DCHECK(IsRuntimeEntry(rmode_));
return target_address();
diff --git a/deps/v8/src/codegen/riscv64/assembler-riscv64.cc b/deps/v8/src/codegen/riscv64/assembler-riscv64.cc
index 914ea26f9f..35c56ccdf5 100644
--- a/deps/v8/src/codegen/riscv64/assembler-riscv64.cc
+++ b/deps/v8/src/codegen/riscv64/assembler-riscv64.cc
@@ -128,7 +128,8 @@ Register ToRegister(int num) {
const int RelocInfo::kApplyMask =
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
+ RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET);
bool RelocInfo::IsCodedSpecially() {
// The deserializer needs to know whether a pointer is specially coded. Being
@@ -454,6 +455,16 @@ static inline Instr SetAuipcOffset(int32_t offset, Instr instr) {
return instr;
}
+static inline Instr SetJalrOffset(int32_t offset, Instr instr) {
+ DCHECK(Assembler::IsJalr(instr));
+ DCHECK(is_int12(offset));
+ instr &= ~kImm12Mask;
+ int32_t imm12 = offset << kImm12Shift;
+ DCHECK(Assembler::IsJalr(instr | (imm12 & kImm12Mask)));
+ DCHECK(Assembler::JalrOffset(instr | (imm12 & kImm12Mask)) == offset);
+ return instr | (imm12 & kImm12Mask);
+}
+
static inline Instr SetJalOffset(int32_t pos, int32_t target_pos, Instr instr) {
DCHECK(Assembler::IsJal(instr));
int32_t imm = target_pos - pos;
@@ -689,17 +700,36 @@ int Assembler::CJumpOffset(Instr instr) {
int Assembler::BrachlongOffset(Instr auipc, Instr instr_I) {
DCHECK(reinterpret_cast<Instruction*>(&instr_I)->InstructionType() ==
InstructionBase::kIType);
- const int kImm19_0Mask = ((1 << 20) - 1);
- int32_t imm_auipc = auipc & (kImm19_0Mask << 12);
- int32_t imm_12 = instr_I >> 20;
- int32_t offset = imm_12 + imm_auipc;
+ DCHECK(IsAuipc(auipc));
+ int32_t imm_auipc = AuipcOffset(auipc);
+ int32_t imm12 = (instr_I & kImm12Mask) >> 20;
+ int32_t offset = imm12 + imm_auipc;
return offset;
}
+int Assembler::PatchBranchlongOffset(Address pc, Instr instr_auipc,
+ Instr instr_jalr, int32_t offset) {
+ DCHECK(IsAuipc(instr_auipc));
+ DCHECK(IsJalr(instr_jalr));
+ int32_t Hi20 = (((int32_t)offset + 0x800) >> 12);
+ int32_t Lo12 = (int32_t)offset << 20 >> 20;
+ CHECK(is_int32(offset));
+ instr_at_put(pc, SetAuipcOffset(Hi20, instr_auipc));
+ instr_at_put(pc + 4, SetJalrOffset(Lo12, instr_jalr));
+ DCHECK(offset ==
+ BrachlongOffset(Assembler::instr_at(pc), Assembler::instr_at(pc + 4)));
+ return 2;
+}
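// Worked example (illustrative, not part of the patch) of the Hi20/Lo12 split
// above: jalr's 12-bit immediate is sign-extended, so the auipc half is
// rounded by +0x800 to compensate.
//   offset = 0x12345FFF
//   Hi20   = (0x12345FFF + 0x800) >> 12       = 0x12346
//   Lo12   = (int32_t)0x12345FFF << 20 >> 20  = -1    (0xFFF sign-extended)
//   check:   (0x12346 << 12) + (-1)           = 0x12345FFF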
+
int Assembler::LdOffset(Instr instr) {
DCHECK(IsLd(instr));
int32_t imm12 = (instr & kImm12Mask) >> 20;
- imm12 = imm12 << 12 >> 12;
+ return imm12;
+}
+
+int Assembler::JalrOffset(Instr instr) {
+ DCHECK(IsJalr(instr));
+ int32_t imm12 = (instr & kImm12Mask) >> 20;
return imm12;
}
@@ -717,7 +747,7 @@ bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
}
void Assembler::disassembleInstr(Instr instr) {
- if (!FLAG_debug_riscv) return;
+ if (!FLAG_riscv_debug) return;
disasm::NameConverter converter;
disasm::Disassembler disasm(converter);
EmbeddedVector<char, 128> disasm_buffer;
@@ -2567,9 +2597,25 @@ void Assembler::AdjustBaseAndOffset(MemOperand* src, Register scratch,
// Must not overwrite the register 'base' while loading 'offset'.
DCHECK(src->rm() != scratch);
- RV_li(scratch, src->offset());
- add(scratch, scratch, src->rm());
- src->offset_ = 0;
+ constexpr int32_t kMinOffsetForSimpleAdjustment = 0x7F8;
+ constexpr int32_t kMaxOffsetForSimpleAdjustment =
+ 2 * kMinOffsetForSimpleAdjustment;
+ if (0 <= src->offset() && src->offset() <= kMaxOffsetForSimpleAdjustment) {
+ addi(scratch, src->rm(), kMinOffsetForSimpleAdjustment);
+ src->offset_ -= kMinOffsetForSimpleAdjustment;
+ } else if (-kMaxOffsetForSimpleAdjustment <= src->offset() &&
+ src->offset() < 0) {
+ addi(scratch, src->rm(), -kMinOffsetForSimpleAdjustment);
+ src->offset_ += kMinOffsetForSimpleAdjustment;
+ } else if (access_type == OffsetAccessType::SINGLE_ACCESS) {
+ RV_li(scratch, (static_cast<int64_t>(src->offset()) + 0x800) >> 12 << 12);
+ add(scratch, scratch, src->rm());
+ src->offset_ = src->offset() << 20 >> 20;
+ } else {
+ RV_li(scratch, src->offset());
+ add(scratch, scratch, src->rm());
+ src->offset_ = 0;
+ }
src->rm_ = scratch;
}
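// Worked example (illustrative, not part of the patch) of the SINGLE_ACCESS
// path above:
//   offset  = 0x12345  (outside both int12 and the simple-adjustment range)
//   scratch = base + ((0x12345 + 0x800) >> 12 << 12) = base + 0x12000
//   offset_ = 0x12345 << 20 >> 20                    = 0x345
//   0x12000 + 0x345 == 0x12345, and 0x345 fits the 12-bit load/store field.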
@@ -2596,6 +2642,22 @@ int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
}
}
+void Assembler::RelocateRelativeReference(RelocInfo::Mode rmode, Address pc,
+ intptr_t pc_delta) {
+ Instr instr = instr_at(pc);
+ Instr instr1 = instr_at(pc + 1 * kInstrSize);
+ DCHECK(RelocInfo::IsRelativeCodeTarget(rmode));
+ if (IsAuipc(instr) && IsJalr(instr1)) {
+ int32_t imm = BrachlongOffset(instr, instr1);
+ imm -= pc_delta;
+ PatchBranchlongOffset(pc, instr, instr1, imm);
+ return;
+ } else {
+ UNREACHABLE();
+ }
+}
+
void Assembler::GrowBuffer() {
DEBUG_PRINTF("GrowBuffer: %p -> ", buffer_start_);
// Compute new buffer size.
@@ -2766,12 +2828,23 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
ICacheFlushMode icache_flush_mode) {
Instr* instr = reinterpret_cast<Instr*>(pc);
if (IsAuipc(*instr)) {
- DCHECK(IsLd(*reinterpret_cast<Instr*>(pc + 4)));
- int32_t Hi20 = AuipcOffset(*instr);
- int32_t Lo12 = LdOffset(*reinterpret_cast<Instr*>(pc + 4));
- Memory<Address>(pc + Hi20 + Lo12) = target;
- if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- FlushInstructionCache(pc + Hi20 + Lo12, 2 * kInstrSize);
+ if (IsLd(*reinterpret_cast<Instr*>(pc + 4))) {
+ int32_t Hi20 = AuipcOffset(*instr);
+ int32_t Lo12 = LdOffset(*reinterpret_cast<Instr*>(pc + 4));
+ Memory<Address>(pc + Hi20 + Lo12) = target;
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ FlushInstructionCache(pc + Hi20 + Lo12, 2 * kInstrSize);
+ }
+ } else {
+ DCHECK(IsJalr(*reinterpret_cast<Instr*>(pc + 4)));
+ int64_t imm = (int64_t)target - (int64_t)pc;
+ Instr instr = instr_at(pc);
+ Instr instr1 = instr_at(pc + 1 * kInstrSize);
+ DCHECK(is_int32(imm));
+ int num = PatchBranchlongOffset(pc, instr, instr1, (int32_t)imm);
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ FlushInstructionCache(pc, num * kInstrSize);
+ }
}
} else {
set_target_address_at(pc, target, icache_flush_mode);
@@ -2781,10 +2854,17 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
Address Assembler::target_address_at(Address pc, Address constant_pool) {
Instr* instr = reinterpret_cast<Instr*>(pc);
if (IsAuipc(*instr)) {
- DCHECK(IsLd(*reinterpret_cast<Instr*>(pc + 4)));
- int32_t Hi20 = AuipcOffset(*instr);
- int32_t Lo12 = LdOffset(*reinterpret_cast<Instr*>(pc + 4));
- return Memory<Address>(pc + Hi20 + Lo12);
+ if (IsLd(*reinterpret_cast<Instr*>(pc + 4))) {
+ int32_t Hi20 = AuipcOffset(*instr);
+ int32_t Lo12 = LdOffset(*reinterpret_cast<Instr*>(pc + 4));
+ return Memory<Address>(pc + Hi20 + Lo12);
+ } else {
+ DCHECK(IsJalr(*reinterpret_cast<Instr*>(pc + 4)));
+ int32_t Hi20 = AuipcOffset(*instr);
+ int32_t Lo12 = JalrOffset(*reinterpret_cast<Instr*>(pc + 4));
+ return pc + Hi20 + Lo12;
+ }
+
} else {
return target_address_at(pc);
}
@@ -2890,17 +2970,17 @@ bool UseScratchRegisterScope::hasAvailable() const { return *available_ != 0; }
bool Assembler::IsConstantPoolAt(Instruction* instr) {
// The constant pool marker is made of two instructions. These instructions
// will never be emitted by the JIT, so checking for the first one is enough:
- // 0: ld x0, t3, #offset
+ // 0: ld x0, x0, #offset
Instr instr_value = *reinterpret_cast<Instr*>(instr);
-
- bool result = IsLd(instr_value) && (instr->RdValue() == kRegCode_zero_reg);
- // It is still worth asserting the marker is complete.
- // 4: j 0
+ bool result = IsLd(instr_value) && (instr->Rs1Value() == kRegCode_zero_reg) &&
+ (instr->RdValue() == kRegCode_zero_reg);
#ifdef DEBUG
- Instruction* instr_fllowing = instr + kInstrSize;
- DCHECK(!result || (IsJal(*reinterpret_cast<Instr*>(instr_fllowing)) &&
- instr_fllowing->Imm20JValue() == 0 &&
- instr_fllowing->RdValue() == kRegCode_zero_reg));
+ // It is still worth asserting the marker is complete.
+ // 1: j 0x0
+ Instruction* instr_following = instr + kInstrSize;
+ DCHECK(!result || (IsJal(*reinterpret_cast<Instr*>(instr_following)) &&
+ instr_following->Imm20JValue() == 0 &&
+ instr_following->RdValue() == kRegCode_zero_reg));
#endif
return result;
}
@@ -2941,9 +3021,9 @@ void ConstantPool::EmitPrologue(Alignment require_alignment) {
int ConstantPool::PrologueSize(Jump require_jump) const {
// Prologue is:
- // j over ;; if require_jump
- // ld x0, t3, #pool_size
- // j xzr
+ // j over ;; if require_jump
+ // ld x0, x0, #pool_size
+ // j 0x0
int prologue_size = require_jump == Jump::kRequired ? kInstrSize : 0;
prologue_size += 2 * kInstrSize;
return prologue_size;
@@ -2954,7 +3034,7 @@ void ConstantPool::SetLoadOffsetToConstPoolEntry(int load_offset,
const ConstantPoolKey& key) {
Instr instr_auipc = assm_->instr_at(load_offset);
Instr instr_ld = assm_->instr_at(load_offset + 4);
- // Instruction to patch must be 'ld t3, t3, offset' with offset == kInstrSize.
+ // Instruction to patch must be 'ld rd, offset(rd)' with 'offset == 0'.
DCHECK(assm_->IsAuipc(instr_auipc));
DCHECK(assm_->IsLd(instr_ld));
DCHECK_EQ(assm_->LdOffset(instr_ld), 0);
diff --git a/deps/v8/src/codegen/riscv64/assembler-riscv64.h b/deps/v8/src/codegen/riscv64/assembler-riscv64.h
index 1dcf4e0aae..ff66351d6a 100644
--- a/deps/v8/src/codegen/riscv64/assembler-riscv64.h
+++ b/deps/v8/src/codegen/riscv64/assembler-riscv64.h
@@ -53,7 +53,7 @@ namespace v8 {
namespace internal {
#define DEBUG_PRINTF(...) \
- if (FLAG_debug_riscv) { \
+ if (FLAG_riscv_debug) { \
printf(__VA_ARGS__); \
}
@@ -160,6 +160,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
virtual ~Assembler() { CHECK(constpool_.IsEmpty()); }
+ void AbortedCodeGeneration() { constpool_.Clear(); }
// GetCode emits any pending (non-emitted) code and fills the descriptor desc.
static constexpr int kNoHandlerTable = 0;
static constexpr SafepointTableBuilder* kNoSafepointTable = nullptr;
@@ -208,11 +209,14 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Get offset from instr.
int BranchOffset(Instr instr);
- int BrachlongOffset(Instr auipc, Instr jalr);
+ static int BrachlongOffset(Instr auipc, Instr jalr);
+ static int PatchBranchlongOffset(Address pc, Instr auipc, Instr instr_I,
+ int32_t offset);
int JumpOffset(Instr instr);
int CJumpOffset(Instr instr);
static int LdOffset(Instr instr);
static int AuipcOffset(Instr instr);
+ static int JalrOffset(Instr instr);
// Returns the branch offset to the given label from the current code
// position. Links the label to the current position if it is still unbound.
@@ -800,6 +804,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static int RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
intptr_t pc_delta);
+ static void RelocateRelativeReference(RelocInfo::Mode rmode, Address pc,
+ intptr_t pc_delta);
// Writes a single byte or word of data in the code stream. Used for
// inline tables, e.g., jump-tables.
@@ -862,8 +868,40 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static bool IsLd(Instr instr);
void CheckTrampolinePool();
+ // Get the code target object for a pc-relative call or jump.
+ V8_INLINE Handle<Code> relative_code_target_object_handle_at(
+ Address pc_) const;
+
inline int UnboundLabelsCount() { return unbound_labels_count_; }
+ using BlockPoolsScope = BlockTrampolinePoolScope;
+
+ void RecordConstPool(int size);
+
+ void ForceConstantPoolEmissionWithoutJump() {
+ constpool_.Check(Emission::kForced, Jump::kOmitted);
+ }
+ void ForceConstantPoolEmissionWithJump() {
+ constpool_.Check(Emission::kForced, Jump::kRequired);
+ }
+ // Check if the const pool needs to be emitted while pretending that {margin}
+ // more bytes of instructions have already been emitted.
+ void EmitConstPoolWithJumpIfNeeded(size_t margin = 0) {
+ constpool_.Check(Emission::kIfNeeded, Jump::kRequired, margin);
+ }
+
+ void EmitConstPoolWithoutJumpIfNeeded(size_t margin = 0) {
+ constpool_.Check(Emission::kIfNeeded, Jump::kOmitted, margin);
+ }
+
+ void RecordEntry(uint32_t data, RelocInfo::Mode rmode) {
+ constpool_.RecordEntry(data, rmode);
+ }
+
+ void RecordEntry(uint64_t data, RelocInfo::Mode rmode) {
+ constpool_.RecordEntry(data, rmode);
+ }
+
protected:
// Readable constants for base and offset adjustment helper, these indicate if
// aside from offset, another value like offset + 4 should fit into int16.
@@ -949,34 +987,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
}
- using BlockPoolsScope = BlockTrampolinePoolScope;
-
- void RecordConstPool(int size);
-
- void ForceConstantPoolEmissionWithoutJump() {
- constpool_.Check(Emission::kForced, Jump::kOmitted);
- }
- void ForceConstantPoolEmissionWithJump() {
- constpool_.Check(Emission::kForced, Jump::kRequired);
- }
- // Check if the const pool needs to be emitted while pretending that {margin}
- // more bytes of instructions have already been emitted.
- void EmitConstPoolWithJumpIfNeeded(size_t margin = 0) {
- constpool_.Check(Emission::kIfNeeded, Jump::kRequired, margin);
- }
-
- void EmitConstPoolWithoutJumpIfNeeded(size_t margin = 0) {
- constpool_.Check(Emission::kIfNeeded, Jump::kOmitted, margin);
- }
-
- void RecordEntry(uint32_t data, RelocInfo::Mode rmode) {
- constpool_.RecordEntry(data, rmode);
- }
-
- void RecordEntry(uint64_t data, RelocInfo::Mode rmode) {
- constpool_.RecordEntry(data, rmode);
- }
-
private:
// Avoid overflows for displacements etc.
static const int kMaximalBufferSize = 512 * MB;
@@ -1231,6 +1241,16 @@ class V8_EXPORT_PRIVATE UseScratchRegisterScope {
Register Acquire();
bool hasAvailable() const;
+ void Include(const RegList& list) { *available_ |= list; }
+ void Exclude(const RegList& list) { *available_ &= ~list; }
+ void Include(const Register& reg1, const Register& reg2 = no_reg) {
+ RegList list(reg1.bit() | reg2.bit());
+ Include(list);
+ }
+ void Exclude(const Register& reg1, const Register& reg2 = no_reg) {
+ RegList list(reg1.bit() | reg2.bit());
+ Exclude(list);
+ }
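+  // Usage sketch (illustrative): temporarily widening or narrowing the set
+  // of scratch registers the assembler may hand out in this scope, e.g.
+  //   UseScratchRegisterScope temps(&masm);
+  //   temps.Include(t1, t2);   // t1/t2 become acquirable here
+  //   Register tmp = temps.Acquire();
+  //   temps.Exclude(t2);       // t2 is no longer handed out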
private:
RegList* available_;
diff --git a/deps/v8/src/codegen/riscv64/constants-riscv64.cc b/deps/v8/src/codegen/riscv64/constants-riscv64.cc
index 045488bf7f..d2709dc2c7 100644
--- a/deps/v8/src/codegen/riscv64/constants-riscv64.cc
+++ b/deps/v8/src/codegen/riscv64/constants-riscv64.cc
@@ -106,8 +106,11 @@ int FPURegisters::Number(const char* name) {
}
InstructionBase::Type InstructionBase::InstructionType() const {
+ if (IsIllegalInstruction()) {
+ return kUnsupported;
+ }
// RV64C Instruction
- if (IsShortInstruction()) {
+ if (FLAG_riscv_c_extension && IsShortInstruction()) {
switch (InstructionBits() & kRvcOpcodeMask) {
case RO_C_ADDI4SPN:
return kCIWType;
diff --git a/deps/v8/src/codegen/riscv64/constants-riscv64.h b/deps/v8/src/codegen/riscv64/constants-riscv64.h
index 3b5ffff6da..c8f54d8f7f 100644
--- a/deps/v8/src/codegen/riscv64/constants-riscv64.h
+++ b/deps/v8/src/codegen/riscv64/constants-riscv64.h
@@ -8,6 +8,7 @@
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/common/globals.h"
+#include "src/flags/flags.h"
// UNIMPLEMENTED_ macro for RISCV.
#ifdef DEBUG
@@ -55,8 +56,7 @@ const uint32_t kLessSignificantWordInDoublewordOffset = 4;
namespace v8 {
namespace internal {
-// TODO(sigurds): Change this value once we use relative jumps.
-constexpr size_t kMaxPCRelativeCodeRangeInMB = 0;
+constexpr size_t kMaxPCRelativeCodeRangeInMB = 4096;
// -----------------------------------------------------------------------------
// Registers and FPURegisters.
@@ -727,18 +727,25 @@ class InstructionBase {
kUnsupported = -1
};
+ inline bool IsIllegalInstruction() const {
+ uint16_t FirstHalfWord = *reinterpret_cast<const uint16_t*>(this);
+ return FirstHalfWord == 0;
+ }
+
inline bool IsShortInstruction() const {
uint8_t FirstByte = *reinterpret_cast<const uint8_t*>(this);
return (FirstByte & 0x03) <= C2;
}
inline uint8_t InstructionSize() const {
- return this->IsShortInstruction() ? kShortInstrSize : kInstrSize;
+ return (FLAG_riscv_c_extension && this->IsShortInstruction())
+ ? kShortInstrSize
+ : kInstrSize;
}
// Get the raw instruction bits.
inline Instr InstructionBits() const {
- if (this->IsShortInstruction()) {
+ if (FLAG_riscv_c_extension && this->IsShortInstruction()) {
return 0x0000FFFF & (*reinterpret_cast<const ShortInstr*>(this));
}
return *reinterpret_cast<const Instr*>(this);
diff --git a/deps/v8/src/codegen/riscv64/interface-descriptors-riscv64-inl.h b/deps/v8/src/codegen/riscv64/interface-descriptors-riscv64-inl.h
new file mode 100644
index 0000000000..4a8bb0d9ee
--- /dev/null
+++ b/deps/v8/src/codegen/riscv64/interface-descriptors-riscv64-inl.h
@@ -0,0 +1,265 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_RISCV64_INTERFACE_DESCRIPTORS_RISCV64_INL_H_
+#define V8_CODEGEN_RISCV64_INTERFACE_DESCRIPTORS_RISCV64_INL_H_
+
+#if V8_TARGET_ARCH_RISCV64
+
+#include "src/base/template-utils.h"
+#include "src/codegen/interface-descriptors.h"
+#include "src/execution/frames.h"
+
+namespace v8 {
+namespace internal {
+
+constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() {
+ auto registers = RegisterArray(a0, a1, a2, a3, a4);
+ STATIC_ASSERT(registers.size() == kMaxBuiltinRegisterParams);
+ return registers;
+}
+
+// static
+constexpr auto RecordWriteDescriptor::registers() {
+ return RegisterArray(a0, a1, a2, a3, kReturnRegister0);
+}
+
+// static
+constexpr auto DynamicCheckMapsDescriptor::registers() {
+ return RegisterArray(kReturnRegister0, a1, a2, a3, cp);
+}
+
+// static
+constexpr auto EphemeronKeyBarrierDescriptor::registers() {
+ return RegisterArray(a0, a1, a2, a3, kReturnRegister0);
+}
+
+// static
+constexpr Register LoadDescriptor::ReceiverRegister() { return a1; }
+// static
+constexpr Register LoadDescriptor::NameRegister() { return a2; }
+// static
+constexpr Register LoadDescriptor::SlotRegister() { return a0; }
+
+// static
+constexpr Register LoadWithVectorDescriptor::VectorRegister() { return a3; }
+
+// static
+constexpr Register
+LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
+ return a4;
+}
+
+// static
+constexpr Register StoreDescriptor::ReceiverRegister() { return a1; }
+// static
+constexpr Register StoreDescriptor::NameRegister() { return a2; }
+// static
+constexpr Register StoreDescriptor::ValueRegister() { return a0; }
+// static
+constexpr Register StoreDescriptor::SlotRegister() { return a4; }
+
+// static
+constexpr Register StoreWithVectorDescriptor::VectorRegister() { return a3; }
+
+// static
+constexpr Register StoreTransitionDescriptor::MapRegister() { return a5; }
+
+// static
+constexpr Register ApiGetterDescriptor::HolderRegister() { return a0; }
+// static
+constexpr Register ApiGetterDescriptor::CallbackRegister() { return a3; }
+
+// static
+constexpr Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
+// static
+constexpr Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
+
+// static
+constexpr Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
+ return a2;
+}
+// static
+constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() { return a3; }
+
+// static
+constexpr Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
+
+// static
+constexpr auto TypeofDescriptor::registers() { return RegisterArray(a3); }
+
+// static
+constexpr auto CallTrampolineDescriptor::registers() {
+ // a1: target
+ // a0: number of arguments
+ return RegisterArray(a1, a0);
+}
+
+// static
+constexpr auto CallVarargsDescriptor::registers() {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a4 : arguments list length (untagged)
+ // a2 : arguments list (FixedArray)
+ return RegisterArray(a1, a0, a4, a2);
+}
+
+// static
+constexpr auto CallForwardVarargsDescriptor::registers() {
+ // a1: target
+ // a0: number of arguments
+ // a2: start index (to support rest parameters)
+ return RegisterArray(a1, a0, a2);
+}
+
+// static
+constexpr auto CallFunctionTemplateDescriptor::registers() {
+ // a1 : function template info
+ // a0 : number of arguments (on the stack, not including receiver)
+ return RegisterArray(a1, a0);
+}
+
+// static
+constexpr auto CallWithSpreadDescriptor::registers() {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a2 : the object to spread
+ return RegisterArray(a1, a0, a2);
+}
+
+// static
+constexpr auto CallWithArrayLikeDescriptor::registers() {
+ // a1 : the target to call
+ // a2 : the arguments list
+ return RegisterArray(a1, a2);
+}
+
+// static
+constexpr auto ConstructVarargsDescriptor::registers() {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a3 : the new target
+ // a4 : arguments list length (untagged)
+ // a2 : arguments list (FixedArray)
+ return RegisterArray(a1, a3, a0, a4, a2);
+}
+
+// static
+constexpr auto ConstructForwardVarargsDescriptor::registers() {
+ // a3: new target
+ // a1: target
+ // a0: number of arguments
+ // a2: start index (to support rest parameters)
+ return RegisterArray(a1, a3, a0, a2);
+}
+
+// static
+constexpr auto ConstructWithSpreadDescriptor::registers() {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a3 : the new target
+ // a2 : the object to spread
+ return RegisterArray(a1, a3, a0, a2);
+}
+
+// static
+constexpr auto ConstructWithArrayLikeDescriptor::registers() {
+ // a1 : the target to call
+ // a3 : the new target
+ // a2 : the arguments list
+ return RegisterArray(a1, a3, a2);
+}
+
+// static
+constexpr auto ConstructStubDescriptor::registers() {
+ // a3: new target
+ // a1: target
+ // a0: number of arguments
+ // a2: allocation site or undefined
+ return RegisterArray(a1, a3, a0, a2);
+}
+
+// static
+constexpr auto AbortDescriptor::registers() { return RegisterArray(a0); }
+
+// static
+constexpr auto CompareDescriptor::registers() {
+ // a1: left operand
+ // a0: right operand
+ return RegisterArray(a1, a0);
+}
+
+// static
+constexpr auto Compare_BaselineDescriptor::registers() {
+ // a1: left operand
+ // a0: right operand
+ // a2: feedback slot
+ return RegisterArray(a1, a0, a2);
+}
+
+// static
+constexpr auto BinaryOpDescriptor::registers() {
+ // a1: left operand
+ // a0: right operand
+ return RegisterArray(a1, a0);
+}
+
+// static
+constexpr auto BinaryOp_BaselineDescriptor::registers() {
+ // a1: left operand
+ // a0: right operand
+ // a2: feedback slot
+ return RegisterArray(a1, a0, a2);
+}
+
+// static
+constexpr auto ApiCallbackDescriptor::registers() {
+ return RegisterArray(a1, // kApiFunctionAddress
+ a2, // kArgc
+ a3, // kCallData
+ a0); // kHolder
+}
+
+// static
+constexpr auto InterpreterDispatchDescriptor::registers() {
+ return RegisterArray(
+ kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister);
+}
+
+// static
+constexpr auto InterpreterPushArgsThenCallDescriptor::registers() {
+ return RegisterArray(a0, // argument count (not including receiver)
+ a2, // address of first argument
+ a1); // the target callable to be called
+}
+
+// static
+constexpr auto InterpreterPushArgsThenConstructDescriptor::registers() {
+ return RegisterArray(
+ a0, // argument count (not including receiver)
+ a4, // address of the first argument
+ a1, // constructor to call
+ a3, // new target
+ a2); // allocation site feedback if available, undefined otherwise
+}
+
+// static
+constexpr auto ResumeGeneratorDescriptor::registers() {
+ return RegisterArray(a0, // the value to pass to the generator
+ a1); // the JSGeneratorObject to resume
+}
+
+// static
+constexpr auto RunMicrotasksEntryDescriptor::registers() {
+ return RegisterArray(a0, a1);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_RISCV64
+
+#endif // V8_CODEGEN_RISCV64_INTERFACE_DESCRIPTORS_RISCV64_INL_H_
diff --git a/deps/v8/src/codegen/riscv64/interface-descriptors-riscv64.cc b/deps/v8/src/codegen/riscv64/interface-descriptors-riscv64.cc
deleted file mode 100644
index 23953097cd..0000000000
--- a/deps/v8/src/codegen/riscv64/interface-descriptors-riscv64.cc
+++ /dev/null
@@ -1,313 +0,0 @@
-// Copyright 2021 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_RISCV64
-
-#include "src/codegen/interface-descriptors.h"
-#include "src/execution/frames.h"
-
-namespace v8 {
-namespace internal {
-
-const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
-
-void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
- CallInterfaceDescriptorData* data, int register_parameter_count) {
- const Register default_stub_registers[] = {a0, a1, a2, a3, a4};
- CHECK_LE(static_cast<size_t>(register_parameter_count),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(register_parameter_count,
- default_stub_registers);
-}
-
-void WasmI32AtomicWait32Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {a0, a1, a2, a3};
- CHECK_EQ(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void WasmI64AtomicWait32Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {a0, a1, a2, a3, a4};
- CHECK_EQ(static_cast<size_t>(kParameterCount - kStackArgumentsCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount - kStackArgumentsCount,
- default_stub_registers);
-}
-
-void RecordWriteDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {a0, a1, a2, a3, kReturnRegister0};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void EphemeronKeyBarrierDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {a0, a1, a2, a3, kReturnRegister0};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void DynamicCheckMapsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register default_stub_registers[] = {kReturnRegister0, a1, a2, a3, cp};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-const Register LoadDescriptor::ReceiverRegister() { return a1; }
-const Register LoadDescriptor::NameRegister() { return a2; }
-const Register LoadDescriptor::SlotRegister() { return a0; }
-
-const Register LoadWithVectorDescriptor::VectorRegister() { return a3; }
-
-const Register
-LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
- return a4;
-}
-
-const Register StoreDescriptor::ReceiverRegister() { return a1; }
-const Register StoreDescriptor::NameRegister() { return a2; }
-const Register StoreDescriptor::ValueRegister() { return a0; }
-const Register StoreDescriptor::SlotRegister() { return a4; }
-
-const Register StoreWithVectorDescriptor::VectorRegister() { return a3; }
-
-const Register StoreTransitionDescriptor::SlotRegister() { return a4; }
-const Register StoreTransitionDescriptor::VectorRegister() { return a3; }
-const Register StoreTransitionDescriptor::MapRegister() { return a5; }
-
-const Register ApiGetterDescriptor::HolderRegister() { return a0; }
-const Register ApiGetterDescriptor::CallbackRegister() { return a3; }
-
-const Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
-const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
-
-// static
-const Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
-
-void TypeofDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1: target
- // a0: number of arguments
- Register registers[] = {a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a0 : number of arguments (on the stack, not including receiver)
- // a1 : the target to call
- // a4 : arguments list length (untagged)
- // a2 : arguments list (FixedArray)
- Register registers[] = {a1, a0, a4, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallForwardVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1: the target to call
- // a0: number of arguments
- // a2: start index (to support rest parameters)
- Register registers[] = {a1, a0, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallFunctionTemplateDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1 : function template info
- // a0 : number of arguments (on the stack, not including receiver)
- Register registers[] = {a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallWithSpreadDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a0 : number of arguments (on the stack, not including receiver)
- // a1 : the target to call
- // a2 : the object to spread
- Register registers[] = {a1, a0, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1 : the target to call
- // a2 : the arguments list
- Register registers[] = {a1, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a0 : number of arguments (on the stack, not including receiver)
- // a1 : the target to call
- // a3 : the new target
- // a4 : arguments list length (untagged)
- // a2 : arguments list (FixedArray)
- Register registers[] = {a1, a3, a0, a4, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1: the target to call
- // a3: new target
- // a0: number of arguments
- // a2: start index (to support rest parameters)
- Register registers[] = {a1, a3, a0, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructWithSpreadDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a0 : number of arguments (on the stack, not including receiver)
- // a1 : the target to call
- // a3 : the new target
- // a2 : the object to spread
- Register registers[] = {a1, a3, a0, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1 : the target to call
- // a3 : the new target
- // a2 : the arguments list
- Register registers[] = {a1, a3, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructStubDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1: target
- // a3: new target
- // a0: number of arguments
- // a2: allocation site or undefined
- Register registers[] = {a1, a3, a0, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void AbortDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CompareDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void BinaryOpDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ApiCallbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- a1, // kApiFunctionAddress
- a2, // kArgc
- a3, // kCallData
- a0, // kHolder
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterDispatchDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
- kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterPushArgsThenCallDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- a0, // argument count (not including receiver)
- a2, // address of first argument
- a1 // the target callable to be call
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- a0, // argument count (not including receiver)
- a4, // address of the first argument
- a1, // constructor to call
- a3, // new target
- a2, // allocation site feedback if available, undefined otherwise
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ResumeGeneratorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- a0, // the value to pass to the generator
- a1 // the JSGeneratorObject to resume
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void BinaryOp_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:11421): Implement on this platform.
- InitializePlatformUnimplemented(data, kParameterCount);
-}
-
-void Compare_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:11421): Implement on this platform.
- InitializePlatformUnimplemented(data, kParameterCount);
-}
-
-void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- a1, // loaded new FP
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a0, a1};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_RISCV64
diff --git a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
index ff798da0e9..801a74f569 100644
--- a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
+++ b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
@@ -12,6 +12,7 @@
#include "src/codegen/callable.h"
#include "src/codegen/code-factory.h"
#include "src/codegen/external-reference-table.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
@@ -61,7 +62,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
RegList list = kJSCallerSaved & ~exclusions;
bytes += NumRegs(list) * kPointerSize;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
}
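This hunk and the ones that follow migrate plain enums (kSaveFPRegs, INLINE_SMI_CHECK, OMIT_REMEMBERED_SET, CALL_FUNCTION, ...) to scoped enums; the macro-assembler header below drops its local RememberedSetAction/SmiCheck definitions accordingly. A sketch of the assumed shared definitions, with enumerator names taken from the call sites in this patch (enumerator order is an assumption):

    enum class SaveFPRegsMode { kIgnore, kSave };
    enum class SmiCheck { kInline, kOmit };
    enum class RememberedSetAction { kOmit, kEmit };
    enum class InvokeType { kCall, kJump };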
@@ -86,7 +87,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
MultiPush(list);
bytes += NumRegs(list) * kPointerSize;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
MultiPushFPU(kCallerSavedFPU);
bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
}
@@ -97,7 +98,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
int bytes = 0;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
MultiPopFPU(kCallerSavedFPU);
bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
}
@@ -175,7 +176,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
Label done;
// Skip barrier if writing a smi.
- if (smi_check == INLINE_SMI_CHECK) {
+ if (smi_check == SmiCheck::kInline) {
JumpIfSmi(value, &done);
}
@@ -184,7 +185,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
DCHECK(IsAligned(offset, kPointerSize));
Add64(dst, object, Operand(offset - kHeapObjectTag));
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -197,13 +198,13 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
RecordWrite(object, dst, value, ra_status, save_fp, remembered_set_action,
- OMIT_SMI_CHECK);
+ SmiCheck::kOmit);
bind(&done);
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
li(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
li(dst, Operand(bit_cast<int64_t>(kZapValue + 8)));
}
@@ -336,14 +337,14 @@ void MacroAssembler::RecordWrite(Register object, Register address,
SaveFPRegsMode fp_mode,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
DCHECK(!AreAliased(object, address, value, kScratchReg));
Ld(kScratchReg, MemOperand(address));
Assert(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite,
kScratchReg, Operand(value));
}
- if ((remembered_set_action == OMIT_REMEMBERED_SET &&
+ if ((remembered_set_action == RememberedSetAction::kOmit &&
!FLAG_incremental_marking) ||
FLAG_disable_write_barriers) {
return;
@@ -353,7 +354,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
// catch stores of smis and stores into the young generation.
Label done;
- if (smi_check == INLINE_SMI_CHECK) {
+ if (smi_check == SmiCheck::kInline) {
DCHECK_EQ(0, kSmiTag);
JumpIfSmi(value, &done);
}
@@ -378,7 +379,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
li(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
li(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
}
@@ -393,6 +394,10 @@ void TurboAssembler::Add32(Register rd, Register rs, const Operand& rt) {
} else {
if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
addiw(rd, rs, static_cast<int32_t>(rt.immediate()));
+ } else if ((-4096 <= rt.immediate() && rt.immediate() <= -2049) ||
+ (2048 <= rt.immediate() && rt.immediate() <= 4094)) {
+ addiw(rd, rs, rt.immediate() / 2);
+ addiw(rd, rd, rt.immediate() - (rt.immediate() / 2));
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
@@ -409,6 +414,10 @@ void TurboAssembler::Add64(Register rd, Register rs, const Operand& rt) {
} else {
if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
addi(rd, rs, static_cast<int32_t>(rt.immediate()));
+ } else if ((-4096 <= rt.immediate() && rt.immediate() <= -2049) ||
+ (2048 <= rt.immediate() && rt.immediate() <= 4094)) {
+ addi(rd, rs, rt.immediate() / 2);
+ addi(rd, rd, rt.immediate() - (rt.immediate() / 2));
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
@@ -429,6 +438,10 @@ void TurboAssembler::Sub32(Register rd, Register rs, const Operand& rt) {
addiw(rd, rs,
static_cast<int32_t>(
-rt.immediate())); // No subiw instr, use addiw(x, y, -imm).
+ } else if ((-4096 <= -rt.immediate() && -rt.immediate() <= -2049) ||
+ (2048 <= -rt.immediate() && -rt.immediate() <= 4094)) {
+ addiw(rd, rs, -rt.immediate() / 2);
+ addiw(rd, rd, -rt.immediate() - (-rt.immediate() / 2));
} else {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -452,6 +465,10 @@ void TurboAssembler::Sub64(Register rd, Register rs, const Operand& rt) {
addi(rd, rs,
static_cast<int32_t>(
-rt.immediate())); // No subi instr, use addi(x, y, -imm).
+ } else if ((-4096 <= -rt.immediate() && -rt.immediate() <= -2049) ||
+ (2048 <= -rt.immediate() && -rt.immediate() <= 4094)) {
+ addi(rd, rs, -rt.immediate() / 2);
+ addi(rd, rd, -rt.immediate() - (-rt.immediate() / 2));
} else {
int li_count = InstrCountForLi64Bit(rt.immediate());
int li_neg_count = InstrCountForLi64Bit(-rt.immediate());
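The four hunks above extend Add32/Add64/Sub32/Sub64 to handle immediates just outside the signed 12-bit addi/addiw range by splitting them across two add instructions, avoiding both a scratch register and a multi-instruction li sequence. An illustrative check that both halves always fit (not part of the patch):

    #include <cassert>
    #include <cstdint>

    // For imm in [-4096, -2049] or [2048, 4094], both halves land in the
    // signed 12-bit range [-2048, 2047] accepted by addi/addiw.
    void CheckImmediateSplit(int64_t imm) {
      int64_t lo = imm / 2;   // first addi immediate (truncates toward zero)
      int64_t hi = imm - lo;  // second addi immediate
      assert(-2048 <= lo && lo <= 2047);
      assert(-2048 <= hi && hi <= 2047);
      assert(lo + hi == imm);
    }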
@@ -884,6 +901,7 @@ void TurboAssembler::Sll64(Register rd, Register rs, const Operand& rt) {
void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
+ BlockTrampolinePoolScope block_trampoline_pool(this);
if (rt.is_reg()) {
negw(scratch, rt.rm());
sllw(scratch, rs, scratch);
@@ -908,6 +926,7 @@ void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) {
void TurboAssembler::Dror(Register rd, Register rs, const Operand& rt) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
+ BlockTrampolinePoolScope block_trampoline_pool(this);
if (rt.is_reg()) {
negw(scratch, rt.rm());
sll(scratch, rs, scratch);
@@ -928,9 +947,10 @@ void TurboAssembler::Dror(Register rd, Register rs, const Operand& rt) {
}
void TurboAssembler::CalcScaledAddress(Register rd, Register rt, Register rs,
- uint8_t sa, Register scratch) {
+ uint8_t sa) {
DCHECK(sa >= 1 && sa <= 31);
- Register tmp = rd == rt ? scratch : rd;
+ UseScratchRegisterScope temps(this);
+ Register tmp = rd == rt ? temps.Acquire() : rd;
DCHECK(tmp != rt);
slli(tmp, rs, sa);
Add64(rd, rt, tmp);
@@ -1215,8 +1235,9 @@ void TurboAssembler::Uld(Register rd, const MemOperand& rs) {
// Load a consecutive 32-bit word pair into a 64-bit reg, putting the first
// word in the low bits and the second word in the high bits.
-void MacroAssembler::LoadWordPair(Register rd, const MemOperand& rs,
- Register scratch) {
+void MacroAssembler::LoadWordPair(Register rd, const MemOperand& rs) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
Lwu(rd, rs);
Lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
slli(scratch, scratch, 32);
@@ -1228,8 +1249,9 @@ void TurboAssembler::Usd(Register rd, const MemOperand& rs) {
}
// Do a 64-bit store as two consecutive 32-bit stores to an unaligned address.
-void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs,
- Register scratch) {
+void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
Sw(rd, rs);
srai(scratch, rd, 32);
Sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
@@ -1464,7 +1486,7 @@ void TurboAssembler::li(Register rd, Operand j, LiFlags mode) {
UseScratchRegisterScope temps(this);
int count = li_estimate(j.immediate(), temps.hasAvailable());
int reverse_count = li_estimate(~j.immediate(), temps.hasAvailable());
- if (!FLAG_disable_riscv_constant_pool && count >= 4 && reverse_count >= 4) {
+ if (FLAG_riscv_constant_pool && count >= 4 && reverse_count >= 4) {
// Ld a Address from a constant pool.
RecordEntry((uint64_t)j.immediate(), j.rmode());
auipc(rd, 0);
@@ -1864,6 +1886,28 @@ void TurboAssembler::RoundHelper(FPURegister dst, FPURegister src,
fmv_s(dst, src);
}
}
+ {
+ Label not_NaN;
+ UseScratchRegisterScope temps2(this);
+ Register scratch = temps2.Acquire();
+ // According to the wasm spec
+ // (https://webassembly.github.io/spec/core/exec/numerics.html#aux-nans),
+ // if the input is a canonical NaN the output must be a canonical NaN, and
+ // if the input is any other NaN the output must be some NaN whose most
+ // significant payload bit is 1. In RISC-V, feq_d sets scratch to 0 if src
+ // is a NaN. If src is not a NaN, branch past the fix-up and do nothing;
+ // otherwise fmin_d(src, src) writes the canonical NaN into dst.
+ if (std::is_same<F, double>::value) {
+ feq_d(scratch, src, src);
+ bnez(scratch, &not_NaN);
+ fmin_d(dst, src, src);
+ } else {
+ feq_s(scratch, src, src);
+ bnez(scratch, &not_NaN);
+ fmin_s(dst, src, src);
+ }
+ bind(&not_NaN);
+ }
// If real exponent (i.e., t6 - kFloatExponentBias) is greater than
// kFloat32MantissaBits, it means the floating-point value has no fractional
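In scalar terms the NaN filter added above behaves like the sketch below, assuming (per the RISC-V spec and the comment in the patch) that fmin of two NaNs returns the canonical NaN:

    #include <cmath>

    // Rough scalar model of the feq/bnez/fmin sequence; illustrative only.
    double CanonicalizeIfNaN(double src) {
      if (src == src) return src;  // feq_d: not a NaN, dst already holds src
      return std::fmin(src, src);  // fmin_d(NaN, NaN) -> canonical NaN
    }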
@@ -2030,8 +2074,8 @@ void TurboAssembler::CompareF64(Register rd, FPUCondition cc, FPURegister cmp1,
}
}
-void TurboAssembler::CompareIsNanF32(Register rd, FPURegister cmp1,
- FPURegister cmp2) {
+void TurboAssembler::CompareIsNotNanF32(Register rd, FPURegister cmp1,
+ FPURegister cmp2) {
UseScratchRegisterScope temps(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
Register scratch = temps.Acquire();
@@ -2039,11 +2083,10 @@ void TurboAssembler::CompareIsNanF32(Register rd, FPURegister cmp1,
feq_s(rd, cmp1, cmp1); // rd <- !isNan(cmp1)
feq_s(scratch, cmp2, cmp2); // scratch <- !isNaN(cmp2)
And(rd, rd, scratch); // rd <- !isNan(cmp1) && !isNan(cmp2)
- Xor(rd, rd, 1); // rd <- isNan(cmp1) || isNan(cmp2)
}
-void TurboAssembler::CompareIsNanF64(Register rd, FPURegister cmp1,
- FPURegister cmp2) {
+void TurboAssembler::CompareIsNotNanF64(Register rd, FPURegister cmp1,
+ FPURegister cmp2) {
UseScratchRegisterScope temps(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
Register scratch = temps.Acquire();
@@ -2051,7 +2094,18 @@ void TurboAssembler::CompareIsNanF64(Register rd, FPURegister cmp1,
feq_d(rd, cmp1, cmp1); // rd <- !isNan(cmp1)
feq_d(scratch, cmp2, cmp2); // scratch <- !isNaN(cmp2)
And(rd, rd, scratch); // rd <- !isNan(cmp1) && !isNan(cmp2)
- Xor(rd, rd, 1); // rd <- isNan(cmp1) || isNan(cmp2)
+}
+
+void TurboAssembler::CompareIsNanF32(Register rd, FPURegister cmp1,
+ FPURegister cmp2) {
+ CompareIsNotNanF32(rd, cmp1, cmp2); // rd <- !isNan(cmp1) && !isNan(cmp2)
+ Xor(rd, rd, 1); // rd <- isNan(cmp1) || isNan(cmp2)
+}
+
+void TurboAssembler::CompareIsNanF64(Register rd, FPURegister cmp1,
+ FPURegister cmp2) {
+ CompareIsNotNanF64(rd, cmp1, cmp2); // rd <- !isNan(cmp1) && !isNan(cmp2)
+ Xor(rd, rd, 1); // rd <- isNan(cmp1) || isNan(cmp2)
}
void TurboAssembler::BranchTrueShortF(Register rs, Label* target) {
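The refactor above builds both predicates from one primitive: CompareIsNotNanF* computes !isNaN(cmp1) && !isNaN(cmp2) with two feq self-compares, and the old CompareIsNan* result is just that value xor-ed with 1 (De Morgan). Scalar model, for illustration:

    // feq(x, x) is 1 exactly when x is not a NaN.
    bool CompareIsNotNan(double a, double b) { return a == a && b == b; }
    bool CompareIsNan(double a, double b) { return !CompareIsNotNan(a, b); }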
@@ -2949,9 +3003,20 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
bool target_is_isolate_independent_builtin =
isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index);
-
- if (root_array_available_ && options().isolate_independent_code &&
- target_is_isolate_independent_builtin) {
+ if (target_is_isolate_independent_builtin &&
+ options().use_pc_relative_calls_and_jumps) {
+ int32_t code_target_index = AddCodeTarget(code);
+ Label skip;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (cond != al) {
+ Branch(&skip, NegateCondition(cond), rs, rt);
+ }
+ RecordRelocInfo(RelocInfo::RELATIVE_CODE_TARGET);
+ GenPCRelativeJump(t6, code_target_index);
+ bind(&skip);
+ return;
+ } else if (root_array_available_ && options().isolate_independent_code &&
+ target_is_isolate_independent_builtin) {
int offset = code->builtin_index() * kSystemPointerSize +
IsolateData::builtin_entry_table_offset();
Ld(t6, MemOperand(kRootRegister, offset));
@@ -3017,8 +3082,22 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
bool target_is_isolate_independent_builtin =
isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index);
- if (root_array_available_ && options().isolate_independent_code &&
- target_is_isolate_independent_builtin) {
+ if (target_is_isolate_independent_builtin &&
+ options().use_pc_relative_calls_and_jumps) {
+ int32_t code_target_index = AddCodeTarget(code);
+ Label skip;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ RecordCommentForOffHeapTrampoline(builtin_index);
+ if (cond != al) {
+ Branch(&skip, NegateCondition(cond), rs, rt);
+ }
+ RecordRelocInfo(RelocInfo::RELATIVE_CODE_TARGET);
+ GenPCRelativeJumpAndLink(t6, code_target_index);
+ bind(&skip);
+ RecordComment("]");
+ return;
+ } else if (root_array_available_ && options().isolate_independent_code &&
+ target_is_isolate_independent_builtin) {
int offset = code->builtin_index() * kSystemPointerSize +
IsolateData::builtin_entry_table_offset();
LoadRootRelative(t6, offset);
@@ -3059,6 +3138,46 @@ void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
Call(builtin_index);
}
+void TurboAssembler::CallBuiltin(int builtin_index) {
+ DCHECK(Builtins::IsBuiltinId(builtin_index));
+ RecordCommentForOffHeapTrampoline(builtin_index);
+ CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+ EmbeddedData d = EmbeddedData::FromBlob(isolate());
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ if (options().short_builtin_calls) {
+ Call(entry, RelocInfo::RUNTIME_ENTRY);
+ } else {
+ Call(entry, RelocInfo::OFF_HEAP_TARGET);
+ }
+ RecordComment("]");
+}
+
+void TurboAssembler::TailCallBuiltin(int builtin_index) {
+ DCHECK(Builtins::IsBuiltinId(builtin_index));
+ RecordCommentForOffHeapTrampoline(builtin_index);
+ CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+ EmbeddedData d = EmbeddedData::FromBlob(isolate());
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ if (options().short_builtin_calls) {
+ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+ } else {
+ Jump(entry, RelocInfo::OFF_HEAP_TARGET);
+ }
+ RecordComment("]");
+}
+
+void TurboAssembler::LoadEntryFromBuiltinIndex(Builtins::Name builtin_index,
+ Register destination) {
+ Ld(destination, EntryFromBuiltinIndexAsOperand(builtin_index));
+}
+
+MemOperand TurboAssembler::EntryFromBuiltinIndexAsOperand(
+ Builtins::Name builtin_index) {
+ DCHECK(root_array_available());
+ return MemOperand(kRootRegister,
+ IsolateData::builtin_entry_slot_offset(builtin_index));
+}
+
void TurboAssembler::PatchAndJump(Address target) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
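The new CallBuiltin/TailCallBuiltin helpers resolve a builtin's entry point straight from the embedded blob; with short builtin calls enabled the entry is close enough for a pc-relative RUNTIME_ENTRY reloc, otherwise an OFF_HEAP_TARGET is emitted. Hypothetical call site (Builtins::kAbort stands in for any builtin id):

    // Illustrative only: emits a near or far call depending on
    // options().short_builtin_calls.
    void EmitAbortCall(TurboAssembler* tasm) {
      tasm->CallBuiltin(Builtins::kAbort);
    }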
@@ -3115,16 +3234,28 @@ void TurboAssembler::Ret(Condition cond, Register rs, const Operand& rt) {
}
}
+void TurboAssembler::GenPCRelativeJump(Register rd, int64_t imm32) {
+ DCHECK(is_int32(imm32));
+ int32_t Hi20 = (((int32_t)imm32 + 0x800) >> 12);
+ int32_t Lo12 = (int32_t)imm32 << 20 >> 20;
+ auipc(rd, Hi20); // rd = PC + (Hi20 << 12).
+ jr(rd, Lo12); // Jump to rd + Lo12, i.e. PC + imm32.
+}
+
+void TurboAssembler::GenPCRelativeJumpAndLink(Register rd, int64_t imm32) {
+ DCHECK(is_int32(imm32));
+ int32_t Hi20 = (((int32_t)imm32 + 0x800) >> 12);
+ int32_t Lo12 = (int32_t)imm32 << 20 >> 20;
+ auipc(rd, Hi20); // rd = PC + (Hi20 << 12).
+ jalr(rd, Lo12); // Jump to rd + Lo12 and link: ra = PC + 4.
+}
+
void TurboAssembler::BranchLong(Label* L) {
// Generate position independent long branch.
BlockTrampolinePoolScope block_trampoline_pool(this);
int64_t imm64;
imm64 = branch_long_offset(L);
- DCHECK(is_int32(imm64));
- int32_t Hi20 = (((int32_t)imm64 + 0x800) >> 12);
- int32_t Lo12 = (int32_t)imm64 << 20 >> 20;
- auipc(t6, Hi20); // Read PC + Hi20 into scratch.
- jr(t6, Lo12); // jump PC + Hi20 + Lo12
+ GenPCRelativeJump(t6, imm64);
EmitConstPoolWithJumpIfNeeded();
}
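The +0x800 bias in Hi20 compensates for the sign extension of Lo12, so auipc plus jr/jalr reconstructs the exact 32-bit offset. Illustrative check (valid for offsets far from INT32_MAX, where the bias cannot overflow):

    #include <cassert>
    #include <cstdint>

    // (Hi20 << 12) + sign_extend(Lo12) == imm for any in-range offset,
    // e.g. imm = 0x12FFF -> Hi20 = 0x13, Lo12 = -1, 0x13000 - 1 = 0x12FFF.
    void CheckPCRelativeSplit(int32_t imm) {
      int32_t Hi20 = (imm + 0x800) >> 12;
      int32_t Lo12 = (imm << 20) >> 20;  // sign-extend the low 12 bits
      assert((Hi20 << 12) + Lo12 == imm);
    }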
@@ -3133,11 +3264,7 @@ void TurboAssembler::BranchAndLinkLong(Label* L) {
BlockTrampolinePoolScope block_trampoline_pool(this);
int64_t imm64;
imm64 = branch_long_offset(L);
- DCHECK(is_int32(imm64));
- int32_t Hi20 = (((int32_t)imm64 + 0x800) >> 12);
- int32_t Lo12 = (int32_t)imm64 << 20 >> 20;
- auipc(t6, Hi20); // Read PC + Hi20 into scratch.
- jalr(t6, Lo12); // jump PC + Hi20 + Lo12 and read PC + 4 to ra
+ GenPCRelativeJumpAndLink(t6, imm64);
}
void TurboAssembler::DropAndRet(int drop) {
@@ -3251,14 +3378,6 @@ void TurboAssembler::Push(Handle<HeapObject> handle) {
push(scratch);
}
-void MacroAssembler::MaybeDropFrames() {
- // Check whether we need to drop frames to restart a function on the stack.
- li(a1, ExternalReference::debug_restart_fp_address(isolate()));
- Ld(a1, MemOperand(a1));
- Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET,
- ne, a1, Operand(zero_reg));
-}
-
// ---------------------------------------------------------------------------
// Exception handling.
@@ -3294,16 +3413,10 @@ void MacroAssembler::PopStackHandler() {
void TurboAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
const DoubleRegister src) {
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- Label NotNaN;
-
- fmv_d(dst, src);
- feq_d(scratch, src, src);
- bne(scratch, zero_reg, &NotNaN);
- RV_li(scratch, 0x7ff8000000000000ULL); // This is the canonical NaN
- fmv_d_x(dst, scratch);
- bind(&NotNaN);
+ // Subtracting 0.0 preserves all inputs except for signalling NaNs, which
+ // become quiet NaNs. We use fsub rather than fadd because fsub preserves -0.0
+ // inputs: -0.0 + 0.0 = 0.0, but -0.0 - 0.0 = -0.0.
+ fsub_d(dst, src, kDoubleRegZero);
}
void TurboAssembler::MovFromFloatResult(const DoubleRegister dst) {
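The fsub-by-zero trick quiets a signalling NaN in a single instruction while leaving every other value, including -0.0, bit-identical. The sign argument from the comment can be checked directly (illustrative):

    #include <cassert>
    #include <cmath>

    // Under round-to-nearest: -0.0 + 0.0 == +0.0, but -0.0 - 0.0 == -0.0,
    // which is why fsub (not fadd) is the right canonicalization.
    void CheckSignedZeroPreserved() {
      assert(std::signbit(-0.0 - 0.0));
      assert(!std::signbit(-0.0 + 0.0));
    }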
@@ -3414,7 +3527,7 @@ void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch1,
void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count,
- Label* done, InvokeFlag flag) {
+ Label* done, InvokeType type) {
Label regular_invoke;
// a0: actual arguments count
@@ -3524,9 +3637,9 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count,
Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
DCHECK_EQ(function, a1);
DCHECK_IMPLIES(new_target.is_valid(), new_target == a3);
@@ -3540,17 +3653,19 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
}
Label done;
- InvokePrologue(expected_parameter_count, actual_parameter_count, &done, flag);
+ InvokePrologue(expected_parameter_count, actual_parameter_count, &done, type);
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
Register code = kJavaScriptCallCodeStartRegister;
Ld(code, FieldMemOperand(function, JSFunction::kCodeOffset));
- if (flag == CALL_FUNCTION) {
- CallCodeObject(code);
- } else {
- DCHECK(flag == JUMP_FUNCTION);
- JumpCodeObject(code);
+ switch (type) {
+ case InvokeType::kCall:
+ CallCodeObject(code);
+ break;
+ case InvokeType::kJump:
+ JumpCodeObject(code);
+ break;
}
// Continue here if InvokePrologue does handle the invocation due to
@@ -3560,9 +3675,9 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
void MacroAssembler::InvokeFunctionWithNewTarget(
Register function, Register new_target, Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
// Contract with called JS functions requires that function is passed in a1.
DCHECK_EQ(function, a1);
@@ -3576,15 +3691,15 @@ void MacroAssembler::InvokeFunctionWithNewTarget(
SharedFunctionInfo::kFormalParameterCountOffset));
InvokeFunctionCode(a1, new_target, expected_parameter_count,
- actual_parameter_count, flag);
+ actual_parameter_count, type);
}
void MacroAssembler::InvokeFunction(Register function,
Register expected_parameter_count,
Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
// Contract with called JS functions requires that function is passed in a1.
DCHECK_EQ(function, a1);
@@ -3593,7 +3708,7 @@ void MacroAssembler::InvokeFunction(Register function,
Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
InvokeFunctionCode(a1, no_reg, expected_parameter_count,
- actual_parameter_count, flag);
+ actual_parameter_count, type);
}
// ---------------------------------------------------------------------------
@@ -3734,15 +3849,15 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame) {
PrepareCEntryFunction(builtin);
- Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
- kArgvOnStack, builtin_exit_frame);
+ Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
+ ArgvMode::kStack, builtin_exit_frame);
Jump(code, RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg));
}
void MacroAssembler::JumpToInstructionStream(Address entry) {
// Ld a Address from a constant pool.
// Record a value into constant pool.
- if (FLAG_disable_riscv_constant_pool) {
+ if (!FLAG_riscv_constant_pool) {
li(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
} else {
RecordEntry(entry, RelocInfo::OFF_HEAP_TARGET);
@@ -3795,7 +3910,7 @@ void TurboAssembler::DebugBreak() { stop(); }
void TurboAssembler::Assert(Condition cc, AbortReason reason, Register rs,
Operand rt) {
- if (emit_debug_code()) Check(cc, reason, rs, rt);
+ if (FLAG_debug_code) Check(cc, reason, rs, rt);
}
void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs,
@@ -3810,11 +3925,11 @@ void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs,
void TurboAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
-#ifdef DEBUG
- const char* msg = GetAbortReason(reason);
- RecordComment("Abort message: ");
- RecordComment(msg);
-#endif
+ if (FLAG_code_comments) {
+ const char* msg = GetAbortReason(reason);
+ RecordComment("Abort message: ");
+ RecordComment(msg);
+ }
// Avoid emitting call to builtin if requested.
if (trap_on_abort()) {
@@ -3882,19 +3997,12 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
BlockTrampolinePoolScope block_trampoline_pool(this);
- int stack_offset = -3 * kPointerSize;
- const int fp_offset = 1 * kPointerSize;
- addi(sp, sp, stack_offset);
- stack_offset = -stack_offset - kPointerSize;
- Sd(ra, MemOperand(sp, stack_offset));
- stack_offset -= kPointerSize;
- Sd(fp, MemOperand(sp, stack_offset));
- stack_offset -= kPointerSize;
- li(scratch, Operand(StackFrame::TypeToMarker(type)));
- Sd(scratch, MemOperand(sp, stack_offset));
- // Adjust FP to point to saved FP.
- DCHECK_EQ(stack_offset, 0);
- Add64(fp, sp, Operand(fp_offset));
+ Push(ra, fp);
+ Move(fp, sp);
+ if (!StackFrame::IsJavaScript(type)) {
+ li(scratch, Operand(StackFrame::TypeToMarker(type)));
+ Push(scratch);
+ }
}
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
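EnterFrame now uses the Push pseudo-instruction and skips the type marker for JavaScript frames, which are presumably identified through their context/function slots instead. Resulting layout, sketched for a non-JavaScript frame:

    // higher addresses
    //   [ saved ra ]
    //   [ saved fp ]   <- fp
    //   [ marker   ]   <- sp   (omitted when StackFrame::IsJavaScript(type))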
@@ -3935,7 +4043,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
// Set up new frame pointer.
addi(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp);
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Sd(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
@@ -4011,11 +4119,12 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
Ld(cp, MemOperand(scratch));
-#ifdef DEBUG
- li(scratch,
- ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
- Sd(a3, MemOperand(scratch));
-#endif
+ if (FLAG_debug_code) {
+ UseScratchRegisterScope temp(this);
+ Register scratch2 = temp.Acquire();
+ li(scratch2, Operand(Context::kInvalidContext));
+ Sd(scratch2, MemOperand(scratch));
+ }
// Pop the arguments, restore registers, and return.
mv(sp, fp); // Respect ABI stack constraint.
@@ -4026,7 +4135,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
if (argument_count_is_length) {
add(sp, sp, argument_count);
} else {
- CalcScaledAddress(sp, sp, argument_count, kPointerSizeLog2, scratch);
+ CalcScaledAddress(sp, sp, argument_count, kPointerSizeLog2);
}
}
@@ -4054,7 +4163,7 @@ int TurboAssembler::ActivationFrameAlignment() {
}
void MacroAssembler::AssertStackIsAligned() {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
const int frame_alignment = ActivationFrameAlignment();
const int frame_alignment_mask = frame_alignment - 1;
@@ -4084,22 +4193,24 @@ void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
}
}
-void TurboAssembler::JumpIfSmi(Register value, Label* smi_label,
- Register scratch) {
+void TurboAssembler::JumpIfSmi(Register value, Label* smi_label) {
DCHECK_EQ(0, kSmiTag);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
andi(scratch, value, kSmiTagMask);
Branch(smi_label, eq, scratch, Operand(zero_reg));
}
-void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label,
- Register scratch) {
+void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
DCHECK_EQ(0, kSmiTag);
andi(scratch, value, kSmiTagMask);
Branch(not_smi_label, ne, scratch, Operand(zero_reg));
}
void MacroAssembler::AssertNotSmi(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
DCHECK(object != kScratchReg);
andi(kScratchReg, object, kSmiTagMask);
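JumpIfSmi/JumpIfNotSmi now acquire their own scratch register instead of taking one as a parameter. The underlying test is the usual tag check, sketched here:

    #include <cstdint>

    // With kSmiTag == 0 (DCHECKed above) and a one-bit tag mask, a value is
    // a Smi exactly when its low bit is clear.
    bool IsSmi(intptr_t value) { return (value & 1) == 0; }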
@@ -4108,7 +4219,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
void MacroAssembler::AssertSmi(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
DCHECK(object != kScratchReg);
andi(kScratchReg, object, kSmiTagMask);
@@ -4117,7 +4228,7 @@ void MacroAssembler::AssertSmi(Register object) {
}
void MacroAssembler::AssertConstructor(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
DCHECK(object != kScratchReg);
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
@@ -4134,7 +4245,7 @@ void MacroAssembler::AssertConstructor(Register object) {
}
void MacroAssembler::AssertFunction(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
DCHECK(object != kScratchReg);
@@ -4151,7 +4262,7 @@ void MacroAssembler::AssertFunction(Register object) {
}
void MacroAssembler::AssertBoundFunction(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
DCHECK(object != kScratchReg);
@@ -4165,7 +4276,7 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
void MacroAssembler::AssertGeneratorObject(Register object) {
- if (!emit_debug_code()) return;
+ if (!FLAG_debug_code) return;
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
DCHECK(object != kScratchReg);
@@ -4193,7 +4304,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Label done_checking;
AssertNotSmi(object);
LoadRoot(scratch, RootIndex::kUndefinedValue);
@@ -4229,11 +4340,11 @@ void TurboAssembler::FloatMinMaxHelper(FPURegister dst, FPURegister src1,
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
if (std::is_same<float, F_TYPE>::value) {
- CompareIsNanF32(scratch, src1, src2);
+ CompareIsNotNanF32(scratch, src1, src2);
} else {
- CompareIsNanF64(scratch, src1, src2);
+ CompareIsNotNanF64(scratch, src1, src2);
}
- BranchTrueF(scratch, &nan);
+ BranchFalseF(scratch, &nan);
if (kind == MaxMinKind::kMax) {
if (std::is_same<float, F_TYPE>::value) {
@@ -4330,11 +4441,9 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
void TurboAssembler::CallCFunction(ExternalReference function,
int num_reg_arguments,
int num_double_arguments) {
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
BlockTrampolinePoolScope block_trampoline_pool(this);
- li(scratch, function);
- CallCFunctionHelper(scratch, num_reg_arguments, num_double_arguments);
+ li(t6, function);
+ CallCFunctionHelper(t6, num_reg_arguments, num_double_arguments);
}
void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
@@ -4363,7 +4472,7 @@ void TurboAssembler::CallCFunctionHelper(Register function,
// PrepareCallCFunction.
#if V8_HOST_ARCH_RISCV64
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
@@ -4387,12 +4496,9 @@ void TurboAssembler::CallCFunctionHelper(Register function,
// allow preemption, so the return address in the link register
// stays correct.
{
- UseScratchRegisterScope temps(this);
- Register func_scratch = temps.Acquire();
- BlockTrampolinePoolScope block_trampoline_pool(this);
- if (function != func_scratch) {
- mv(func_scratch, function);
- function = func_scratch;
+ if (function != t6) {
+ mv(t6, function);
+ function = t6;
}
// Save the frame pointer and PC so that the stack layout remains
@@ -4401,7 +4507,6 @@ void TurboAssembler::CallCFunctionHelper(Register function,
// 't' registers are caller-saved so this is safe as a scratch register.
Register pc_scratch = t1;
Register scratch = t2;
- DCHECK(!AreAliased(pc_scratch, scratch, function));
auipc(pc_scratch, 0);
// TODO(RISCV): Does this need an offset? It seems like this should be the
@@ -4494,12 +4599,10 @@ void TurboAssembler::ResetSpeculationPoisonRegister() {
void TurboAssembler::CallForDeoptimization(Builtins::Name target, int,
Label* exit, DeoptimizeKind kind,
Label* ret, Label*) {
- UseScratchRegisterScope temps(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
- Register scratch = temps.Acquire();
- Ld(scratch,
+ Ld(t6,
MemOperand(kRootRegister, IsolateData::builtin_entry_slot_offset(target)));
- Call(scratch);
+ Call(t6);
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
(kind == DeoptimizeKind::kLazy)
? Deoptimizer::kLazyDeoptExitSize
diff --git a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
index b260f1c200..81e5565606 100644
--- a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
+++ b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
@@ -12,6 +12,7 @@
#include "src/codegen/assembler.h"
#include "src/codegen/riscv64/assembler-riscv64.h"
#include "src/common/globals.h"
+#include "src/objects/tagged-index.h"
namespace v8 {
namespace internal {
@@ -53,8 +54,6 @@ enum LiFlags {
ADDRESS_LOAD = 2
};
-enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
-enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
@@ -166,6 +165,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Compare double, if any operand is NaN, result is false except for NE
void CompareF64(Register rd, FPUCondition cc, FPURegister cmp1,
FPURegister cmp2);
+ void CompareIsNotNanF32(Register rd, FPURegister cmp1, FPURegister cmp2);
+ void CompareIsNotNanF64(Register rd, FPURegister cmp1, FPURegister cmp2);
void CompareIsNanF32(Register rd, FPURegister cmp1, FPURegister cmp2);
void CompareIsNanF64(Register rd, FPURegister cmp1, FPURegister cmp2);
@@ -187,6 +188,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
li(rd, Operand(j), mode);
}
+ inline void Move(Register output, MemOperand operand) { Ld(output, operand); }
+
void li(Register dst, Handle<HeapObject> value, LiFlags mode = OPTIMIZE_SIZE);
void li(Register dst, ExternalReference value, LiFlags mode = OPTIMIZE_SIZE);
void li(Register dst, const StringConstantBase* string,
@@ -197,6 +200,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
void LoadRootRelative(Register destination, int32_t offset) override;
+ inline void GenPCRelativeJump(Register rd, int64_t imm32);
+ inline void GenPCRelativeJumpAndLink(Register rd, int64_t imm32);
// Jump, Call, and Ret pseudo instructions implementing inter-working.
#define COND_ARGS \
Condition cond = al, Register rs = zero_reg, \
@@ -223,7 +228,20 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Load the builtin given by the Smi in |builtin_index| into the same
// register.
void LoadEntryFromBuiltinIndex(Register builtin_index);
+ void LoadEntryFromBuiltinIndex(Builtins::Name builtin_index,
+ Register destination);
+ MemOperand EntryFromBuiltinIndexAsOperand(Builtins::Name builtin_index);
void CallBuiltinByIndex(Register builtin_index) override;
+ void CallBuiltin(Builtins::Name builtin) {
+ // TODO(11527): drop the int overload in favour of the Builtins::Name one.
+ return CallBuiltin(static_cast<int>(builtin));
+ }
+ void CallBuiltin(int builtin_index);
+ void TailCallBuiltin(Builtins::Name builtin) {
+ // TODO(11527): drop the int overload in favour of the Builtins::Name one.
+ return TailCallBuiltin(static_cast<int>(builtin));
+ }
+ void TailCallBuiltin(int builtin_index);
void LoadCodeObjectEntry(Register destination, Register code_object) override;
void CallCodeObject(Register code_object) override;
@@ -799,7 +817,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Ceil_s_s(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
// Jump if the register contains a smi.
- void JumpIfSmi(Register value, Label* smi_label, Register scratch = t3);
+ void JumpIfSmi(Register value, Label* smi_label);
void JumpIfEqual(Register a, int32_t b, Label* dest) {
Branch(dest, eq, a, Operand(b));
@@ -816,8 +834,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
static int ActivationFrameAlignment();
// Calculate the scaled address (rd) as rt + (rs << sa).
- void CalcScaledAddress(Register rd, Register rs, Register rt, uint8_t sa,
- Register scratch = t3);
+ void CalcScaledAddress(Register rd, Register rs, Register rt, uint8_t sa);
// Compute the start of the generated instruction stream from the current PC.
// This is an alternative to embedding the {CodeObject} handle as a reference.
@@ -953,8 +970,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void RecordWriteField(
Register object, int offset, Register value, Register scratch,
RAStatus ra_status, SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
// For a given |object| notify the garbage collector that the slot |address|
// has been written. |value| is the object being stored. The value and
@@ -962,16 +979,16 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void RecordWrite(
Register object, Register address, Register value, RAStatus ra_status,
SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
// void Pref(int32_t hint, const MemOperand& rs);
// ---------------------------------------------------------------------------
// Pseudo-instructions.
- void LoadWordPair(Register rd, const MemOperand& rs, Register scratch = t3);
- void StoreWordPair(Register rd, const MemOperand& rs, Register scratch = t3);
+ void LoadWordPair(Register rd, const MemOperand& rs);
+ void StoreWordPair(Register rd, const MemOperand& rs);
void Madd_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
void Madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
@@ -1011,7 +1028,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Invoke the JavaScript function code by either calling or jumping.
void InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count,
- Register actual_parameter_count, InvokeFlag flag);
+ Register actual_parameter_count, InvokeType type);
// On function call, call into the debugger if necessary.
void CheckDebugHook(Register fun, Register new_target,
@@ -1022,12 +1039,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// current context to the context in the function before invoking.
void InvokeFunctionWithNewTarget(Register function, Register new_target,
Register actual_parameter_count,
- InvokeFlag flag);
+ InvokeType type);
void InvokeFunction(Register function, Register expected_parameter_count,
- Register actual_parameter_count, InvokeFlag flag);
-
- // Frame restart support.
- void MaybeDropFrames();
+ Register actual_parameter_count, InvokeType type);
// Exception handling.
@@ -1051,18 +1065,18 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Call a runtime routine.
void CallRuntime(const Runtime::Function* f, int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore);
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, save_doubles);
}
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid, int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
}
@@ -1131,8 +1145,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
}
// Jump if the register contains a non-smi.
- void JumpIfNotSmi(Register value, Label* not_smi_label,
- Register scratch = t3);
+ void JumpIfNotSmi(Register value, Label* not_smi_label);
// Abort execution if argument is a smi, enabled via --debug-code.
void AssertNotSmi(Register object);
@@ -1170,7 +1183,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Helper functions for generating invokes.
void InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count, Label* done,
- InvokeFlag flag);
+ InvokeType type);
// Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code);
diff --git a/deps/v8/src/codegen/riscv64/register-riscv64.h b/deps/v8/src/codegen/riscv64/register-riscv64.h
index b97594becd..4aacad611d 100644
--- a/deps/v8/src/codegen/riscv64/register-riscv64.h
+++ b/deps/v8/src/codegen/riscv64/register-riscv64.h
@@ -337,7 +337,7 @@ constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
constexpr Register kJavaScriptCallNewTargetRegister = a3;
constexpr Register kJavaScriptCallExtraArg1Register = a2;
-constexpr Register kOffHeapTrampolineRegister = t3;
+constexpr Register kOffHeapTrampolineRegister = t6;
constexpr Register kRuntimeCallFunctionRegister = a1;
constexpr Register kRuntimeCallArgCountRegister = a0;
constexpr Register kRuntimeCallArgvRegister = a2;
diff --git a/deps/v8/src/codegen/s390/assembler-s390.cc b/deps/v8/src/codegen/s390/assembler-s390.cc
index dd5f59bc0b..da51395dfd 100644
--- a/deps/v8/src/codegen/s390/assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/assembler-s390.cc
@@ -160,7 +160,11 @@ static bool supportsSTFLE() {
}
bool CpuFeatures::SupportsWasmSimd128() {
+#if V8_ENABLE_WEBASSEMBLY
return CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1);
+#else
+ return false;
+#endif // V8_ENABLE_WEBASSEMBLY
}
void CpuFeatures::ProbeImpl(bool cross_compile) {
diff --git a/deps/v8/src/codegen/s390/interface-descriptors-s390-inl.h b/deps/v8/src/codegen/s390/interface-descriptors-s390-inl.h
new file mode 100644
index 0000000000..d672c4354d
--- /dev/null
+++ b/deps/v8/src/codegen/s390/interface-descriptors-s390-inl.h
@@ -0,0 +1,256 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_S390_INTERFACE_DESCRIPTORS_S390_INL_H_
+#define V8_CODEGEN_S390_INTERFACE_DESCRIPTORS_S390_INL_H_
+
+#if V8_TARGET_ARCH_S390
+
+#include "src/codegen/interface-descriptors.h"
+#include "src/execution/frames.h"
+
+namespace v8 {
+namespace internal {
+
+constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() {
+ auto registers = RegisterArray(r2, r3, r4, r5, r6);
+ STATIC_ASSERT(registers.size() == kMaxBuiltinRegisterParams);
+ return registers;
+}
+
+// static
+constexpr auto RecordWriteDescriptor::registers() {
+ return RegisterArray(r2, r3, r4, r5, r6, kReturnRegister0);
+}
+
+// static
+constexpr auto DynamicCheckMapsDescriptor::registers() {
+ return RegisterArray(r2, r3, r4, r5, cp);
+}
+
+// static
+constexpr auto EphemeronKeyBarrierDescriptor::registers() {
+ return RegisterArray(r2, r3, r4, r5, r6, kReturnRegister0);
+}
+
+// static
+constexpr Register LoadDescriptor::ReceiverRegister() { return r3; }
+// static
+constexpr Register LoadDescriptor::NameRegister() { return r4; }
+// static
+constexpr Register LoadDescriptor::SlotRegister() { return r2; }
+
+// static
+constexpr Register LoadWithVectorDescriptor::VectorRegister() { return r5; }
+
+// static
+constexpr Register
+LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
+ return r6;
+}
+
+// static
+constexpr Register StoreDescriptor::ReceiverRegister() { return r3; }
+// static
+constexpr Register StoreDescriptor::NameRegister() { return r4; }
+// static
+constexpr Register StoreDescriptor::ValueRegister() { return r2; }
+// static
+constexpr Register StoreDescriptor::SlotRegister() { return r6; }
+
+// static
+constexpr Register StoreWithVectorDescriptor::VectorRegister() { return r5; }
+
+// static
+constexpr Register StoreTransitionDescriptor::MapRegister() { return r7; }
+
+// static
+constexpr Register ApiGetterDescriptor::HolderRegister() { return r2; }
+// static
+constexpr Register ApiGetterDescriptor::CallbackRegister() { return r5; }
+
+// static
+constexpr Register GrowArrayElementsDescriptor::ObjectRegister() { return r2; }
+// static
+constexpr Register GrowArrayElementsDescriptor::KeyRegister() { return r5; }
+
+// static
+constexpr Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
+ // TODO(v8:11421): Implement on this platform.
+ return r5;
+}
+// static
+constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() {
+ // TODO(v8:11421): Implement on this platform.
+ return r6;
+}
+
+// static
+constexpr Register TypeConversionDescriptor::ArgumentRegister() { return r2; }
+
+// static
+constexpr auto TypeofDescriptor::registers() { return RegisterArray(r5); }
+
+// static
+constexpr auto CallTrampolineDescriptor::registers() {
+ // r2 : number of arguments
+ // r3 : the target to call
+ return RegisterArray(r3, r2);
+}
+
+// static
+constexpr auto CallVarargsDescriptor::registers() {
+ // r2 : number of arguments (on the stack, not including receiver)
+ // r3 : the target to call
+ // r6 : arguments list length (untagged)
+ // r4 : arguments list (FixedArray)
+ return RegisterArray(r3, r2, r6, r4);
+}
+
+// static
+constexpr auto CallForwardVarargsDescriptor::registers() {
+ // r2 : number of arguments
+ // r4 : start index (to support rest parameters)
+ // r3 : the target to call
+ return RegisterArray(r3, r2, r4);
+}
+
+// static
+constexpr auto CallFunctionTemplateDescriptor::registers() {
+ // r3 : function template info
+ // r4 : number of arguments (on the stack, not including receiver)
+ return RegisterArray(r3, r4);
+}
+
+// static
+constexpr auto CallWithSpreadDescriptor::registers() {
+ // r2: number of arguments (on the stack, not including receiver)
+ // r3 : the target to call
+ // r4 : the object to spread
+ return RegisterArray(r3, r2, r4);
+}
+
+// static
+constexpr auto CallWithArrayLikeDescriptor::registers() {
+ // r3 : the target to call
+ // r4 : the arguments list
+ return RegisterArray(r3, r4);
+}
+
+// static
+constexpr auto ConstructVarargsDescriptor::registers() {
+ // r2 : number of arguments (on the stack, not including receiver)
+ // r3 : the target to call
+ // r5 : the new target
+ // r6 : arguments list length (untagged)
+ // r4 : arguments list (FixedArray)
+ return RegisterArray(r3, r5, r2, r6, r4);
+}
+
+// static
+constexpr auto ConstructForwardVarargsDescriptor::registers() {
+ // r2 : number of arguments
+ // r5 : the new target
+ // r4 : start index (to support rest parameters)
+ // r3 : the target to call
+ return RegisterArray(r3, r5, r2, r4);
+}
+
+// static
+constexpr auto ConstructWithSpreadDescriptor::registers() {
+ // r2 : number of arguments (on the stack, not including receiver)
+ // r3 : the target to call
+ // r5 : the new target
+ // r4 : the object to spread
+ return RegisterArray(r3, r5, r2, r4);
+}
+
+// static
+constexpr auto ConstructWithArrayLikeDescriptor::registers() {
+ // r3 : the target to call
+ // r5 : the new target
+ // r4 : the arguments list
+ return RegisterArray(r3, r5, r4);
+}
+
+// static
+constexpr auto ConstructStubDescriptor::registers() {
+ // r2 : number of arguments
+ // r3 : the target to call
+ // r5 : the new target
+ // r4 : allocation site or undefined
+ return RegisterArray(r3, r5, r2, r4);
+}
+
+// static
+constexpr auto AbortDescriptor::registers() { return RegisterArray(r3); }
+
+// static
+constexpr auto CompareDescriptor::registers() { return RegisterArray(r3, r2); }
+
+// static
+constexpr auto Compare_BaselineDescriptor::registers() {
+ // TODO(v8:11421): Implement on this platform.
+ return DefaultRegisterArray();
+}
+
+// static
+constexpr auto BinaryOpDescriptor::registers() { return RegisterArray(r3, r2); }
+
+// static
+constexpr auto BinaryOp_BaselineDescriptor::registers() {
+ // TODO(v8:11421): Implement on this platform.
+ return DefaultRegisterArray();
+}
+
+// static
+constexpr auto ApiCallbackDescriptor::registers() {
+ return RegisterArray(r3, // kApiFunctionAddress
+ r4, // kArgc
+ r5, // kCallData
+ r2); // kHolder
+}
+
+// static
+constexpr auto InterpreterDispatchDescriptor::registers() {
+ return RegisterArray(
+ kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister);
+}
+
+// static
+constexpr auto InterpreterPushArgsThenCallDescriptor::registers() {
+ return RegisterArray(r2, // argument count (not including receiver)
+ r4, // address of first argument
+ r3); // the target callable to be called
+}
+
+// static
+constexpr auto InterpreterPushArgsThenConstructDescriptor::registers() {
+ return RegisterArray(
+ r2, // argument count (not including receiver)
+ r6, // address of the first argument
+ r3, // constructor to call
+ r5, // new target
+ r4); // allocation site feedback if available, undefined otherwise
+}
+
+// static
+constexpr auto ResumeGeneratorDescriptor::registers() {
+ return RegisterArray(r2, // the value to pass to the generator
+ r3); // the JSGeneratorObject to resume
+}
+
+// static
+constexpr auto RunMicrotasksEntryDescriptor::registers() {
+ return RegisterArray(r2, r3);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_S390
+
+#endif // V8_CODEGEN_S390_INTERFACE_DESCRIPTORS_S390_INL_H_
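
The new inline header replaces per-platform runtime registration with constexpr data: each descriptor exposes its register assignment as a constant expression. A minimal sketch of the pattern, with a simplified RegisterArray standing in for V8's helper:

    // Hypothetical, simplified sketch of the constexpr-descriptor pattern.
    #include <array>

    struct Register { int code; };
    constexpr Register r2{2}, r3{3};

    template <typename... Regs>
    constexpr std::array<Register, sizeof...(Regs)> RegisterArray(Regs... regs) {
      return {regs...};
    }

    struct CallTrampolineDescriptor {
      static constexpr auto registers() { return RegisterArray(r3, r2); }
    };

    // Usable in constant expressions, so mismatches fail at compile time:
    static_assert(CallTrampolineDescriptor::registers()[0].code == 3,
                  "the call target is passed in r3");
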
diff --git a/deps/v8/src/codegen/s390/interface-descriptors-s390.cc b/deps/v8/src/codegen/s390/interface-descriptors-s390.cc
deleted file mode 100644
index 9a9ecdcb8b..0000000000
--- a/deps/v8/src/codegen/s390/interface-descriptors-s390.cc
+++ /dev/null
@@ -1,306 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_S390
-
-#include "src/codegen/interface-descriptors.h"
-
-#include "src/execution/frames.h"
-
-namespace v8 {
-namespace internal {
-
-const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
-
-void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
- CallInterfaceDescriptorData* data, int register_parameter_count) {
- const Register default_stub_registers[] = {r2, r3, r4, r5, r6};
- CHECK_LE(static_cast<size_t>(register_parameter_count),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(register_parameter_count,
- default_stub_registers);
-}
-
-void RecordWriteDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {r2, r3, r4, r5, r6};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void DynamicCheckMapsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register default_stub_registers[] = {r2, r3, r4, r5, cp};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void EphemeronKeyBarrierDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {r2, r3, r4, r5, r6};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-const Register LoadDescriptor::ReceiverRegister() { return r3; }
-const Register LoadDescriptor::NameRegister() { return r4; }
-const Register LoadDescriptor::SlotRegister() { return r2; }
-
-const Register LoadWithVectorDescriptor::VectorRegister() { return r5; }
-
-const Register
-LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
- return r6;
-}
-
-const Register StoreDescriptor::ReceiverRegister() { return r3; }
-const Register StoreDescriptor::NameRegister() { return r4; }
-const Register StoreDescriptor::ValueRegister() { return r2; }
-const Register StoreDescriptor::SlotRegister() { return r6; }
-
-const Register StoreWithVectorDescriptor::VectorRegister() { return r5; }
-
-const Register StoreTransitionDescriptor::SlotRegister() { return r6; }
-const Register StoreTransitionDescriptor::VectorRegister() { return r5; }
-const Register StoreTransitionDescriptor::MapRegister() { return r7; }
-
-const Register ApiGetterDescriptor::HolderRegister() { return r2; }
-const Register ApiGetterDescriptor::CallbackRegister() { return r5; }
-
-const Register GrowArrayElementsDescriptor::ObjectRegister() { return r2; }
-const Register GrowArrayElementsDescriptor::KeyRegister() { return r5; }
-
-const Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
- // TODO(v8:11421): Implement on this platform.
- UNREACHABLE();
-}
-const Register BaselineLeaveFrameDescriptor::WeightRegister() {
- // TODO(v8:11421): Implement on this platform.
- UNREACHABLE();
-}
-
-// static
-const Register TypeConversionDescriptor::ArgumentRegister() { return r2; }
-
-void TypeofDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r5};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r2 : number of arguments
- // r3 : the target to call
- Register registers[] = {r3, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r2 : number of arguments (on the stack, not including receiver)
- // r3 : the target to call
- // r6 : arguments list length (untagged)
- // r4 : arguments list (FixedArray)
- Register registers[] = {r3, r2, r6, r4};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallForwardVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r2 : number of arguments
- // r4 : start index (to support rest parameters)
- // r3 : the target to call
- Register registers[] = {r3, r2, r4};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallFunctionTemplateDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r3 : function template info
- // r4 : number of arguments (on the stack, not including receiver)
- Register registers[] = {r3, r4};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallWithSpreadDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r2 : number of arguments (on the stack, not including receiver)
- // r3 : the target to call
- // r4 : the object to spread
- Register registers[] = {r3, r2, r4};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r3 : the target to call
- // r4 : the arguments list
- Register registers[] = {r3, r4};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r2 : number of arguments (on the stack, not including receiver)
- // r3 : the target to call
- // r5 : the new target
- // r6 : arguments list length (untagged)
- // r4 : arguments list (FixedArray)
- Register registers[] = {r3, r5, r2, r6, r4};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r2 : number of arguments
- // r5 : the new target
- // r4 : start index (to support rest parameters)
- // r3 : the target to call
- Register registers[] = {r3, r5, r2, r4};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructWithSpreadDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r2 : number of arguments (on the stack, not including receiver)
- // r3 : the target to call
- // r5 : the new target
- // r4 : the object to spread
- Register registers[] = {r3, r5, r2, r4};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r3 : the target to call
- // r5 : the new target
- // r4 : the arguments list
- Register registers[] = {r3, r5, r4};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructStubDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r2 : number of arguments
- // r3 : the target to call
- // r5 : the new target
- // r4 : allocation site or undefined
- Register registers[] = {r3, r5, r2, r4};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void AbortDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CompareDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r3, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void Compare_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:11421): Implement on this platform.
- InitializePlatformUnimplemented(data, kParameterCount);
-}
-
-void BinaryOpDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r3, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void BinaryOp_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:11421): Implement on this platform.
- InitializePlatformUnimplemented(data, kParameterCount);
-}
-
-void ApiCallbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r3, // kApiFunctionAddress
- r4, // kArgc
- r5, // kCallData
- r2, // kHolder
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterDispatchDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
- kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterPushArgsThenCallDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r2, // argument count (not including receiver)
- r4, // address of first argument
- r3 // the target callable to be called
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r2, // argument count (not including receiver)
- r6, // address of the first argument
- r3, // constructor to call
- r5, // new target
- r4, // allocation site feedback if available, undefined otherwise
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ResumeGeneratorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r2, // the value to pass to the generator
- r3 // the JSGeneratorObject to resume
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r3, // loaded new FP
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r2, r3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_S390
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.cc b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
index be5798d8d4..de25a93d8b 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
@@ -12,6 +12,7 @@
#include "src/codegen/callable.h"
#include "src/codegen/code-factory.h"
#include "src/codegen/external-reference-table.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
@@ -40,6 +41,12 @@ namespace internal {
void TurboAssembler::DoubleMax(DoubleRegister result_reg,
DoubleRegister left_reg,
DoubleRegister right_reg) {
+ if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1)) {
+ vfmax(result_reg, left_reg, right_reg, Condition(1), Condition(8),
+ Condition(3));
+ return;
+ }
+
Label check_zero, return_left, return_right, return_nan, done;
cdbr(left_reg, right_reg);
bunordered(&return_nan, Label::kNear);
@@ -80,6 +87,11 @@ void TurboAssembler::DoubleMax(DoubleRegister result_reg,
void TurboAssembler::DoubleMin(DoubleRegister result_reg,
DoubleRegister left_reg,
DoubleRegister right_reg) {
+ if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1)) {
+ vfmin(result_reg, left_reg, right_reg, Condition(1), Condition(8),
+ Condition(3));
+ return;
+ }
Label check_zero, return_left, return_right, return_nan, done;
cdbr(left_reg, right_reg);
bunordered(&return_nan, Label::kNear);
@@ -126,6 +138,11 @@ void TurboAssembler::DoubleMin(DoubleRegister result_reg,
void TurboAssembler::FloatMax(DoubleRegister result_reg,
DoubleRegister left_reg,
DoubleRegister right_reg) {
+ if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1)) {
+ vfmax(result_reg, left_reg, right_reg, Condition(1), Condition(8),
+ Condition(2));
+ return;
+ }
Label check_zero, return_left, return_right, return_nan, done;
cebr(left_reg, right_reg);
bunordered(&return_nan, Label::kNear);
@@ -166,6 +183,12 @@ void TurboAssembler::FloatMax(DoubleRegister result_reg,
void TurboAssembler::FloatMin(DoubleRegister result_reg,
DoubleRegister left_reg,
DoubleRegister right_reg) {
+ if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1)) {
+ vfmin(result_reg, left_reg, right_reg, Condition(1), Condition(8),
+ Condition(2));
+ return;
+ }
+
Label check_zero, return_left, return_right, return_nan, done;
cebr(left_reg, right_reg);
bunordered(&return_nan, Label::kNear);
@@ -208,6 +231,39 @@ void TurboAssembler::FloatMin(DoubleRegister result_reg,
}
bind(&done);
}
+
+void TurboAssembler::CeilF32(DoubleRegister dst, DoubleRegister src) {
+ fiebra(ROUND_TOWARD_POS_INF, dst, src);
+}
+
+void TurboAssembler::CeilF64(DoubleRegister dst, DoubleRegister src) {
+ fidbra(ROUND_TOWARD_POS_INF, dst, src);
+}
+
+void TurboAssembler::FloorF32(DoubleRegister dst, DoubleRegister src) {
+ fiebra(ROUND_TOWARD_NEG_INF, dst, src);
+}
+
+void TurboAssembler::FloorF64(DoubleRegister dst, DoubleRegister src) {
+ fidbra(ROUND_TOWARD_NEG_INF, dst, src);
+}
+
+void TurboAssembler::TruncF32(DoubleRegister dst, DoubleRegister src) {
+ fiebra(ROUND_TOWARD_0, dst, src);
+}
+
+void TurboAssembler::TruncF64(DoubleRegister dst, DoubleRegister src) {
+ fidbra(ROUND_TOWARD_0, dst, src);
+}
+
+void TurboAssembler::NearestIntF32(DoubleRegister dst, DoubleRegister src) {
+ fiebra(ROUND_TO_NEAREST_TO_EVEN, dst, src);
+}
+
+void TurboAssembler::NearestIntF64(DoubleRegister dst, DoubleRegister src) {
+ fidbra(ROUND_TO_NEAREST_TO_EVEN, dst, src);
+}
+
int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion1,
Register exclusion2,
@@ -227,7 +283,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
RegList list = kJSCallerSaved & ~exclusions;
bytes += NumRegs(list) * kSystemPointerSize;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
bytes += NumRegs(kCallerSavedDoubles) * kDoubleSize;
}
@@ -252,7 +308,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
MultiPush(list);
bytes += NumRegs(list) * kSystemPointerSize;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
MultiPushDoubles(kCallerSavedDoubles);
bytes += NumRegs(kCallerSavedDoubles) * kDoubleSize;
}
@@ -263,7 +319,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
int bytes = 0;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
MultiPopDoubles(kCallerSavedDoubles);
bytes += NumRegs(kCallerSavedDoubles) * kDoubleSize;
}
@@ -759,7 +815,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
Label done;
// Skip barrier if writing a smi.
- if (smi_check == INLINE_SMI_CHECK) {
+ if (smi_check == SmiCheck::kInline) {
JumpIfSmi(value, &done);
}
@@ -768,7 +824,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
DCHECK(IsAligned(offset, kTaggedSize));
lay(dst, MemOperand(object, offset - kHeapObjectTag));
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Label ok;
AndP(r0, dst, Operand(kTaggedSize - 1));
beq(&ok, Label::kNear);
@@ -777,13 +833,13 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
RecordWrite(object, dst, value, lr_status, save_fp, remembered_set_action,
- OMIT_SMI_CHECK);
+ SmiCheck::kOmit);
bind(&done);
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
mov(value, Operand(bit_cast<intptr_t>(kZapValue + 4)));
mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 8)));
}
@@ -911,13 +967,13 @@ void MacroAssembler::RecordWrite(Register object, Register address,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
DCHECK(object != value);
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
LoadTaggedPointerField(r0, MemOperand(address));
CmpS64(value, r0);
Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
}
- if ((remembered_set_action == OMIT_REMEMBERED_SET &&
+ if ((remembered_set_action == RememberedSetAction::kOmit &&
!FLAG_incremental_marking) ||
FLAG_disable_write_barriers) {
return;
@@ -926,7 +982,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
// catch stores of smis and stores into the young generation.
Label done;
- if (smi_check == INLINE_SMI_CHECK) {
+ if (smi_check == SmiCheck::kInline) {
JumpIfSmi(value, &done);
}
@@ -950,7 +1006,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
mov(address, Operand(bit_cast<intptr_t>(kZapValue + 12)));
mov(value, Operand(bit_cast<intptr_t>(kZapValue + 16)));
}
@@ -1352,7 +1408,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
// Reserve room for saved entry sp.
lay(sp, MemOperand(fp, -ExitFrameConstants::kFixedFrameSizeFromFp));
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
StoreU64(MemOperand(fp, ExitFrameConstants::kSPOffset), Operand::Zero(),
r1);
}
@@ -1537,7 +1593,7 @@ void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch,
void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count,
- Label* done, InvokeFlag flag) {
+ Label* done, InvokeType type) {
Label regular_invoke;
// r2: actual arguments count
@@ -1653,9 +1709,9 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count,
Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
DCHECK_EQ(function, r3);
DCHECK_IMPLIES(new_target.is_valid(), new_target == r5);
@@ -1669,18 +1725,20 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
}
Label done;
- InvokePrologue(expected_parameter_count, actual_parameter_count, &done, flag);
+ InvokePrologue(expected_parameter_count, actual_parameter_count, &done, type);
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
Register code = kJavaScriptCallCodeStartRegister;
LoadTaggedPointerField(code,
FieldMemOperand(function, JSFunction::kCodeOffset));
- if (flag == CALL_FUNCTION) {
- CallCodeObject(code);
- } else {
- DCHECK(flag == JUMP_FUNCTION);
- JumpCodeObject(code);
+ switch (type) {
+ case InvokeType::kCall:
+ CallCodeObject(code);
+ break;
+ case InvokeType::kJump:
+ JumpCodeObject(code);
+ break;
}
// Continue here if InvokePrologue does handle the invocation due to
// mismatched parameter counts.
@@ -1689,9 +1747,9 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
void MacroAssembler::InvokeFunctionWithNewTarget(
Register fun, Register new_target, Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
// Contract with called JS functions requires that function is passed in r3.
DCHECK_EQ(fun, r3);
@@ -1707,15 +1765,15 @@ void MacroAssembler::InvokeFunctionWithNewTarget(
SharedFunctionInfo::kFormalParameterCountOffset));
InvokeFunctionCode(fun, new_target, expected_reg, actual_parameter_count,
- flag);
+ type);
}
void MacroAssembler::InvokeFunction(Register function,
Register expected_parameter_count,
Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
// Contract with called JS functions requires that function is passed in r3.
DCHECK_EQ(function, r3);
@@ -1725,18 +1783,7 @@ void MacroAssembler::InvokeFunction(Register function,
FieldMemOperand(function, JSFunction::kContextOffset));
InvokeFunctionCode(r3, no_reg, expected_parameter_count,
- actual_parameter_count, flag);
-}
-
-void MacroAssembler::MaybeDropFrames() {
- // Check whether we need to drop frames to restart a function on the stack.
- ExternalReference restart_fp =
- ExternalReference::debug_restart_fp_address(isolate());
- Move(r3, restart_fp);
- LoadU64(r3, MemOperand(r3));
- CmpS64(r3, Operand::Zero());
- Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET,
- ne);
+ actual_parameter_count, type);
}
void MacroAssembler::PushStackHandler() {
@@ -1904,8 +1951,8 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame) {
Move(r3, builtin);
- Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
- kArgvOnStack, builtin_exit_frame);
+ Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
+ ArgvMode::kStack, builtin_exit_frame);
Jump(code, RelocInfo::CODE_TARGET);
}
@@ -1947,11 +1994,11 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
}
void TurboAssembler::Assert(Condition cond, AbortReason reason, CRegister cr) {
- if (emit_debug_code()) Check(cond, reason, cr);
+ if (FLAG_debug_code) Check(cond, reason, cr);
}
void TurboAssembler::AssertUnreachable(AbortReason reason) {
- if (emit_debug_code()) Abort(reason);
+ if (FLAG_debug_code) Abort(reason);
}
void TurboAssembler::Check(Condition cond, AbortReason reason, CRegister cr) {
@@ -1965,11 +2012,11 @@ void TurboAssembler::Check(Condition cond, AbortReason reason, CRegister cr) {
void TurboAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
-#ifdef DEBUG
- const char* msg = GetAbortReason(reason);
- RecordComment("Abort message: ");
- RecordComment(msg);
-#endif
+ if (FLAG_code_comments) {
+ const char* msg = GetAbortReason(reason);
+ RecordComment("Abort message: ");
+ RecordComment(msg);
+ }
// Avoid emitting call to builtin if requested.
if (trap_on_abort()) {
@@ -2017,7 +2064,7 @@ void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
}
void MacroAssembler::AssertNotSmi(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object);
Check(ne, AbortReason::kOperandIsASmi, cr0);
@@ -2025,7 +2072,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
void MacroAssembler::AssertSmi(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object);
Check(eq, AbortReason::kOperandIsNotASmi, cr0);
@@ -2033,7 +2080,7 @@ void MacroAssembler::AssertSmi(Register object) {
}
void MacroAssembler::AssertConstructor(Register object, Register scratch) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object);
Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor);
@@ -2045,7 +2092,7 @@ void MacroAssembler::AssertConstructor(Register object, Register scratch) {
}
void MacroAssembler::AssertFunction(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object);
Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, cr0);
@@ -2059,7 +2106,7 @@ void MacroAssembler::AssertFunction(Register object) {
}
void MacroAssembler::AssertBoundFunction(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object);
Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction, cr0);
@@ -2071,7 +2118,7 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
void MacroAssembler::AssertGeneratorObject(Register object) {
- if (!emit_debug_code()) return;
+ if (!FLAG_debug_code) return;
TestIfSmi(object);
Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, cr0);
@@ -2101,7 +2148,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Label done_checking;
AssertNotSmi(object);
CompareRoot(object, RootIndex::kUndefinedValue);
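
Besides the enum migrations, this file gains vfmin/vfmax fast paths gated on VECTOR_ENHANCE_FACILITY_1 and single-instruction rounding helpers built on fiebra/fidbra with an explicit rounding mode. As a scalar analogue of what the new rounding helpers compute (for intuition only; on s390 each is one instruction):

    #include <cfenv>
    #include <cmath>

    // Scalar analogues of the new TurboAssembler rounding helpers.
    double CeilF64(double x)  { return std::ceil(x); }   // ROUND_TOWARD_POS_INF
    double FloorF64(double x) { return std::floor(x); }  // ROUND_TOWARD_NEG_INF
    double TruncF64(double x) { return std::trunc(x); }  // ROUND_TOWARD_0
    double NearestIntF64(double x) {
      // ROUND_TO_NEAREST_TO_EVEN: nearbyint honors the current rounding
      // mode, and FE_TONEAREST is ties-to-even.
      std::fesetround(FE_TONEAREST);
      return std::nearbyint(x);
    }
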
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.h b/deps/v8/src/codegen/s390/macro-assembler-s390.h
index f2719c3086..13d7ac696b 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.h
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.h
@@ -32,8 +32,6 @@ inline MemOperand FieldMemOperand(Register object, Register index, int offset) {
return MemOperand(object, index, offset - kHeapObjectTag);
}
-enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
-enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
@@ -70,6 +68,15 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
DoubleRegister right_reg);
void FloatMin(DoubleRegister result_reg, DoubleRegister left_reg,
DoubleRegister right_reg);
+ void CeilF32(DoubleRegister dst, DoubleRegister src);
+ void CeilF64(DoubleRegister dst, DoubleRegister src);
+ void FloorF32(DoubleRegister dst, DoubleRegister src);
+ void FloorF64(DoubleRegister dst, DoubleRegister src);
+ void TruncF32(DoubleRegister dst, DoubleRegister src);
+ void TruncF64(DoubleRegister dst, DoubleRegister src);
+ void NearestIntF32(DoubleRegister dst, DoubleRegister src);
+ void NearestIntF64(DoubleRegister dst, DoubleRegister src);
+
void LoadFromConstantsTable(Register destination,
int constant_index) override;
void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
@@ -769,9 +776,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void ResetRoundingMode();
// These exist to provide portability between 32 and 64bit
- void LoadP(Register dst, const MemOperand& mem, Register scratch = no_reg) {
- LoadU64(dst, mem, scratch);
- }
void LoadMultipleP(Register dst1, Register dst2, const MemOperand& mem);
void StoreMultipleP(Register dst1, Register dst2, const MemOperand& mem);
void LoadMultipleW(Register dst1, Register dst2, const MemOperand& mem);
@@ -1094,22 +1098,22 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
}
void CallRuntime(const Runtime::Function* f, int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore);
void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
- CallRuntime(function, function->nargs, kSaveFPRegs);
+ CallRuntime(function, function->nargs, SaveFPRegsMode::kSave);
}
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, save_doubles);
}
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid, int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
}
@@ -1220,7 +1224,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Invoke the JavaScript function code by either calling or jumping.
void InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count,
- Register actual_parameter_count, InvokeFlag flag);
+ Register actual_parameter_count, InvokeType type);
// On function call, call into the debugger if necessary.
void CheckDebugHook(Register fun, Register new_target,
@@ -1231,12 +1235,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// current context to the context in the function before invoking.
void InvokeFunctionWithNewTarget(Register function, Register new_target,
Register actual_parameter_count,
- InvokeFlag flag);
+ InvokeType type);
void InvokeFunction(Register function, Register expected_parameter_count,
- Register actual_parameter_count, InvokeFlag flag);
-
- // Frame restart support
- void MaybeDropFrames();
+ Register actual_parameter_count, InvokeType type);
// Exception handling
@@ -1353,8 +1354,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void RecordWriteField(
Register object, int offset, Register value, Register scratch,
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
// For a given |object| notify the garbage collector that the slot |address|
// has been written. |value| is the object being stored. The value and
@@ -1362,15 +1363,15 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void RecordWrite(
Register object, Register address, Register value,
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
private:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
// Helper functions for generating invokes.
void InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count, Label* done,
- InvokeFlag flag);
+ InvokeType type);
DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};
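
The header-side change mirrors the .cc rewrite: the loose enums (InvokeFlag, EMIT_REMEMBERED_SET, INLINE_SMI_CHECK) become scoped enum classes. A minimal sketch of why the scoped form is preferred:

    // Minimal sketch: enum class blocks implicit conversions and lets the
    // compiler flag non-exhaustive switches.
    enum class InvokeType { kCall, kJump };

    void Invoke(InvokeType type) {
      switch (type) {
        case InvokeType::kCall:
          // CallCodeObject(code);
          break;
        case InvokeType::kJump:
          // JumpCodeObject(code);
          break;
      }  // No default: adding an enumerator triggers a -Wswitch warning here.
    }

    // Invoke(0);  // Would not compile: no implicit int -> InvokeType.
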
diff --git a/deps/v8/src/codegen/safepoint-table.cc b/deps/v8/src/codegen/safepoint-table.cc
index 58fb6ed9e1..67a17d5f0e 100644
--- a/deps/v8/src/codegen/safepoint-table.cc
+++ b/deps/v8/src/codegen/safepoint-table.cc
@@ -20,20 +20,18 @@ namespace internal {
SafepointTable::SafepointTable(Isolate* isolate, Address pc, Code code)
: SafepointTable(code.InstructionStart(isolate, pc),
- code.SafepointTableAddress(), code.stack_slots(), true) {}
+ code.SafepointTableAddress(), true) {}
#if V8_ENABLE_WEBASSEMBLY
SafepointTable::SafepointTable(const wasm::WasmCode* code)
: SafepointTable(code->instruction_start(),
code->instruction_start() + code->safepoint_table_offset(),
- code->stack_slots(), false) {}
+ false) {}
#endif // V8_ENABLE_WEBASSEMBLY
SafepointTable::SafepointTable(Address instruction_start,
- Address safepoint_table_address,
- uint32_t stack_slots, bool has_deopt)
+ Address safepoint_table_address, bool has_deopt)
: instruction_start_(instruction_start),
- stack_slots_(stack_slots),
has_deopt_(has_deopt),
safepoint_table_address_(safepoint_table_address),
length_(ReadLength(safepoint_table_address)),
@@ -69,27 +67,18 @@ SafepointEntry SafepointTable::FindEntry(Address pc) const {
UNREACHABLE();
}
-void SafepointTable::PrintEntry(unsigned index,
- std::ostream& os) const { // NOLINT
+void SafepointTable::PrintEntry(unsigned index, std::ostream& os) const {
disasm::NameConverter converter;
SafepointEntry entry = GetEntry(index);
uint8_t* bits = entry.bits();
// Print the stack slot bits.
if (entry_size_ > 0) {
- const int first = 0;
- int last = entry_size_ - 1;
- for (int i = first; i < last; i++) PrintBits(os, bits[i], kBitsPerByte);
- int last_bits = stack_slots_ - ((last - first) * kBitsPerByte);
- PrintBits(os, bits[last], last_bits);
- }
-}
-
-void SafepointTable::PrintBits(std::ostream& os, // NOLINT
- uint8_t byte, int digits) {
- DCHECK(digits >= 0 && digits <= kBitsPerByte);
- for (int i = 0; i < digits; i++) {
- os << (((byte & (1 << i)) == 0) ? "0" : "1");
+ for (uint32_t i = 0; i < entry_size_; ++i) {
+ for (int bit = 0; bit < kBitsPerByte; ++bit) {
+ os << ((bits[i] & (1 << bit)) ? "1" : "0");
+ }
+ }
}
}
@@ -122,6 +111,12 @@ int SafepointTableBuilder::UpdateDeoptimizationInfo(int pc, int trampoline,
void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
RemoveDuplicates();
+ TrimEntries(&bits_per_entry);
+
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64
+ // We cannot emit a const pool within the safepoint table.
+ Assembler::BlockConstPoolScope block_const_pool(assembler);
+#endif
// Make sure the safepoint table is properly aligned. Pad with nops.
assembler->Align(Code::kMetadataAlignment);
@@ -168,6 +163,7 @@ void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
// Run through the indexes and build a bitmap.
for (int idx : *indexes) {
+ DCHECK_GT(bits_per_entry, idx);
int index = bits_per_entry - 1 - idx;
int byte_index = index >> kBitsPerByteLog2;
int bit_index = index & (kBitsPerByte - 1);
@@ -203,6 +199,28 @@ void SafepointTableBuilder::RemoveDuplicates() {
deoptimization_info_.front().pc = kMaxUInt32;
}
+void SafepointTableBuilder::TrimEntries(int* bits_per_entry) {
+ int min_index = *bits_per_entry;
+ if (min_index == 0) return; // Early exit: nothing to trim.
+
+ for (auto& info : deoptimization_info_) {
+ for (int idx : *info.stack_indexes) {
+ DCHECK_GT(*bits_per_entry, idx); // Validity check.
+ if (idx >= min_index) continue;
+ if (idx == 0) return; // Early exit: nothing to trim.
+ min_index = idx;
+ }
+ }
+
+ DCHECK_LT(0, min_index);
+ *bits_per_entry -= min_index;
+ for (auto& info : deoptimization_info_) {
+ for (int& idx : *info.stack_indexes) {
+ idx -= min_index;
+ }
+ }
+}
+
bool SafepointTableBuilder::IsIdenticalExceptForPc(
const DeoptimizationInfo& info1, const DeoptimizationInfo& info2) const {
if (info1.deopt_index != info2.deopt_index) return false;
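
TrimEntries shrinks the per-safepoint bitmaps by the smallest stack index used anywhere in the table, rebasing every index so the table stores no unused low bits. A standalone sketch of the same rebasing step, with plain vectors standing in for the Zone-allocated containers:

    #include <algorithm>
    #include <vector>

    // Simplified analogue of SafepointTableBuilder::TrimEntries.
    void TrimEntries(std::vector<std::vector<int>>* entries,
                     int* bits_per_entry) {
      int min_index = *bits_per_entry;
      if (min_index == 0) return;  // Nothing to trim.
      for (const auto& indexes : *entries) {
        for (int idx : indexes) {
          if (idx == 0) return;  // Some entry already uses bit 0.
          min_index = std::min(min_index, idx);
        }
      }
      *bits_per_entry -= min_index;  // Every entry shrinks by this much.
      for (auto& indexes : *entries) {
        for (int& idx : indexes) idx -= min_index;  // Rebase.
      }
    }
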
diff --git a/deps/v8/src/codegen/safepoint-table.h b/deps/v8/src/codegen/safepoint-table.h
index 623b524698..07bbcaf9a0 100644
--- a/deps/v8/src/codegen/safepoint-table.h
+++ b/deps/v8/src/codegen/safepoint-table.h
@@ -5,6 +5,7 @@
#ifndef V8_CODEGEN_SAFEPOINT_TABLE_H_
#define V8_CODEGEN_SAFEPOINT_TABLE_H_
+#include "src/base/iterator.h"
#include "src/base/memory.h"
#include "src/common/assert-scope.h"
#include "src/utils/allocation.h"
@@ -21,11 +22,14 @@ class WasmCode;
class SafepointEntry {
public:
- SafepointEntry()
- : deopt_index_(0), bits_(nullptr), trampoline_pc_(kNoTrampolinePC) {}
-
- SafepointEntry(unsigned deopt_index, uint8_t* bits, int trampoline_pc)
- : deopt_index_(deopt_index), bits_(bits), trampoline_pc_(trampoline_pc) {
+ SafepointEntry() = default;
+
+ SafepointEntry(unsigned deopt_index, uint8_t* bits, uint8_t* bits_end,
+ int trampoline_pc)
+ : deopt_index_(deopt_index),
+ bits_(bits),
+ bits_end_(bits_end),
+ trampoline_pc_(trampoline_pc) {
DCHECK(is_valid());
}
@@ -38,6 +42,7 @@ class SafepointEntry {
void Reset() {
deopt_index_ = 0;
bits_ = nullptr;
+ bits_end_ = nullptr;
}
int trampoline_pc() { return trampoline_pc_; }
@@ -67,16 +72,23 @@ class SafepointEntry {
return deopt_index_ != kNoDeoptIndex;
}
- uint8_t* bits() {
+ uint8_t* bits() const {
DCHECK(is_valid());
return bits_;
}
+ base::iterator_range<uint8_t*> iterate_bits() const {
+ return base::make_iterator_range(bits_, bits_end_);
+ }
+
+ size_t entry_size() const { return bits_end_ - bits_; }
+
private:
- uint32_t deopt_index_;
- uint8_t* bits_;
+ uint32_t deopt_index_ = 0;
+ uint8_t* bits_ = nullptr;
+ uint8_t* bits_end_ = nullptr;
// It needs to be an integer as it is -1 for eager deoptimizations.
- int trampoline_pc_;
+ int trampoline_pc_ = kNoTrampolinePC;
};
class SafepointTable {
@@ -117,17 +129,17 @@ class SafepointTable {
int trampoline_pc = has_deopt_
? base::Memory<int>(GetTrampolineLocation(index))
: SafepointEntry::kNoTrampolinePC;
- return SafepointEntry(deopt_index, bits, trampoline_pc);
+ return SafepointEntry(deopt_index, bits, bits + entry_size_, trampoline_pc);
}
// Returns the entry for the given pc.
SafepointEntry FindEntry(Address pc) const;
- void PrintEntry(unsigned index, std::ostream& os) const; // NOLINT
+ void PrintEntry(unsigned index, std::ostream& os) const;
private:
SafepointTable(Address instruction_start, Address safepoint_table_address,
- uint32_t stack_slots, bool has_deopt);
+ bool has_deopt);
static const uint8_t kNoRegisters = 0xFF;
@@ -165,12 +177,9 @@ class SafepointTable {
return GetPcOffsetLocation(index) + kTrampolinePcOffset;
}
- static void PrintBits(std::ostream& os, uint8_t byte, int digits);
-
DISALLOW_GARBAGE_COLLECTION(no_gc_)
const Address instruction_start_;
- const uint32_t stack_slots_;
const bool has_deopt_;
// Safepoint table layout.
@@ -254,6 +263,10 @@ class SafepointTableBuilder {
// If all entries are identical, replace them by 1 entry with pc = kMaxUInt32.
void RemoveDuplicates();
+ // Try to trim entries by removing trailing zeros (and shrinking
+ // {bits_per_entry}).
+ void TrimEntries(int* bits_per_entry);
+
ZoneChunkList<DeoptimizationInfo> deoptimization_info_;
unsigned offset_;
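
With the explicit bits_end_ pointer, a SafepointEntry is self-describing: callers can walk its bitmap via iterate_bits() without threading the table-wide entry size through. A hedged usage sketch (CountTaggedSlots is illustrative, not a V8 API):

    // Sketch: count the tagged slots recorded in one safepoint entry.
    int CountTaggedSlots(const SafepointEntry& entry) {
      int count = 0;
      for (uint8_t b : entry.iterate_bits()) {
        count += __builtin_popcount(b);  // GCC/Clang builtin.
      }
      return count;
    }
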
diff --git a/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc
index 366d1afac9..3a73ae09f8 100644
--- a/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc
+++ b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc
@@ -29,6 +29,174 @@ void SharedTurboAssembler::Movapd(XMMRegister dst, XMMRegister src) {
}
}
+void SharedTurboAssembler::Shufps(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, uint8_t imm8) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vshufps(dst, src1, src2, imm8);
+ } else {
+ if (dst != src1) {
+ movaps(dst, src1);
+ }
+ shufps(dst, src2, imm8);
+ }
+}
+
+void SharedTurboAssembler::F64x2ExtractLane(DoubleRegister dst, XMMRegister src,
+ uint8_t lane) {
+ if (lane == 0) {
+ if (dst != src) {
+ Movaps(dst, src);
+ }
+ } else {
+ DCHECK_EQ(1, lane);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ // Pass src as operand to avoid a false dependency on dst.
+ vmovhlps(dst, src, src);
+ } else {
+ movhlps(dst, src);
+ }
+ }
+}
+
+void SharedTurboAssembler::F64x2ReplaceLane(XMMRegister dst, XMMRegister src,
+ DoubleRegister rep, uint8_t lane) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ if (lane == 0) {
+ vpblendw(dst, src, rep, 0b00001111);
+ } else {
+ vmovlhps(dst, src, rep);
+ }
+ } else {
+ CpuFeatureScope scope(this, SSE4_1);
+ if (dst != src) {
+ DCHECK_NE(dst, rep); // Ensure rep is not overwritten.
+ movaps(dst, src);
+ }
+ if (lane == 0) {
+ pblendw(dst, rep, 0b00001111);
+ } else {
+ movlhps(dst, rep);
+ }
+ }
+}
+
+void SharedTurboAssembler::F64x2Min(XMMRegister dst, XMMRegister lhs,
+ XMMRegister rhs, XMMRegister scratch) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ // The minpd instruction doesn't propagate NaNs and +0's in its first
+ // operand. Perform minpd in both orders, merge the results, and adjust.
+ vminpd(scratch, lhs, rhs);
+ vminpd(dst, rhs, lhs);
+ // Propagate -0's and NaNs, which may be non-canonical.
+ vorpd(scratch, scratch, dst);
+ // Canonicalize NaNs by quieting and clearing the payload.
+ vcmpunordpd(dst, dst, scratch);
+ vorpd(scratch, scratch, dst);
+ vpsrlq(dst, dst, byte{13});
+ vandnpd(dst, dst, scratch);
+ } else {
+ // Compare lhs with rhs and rhs with lhs, leaving the results in scratch
+ // and dst. If dst overlaps with lhs or rhs, we can save a move.
+ if (dst == lhs || dst == rhs) {
+ XMMRegister src = dst == lhs ? rhs : lhs;
+ movaps(scratch, src);
+ minpd(scratch, dst);
+ minpd(dst, src);
+ } else {
+ movaps(scratch, lhs);
+ movaps(dst, rhs);
+ minpd(scratch, rhs);
+ minpd(dst, lhs);
+ }
+ orpd(scratch, dst);
+ cmpunordpd(dst, scratch);
+ orpd(scratch, dst);
+ psrlq(dst, byte{13});
+ andnpd(dst, scratch);
+ }
+}
+
+void SharedTurboAssembler::F64x2Max(XMMRegister dst, XMMRegister lhs,
+ XMMRegister rhs, XMMRegister scratch) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ // The maxpd instruction doesn't propagate NaNs and +0's in its first
+ // operand. Perform maxpd in both orders, merge the results, and adjust.
+ vmaxpd(scratch, lhs, rhs);
+ vmaxpd(dst, rhs, lhs);
+ // Find discrepancies.
+ vxorpd(dst, dst, scratch);
+ // Propagate NaNs, which may be non-canonical.
+ vorpd(scratch, scratch, dst);
+ // Propagate sign discrepancy and (subtle) quiet NaNs.
+ vsubpd(scratch, scratch, dst);
+ // Canonicalize NaNs by clearing the payload. Sign is non-deterministic.
+ vcmpunordpd(dst, dst, scratch);
+ vpsrlq(dst, dst, byte{13});
+ vandnpd(dst, dst, scratch);
+ } else {
+ if (dst == lhs || dst == rhs) {
+ XMMRegister src = dst == lhs ? rhs : lhs;
+ movaps(scratch, src);
+ maxpd(scratch, dst);
+ maxpd(dst, src);
+ } else {
+ movaps(scratch, lhs);
+ movaps(dst, rhs);
+ maxpd(scratch, rhs);
+ maxpd(dst, lhs);
+ }
+ xorpd(dst, scratch);
+ orpd(scratch, dst);
+ subpd(scratch, dst);
+ cmpunordpd(dst, scratch);
+ psrlq(dst, byte{13});
+ andnpd(dst, scratch);
+ }
+}
+
+void SharedTurboAssembler::F32x4Splat(XMMRegister dst, DoubleRegister src) {
+ if (CpuFeatures::IsSupported(AVX2)) {
+ CpuFeatureScope avx2_scope(this, AVX2);
+ vbroadcastss(dst, src);
+ } else if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vshufps(dst, src, src, 0);
+ } else {
+ if (dst == src) {
+ // 1 byte shorter than pshufd.
+ shufps(dst, src, 0);
+ } else {
+ pshufd(dst, src, 0);
+ }
+ }
+}
+
+void SharedTurboAssembler::F32x4ExtractLane(FloatRegister dst, XMMRegister src,
+ uint8_t lane) {
+ DCHECK_LT(lane, 4);
+ // These instructions are shorter than insertps, but will leave junk in
+ // the top lanes of dst.
+ if (lane == 0) {
+ if (dst != src) {
+ Movaps(dst, src);
+ }
+ } else if (lane == 1) {
+ Movshdup(dst, src);
+ } else if (lane == 2 && dst == src) {
+ // Check dst == src to avoid false dependency on dst.
+ Movhlps(dst, src);
+ } else if (dst == src) {
+ Shufps(dst, src, src, lane);
+ } else {
+ Pshufd(dst, src, lane);
+ }
+}
+
void SharedTurboAssembler::S128Store32Lane(Operand dst, XMMRegister src,
uint8_t laneidx) {
if (laneidx == 0) {
@@ -233,6 +401,22 @@ void SharedTurboAssembler::I32x4UConvertI16x8High(XMMRegister dst,
}
}
+void SharedTurboAssembler::I64x2Neg(XMMRegister dst, XMMRegister src,
+ XMMRegister scratch) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpxor(scratch, scratch, scratch);
+ vpsubq(dst, scratch, src);
+ } else {
+ if (dst == src) {
+ movaps(scratch, src);
+ std::swap(src, scratch);
+ }
+ pxor(dst, dst);
+ psubq(dst, src);
+ }
+}
+
void SharedTurboAssembler::I64x2Abs(XMMRegister dst, XMMRegister src,
XMMRegister scratch) {
if (CpuFeatures::IsSupported(AVX)) {
@@ -379,6 +563,17 @@ void SharedTurboAssembler::I64x2UConvertI32x4High(XMMRegister dst,
}
}
+void SharedTurboAssembler::S128Not(XMMRegister dst, XMMRegister src,
+ XMMRegister scratch) {
+ if (dst == src) {
+ Pcmpeqd(scratch, scratch);
+ Pxor(dst, scratch);
+ } else {
+ Pcmpeqd(dst, dst);
+ Pxor(dst, src);
+ }
+}
+
void SharedTurboAssembler::S128Select(XMMRegister dst, XMMRegister mask,
XMMRegister src1, XMMRegister src2,
XMMRegister scratch) {
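
F64x2Min/F64x2Max have to paper over SSE semantics: minpd/maxpd return the second operand whenever the inputs are unordered or compare equal, so a single instruction mishandles NaN and signed zero. A scalar sketch of the asymmetry that the both-orders-and-merge sequence corrects:

    #include <cmath>

    // minpd is essentially (a < b) ? a : b, evaluated lane-wise.
    double SseStyleMin(double a, double b) { return a < b ? a : b; }

    // SseStyleMin(NAN, 1.0) == 1.0, but SseStyleMin(1.0, NAN) is NaN.
    // SseStyleMin(+0.0, -0.0) == -0.0, but SseStyleMin(-0.0, +0.0) == +0.0.
    // Wasm's f64x2.min must return NaN if either input is NaN and must
    // order -0 below +0, hence min in both orders plus the or/cmp/andn fixup.
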
diff --git a/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h
index e2778e472d..6be9444c65 100644
--- a/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h
+++ b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h
@@ -39,121 +39,252 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
}
}
- template <typename Dst, typename... Args>
+ // Shufps that will mov src1 into dst if AVX is not supported.
+ void Shufps(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ uint8_t imm8);
+
+ // Helper struct to implement functions that check for AVX support and
+ // dispatch to the appropriate AVX/SSE instruction.
+ template <typename Dst, typename Arg, typename... Args>
struct AvxHelper {
Assembler* assm;
base::Optional<CpuFeature> feature = base::nullopt;
// Call a method where the AVX version expects the dst argument to be
// duplicated.
- template <void (Assembler::*avx)(Dst, Dst, Args...),
+ // E.g. Andps(x, y) -> vandps(x, x, y)
+ // -> andps(x, y)
+ template <void (Assembler::*avx)(Dst, Dst, Arg, Args...),
+ void (Assembler::*no_avx)(Dst, Arg, Args...)>
+ void emit(Dst dst, Arg arg, Args... args) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(assm, AVX);
+ (assm->*avx)(dst, dst, arg, args...);
+ } else if (feature.has_value()) {
+ DCHECK(CpuFeatures::IsSupported(*feature));
+ CpuFeatureScope scope(assm, *feature);
+ (assm->*no_avx)(dst, arg, args...);
+ } else {
+ (assm->*no_avx)(dst, arg, args...);
+ }
+ }
+
+ // Call a method in the AVX form (one more operand), but if unsupported will
+ // check that dst == first src.
+ // E.g. Andps(x, y, z) -> vandps(x, y, z)
+ // -> andps(x, z) and check that x == y
+ template <void (Assembler::*avx)(Dst, Arg, Args...),
void (Assembler::*no_avx)(Dst, Args...)>
- void emit(Dst dst, Args... args) {
+ void emit(Dst dst, Arg arg, Args... args) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(assm, AVX);
- (assm->*avx)(dst, dst, args...);
+ (assm->*avx)(dst, arg, args...);
} else if (feature.has_value()) {
+ DCHECK_EQ(dst, arg);
DCHECK(CpuFeatures::IsSupported(*feature));
CpuFeatureScope scope(assm, *feature);
(assm->*no_avx)(dst, args...);
} else {
+ DCHECK_EQ(dst, arg);
(assm->*no_avx)(dst, args...);
}
}
// Call a method where the AVX version expects no duplicated dst argument.
- template <void (Assembler::*avx)(Dst, Args...),
- void (Assembler::*no_avx)(Dst, Args...)>
- void emit(Dst dst, Args... args) {
+ // E.g. Movddup(x, y) -> vmovddup(x, y)
+ // -> movddup(x, y)
+ template <void (Assembler::*avx)(Dst, Arg, Args...),
+ void (Assembler::*no_avx)(Dst, Arg, Args...)>
+ void emit(Dst dst, Arg arg, Args... args) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(assm, AVX);
- (assm->*avx)(dst, args...);
+ (assm->*avx)(dst, arg, args...);
} else if (feature.has_value()) {
DCHECK(CpuFeatures::IsSupported(*feature));
CpuFeatureScope scope(assm, *feature);
- (assm->*no_avx)(dst, args...);
+ (assm->*no_avx)(dst, arg, args...);
} else {
- (assm->*no_avx)(dst, args...);
+ (assm->*no_avx)(dst, arg, args...);
}
}
};
-#define AVX_OP(macro_name, name) \
- template <typename Dst, typename... Args> \
- void macro_name(Dst dst, Args... args) { \
- AvxHelper<Dst, Args...>{this} \
- .template emit<&Assembler::v##name, &Assembler::name>(dst, args...); \
+#define AVX_OP(macro_name, name) \
+ template <typename Dst, typename Arg, typename... Args> \
+ void macro_name(Dst dst, Arg arg, Args... args) { \
+ AvxHelper<Dst, Arg, Args...>{this} \
+ .template emit<&Assembler::v##name, &Assembler::name>(dst, arg, \
+ args...); \
}
-#define AVX_OP_SSE3(macro_name, name) \
- template <typename Dst, typename... Args> \
- void macro_name(Dst dst, Args... args) { \
- AvxHelper<Dst, Args...>{this, base::Optional<CpuFeature>(SSE3)} \
- .template emit<&Assembler::v##name, &Assembler::name>(dst, args...); \
+#define AVX_OP_SSE3(macro_name, name) \
+ template <typename Dst, typename Arg, typename... Args> \
+ void macro_name(Dst dst, Arg arg, Args... args) { \
+ AvxHelper<Dst, Arg, Args...>{this, base::Optional<CpuFeature>(SSE3)} \
+ .template emit<&Assembler::v##name, &Assembler::name>(dst, arg, \
+ args...); \
}
-#define AVX_OP_SSSE3(macro_name, name) \
- template <typename Dst, typename... Args> \
- void macro_name(Dst dst, Args... args) { \
- AvxHelper<Dst, Args...>{this, base::Optional<CpuFeature>(SSSE3)} \
- .template emit<&Assembler::v##name, &Assembler::name>(dst, args...); \
+#define AVX_OP_SSSE3(macro_name, name) \
+ template <typename Dst, typename Arg, typename... Args> \
+ void macro_name(Dst dst, Arg arg, Args... args) { \
+ AvxHelper<Dst, Arg, Args...>{this, base::Optional<CpuFeature>(SSSE3)} \
+ .template emit<&Assembler::v##name, &Assembler::name>(dst, arg, \
+ args...); \
}
-#define AVX_OP_SSE4_1(macro_name, name) \
- template <typename Dst, typename... Args> \
- void macro_name(Dst dst, Args... args) { \
- AvxHelper<Dst, Args...>{this, base::Optional<CpuFeature>(SSE4_1)} \
- .template emit<&Assembler::v##name, &Assembler::name>(dst, args...); \
+#define AVX_OP_SSE4_1(macro_name, name) \
+ template <typename Dst, typename Arg, typename... Args> \
+ void macro_name(Dst dst, Arg arg, Args... args) { \
+ AvxHelper<Dst, Arg, Args...>{this, base::Optional<CpuFeature>(SSE4_1)} \
+ .template emit<&Assembler::v##name, &Assembler::name>(dst, arg, \
+ args...); \
}
-#define AVX_OP_SSE4_2(macro_name, name) \
- template <typename Dst, typename... Args> \
- void macro_name(Dst dst, Args... args) { \
- AvxHelper<Dst, Args...>{this, base::Optional<CpuFeature>(SSE4_2)} \
- .template emit<&Assembler::v##name, &Assembler::name>(dst, args...); \
+#define AVX_OP_SSE4_2(macro_name, name) \
+ template <typename Dst, typename Arg, typename... Args> \
+ void macro_name(Dst dst, Arg arg, Args... args) { \
+ AvxHelper<Dst, Arg, Args...>{this, base::Optional<CpuFeature>(SSE4_2)} \
+ .template emit<&Assembler::v##name, &Assembler::name>(dst, arg, \
+ args...); \
}
+ // Keep this list sorted by required extension, then instruction name.
+ AVX_OP(Addpd, addpd)
+ AVX_OP(Addps, addps)
+ AVX_OP(Andnpd, andnpd)
+ AVX_OP(Andnps, andnps)
+ AVX_OP(Andpd, andpd)
+ AVX_OP(Andps, andps)
+ AVX_OP(Cmpeqpd, cmpeqpd)
+ AVX_OP(Cmplepd, cmplepd)
+ AVX_OP(Cmpleps, cmpleps)
+ AVX_OP(Cmpltpd, cmpltpd)
+ AVX_OP(Cmpneqpd, cmpneqpd)
+ AVX_OP(Cmpunordpd, cmpunordpd)
+ AVX_OP(Cmpunordps, cmpunordps)
AVX_OP(Cvtdq2pd, cvtdq2pd)
AVX_OP(Cvtdq2ps, cvtdq2ps)
- AVX_OP(Cvtps2pd, cvtps2pd)
AVX_OP(Cvtpd2ps, cvtpd2ps)
+ AVX_OP(Cvtps2pd, cvtps2pd)
AVX_OP(Cvttps2dq, cvttps2dq)
+ AVX_OP(Divpd, divpd)
+ AVX_OP(Divps, divps)
+ AVX_OP(Maxpd, maxpd)
+ AVX_OP(Maxps, maxps)
+ AVX_OP(Minpd, minpd)
+ AVX_OP(Minps, minps)
AVX_OP(Movaps, movaps)
AVX_OP(Movd, movd)
+ AVX_OP(Movhlps, movhlps)
AVX_OP(Movhps, movhps)
AVX_OP(Movlps, movlps)
AVX_OP(Movmskpd, movmskpd)
AVX_OP(Movmskps, movmskps)
- AVX_OP(Movss, movss)
AVX_OP(Movsd, movsd)
+ AVX_OP(Movss, movss)
AVX_OP(Movupd, movupd)
AVX_OP(Movups, movups)
+ AVX_OP(Mulpd, mulpd)
+ AVX_OP(Mulps, mulps)
+ AVX_OP(Orpd, orpd)
+ AVX_OP(Orps, orps)
+ AVX_OP(Packssdw, packssdw)
+ AVX_OP(Packsswb, packsswb)
+ AVX_OP(Packuswb, packuswb)
+ AVX_OP(Paddb, paddb)
+ AVX_OP(Paddd, paddd)
+ AVX_OP(Paddq, paddq)
+ AVX_OP(Paddsb, paddsb)
+ AVX_OP(Paddusb, paddusb)
+ AVX_OP(Paddusw, paddusw)
+ AVX_OP(Paddw, paddw)
+ AVX_OP(Pand, pand)
+ AVX_OP(Pavgb, pavgb)
+ AVX_OP(Pavgw, pavgw)
+ AVX_OP(Pcmpgtb, pcmpgtb)
+ AVX_OP(Pcmpeqd, pcmpeqd)
+ AVX_OP(Pmaxub, pmaxub)
+ AVX_OP(Pminub, pminub)
AVX_OP(Pmovmskb, pmovmskb)
AVX_OP(Pmullw, pmullw)
- AVX_OP(Pshuflw, pshuflw)
- AVX_OP(Pshufhw, pshufhw)
+ AVX_OP(Pmuludq, pmuludq)
+ AVX_OP(Por, por)
AVX_OP(Pshufd, pshufd)
+ AVX_OP(Pshufhw, pshufhw)
+ AVX_OP(Pshuflw, pshuflw)
+ AVX_OP(Pslld, pslld)
+ AVX_OP(Psllq, psllq)
+ AVX_OP(Psllw, psllw)
+ AVX_OP(Psrad, psrad)
+ AVX_OP(Psraw, psraw)
+ AVX_OP(Psrld, psrld)
+ AVX_OP(Psrlq, psrlq)
+ AVX_OP(Psrlw, psrlw)
+ AVX_OP(Psubb, psubb)
+ AVX_OP(Psubd, psubd)
+ AVX_OP(Psubq, psubq)
+ AVX_OP(Psubsb, psubsb)
+ AVX_OP(Psubusb, psubusb)
+ AVX_OP(Psubw, psubw)
+ AVX_OP(Punpckhbw, punpckhbw)
+ AVX_OP(Punpckhdq, punpckhdq)
+ AVX_OP(Punpckhqdq, punpckhqdq)
+ AVX_OP(Punpckhwd, punpckhwd)
+ AVX_OP(Punpcklbw, punpcklbw)
+ AVX_OP(Punpckldq, punpckldq)
+ AVX_OP(Punpcklqdq, punpcklqdq)
+ AVX_OP(Punpcklwd, punpcklwd)
+ AVX_OP(Pxor, pxor)
AVX_OP(Rcpps, rcpps)
AVX_OP(Rsqrtps, rsqrtps)
- AVX_OP(Sqrtps, sqrtps)
AVX_OP(Sqrtpd, sqrtpd)
+ AVX_OP(Sqrtps, sqrtps)
+ AVX_OP(Sqrtsd, sqrtsd)
+ AVX_OP(Sqrtss, sqrtss)
+ AVX_OP(Subpd, subpd)
+ AVX_OP(Subps, subps)
+ AVX_OP(Unpcklps, unpcklps)
+ AVX_OP(Xorpd, xorpd)
+ AVX_OP(Xorps, xorps)
+
+ AVX_OP_SSE3(Haddps, haddps)
AVX_OP_SSE3(Movddup, movddup)
AVX_OP_SSE3(Movshdup, movshdup)
+
AVX_OP_SSSE3(Pabsb, pabsb)
- AVX_OP_SSSE3(Pabsw, pabsw)
AVX_OP_SSSE3(Pabsd, pabsd)
+ AVX_OP_SSSE3(Pabsw, pabsw)
+ AVX_OP_SSSE3(Palignr, palignr)
+ AVX_OP_SSSE3(Psignb, psignb)
+ AVX_OP_SSSE3(Psignd, psignd)
+ AVX_OP_SSSE3(Psignw, psignw)
+
AVX_OP_SSE4_1(Extractps, extractps)
+ AVX_OP_SSE4_1(Pblendw, pblendw)
AVX_OP_SSE4_1(Pextrb, pextrb)
AVX_OP_SSE4_1(Pextrw, pextrw)
+ AVX_OP_SSE4_1(Pmaxsb, pmaxsb)
+ AVX_OP_SSE4_1(Pmaxsd, pmaxsd)
+ AVX_OP_SSE4_1(Pminsb, pminsb)
AVX_OP_SSE4_1(Pmovsxbw, pmovsxbw)
- AVX_OP_SSE4_1(Pmovsxwd, pmovsxwd)
AVX_OP_SSE4_1(Pmovsxdq, pmovsxdq)
+ AVX_OP_SSE4_1(Pmovsxwd, pmovsxwd)
AVX_OP_SSE4_1(Pmovzxbw, pmovzxbw)
- AVX_OP_SSE4_1(Pmovzxwd, pmovzxwd)
AVX_OP_SSE4_1(Pmovzxdq, pmovzxdq)
+ AVX_OP_SSE4_1(Pmovzxwd, pmovzxwd)
AVX_OP_SSE4_1(Ptest, ptest)
- AVX_OP_SSE4_1(Roundps, roundps)
AVX_OP_SSE4_1(Roundpd, roundpd)
+ AVX_OP_SSE4_1(Roundps, roundps)
+ void F64x2ExtractLane(DoubleRegister dst, XMMRegister src, uint8_t lane);
+ void F64x2ReplaceLane(XMMRegister dst, XMMRegister src, DoubleRegister rep,
+ uint8_t lane);
+ void F64x2Min(XMMRegister dst, XMMRegister lhs, XMMRegister rhs,
+ XMMRegister scratch);
+ void F64x2Max(XMMRegister dst, XMMRegister lhs, XMMRegister rhs,
+ XMMRegister scratch);
+ void F32x4Splat(XMMRegister dst, DoubleRegister src);
+ void F32x4ExtractLane(FloatRegister dst, XMMRegister src, uint8_t lane);
void S128Store32Lane(Operand dst, XMMRegister src, uint8_t laneidx);
void I16x8ExtMulLow(XMMRegister dst, XMMRegister src1, XMMRegister src2,
XMMRegister scrat, bool is_signed);
@@ -170,6 +301,7 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
void I32x4SConvertI16x8High(XMMRegister dst, XMMRegister src);
void I32x4UConvertI16x8High(XMMRegister dst, XMMRegister src,
XMMRegister scratch);
+ void I64x2Neg(XMMRegister dst, XMMRegister src, XMMRegister scratch);
void I64x2Abs(XMMRegister dst, XMMRegister src, XMMRegister scratch);
void I64x2GtS(XMMRegister dst, XMMRegister src0, XMMRegister src1,
XMMRegister scratch);
@@ -180,6 +312,7 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
void I64x2SConvertI32x4High(XMMRegister dst, XMMRegister src);
void I64x2UConvertI32x4High(XMMRegister dst, XMMRegister src,
XMMRegister scratch);
+ void S128Not(XMMRegister dst, XMMRegister src, XMMRegister scratch);
// Requires dst == mask when AVX is not supported.
void S128Select(XMMRegister dst, XMMRegister mask, XMMRegister src1,
XMMRegister src2, XMMRegister scratch);
diff --git a/deps/v8/src/codegen/signature.h b/deps/v8/src/codegen/signature.h
index 2c4ca3e0d9..d6d8b5da0f 100644
--- a/deps/v8/src/codegen/signature.h
+++ b/deps/v8/src/codegen/signature.h
@@ -104,6 +104,14 @@ class Signature : public ZoneObject {
T* buffer_;
};
+ static Signature<T>* Build(Zone* zone, std::initializer_list<T> returns,
+ std::initializer_list<T> params) {
+ Builder builder(zone, returns.size(), params.size());
+ for (T ret : returns) builder.AddReturn(ret);
+ for (T param : params) builder.AddParam(param);
+ return builder.Build();
+ }
+
static constexpr size_t kReturnCountOffset = 0;
static constexpr size_t kParameterCountOffset =
kReturnCountOffset + kSizetSize;
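
The Build helper added above assembles a complete signature from two brace
lists in a single call. A minimal usage sketch, assuming a live Zone and V8's
MachineType as the element type (the call site itself is illustrative, not
part of this patch):

    // One Int32 return, two Int32 parameters.
    Signature<MachineType>* sig = Signature<MachineType>::Build(
        zone, {MachineType::Int32()},
        {MachineType::Int32(), MachineType::Int32()});
    DCHECK_EQ(1u, sig->return_count());
    DCHECK_EQ(2u, sig->parameter_count());
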
diff --git a/deps/v8/src/codegen/source-position-table.cc b/deps/v8/src/codegen/source-position-table.cc
index 63f1d17c70..27466a2690 100644
--- a/deps/v8/src/codegen/source-position-table.cc
+++ b/deps/v8/src/codegen/source-position-table.cc
@@ -170,9 +170,9 @@ void SourcePositionTableBuilder::AddEntry(const PositionTableEntry& entry) {
#endif
}
-template <typename LocalIsolate>
+template <typename IsolateT>
Handle<ByteArray> SourcePositionTableBuilder::ToSourcePositionTable(
- LocalIsolate* isolate) {
+ IsolateT* isolate) {
if (bytes_.empty()) return isolate->factory()->empty_byte_array();
DCHECK(!Omit());
diff --git a/deps/v8/src/codegen/source-position-table.h b/deps/v8/src/codegen/source-position-table.h
index 72a4c9f45a..afd7cc434c 100644
--- a/deps/v8/src/codegen/source-position-table.h
+++ b/deps/v8/src/codegen/source-position-table.h
@@ -54,9 +54,9 @@ class V8_EXPORT_PRIVATE SourcePositionTableBuilder {
void AddPosition(size_t code_offset, SourcePosition source_position,
bool is_statement);
- template <typename LocalIsolate>
+ template <typename IsolateT>
EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- Handle<ByteArray> ToSourcePositionTable(LocalIsolate* isolate);
+ Handle<ByteArray> ToSourcePositionTable(IsolateT* isolate);
OwnedVector<byte> ToSourcePositionTableVector();
inline bool Omit() const { return mode_ != RECORD_SOURCE_POSITIONS; }
diff --git a/deps/v8/src/codegen/string-constants.cc b/deps/v8/src/codegen/string-constants.cc
index 92a5e97396..c1ad5a7b4b 100644
--- a/deps/v8/src/codegen/string-constants.cc
+++ b/deps/v8/src/codegen/string-constants.cc
@@ -5,7 +5,6 @@
#include "src/codegen/string-constants.h"
#include "src/base/functional.h"
-#include "src/numbers/dtoa.h"
#include "src/objects/objects.h"
#include "src/objects/string-inl.h"
@@ -176,7 +175,7 @@ size_t StringConstantBase::GetMaxStringConstantLength() const {
size_t StringLiteral::GetMaxStringConstantLength() const { return length_; }
size_t NumberToStringConstant::GetMaxStringConstantLength() const {
- return kBase10MaximalLength + 1;
+ return kMaxDoubleStringLength;
}
size_t StringCons::GetMaxStringConstantLength() const {
diff --git a/deps/v8/src/codegen/turbo-assembler.cc b/deps/v8/src/codegen/turbo-assembler.cc
index e4c694097b..a9f9e08ead 100644
--- a/deps/v8/src/codegen/turbo-assembler.cc
+++ b/deps/v8/src/codegen/turbo-assembler.cc
@@ -115,12 +115,5 @@ bool TurboAssemblerBase::IsAddressableThroughRootRegister(
return isolate->root_register_addressable_region().contains(address);
}
-void TurboAssemblerBase::RecordCommentForOffHeapTrampoline(int builtin_index) {
- if (!FLAG_code_comments) return;
- std::ostringstream str;
- str << "[ Inlined Trampoline to " << Builtins::name(builtin_index);
- RecordComment(str.str().c_str());
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/turbo-assembler.h b/deps/v8/src/codegen/turbo-assembler.h
index cc9ef92919..e25ee2a629 100644
--- a/deps/v8/src/codegen/turbo-assembler.h
+++ b/deps/v8/src/codegen/turbo-assembler.h
@@ -118,13 +118,18 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
static bool IsAddressableThroughRootRegister(
Isolate* isolate, const ExternalReference& reference);
-#ifdef V8_TARGET_OS_WIN
+#if defined(V8_TARGET_OS_WIN) || defined(V8_TARGET_OS_MACOSX)
// Minimum page size. We must touch memory once per page when expanding the
// stack, to avoid access violations.
static constexpr int kStackPageSize = 4 * KB;
#endif
- void RecordCommentForOffHeapTrampoline(int builtin_index);
+ V8_INLINE void RecordCommentForOffHeapTrampoline(int builtin_index) {
+ if (!FLAG_code_comments) return;
+ std::ostringstream str;
+ str << "[ Inlined Trampoline to " << Builtins::name(builtin_index);
+ RecordComment(str.str().c_str());
+ }
protected:
Isolate* const isolate_ = nullptr;
@@ -150,8 +155,7 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
};
// Avoids emitting calls to the {Builtins::kAbort} builtin when emitting debug
-// code during the lifetime of this scope object. For disabling debug code
-// entirely use the {DontEmitDebugCodeScope} instead.
+// code during the lifetime of this scope object.
class V8_NODISCARD HardAbortScope {
public:
explicit HardAbortScope(TurboAssemblerBase* assembler)
@@ -165,27 +169,6 @@ class V8_NODISCARD HardAbortScope {
bool old_value_;
};
-#ifdef DEBUG
-struct CountIfValidRegisterFunctor {
- template <typename RegType>
- constexpr int operator()(int count, RegType reg) const {
- return count + (reg.is_valid() ? 1 : 0);
- }
-};
-
-template <typename RegType, typename... RegTypes,
- // All arguments must be either Register or DoubleRegister.
- typename = typename std::enable_if<
- base::is_same<Register, RegType, RegTypes...>::value ||
- base::is_same<DoubleRegister, RegType, RegTypes...>::value>::type>
-inline bool AreAliased(RegType first_reg, RegTypes... regs) {
- int num_different_regs = NumRegs(RegType::ListOf(first_reg, regs...));
- int num_given_regs =
- base::fold(CountIfValidRegisterFunctor{}, 0, first_reg, regs...);
- return num_different_regs < num_given_regs;
-}
-#endif
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/x64/assembler-x64.cc b/deps/v8/src/codegen/x64/assembler-x64.cc
index 032f7eb13d..eb07f3ba3b 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/assembler-x64.cc
@@ -86,45 +86,40 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
// Only use statically determined features for cross compile (snapshot).
if (cross_compile) return;
-
- // To deal with any combination of flags (e.g. --no-enable-sse4-1
- // --enable-sse-4-2), we start checking from the "highest" supported
- // extension, for each extension, enable if newer extension is supported.
- if (cpu.has_avx2() && FLAG_enable_avx2 && IsSupported(AVX)) {
- supported_ |= 1u << AVX2;
- }
- if (cpu.has_fma3() && FLAG_enable_fma3 && cpu.has_osxsave() &&
- OSHasAVXSupport()) {
- supported_ |= 1u << FMA3;
- }
- if ((cpu.has_avx() && FLAG_enable_avx && cpu.has_osxsave() &&
- OSHasAVXSupport()) ||
- IsSupported(AVX2) || IsSupported(FMA3)) {
- supported_ |= 1u << AVX;
- }
- if ((cpu.has_sse42() && FLAG_enable_sse4_2) || IsSupported(AVX)) {
- supported_ |= 1u << SSE4_2;
- }
- if ((cpu.has_sse41() && FLAG_enable_sse4_1) || IsSupported(SSE4_2)) {
- supported_ |= 1u << SSE4_1;
- }
- if ((cpu.has_ssse3() && FLAG_enable_ssse3) || IsSupported(SSE4_1)) {
- supported_ |= 1u << SSSE3;
+ if (cpu.has_sse42()) SetSupported(SSE4_2);
+ if (cpu.has_sse41()) SetSupported(SSE4_1);
+ if (cpu.has_ssse3()) SetSupported(SSSE3);
+ if (cpu.has_sse3()) SetSupported(SSE3);
+ if (cpu.has_avx() && cpu.has_osxsave() && OSHasAVXSupport()) {
+ SetSupported(AVX);
+ if (cpu.has_avx2()) SetSupported(AVX2);
+ if (cpu.has_fma3()) SetSupported(FMA3);
}
- if ((cpu.has_sse3() && FLAG_enable_sse3) || IsSupported(SSSE3))
- supported_ |= 1u << SSE3;
+
// SAHF is not generally available in long mode.
- if (cpu.has_sahf() && FLAG_enable_sahf) supported_ |= 1u << SAHF;
- if (cpu.has_bmi1() && FLAG_enable_bmi1) supported_ |= 1u << BMI1;
- if (cpu.has_bmi2() && FLAG_enable_bmi2) supported_ |= 1u << BMI2;
- if (cpu.has_lzcnt() && FLAG_enable_lzcnt) supported_ |= 1u << LZCNT;
- if (cpu.has_popcnt() && FLAG_enable_popcnt) supported_ |= 1u << POPCNT;
+ if (cpu.has_sahf() && FLAG_enable_sahf) SetSupported(SAHF);
+ if (cpu.has_bmi1() && FLAG_enable_bmi1) SetSupported(BMI1);
+ if (cpu.has_bmi2() && FLAG_enable_bmi2) SetSupported(BMI2);
+ if (cpu.has_lzcnt() && FLAG_enable_lzcnt) SetSupported(LZCNT);
+ if (cpu.has_popcnt() && FLAG_enable_popcnt) SetSupported(POPCNT);
if (strcmp(FLAG_mcpu, "auto") == 0) {
- if (cpu.is_atom()) supported_ |= 1u << ATOM;
+ if (cpu.is_atom()) SetSupported(ATOM);
} else if (strcmp(FLAG_mcpu, "atom") == 0) {
- supported_ |= 1u << ATOM;
+ SetSupported(ATOM);
}
+  // Ensure that the set of supported CPU features makes sense. E.g. it is
+  // wrong to support AVX but not SSE4_2: with --enable-avx and
+  // --no-enable-sse4-2, the code above would set AVX to supported and SSE4_2
+  // to unsupported, and the checks below would then clear AVX again.
+ if (!FLAG_enable_sse3) SetUnsupported(SSE3);
+ if (!FLAG_enable_ssse3 || !IsSupported(SSE3)) SetUnsupported(SSSE3);
+ if (!FLAG_enable_sse4_1 || !IsSupported(SSSE3)) SetUnsupported(SSE4_1);
+ if (!FLAG_enable_sse4_2 || !IsSupported(SSE4_1)) SetUnsupported(SSE4_2);
+ if (!FLAG_enable_avx || !IsSupported(SSE4_2)) SetUnsupported(AVX);
+ if (!FLAG_enable_avx2 || !IsSupported(AVX)) SetUnsupported(AVX2);
+ if (!FLAG_enable_fma3 || !IsSupported(AVX)) SetUnsupported(FMA3);
+
// Set a static value on whether Simd is supported.
// This variable is only used for certain archs to query SupportWasmSimd128()
// at runtime in builtins using an extern ref. Other callers should use
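
The probing logic above now runs in two phases: first record every feature the
CPU reports, then strip any feature whose flag is disabled or whose
prerequisite was stripped, walking up the SSE3 -> SSSE3 -> SSE4_1 -> SSE4_2 ->
AVX -> AVX2/FMA3 chain. A hedged stand-alone model of that ordering (function
and flag names are illustrative, not V8 code):

    // SSE3, SSSE3: feature bit indices, as in the CpuFeature enum.
    uint32_t NormalizeFeatures(uint32_t cpuid_bits, bool enable_sse3,
                               bool enable_ssse3 /* , ... */) {
      uint32_t supported = cpuid_bits;               // phase 1: trust CPUID
      if (!enable_sse3) supported &= ~(1u << SSE3);  // phase 2: apply flags
      if (!enable_ssse3 || !(supported & (1u << SSE3)))
        supported &= ~(1u << SSSE3);                 // ...and prerequisites
      // Continuing up the chain is what guarantees that, e.g.,
      // --no-enable-sse4-2 also clears AVX and AVX2.
      return supported;
    }
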
@@ -1419,12 +1414,13 @@ void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
}
void Assembler::j(Condition cc, Address entry, RelocInfo::Mode rmode) {
- DCHECK(RelocInfo::IsRuntimeEntry(rmode));
+ DCHECK(RelocInfo::IsWasmStubCall(rmode));
EnsureSpace ensure_space(this);
DCHECK(is_uint4(cc));
emit(0x0F);
emit(0x80 | cc);
- emit_runtime_entry(entry, rmode);
+ RecordRelocInfo(rmode);
+ emitl(static_cast<int32_t>(entry));
}
void Assembler::j(Condition cc, Handle<Code> target, RelocInfo::Mode rmode) {
diff --git a/deps/v8/src/codegen/x64/assembler-x64.h b/deps/v8/src/codegen/x64/assembler-x64.h
index 97e18ed8fe..e6205311c2 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.h
+++ b/deps/v8/src/codegen/x64/assembler-x64.h
@@ -124,6 +124,9 @@ class Immediate {
DCHECK(SmiValuesAre31Bits()); // Only available for 31-bit SMI.
}
+ int32_t value() const { return value_; }
+ RelocInfo::Mode rmode() const { return rmode_; }
+
private:
const int32_t value_;
const RelocInfo::Mode rmode_ = RelocInfo::NONE;
@@ -1274,6 +1277,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
SSE_CMP_P(cmpeq, 0x0)
SSE_CMP_P(cmplt, 0x1)
SSE_CMP_P(cmple, 0x2)
+ SSE_CMP_P(cmpunord, 0x3)
SSE_CMP_P(cmpneq, 0x4)
SSE_CMP_P(cmpnlt, 0x5)
SSE_CMP_P(cmpnle, 0x6)
@@ -1571,6 +1575,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
AVX_CMP_P(vcmpeq, 0x0)
AVX_CMP_P(vcmplt, 0x1)
AVX_CMP_P(vcmple, 0x2)
+ AVX_CMP_P(vcmpunord, 0x3)
AVX_CMP_P(vcmpneq, 0x4)
AVX_CMP_P(vcmpnlt, 0x5)
AVX_CMP_P(vcmpnle, 0x6)
@@ -2374,8 +2379,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// checks that we did not generate too much.
class EnsureSpace {
public:
- explicit EnsureSpace(Assembler* assembler) : assembler_(assembler) {
- if (assembler_->buffer_overflow()) assembler_->GrowBuffer();
+ explicit V8_INLINE EnsureSpace(Assembler* assembler) : assembler_(assembler) {
+ if (V8_UNLIKELY(assembler_->buffer_overflow())) assembler_->GrowBuffer();
#ifdef DEBUG
space_before_ = assembler_->available_space();
#endif
@@ -2389,7 +2394,7 @@ class EnsureSpace {
#endif
private:
- Assembler* assembler_;
+ Assembler* const assembler_;
#ifdef DEBUG
int space_before_;
#endif
diff --git a/deps/v8/src/codegen/x64/interface-descriptors-x64-inl.h b/deps/v8/src/codegen/x64/interface-descriptors-x64-inl.h
new file mode 100644
index 0000000000..a24330a4c7
--- /dev/null
+++ b/deps/v8/src/codegen/x64/interface-descriptors-x64-inl.h
@@ -0,0 +1,258 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_X64_INTERFACE_DESCRIPTORS_X64_INL_H_
+#define V8_CODEGEN_X64_INTERFACE_DESCRIPTORS_X64_INL_H_
+
+#if V8_TARGET_ARCH_X64
+
+#include "src/codegen/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+
+constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() {
+ auto registers = RegisterArray(rax, rbx, rcx, rdx, rdi);
+ STATIC_ASSERT(registers.size() == kMaxBuiltinRegisterParams);
+ return registers;
+}
+
+// static
+constexpr auto RecordWriteDescriptor::registers() {
+ return RegisterArray(arg_reg_1, arg_reg_2, arg_reg_3, arg_reg_4,
+ kReturnRegister0);
+}
+
+// static
+constexpr auto DynamicCheckMapsDescriptor::registers() {
+ return RegisterArray(kReturnRegister0, arg_reg_1, arg_reg_2, arg_reg_3,
+ kRuntimeCallFunctionRegister, kContextRegister);
+}
+
+// static
+constexpr auto EphemeronKeyBarrierDescriptor::registers() {
+ return RegisterArray(arg_reg_1, arg_reg_2, arg_reg_3, arg_reg_4,
+ kReturnRegister0);
+}
+
+// static
+constexpr Register LoadDescriptor::ReceiverRegister() { return rdx; }
+// static
+constexpr Register LoadDescriptor::NameRegister() { return rcx; }
+// static
+constexpr Register LoadDescriptor::SlotRegister() { return rax; }
+
+// static
+constexpr Register LoadWithVectorDescriptor::VectorRegister() { return rbx; }
+
+// static
+constexpr Register
+LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
+ return rdi;
+}
+
+// static
+constexpr Register StoreDescriptor::ReceiverRegister() { return rdx; }
+// static
+constexpr Register StoreDescriptor::NameRegister() { return rcx; }
+// static
+constexpr Register StoreDescriptor::ValueRegister() { return rax; }
+// static
+constexpr Register StoreDescriptor::SlotRegister() { return rdi; }
+
+// static
+constexpr Register StoreWithVectorDescriptor::VectorRegister() { return rbx; }
+
+// static
+constexpr Register StoreTransitionDescriptor::MapRegister() { return r11; }
+
+// static
+constexpr Register ApiGetterDescriptor::HolderRegister() { return rcx; }
+// static
+constexpr Register ApiGetterDescriptor::CallbackRegister() { return rbx; }
+
+// static
+constexpr Register GrowArrayElementsDescriptor::ObjectRegister() { return rax; }
+// static
+constexpr Register GrowArrayElementsDescriptor::KeyRegister() { return rbx; }
+
+// static
+constexpr Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
+ return rbx;
+}
+// static
+constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() {
+ return rcx;
+}
+
+// static
+constexpr Register TypeConversionDescriptor::ArgumentRegister() { return rax; }
+
+// static
+constexpr auto TypeofDescriptor::registers() { return RegisterArray(rbx); }
+
+// static
+constexpr auto CallTrampolineDescriptor::registers() {
+ // rax : number of arguments
+ // rdi : the target to call
+ return RegisterArray(rdi, rax);
+}
+
+// static
+constexpr auto CallVarargsDescriptor::registers() {
+ // rax : number of arguments (on the stack, not including receiver)
+ // rdi : the target to call
+ // rcx : arguments list length (untagged)
+ // rbx : arguments list (FixedArray)
+ return RegisterArray(rdi, rax, rcx, rbx);
+}
+
+// static
+constexpr auto CallForwardVarargsDescriptor::registers() {
+ // rax : number of arguments
+ // rcx : start index (to support rest parameters)
+ // rdi : the target to call
+ return RegisterArray(rdi, rax, rcx);
+}
+
+// static
+constexpr auto CallFunctionTemplateDescriptor::registers() {
+ // rdx: the function template info
+ // rcx: number of arguments (on the stack, not including receiver)
+ return RegisterArray(rdx, rcx);
+}
+
+// static
+constexpr auto CallWithSpreadDescriptor::registers() {
+ // rax : number of arguments (on the stack, not including receiver)
+ // rdi : the target to call
+ // rbx : the object to spread
+ return RegisterArray(rdi, rax, rbx);
+}
+
+// static
+constexpr auto CallWithArrayLikeDescriptor::registers() {
+ // rdi : the target to call
+ // rbx : the arguments list
+ return RegisterArray(rdi, rbx);
+}
+
+// static
+constexpr auto ConstructVarargsDescriptor::registers() {
+ // rax : number of arguments (on the stack, not including receiver)
+ // rdi : the target to call
+ // rdx : the new target
+ // rcx : arguments list length (untagged)
+ // rbx : arguments list (FixedArray)
+ return RegisterArray(rdi, rdx, rax, rcx, rbx);
+}
+
+// static
+constexpr auto ConstructForwardVarargsDescriptor::registers() {
+ // rax : number of arguments
+ // rdx : the new target
+ // rcx : start index (to support rest parameters)
+ // rdi : the target to call
+ return RegisterArray(rdi, rdx, rax, rcx);
+}
+
+// static
+constexpr auto ConstructWithSpreadDescriptor::registers() {
+ // rax : number of arguments (on the stack, not including receiver)
+ // rdi : the target to call
+ // rdx : the new target
+ // rbx : the object to spread
+ return RegisterArray(rdi, rdx, rax, rbx);
+}
+
+// static
+constexpr auto ConstructWithArrayLikeDescriptor::registers() {
+ // rdi : the target to call
+ // rdx : the new target
+ // rbx : the arguments list
+ return RegisterArray(rdi, rdx, rbx);
+}
+
+// static
+constexpr auto ConstructStubDescriptor::registers() {
+ // rax : number of arguments
+ // rdx : the new target
+ // rdi : the target to call
+ // rbx : allocation site or undefined
+ return RegisterArray(rdi, rdx, rax, rbx);
+}
+
+// static
+constexpr auto AbortDescriptor::registers() { return RegisterArray(rdx); }
+
+// static
+constexpr auto CompareDescriptor::registers() {
+ return RegisterArray(rdx, rax);
+}
+
+// static
+constexpr auto BinaryOpDescriptor::registers() {
+ return RegisterArray(rdx, rax);
+}
+
+// static
+constexpr auto Compare_BaselineDescriptor::registers() {
+ return RegisterArray(rdx, rax, rbx);
+}
+
+// static
+constexpr auto BinaryOp_BaselineDescriptor::registers() {
+ return RegisterArray(rdx, rax, rbx);
+}
+
+// static
+constexpr auto ApiCallbackDescriptor::registers() {
+ return RegisterArray(rdx, // api function address
+ rcx, // argument count (not including receiver)
+ rbx, // call data
+ rdi); // holder
+}
+
+// static
+constexpr auto InterpreterDispatchDescriptor::registers() {
+ return RegisterArray(
+ kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister);
+}
+
+// static
+constexpr auto InterpreterPushArgsThenCallDescriptor::registers() {
+ return RegisterArray(rax, // argument count (not including receiver)
+ rbx, // address of first argument
+                       rdi);  // the target callable to be called
+}
+
+// static
+constexpr auto InterpreterPushArgsThenConstructDescriptor::registers() {
+ return RegisterArray(
+ rax, // argument count (not including receiver)
+ rcx, // address of first argument
+ rdi, // constructor to call
+ rdx, // new target
+ rbx); // allocation site feedback if available, undefined otherwise
+}
+
+// static
+constexpr auto ResumeGeneratorDescriptor::registers() {
+ return RegisterArray(
+ rax, // the value to pass to the generator
+ rdx); // the JSGeneratorObject / JSAsyncGeneratorObject to resume
+}
+
+// static
+constexpr auto RunMicrotasksEntryDescriptor::registers() {
+ return RegisterArray(arg_reg_1, arg_reg_2);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_X64
+
+#endif // V8_CODEGEN_X64_INTERFACE_DESCRIPTORS_X64_INL_H_
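
Everything in the new header is constexpr, so each descriptor's register set
can be folded at compile time; RegisterArray presumably just packs its
arguments into a fixed-size array (its size() is statically asserted above).
A hedged stand-in for such a helper, not the code from this patch:

    template <typename... Regs>
    constexpr std::array<Register, sizeof...(Regs)> RegisterArray(
        Regs... regs) {
      return {regs...};
    }
    // Mirrors the descriptors above, e.g.:
    //   constexpr auto regs = RegisterArray(rdi, rax);  // target, arg count
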
diff --git a/deps/v8/src/codegen/x64/interface-descriptors-x64.cc b/deps/v8/src/codegen/x64/interface-descriptors-x64.cc
deleted file mode 100644
index 4029b56d2b..0000000000
--- a/deps/v8/src/codegen/x64/interface-descriptors-x64.cc
+++ /dev/null
@@ -1,309 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_X64
-
-#include "src/codegen/interface-descriptors.h"
-
-#include "src/execution/frames.h"
-
-namespace v8 {
-namespace internal {
-
-const Register CallInterfaceDescriptor::ContextRegister() { return rsi; }
-
-void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
- CallInterfaceDescriptorData* data, int register_parameter_count) {
- const Register default_stub_registers[] = {rax, rbx, rcx, rdx, rdi};
- CHECK_LE(static_cast<size_t>(register_parameter_count),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(register_parameter_count,
- default_stub_registers);
-}
-
-void RecordWriteDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {arg_reg_1, arg_reg_2, arg_reg_3,
- arg_reg_4, kReturnRegister0};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void DynamicCheckMapsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register default_stub_registers[] = {kReturnRegister0,
- arg_reg_1,
- arg_reg_2,
- arg_reg_3,
- kRuntimeCallFunctionRegister,
- kContextRegister};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void EphemeronKeyBarrierDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {arg_reg_1, arg_reg_2, arg_reg_3,
- arg_reg_4, kReturnRegister0};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-const Register LoadDescriptor::ReceiverRegister() { return rdx; }
-const Register LoadDescriptor::NameRegister() { return rcx; }
-const Register LoadDescriptor::SlotRegister() { return rax; }
-
-const Register LoadWithVectorDescriptor::VectorRegister() { return rbx; }
-
-const Register
-LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
- return rdi;
-}
-
-const Register StoreDescriptor::ReceiverRegister() { return rdx; }
-const Register StoreDescriptor::NameRegister() { return rcx; }
-const Register StoreDescriptor::ValueRegister() { return rax; }
-const Register StoreDescriptor::SlotRegister() { return rdi; }
-
-const Register StoreWithVectorDescriptor::VectorRegister() { return rbx; }
-
-const Register StoreTransitionDescriptor::SlotRegister() { return rdi; }
-const Register StoreTransitionDescriptor::VectorRegister() { return rbx; }
-const Register StoreTransitionDescriptor::MapRegister() { return r11; }
-
-const Register ApiGetterDescriptor::HolderRegister() { return rcx; }
-const Register ApiGetterDescriptor::CallbackRegister() { return rbx; }
-
-const Register GrowArrayElementsDescriptor::ObjectRegister() { return rax; }
-const Register GrowArrayElementsDescriptor::KeyRegister() { return rbx; }
-
-const Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
- return rbx;
-}
-const Register BaselineLeaveFrameDescriptor::WeightRegister() { return rcx; }
-
-void TypeofDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {rbx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-// static
-const Register TypeConversionDescriptor::ArgumentRegister() { return rax; }
-
-void CallTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // rax : number of arguments
- // rdi : the target to call
- Register registers[] = {rdi, rax};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // rax : number of arguments (on the stack, not including receiver)
- // rdi : the target to call
- // rcx : arguments list length (untagged)
- // rbx : arguments list (FixedArray)
- Register registers[] = {rdi, rax, rcx, rbx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallForwardVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // rax : number of arguments
- // rcx : start index (to support rest parameters)
- // rdi : the target to call
- Register registers[] = {rdi, rax, rcx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallFunctionTemplateDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // rdx: the function template info
- // rcx: number of arguments (on the stack, not including receiver)
- Register registers[] = {rdx, rcx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallWithSpreadDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // rax : number of arguments (on the stack, not including receiver)
- // rdi : the target to call
- // rbx : the object to spread
- Register registers[] = {rdi, rax, rbx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // rdi : the target to call
- // rbx : the arguments list
- Register registers[] = {rdi, rbx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // rax : number of arguments (on the stack, not including receiver)
- // rdi : the target to call
- // rdx : the new target
- // rcx : arguments list length (untagged)
- // rbx : arguments list (FixedArray)
- Register registers[] = {rdi, rdx, rax, rcx, rbx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // rax : number of arguments
- // rdx : the new target
- // rcx : start index (to support rest parameters)
- // rdi : the target to call
- Register registers[] = {rdi, rdx, rax, rcx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructWithSpreadDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // rax : number of arguments (on the stack, not including receiver)
- // rdi : the target to call
- // rdx : the new target
- // rbx : the object to spread
- Register registers[] = {rdi, rdx, rax, rbx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // rdi : the target to call
- // rdx : the new target
- // rbx : the arguments list
- Register registers[] = {rdi, rdx, rbx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructStubDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // rax : number of arguments
- // rdx : the new target
- // rdi : the target to call
- // rbx : allocation site or undefined
- Register registers[] = {rdi, rdx, rax, rbx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void AbortDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {rdx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CompareDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {rdx, rax};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void Compare_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {rdx, rax, rbx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void BinaryOpDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {rdx, rax};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void BinaryOp_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {rdx, rax, rbx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ApiCallbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- rdx, // api function address
- rcx, // argument count (not including receiver)
- rbx, // call data
- rdi, // holder
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterDispatchDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
- kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterPushArgsThenCallDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- rax, // argument count (not including receiver)
- rbx, // address of first argument
- rdi // the target callable to be call
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- rax, // argument count (not including receiver)
- rcx, // address of first argument
- rdi, // constructor to call
- rdx, // new target
- rbx, // allocation site feedback if available, undefined otherwise
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ResumeGeneratorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- rax, // the value to pass to the generator
- rdx // the JSGeneratorObject / JSAsyncGeneratorObject to resume
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- rbx, // loaded new FP
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {arg_reg_1, arg_reg_2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.cc b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
index cb254370b2..53f3f97f9a 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
@@ -12,6 +12,7 @@
#include "src/codegen/code-factory.h"
#include "src/codegen/cpu-features.h"
#include "src/codegen/external-reference-table.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/register-configuration.h"
#include "src/codegen/string-constants.h"
@@ -194,6 +195,9 @@ void TurboAssembler::CompareRoot(Operand with, RootIndex index) {
void TurboAssembler::LoadMap(Register destination, Register object) {
LoadTaggedPointerField(destination,
FieldOperand(object, HeapObject::kMapOffset));
+#ifdef V8_MAP_PACKING
+ UnpackMapWord(destination);
+#endif
}
void TurboAssembler::LoadTaggedPointerField(Register destination,
@@ -205,6 +209,16 @@ void TurboAssembler::LoadTaggedPointerField(Register destination,
}
}
+#ifdef V8_MAP_PACKING
+void TurboAssembler::UnpackMapWord(Register r) {
+ // Clear the top two bytes (which may include metadata). Must be in sync with
+ // MapWord::Unpack, and vice versa.
+ shlq(r, Immediate(16));
+ shrq(r, Immediate(16));
+ xorq(r, Immediate(Internals::kMapWordXorMask));
+}
+#endif
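
The shift pair clears the upper sixteen bits before the XOR undoes the packing
mask. The same arithmetic as a plain-C++ model (illustrative only, not V8's
MapWord code; the mask constant is taken on faith from the snippet above):

    uint64_t UnpackMapWordModel(uint64_t packed, uint64_t map_word_xor_mask) {
      uint64_t cleared = (packed << 16) >> 16;  // shlq/shrq: drop top 2 bytes
      return cleared ^ map_word_xor_mask;       // must match the packing side
    }
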
+
void TurboAssembler::LoadTaggedSignedField(Register destination,
Operand field_operand) {
if (COMPRESS_POINTERS_BOOL) {
@@ -288,7 +302,7 @@ void TurboAssembler::DecompressTaggedPointer(Register destination,
Operand field_operand) {
RecordComment("[ DecompressTaggedPointer");
movl(destination, field_operand);
- addq(destination, kPointerCageBaseRegister);
+ addq(destination, kPtrComprCageBaseRegister);
RecordComment("]");
}
@@ -296,7 +310,7 @@ void TurboAssembler::DecompressTaggedPointer(Register destination,
Register source) {
RecordComment("[ DecompressTaggedPointer");
movl(destination, source);
- addq(destination, kPointerCageBaseRegister);
+ addq(destination, kPtrComprCageBaseRegister);
RecordComment("]");
}
@@ -304,7 +318,7 @@ void TurboAssembler::DecompressAnyTagged(Register destination,
Operand field_operand) {
RecordComment("[ DecompressAnyTagged");
movl(destination, field_operand);
- addq(destination, kPointerCageBaseRegister);
+ addq(destination, kPtrComprCageBaseRegister);
RecordComment("]");
}
@@ -318,7 +332,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
Label done;
// Skip barrier if writing a smi.
- if (smi_check == INLINE_SMI_CHECK) {
+ if (smi_check == SmiCheck::kInline) {
JumpIfSmi(value, &done);
}
@@ -327,7 +341,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
DCHECK(IsAligned(offset, kTaggedSize));
leaq(dst, FieldOperand(object, offset));
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Label ok;
testb(dst, Immediate(kTaggedSize - 1));
j(zero, &ok, Label::kNear);
@@ -336,13 +350,13 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
RecordWrite(object, dst, value, save_fp, remembered_set_action,
- OMIT_SMI_CHECK);
+ SmiCheck::kOmit);
bind(&done);
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Move(value, kZapValue, RelocInfo::NONE);
Move(dst, kZapValue, RelocInfo::NONE);
}
@@ -357,19 +371,27 @@ void TurboAssembler::SaveRegisters(RegList registers) {
}
}
-void TurboAssembler::LoadExternalPointerField(Register destination,
- Operand field_operand,
- ExternalPointerTag tag) {
+void TurboAssembler::LoadExternalPointerField(
+ Register destination, Operand field_operand, ExternalPointerTag tag,
+ Register scratch, IsolateRootLocation isolateRootLocation) {
#ifdef V8_HEAP_SANDBOX
- LoadAddress(kScratchRegister,
- ExternalReference::external_pointer_table_address(isolate()));
- movq(kScratchRegister,
- Operand(kScratchRegister, Internals::kExternalPointerTableBufferOffset));
+ DCHECK(!field_operand.AddressUsesRegister(scratch));
+ if (isolateRootLocation == IsolateRootLocation::kInRootRegister) {
+ DCHECK(root_array_available_);
+ movq(scratch, Operand(kRootRegister,
+ IsolateData::external_pointer_table_offset() +
+ Internals::kExternalPointerTableBufferOffset));
+ } else {
+ DCHECK(isolateRootLocation == IsolateRootLocation::kInScratchRegister);
+ movq(scratch,
+ Operand(scratch, IsolateData::external_pointer_table_offset() +
+ Internals::kExternalPointerTableBufferOffset));
+ }
movl(destination, field_operand);
- movq(destination, Operand(kScratchRegister, destination, times_8, 0));
+ movq(destination, Operand(scratch, destination, times_8, 0));
if (tag != 0) {
- movq(kScratchRegister, Immediate64(tag));
- xorq(destination, kScratchRegister);
+ movq(scratch, Immediate64(~tag));
+ andq(destination, scratch);
}
#else
movq(destination, field_operand);
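
Under the heap sandbox the field no longer holds a raw pointer but a 32-bit
index into the external pointer table; the sequence above loads the entry and
strips the type tag. A rough C++ model of the V8_HEAP_SANDBOX path (names
illustrative):

    // movl loads the index, movq indexes the 8-byte-entry table, and andq
    // applies the inverted tag.
    uint64_t LoadExternalPointerModel(const uint64_t* table, uint32_t index,
                                      uint64_t tag) {
      uint64_t entry = table[index];
      return tag != 0 ? (entry & ~tag) : entry;
    }
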
@@ -480,13 +502,13 @@ void MacroAssembler::RecordWrite(Register object, Register address,
DCHECK(value != address);
AssertNotSmi(object);
- if ((remembered_set_action == OMIT_REMEMBERED_SET &&
+ if ((remembered_set_action == RememberedSetAction::kOmit &&
!FLAG_incremental_marking) ||
FLAG_disable_write_barriers) {
return;
}
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Label ok;
cmp_tagged(value, Operand(address, 0));
j(equal, &ok, Label::kNear);
@@ -498,7 +520,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
// catch stores of smis and stores into the young generation.
Label done;
- if (smi_check == INLINE_SMI_CHECK) {
+ if (smi_check == SmiCheck::kInline) {
// Skip barrier if writing a smi.
JumpIfSmi(value, &done);
}
@@ -519,18 +541,18 @@ void MacroAssembler::RecordWrite(Register object, Register address,
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Move(address, kZapValue, RelocInfo::NONE);
Move(value, kZapValue, RelocInfo::NONE);
}
}
void TurboAssembler::Assert(Condition cc, AbortReason reason) {
- if (emit_debug_code()) Check(cc, reason);
+ if (FLAG_debug_code) Check(cc, reason);
}
void TurboAssembler::AssertUnreachable(AbortReason reason) {
- if (emit_debug_code()) Abort(reason);
+ if (FLAG_debug_code) Abort(reason);
}
void TurboAssembler::Check(Condition cc, AbortReason reason) {
@@ -556,11 +578,11 @@ void TurboAssembler::CheckStackAlignment() {
}
void TurboAssembler::Abort(AbortReason reason) {
-#ifdef DEBUG
- const char* msg = GetAbortReason(reason);
- RecordComment("Abort message: ");
- RecordComment(msg);
-#endif
+ if (FLAG_code_comments) {
+ const char* msg = GetAbortReason(reason);
+ RecordComment("Abort message: ");
+ RecordComment(msg);
+ }
// Avoid emitting call to builtin if requested.
if (trap_on_abort()) {
@@ -603,7 +625,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
- Set(rax, num_arguments);
+ Move(rax, num_arguments);
LoadAddress(rbx, ExternalReference::Create(f));
Handle<Code> code =
CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
@@ -624,7 +646,7 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
DCHECK_EQ(1, function->result_size);
if (function->nargs >= 0) {
- Set(rax, function->nargs);
+ Move(rax, function->nargs);
}
JumpToExternalReference(ExternalReference::Create(fid));
}
@@ -633,8 +655,8 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
bool builtin_exit_frame) {
// Set the entry point and jump to the C entry runtime stub.
LoadAddress(rbx, ext);
- Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
- kArgvOnStack, builtin_exit_frame);
+ Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
+ ArgvMode::kStack, builtin_exit_frame);
Jump(code, RelocInfo::CODE_TARGET);
}
@@ -656,7 +678,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
}
// R12 to r15 are callee save on all platforms.
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
bytes += kDoubleSize * XMMRegister::kNumRegisters;
}
@@ -678,7 +700,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
}
// R12 to r15 are callee save on all platforms.
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
int delta = kDoubleSize * XMMRegister::kNumRegisters;
AllocateStackSpace(delta);
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
@@ -694,7 +716,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
int bytes = 0;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
Movsd(reg, Operand(rsp, i * kDoubleSize));
@@ -715,6 +737,24 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
return bytes;
}
+void TurboAssembler::Movq(XMMRegister dst, Register src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vmovq(dst, src);
+ } else {
+ movq(dst, src);
+ }
+}
+
+void TurboAssembler::Movq(Register dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vmovq(dst, src);
+ } else {
+ movq(dst, src);
+ }
+}
+
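
These hand-written Movq overloads show the shape that every AVX_OP entry in
the headers above presumably expands to: prefer the VEX encoding under a
CpuFeatureScope when AVX is available, otherwise fall back to the legacy SSE
form. A hedged sketch for a binary op (the actual macro lives in the shared
ia32/x64 header and is not shown in this patch):

    void Andps(XMMRegister dst, XMMRegister src) {
      if (CpuFeatures::IsSupported(AVX)) {
        CpuFeatureScope avx_scope(this, AVX);
        vandps(dst, dst, src);  // VEX form takes three operands
      } else {
        andps(dst, src);        // legacy SSE form is destructive on dst
      }
    }
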
void TurboAssembler::Movdqa(XMMRegister dst, Operand src) {
// See comments in Movdqa(XMMRegister, XMMRegister).
if (CpuFeatures::IsSupported(AVX)) {
@@ -1033,7 +1073,7 @@ void ConvertFloatToUint64(TurboAssembler* tasm, Register dst,
// The input value is within uint64 range and the second conversion worked
// successfully, but we still have to undo the subtraction we did
// earlier.
- tasm->Set(kScratchRegister, 0x8000000000000000);
+ tasm->Move(kScratchRegister, 0x8000000000000000);
tasm->orq(dst, kScratchRegister);
tasm->bind(&success);
}
@@ -1055,26 +1095,6 @@ void TurboAssembler::Cvttss2uiq(Register dst, XMMRegister src, Label* fail) {
ConvertFloatToUint64<XMMRegister, false>(this, dst, src, fail);
}
-void TurboAssembler::Set(Register dst, int64_t x) {
- if (x == 0) {
- xorl(dst, dst);
- } else if (is_uint32(x)) {
- movl(dst, Immediate(static_cast<uint32_t>(x)));
- } else if (is_int32(x)) {
- movq(dst, Immediate(static_cast<int32_t>(x)));
- } else {
- movq(dst, x);
- }
-}
-
-void TurboAssembler::Set(Operand dst, intptr_t x) {
- if (is_int32(x)) {
- movq(dst, Immediate(static_cast<int32_t>(x)));
- } else {
- Set(kScratchRegister, x);
- movq(dst, kScratchRegister);
- }
-}
// ----------------------------------------------------------------------------
// Smi tagging, untagging and tag detection.
@@ -1084,36 +1104,6 @@ Register TurboAssembler::GetSmiConstant(Smi source) {
return kScratchRegister;
}
-void TurboAssembler::Move(Register dst, Smi source) {
- STATIC_ASSERT(kSmiTag == 0);
- int value = source.value();
- if (value == 0) {
- xorl(dst, dst);
- } else if (SmiValuesAre32Bits() || value < 0) {
- Move(dst, source.ptr(), RelocInfo::NONE);
- } else {
- uint32_t uvalue = static_cast<uint32_t>(source.ptr());
- if (uvalue <= 0xFF) {
- // Emit shorter instructions for small Smis
- xorl(dst, dst);
- movb(dst, Immediate(uvalue));
- } else {
- movl(dst, Immediate(uvalue));
- }
- }
-}
-
-void TurboAssembler::Move(Register dst, ExternalReference ext) {
- // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
- // non-isolate-independent code. In many cases it might be cheaper than
- // embedding the relocatable value.
- if (root_array_available_ && options().isolate_independent_code) {
- IndirectLoadExternalReference(dst, ext);
- return;
- }
- movq(dst, Immediate64(ext.address(), RelocInfo::EXTERNAL_REFERENCE));
-}
-
void MacroAssembler::Cmp(Register dst, int32_t src) {
if (src == 0) {
testl(dst, dst);
@@ -1335,6 +1325,39 @@ void TurboAssembler::Push(Smi source) {
// ----------------------------------------------------------------------------
+void TurboAssembler::Move(Register dst, Smi source) {
+ STATIC_ASSERT(kSmiTag == 0);
+ int value = source.value();
+ if (value == 0) {
+ xorl(dst, dst);
+ } else if (SmiValuesAre32Bits() || value < 0) {
+ Move(dst, source.ptr(), RelocInfo::NONE);
+ } else {
+ uint32_t uvalue = static_cast<uint32_t>(source.ptr());
+ Move(dst, uvalue);
+ }
+}
+
+void TurboAssembler::Move(Operand dst, intptr_t x) {
+ if (is_int32(x)) {
+ movq(dst, Immediate(static_cast<int32_t>(x)));
+ } else {
+ Move(kScratchRegister, x);
+ movq(dst, kScratchRegister);
+ }
+}
+
+void TurboAssembler::Move(Register dst, ExternalReference ext) {
+ // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
+ // non-isolate-independent code. In many cases it might be cheaper than
+ // embedding the relocatable value.
+ if (root_array_available_ && options().isolate_independent_code) {
+ IndirectLoadExternalReference(dst, ext);
+ return;
+ }
+ movq(dst, Immediate64(ext.address(), RelocInfo::EXTERNAL_REFERENCE));
+}
+
void TurboAssembler::Move(Register dst, Register src) {
if (dst != src) {
movq(dst, src);
@@ -1342,7 +1365,13 @@ void TurboAssembler::Move(Register dst, Register src) {
}
void TurboAssembler::Move(Register dst, Operand src) { movq(dst, src); }
-void TurboAssembler::Move(Register dst, Immediate src) { movl(dst, src); }
+void TurboAssembler::Move(Register dst, Immediate src) {
+ if (src.rmode() == RelocInfo::Mode::NONE) {
+ Move(dst, src.value());
+ } else {
+ movl(dst, src);
+ }
+}
void TurboAssembler::Move(XMMRegister dst, XMMRegister src) {
if (dst != src) {
@@ -1481,7 +1510,7 @@ void TurboAssembler::PushArray(Register array, Register size, Register scratch,
Register counter = scratch;
Label loop, entry;
if (order == PushArrayOrder::kReverse) {
- Set(counter, 0);
+ Move(counter, 0);
jmp(&entry);
bind(&loop);
Push(Operand(array, counter, times_system_pointer_size, 0));
@@ -1681,7 +1710,7 @@ void TurboAssembler::CallBuiltin(int builtin_index) {
Move(kScratchRegister, entry, RelocInfo::OFF_HEAP_TARGET);
call(kScratchRegister);
}
- if (FLAG_code_comments) RecordComment("]");
+ RecordComment("]");
}
void TurboAssembler::TailCallBuiltin(int builtin_index) {
@@ -1698,7 +1727,7 @@ void TurboAssembler::TailCallBuiltin(int builtin_index) {
Address entry = d.InstructionStartOfBuiltin(builtin_index);
Jump(entry, RelocInfo::OFF_HEAP_TARGET);
}
- if (FLAG_code_comments) RecordComment("]");
+ RecordComment("]");
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
@@ -1854,29 +1883,6 @@ void TurboAssembler::Pmaddubsw(XMMRegister dst, XMMRegister src1,
}
}
-void TurboAssembler::Unpcklps(XMMRegister dst, XMMRegister src1, Operand src2) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vunpcklps(dst, src1, src2);
- } else {
- DCHECK_EQ(dst, src1);
- unpcklps(dst, src2);
- }
-}
-
-void TurboAssembler::Shufps(XMMRegister dst, XMMRegister src1, XMMRegister src2,
- byte imm8) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vshufps(dst, src1, src2, imm8);
- } else {
- if (dst != src1) {
- movaps(dst, src1);
- }
- shufps(dst, src2, imm8);
- }
-}
-
void TurboAssembler::Pextrd(Register dst, XMMRegister src, uint8_t imm8) {
if (imm8 == 0) {
Movd(dst, src);
@@ -2008,36 +2014,6 @@ void TurboAssembler::Pinsrq(XMMRegister dst, XMMRegister src1, Operand src2,
imm8, base::Optional<CpuFeature>(SSE4_1));
}
-void TurboAssembler::Psllq(XMMRegister dst, byte imm8) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpsllq(dst, dst, imm8);
- } else {
- DCHECK(!IsEnabled(AVX));
- psllq(dst, imm8);
- }
-}
-
-void TurboAssembler::Psrlq(XMMRegister dst, byte imm8) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpsrlq(dst, dst, imm8);
- } else {
- DCHECK(!IsEnabled(AVX));
- psrlq(dst, imm8);
- }
-}
-
-void TurboAssembler::Pslld(XMMRegister dst, byte imm8) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpslld(dst, dst, imm8);
- } else {
- DCHECK(!IsEnabled(AVX));
- pslld(dst, imm8);
- }
-}
-
void TurboAssembler::Pblendvb(XMMRegister dst, XMMRegister src1,
XMMRegister src2, XMMRegister mask) {
if (CpuFeatures::IsSupported(AVX)) {
@@ -2375,21 +2351,6 @@ void TurboAssembler::Negpd(XMMRegister dst) {
ExternalReference::address_of_double_neg_constant()));
}
-void TurboAssembler::Psrld(XMMRegister dst, byte imm8) {
- Psrld(dst, dst, imm8);
-}
-
-void TurboAssembler::Psrld(XMMRegister dst, XMMRegister src, byte imm8) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpsrld(dst, src, imm8);
- } else {
- DCHECK(!IsEnabled(AVX));
- DCHECK_EQ(dst, src);
- psrld(dst, imm8);
- }
-}
-
void TurboAssembler::Lzcntl(Register dst, Register src) {
if (CpuFeatures::IsSupported(LZCNT)) {
CpuFeatureScope scope(this, LZCNT);
@@ -2593,28 +2554,28 @@ void MacroAssembler::CmpInstanceTypeRange(Register map,
}
void MacroAssembler::AssertNotSmi(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Condition is_smi = CheckSmi(object);
Check(NegateCondition(is_smi), AbortReason::kOperandIsASmi);
}
}
void MacroAssembler::AssertSmi(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Condition is_smi = CheckSmi(object);
Check(is_smi, AbortReason::kOperandIsNotASmi);
}
}
void MacroAssembler::AssertSmi(Operand object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Condition is_smi = CheckSmi(object);
Check(is_smi, AbortReason::kOperandIsNotASmi);
}
}
void TurboAssembler::AssertZeroExtended(Register int32_register) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
DCHECK_NE(int32_register, kScratchRegister);
movq(kScratchRegister, int64_t{0x0000000100000000});
cmpq(kScratchRegister, int32_register);
@@ -2623,7 +2584,7 @@ void TurboAssembler::AssertZeroExtended(Register int32_register) {
}
void MacroAssembler::AssertConstructor(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
testb(object, Immediate(kSmiTagMask));
Check(not_equal, AbortReason::kOperandIsASmiAndNotAConstructor);
Push(object);
@@ -2636,7 +2597,7 @@ void MacroAssembler::AssertConstructor(Register object) {
}
void MacroAssembler::AssertFunction(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
testb(object, Immediate(kSmiTagMask));
Check(not_equal, AbortReason::kOperandIsASmiAndNotAFunction);
Push(object);
@@ -2648,7 +2609,7 @@ void MacroAssembler::AssertFunction(Register object) {
}
void MacroAssembler::AssertBoundFunction(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
testb(object, Immediate(kSmiTagMask));
Check(not_equal, AbortReason::kOperandIsASmiAndNotABoundFunction);
Push(object);
@@ -2659,7 +2620,7 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
void MacroAssembler::AssertGeneratorObject(Register object) {
- if (!emit_debug_code()) return;
+ if (!FLAG_debug_code) return;
testb(object, Immediate(kSmiTagMask));
Check(not_equal, AbortReason::kOperandIsASmiAndNotAGeneratorObject);
@@ -2687,12 +2648,16 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
}
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Label done_checking;
AssertNotSmi(object);
Cmp(object, isolate()->factory()->undefined_value());
j(equal, &done_checking);
- Cmp(FieldOperand(object, 0), isolate()->factory()->allocation_site_map());
+ Register map = object;
+ Push(object);
+ LoadMap(map, object);
+ Cmp(map, isolate()->factory()->allocation_site_map());
+ Pop(object);
Assert(equal, AbortReason::kExpectedUndefinedOrCell);
bind(&done_checking);
}
@@ -2737,20 +2702,6 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
}
}
-void MacroAssembler::MaybeDropFrames() {
- // Check whether we need to drop frames to restart a function on the stack.
- ExternalReference restart_fp =
- ExternalReference::debug_restart_fp_address(isolate());
- Load(rbx, restart_fp);
- testq(rbx, rbx);
-
- Label dont_drop;
- j(zero, &dont_drop, Label::kNear);
- Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET);
-
- bind(&dont_drop);
-}
-
void TurboAssembler::PrepareForTailCall(Register callee_args_count,
Register caller_args_count,
Register scratch0, Register scratch1) {
@@ -2801,32 +2752,32 @@ void TurboAssembler::PrepareForTailCall(Register callee_args_count,
void MacroAssembler::InvokeFunction(Register function, Register new_target,
Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
LoadTaggedPointerField(
rbx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
movzxwq(rbx,
FieldOperand(rbx, SharedFunctionInfo::kFormalParameterCountOffset));
- InvokeFunction(function, new_target, rbx, actual_parameter_count, flag);
+ InvokeFunction(function, new_target, rbx, actual_parameter_count, type);
}
void MacroAssembler::InvokeFunction(Register function, Register new_target,
Register expected_parameter_count,
Register actual_parameter_count,
- InvokeFlag flag) {
- DCHECK(function == rdi);
+ InvokeType type) {
+ DCHECK_EQ(function, rdi);
LoadTaggedPointerField(rsi,
FieldOperand(function, JSFunction::kContextOffset));
InvokeFunctionCode(rdi, new_target, expected_parameter_count,
- actual_parameter_count, flag);
+ actual_parameter_count, type);
}
void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count,
Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
DCHECK_EQ(function, rdi);
DCHECK_IMPLIES(new_target.is_valid(), new_target == rdx);
@@ -2848,17 +2799,19 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
}
Label done;
- InvokePrologue(expected_parameter_count, actual_parameter_count, &done, flag);
+ InvokePrologue(expected_parameter_count, actual_parameter_count, &done, type);
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
LoadTaggedPointerField(rcx, FieldOperand(function, JSFunction::kCodeOffset));
- if (flag == CALL_FUNCTION) {
- CallCodeObject(rcx);
- } else {
- DCHECK(flag == JUMP_FUNCTION);
- JumpCodeObject(rcx);
+ switch (type) {
+ case InvokeType::kCall:
+ CallCodeObject(rcx);
+ break;
+ case InvokeType::kJump:
+ JumpCodeObject(rcx);
+ break;
}
jmp(&done, Label::kNear);
@@ -2911,7 +2864,7 @@ void MacroAssembler::StackOverflowCheck(
void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count,
- Label* done, InvokeFlag flag) {
+ Label* done, InvokeType type) {
if (expected_parameter_count != actual_parameter_count) {
Label regular_invoke;
// If the expected parameter count is equal to the adaptor sentinel, no need
@@ -2937,9 +2890,9 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Operand(expected_parameter_count, times_system_pointer_size, 0));
AllocateStackSpace(kScratchRegister);
// Extra words are the receiver and the return address (if a jump).
- int extra_words = flag == CALL_FUNCTION ? 1 : 2;
+ int extra_words = type == InvokeType::kCall ? 1 : 2;
leaq(num, Operand(rax, extra_words)); // Number of words to copy.
- Set(current, 0);
+ Move(current, 0);
// Fall-through to the loop body because there are non-zero words to copy.
bind(&copy);
movq(kScratchRegister,
@@ -3034,7 +2987,7 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) {
// TODO(v8:11429): Consider passing BASELINE instead, and checking for
// IsJSFrame or similar. Could then unify with manual frame leaves in the
// interpreter too.
- if (emit_debug_code() && !StackFrame::IsJavaScript(type)) {
+ if (FLAG_debug_code && !StackFrame::IsJavaScript(type)) {
cmpq(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
Immediate(StackFrame::TypeToMarker(type)));
Check(equal, AbortReason::kStackFrameTypesMustMatch);
@@ -3043,11 +2996,11 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) {
popq(rbp);
}
-#ifdef V8_TARGET_OS_WIN
+#if defined(V8_TARGET_OS_WIN) || defined(V8_TARGET_OS_MACOSX)
void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
- // In windows, we cannot increment the stack size by more than one page
- // (minimum page size is 4KB) without accessing at least one byte on the
- // page. Check this:
+ // On Windows and on macOS, we cannot increment the stack size by more than
+ // one page (minimum page size is 4KB) without accessing at least one byte on
+ // the page. Check this:
// https://msdn.microsoft.com/en-us/library/aa227153(v=vs.60).aspx.
Label check_offset;
Label touch_next_page;
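
A conceptual model of the per-page probing this comment describes, with a
plain pointer standing in for rsp (illustrative only; the real routine is
emitted as assembly using the labels above):

    char* AllocateStackSpaceModel(char* sp, intptr_t bytes) {
      while (bytes > 4096) {  // kStackPageSize on Windows and macOS
        sp -= 4096;
        *sp = 0;              // touch one byte on the newly exposed page
        bytes -= 4096;
      }
      return sp - bytes;
    }
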
@@ -3278,7 +3231,7 @@ void TurboAssembler::CallCFunction(Register function, int num_arguments) {
DCHECK_LE(num_arguments, kMaxCParameters);
DCHECK(has_frame());
// Check stack alignment.
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
CheckStackAlignment();
}
@@ -3376,7 +3329,7 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
void TurboAssembler::ResetSpeculationPoisonRegister() {
// TODO(turbofan): Perhaps, we want to put an lfence here.
- Set(kSpeculationPoisonRegister, -1);
+ Move(kSpeculationPoisonRegister, -1);
}
void TurboAssembler::CallForDeoptimization(Builtins::Name target, int,
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.h b/deps/v8/src/codegen/x64/macro-assembler-x64.h
index 40ab1b925c..da5cf7dae3 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.h
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.h
@@ -14,6 +14,7 @@
#include "src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h"
#include "src/codegen/x64/assembler-x64.h"
#include "src/common/globals.h"
+#include "src/execution/isolate-data.h"
#include "src/objects/contexts.h"
#include "src/objects/tagged-index.h"
@@ -25,9 +26,6 @@ using MemOperand = Operand;
class StringConstantBase;
-enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
-enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
-
struct SmiIndex {
SmiIndex(Register index_register, ScaleFactor scale)
: reg(index_register), scale(scale) {}
@@ -65,124 +63,43 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
AVX_OP(Subsd, subsd)
AVX_OP(Divss, divss)
AVX_OP(Divsd, divsd)
- AVX_OP(Orps, orps)
- AVX_OP(Xorps, xorps)
- AVX_OP(Xorpd, xorpd)
- AVX_OP(Movq, movq)
- AVX_OP(Movhlps, movhlps)
- AVX_OP(Pcmpeqb, pcmpeqb)
- AVX_OP(Pcmpeqw, pcmpeqw)
- AVX_OP(Pcmpeqd, pcmpeqd)
- AVX_OP(Pcmpgtb, pcmpgtb)
AVX_OP(Pcmpgtw, pcmpgtw)
AVX_OP(Pmaxsw, pmaxsw)
- AVX_OP(Pmaxub, pmaxub)
AVX_OP(Pminsw, pminsw)
- AVX_OP(Pminub, pminub)
AVX_OP(Addss, addss)
AVX_OP(Addsd, addsd)
AVX_OP(Mulsd, mulsd)
- AVX_OP(Andps, andps)
- AVX_OP(Andnps, andnps)
- AVX_OP(Andpd, andpd)
- AVX_OP(Andnpd, andnpd)
- AVX_OP(Orpd, orpd)
AVX_OP(Cmpeqps, cmpeqps)
AVX_OP(Cmpltps, cmpltps)
- AVX_OP(Cmpleps, cmpleps)
AVX_OP(Cmpneqps, cmpneqps)
AVX_OP(Cmpnltps, cmpnltps)
AVX_OP(Cmpnleps, cmpnleps)
- AVX_OP(Cmpeqpd, cmpeqpd)
- AVX_OP(Cmpltpd, cmpltpd)
- AVX_OP(Cmplepd, cmplepd)
- AVX_OP(Cmpneqpd, cmpneqpd)
AVX_OP(Cmpnltpd, cmpnltpd)
AVX_OP(Cmpnlepd, cmpnlepd)
- AVX_OP(Sqrtss, sqrtss)
- AVX_OP(Sqrtsd, sqrtsd)
AVX_OP(Cvttpd2dq, cvttpd2dq)
AVX_OP(Ucomiss, ucomiss)
AVX_OP(Ucomisd, ucomisd)
- AVX_OP(Pand, pand)
- AVX_OP(Por, por)
- AVX_OP(Pxor, pxor)
- AVX_OP(Psubb, psubb)
- AVX_OP(Psubw, psubw)
- AVX_OP(Psubd, psubd)
- AVX_OP(Psubq, psubq)
- AVX_OP(Psubsb, psubsb)
AVX_OP(Psubsw, psubsw)
- AVX_OP(Psubusb, psubusb)
AVX_OP(Psubusw, psubusw)
- AVX_OP(Pslld, pslld)
- AVX_OP(Pavgb, pavgb)
- AVX_OP(Pavgw, pavgw)
- AVX_OP(Psraw, psraw)
- AVX_OP(Psrad, psrad)
- AVX_OP(Psllw, psllw)
- AVX_OP(Psllq, psllq)
- AVX_OP(Psrlw, psrlw)
- AVX_OP(Psrld, psrld)
- AVX_OP(Psrlq, psrlq)
- AVX_OP(Paddb, paddb)
- AVX_OP(Paddw, paddw)
- AVX_OP(Paddd, paddd)
- AVX_OP(Paddq, paddq)
- AVX_OP(Paddsb, paddsb)
AVX_OP(Paddsw, paddsw)
- AVX_OP(Paddusb, paddusb)
- AVX_OP(Paddusw, paddusw)
AVX_OP(Pcmpgtd, pcmpgtd)
- AVX_OP(Pmuludq, pmuludq)
- AVX_OP(Addpd, addpd)
- AVX_OP(Subpd, subpd)
- AVX_OP(Mulpd, mulpd)
- AVX_OP(Minps, minps)
- AVX_OP(Minpd, minpd)
- AVX_OP(Divpd, divpd)
- AVX_OP(Maxps, maxps)
- AVX_OP(Maxpd, maxpd)
- AVX_OP(Addps, addps)
- AVX_OP(Subps, subps)
- AVX_OP(Mulps, mulps)
- AVX_OP(Divps, divps)
- AVX_OP(Packsswb, packsswb)
- AVX_OP(Packuswb, packuswb)
- AVX_OP(Packssdw, packssdw)
- AVX_OP(Punpcklbw, punpcklbw)
- AVX_OP(Punpcklwd, punpcklwd)
- AVX_OP(Punpckldq, punpckldq)
- AVX_OP(Punpckhbw, punpckhbw)
- AVX_OP(Punpckhwd, punpckhwd)
- AVX_OP(Punpckhdq, punpckhdq)
- AVX_OP(Punpcklqdq, punpcklqdq)
- AVX_OP(Punpckhqdq, punpckhqdq)
- AVX_OP(Cmpps, cmpps)
- AVX_OP(Cmppd, cmppd)
+ AVX_OP(Pcmpeqb, pcmpeqb)
+ AVX_OP(Pcmpeqw, pcmpeqw)
+ AVX_OP(Pcmpeqd, pcmpeqd)
AVX_OP(Movlhps, movlhps)
- AVX_OP_SSE3(Haddps, haddps)
AVX_OP_SSSE3(Phaddd, phaddd)
AVX_OP_SSSE3(Phaddw, phaddw)
AVX_OP_SSSE3(Pshufb, pshufb)
- AVX_OP_SSSE3(Psignb, psignb)
- AVX_OP_SSSE3(Psignw, psignw)
- AVX_OP_SSSE3(Psignd, psignd)
- AVX_OP_SSSE3(Palignr, palignr)
AVX_OP_SSE4_1(Pcmpeqq, pcmpeqq)
AVX_OP_SSE4_1(Packusdw, packusdw)
- AVX_OP_SSE4_1(Pminsb, pminsb)
AVX_OP_SSE4_1(Pminsd, pminsd)
AVX_OP_SSE4_1(Pminuw, pminuw)
AVX_OP_SSE4_1(Pminud, pminud)
- AVX_OP_SSE4_1(Pmaxsb, pmaxsb)
- AVX_OP_SSE4_1(Pmaxsd, pmaxsd)
AVX_OP_SSE4_1(Pmaxuw, pmaxuw)
AVX_OP_SSE4_1(Pmaxud, pmaxud)
AVX_OP_SSE4_1(Pmulld, pmulld)
AVX_OP_SSE4_1(Insertps, insertps)
AVX_OP_SSE4_1(Pinsrq, pinsrq)
- AVX_OP_SSE4_1(Pblendw, pblendw)
AVX_OP_SSE4_1(Pextrq, pextrq)
AVX_OP_SSE4_1(Roundss, roundss)
AVX_OP_SSE4_1(Roundsd, roundsd)
@@ -190,6 +107,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
#undef AVX_OP
+ // Define Movq here instead of using AVX_OP. movq is defined using templates,
+ // and the function template `void movq(P1)`, while technically impossible to
+ // instantiate, would otherwise be selected when deducing the arguments for
+ // AvxHelper.
+ void Movq(XMMRegister dst, Register src);
+ void Movq(Register dst, XMMRegister src);
+
void PushReturnAddressFrom(Register src) { pushq(src); }
void PopReturnAddressTo(Register dst) { popq(dst); }
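The Movq comment above describes a C++ overload-resolution pitfall; a reduced, self-contained illustration follows (Assembler and Dispatch are stand-ins, not V8 types):

// Once an overload set contains a function template, deducing a member
// function pointer type from that set fails, so an AvxHelper-style
// forwarder cannot pick the right movq; writing Movq by hand sidesteps it.
struct Assembler {
  void movq(int, int) {}
  template <typename P1>
  void movq(P1) {}  // the template overload that breaks deduction
};

template <typename... Ts>
void Dispatch(void (Assembler::*)(Ts...)) {}

int main() {
  // Dispatch(&Assembler::movq);        // ill-formed: Ts cannot be deduced
  Dispatch<int, int>(&Assembler::movq);  // explicit arguments resolve it
}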
@@ -199,10 +122,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
// may be bigger than 2^16 - 1. Requires a scratch register.
void Ret(int bytes_dropped, Register scratch);
- // Load a register with a long value as efficiently as possible.
- void Set(Register dst, int64_t x);
- void Set(Operand dst, intptr_t x);
-
// Operations on roots in the root-array.
void LoadRoot(Register destination, RootIndex index) override;
void LoadRoot(Operand destination, RootIndex index) {
@@ -323,8 +242,28 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
j(less, dest);
}
+#ifdef V8_MAP_PACKING
+ void UnpackMapWord(Register r);
+#endif
+
void LoadMap(Register destination, Register object);
+ void Move(Register dst, intptr_t x) {
+ if (x == 0) {
+ xorl(dst, dst);
+ } else if (is_uint8(x)) {
+ xorl(dst, dst);
+ movb(dst, Immediate(static_cast<uint32_t>(x)));
+ } else if (is_uint32(x)) {
+ movl(dst, Immediate(static_cast<uint32_t>(x)));
+ } else if (is_int32(x)) {
+ // "movq reg64, imm32" is sign extending.
+ movq(dst, Immediate(static_cast<int32_t>(x)));
+ } else {
+ movq(dst, Immediate64(x));
+ }
+ }
+ void Move(Operand dst, intptr_t x);
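The new Move(Register, intptr_t) above picks the shortest x64 encoding by value range. The same classification, restated as standalone code with the range checks written out (in V8 these are the is_uint8/is_uint32/is_int32 helpers):

#include <cstdint>

enum class Encoding {
  kXorZero,         // xorl dst, dst
  kXorThenMovb,     // zero the register, then an 8-bit move
  kMovlZeroExtend,  // movl writes zeros to the upper half
  kMovqSignExtend,  // movq with imm32 sign-extends
  kMovqImm64        // full 64-bit immediate (movabs)
};

// Mirrors the branch order above: prefer the shortest encoding first.
Encoding Classify(intptr_t x) {
  if (x == 0) return Encoding::kXorZero;
  if (x >= 0 && x <= 0xFF) return Encoding::kXorThenMovb;
  if (x >= 0 && x <= INT64_C(0xFFFFFFFF)) return Encoding::kMovlZeroExtend;
  if (x >= INT32_MIN && x <= INT32_MAX) return Encoding::kMovqSignExtend;
  return Encoding::kMovqImm64;
}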
void Move(Register dst, Smi source);
void Move(Operand dst, Smi source) {
@@ -332,13 +271,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
movq(dst, constant);
}
- void Move(Register dst, TaggedIndex source) {
- movl(dst, Immediate(static_cast<uint32_t>(source.ptr())));
- }
+ void Move(Register dst, TaggedIndex source) { Move(dst, source.ptr()); }
- void Move(Operand dst, TaggedIndex source) {
- movl(dst, Immediate(static_cast<uint32_t>(source.ptr())));
- }
+ void Move(Operand dst, TaggedIndex source) { Move(dst, source.ptr()); }
void Move(Register dst, ExternalReference ext);
@@ -449,10 +384,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
void Pmaddubsw(XMMRegister dst, XMMRegister src1, Operand src2);
void Pmaddubsw(XMMRegister dst, XMMRegister src1, XMMRegister src2);
- void Unpcklps(XMMRegister dst, XMMRegister src1, Operand src2);
- // Shufps that will mov src1 into dst if AVX is not supported.
- void Shufps(XMMRegister dst, XMMRegister src1, XMMRegister src2, byte imm8);
-
// Non-SSE2 instructions.
void Pextrd(Register dst, XMMRegister src, uint8_t imm8);
@@ -467,16 +398,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
void Pinsrq(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8);
void Pinsrq(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8);
- void Psllq(XMMRegister dst, int imm8) { Psllq(dst, static_cast<byte>(imm8)); }
- void Psllq(XMMRegister dst, byte imm8);
- void Psrlq(XMMRegister dst, int imm8) { Psrlq(dst, static_cast<byte>(imm8)); }
- void Psrlq(XMMRegister dst, byte imm8);
- void Pslld(XMMRegister dst, byte imm8);
- void Psrld(XMMRegister dst, byte imm8);
-
- // Supports both AVX (dst != src1) and SSE (checks that dst == src1).
- void Psrld(XMMRegister dst, XMMRegister src, byte imm8);
-
void Pblendvb(XMMRegister dst, XMMRegister src1, XMMRegister src2,
XMMRegister mask);
void Blendvps(XMMRegister dst, XMMRegister src1, XMMRegister src2,
@@ -551,7 +472,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
// stack check, do it before calling this function because this function may
// write into the newly allocated space. It may also overwrite the given
// register's value in the version that takes a register.
-#ifdef V8_TARGET_OS_WIN
+#if defined(V8_TARGET_OS_WIN) || defined(V8_TARGET_OS_MACOSX)
void AllocateStackSpace(Register bytes_scratch);
void AllocateStackSpace(int bytes);
#else
@@ -575,7 +496,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
Move(kRootRegister, isolate_root);
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
- Move(kPointerCageBaseRegister, isolate_root);
+ LoadRootRelative(kPtrComprCageBaseRegister,
+ IsolateData::cage_base_offset());
#endif
}
@@ -675,10 +597,13 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
// ---------------------------------------------------------------------------
// V8 Heap sandbox support
+ enum class IsolateRootLocation { kInScratchRegister, kInRootRegister };
// Loads a field containing an off-heap pointer and does the necessary
// decoding if the V8 heap sandbox is enabled.
void LoadExternalPointerField(Register destination, Operand field_operand,
- ExternalPointerTag tag);
+ ExternalPointerTag tag, Register scratch,
+ IsolateRootLocation isolateRootLocation =
+ IsolateRootLocation::kInRootRegister);
protected:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
@@ -751,8 +676,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void RecordWriteField(
Register object, int offset, Register value, Register scratch,
SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
// For page containing |object| mark region covering |address|
// dirty. |object| is the object being stored into, |value| is the
@@ -761,11 +686,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// the write barrier if the value is a smi.
void RecordWrite(
Register object, Register address, Register value, SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
-
- // Frame restart support.
- void MaybeDropFrames();
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
// Enter specific kind of exit frame; either in normal or
// debug mode. Expects the number of arguments in register rax and
@@ -797,7 +719,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Invoke the JavaScript function code by either calling or jumping.
void InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count,
- Register actual_parameter_count, InvokeFlag flag);
+ Register actual_parameter_count, InvokeType type);
// On function call, call into the debugger.
void CallDebugOnFunctionCall(Register fun, Register new_target,
@@ -807,11 +729,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function, Register new_target,
- Register actual_parameter_count, InvokeFlag flag);
+ Register actual_parameter_count, InvokeType type);
void InvokeFunction(Register function, Register new_target,
Register expected_parameter_count,
- Register actual_parameter_count, InvokeFlag flag);
+ Register actual_parameter_count, InvokeType type);
// ---------------------------------------------------------------------------
// Conversions between tagged smi values and non-tagged integer values.
@@ -975,18 +897,18 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Call a runtime routine.
void CallRuntime(const Runtime::Function* f, int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore);
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, save_doubles);
}
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid, int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
}
@@ -1017,7 +939,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Helper functions for generating invokes.
void InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count, Label* done,
- InvokeFlag flag);
+ InvokeType type);
void EnterExitFramePrologue(Register saved_rax_reg,
StackFrame::Type frame_type);
diff --git a/deps/v8/src/codegen/x64/register-x64.h b/deps/v8/src/codegen/x64/register-x64.h
index 9a812d06a1..61e7ccf396 100644
--- a/deps/v8/src/codegen/x64/register-x64.h
+++ b/deps/v8/src/codegen/x64/register-x64.h
@@ -212,7 +212,7 @@ constexpr Register kReturnRegister2 = r8;
constexpr Register kJSFunctionRegister = rdi;
constexpr Register kContextRegister = rsi;
constexpr Register kAllocateSizeRegister = rdx;
-constexpr Register kSpeculationPoisonRegister = r12;
+constexpr Register kSpeculationPoisonRegister = r11;
constexpr Register kInterpreterAccumulatorRegister = rax;
constexpr Register kInterpreterBytecodeOffsetRegister = r9;
constexpr Register kInterpreterBytecodeArrayRegister = r12;
@@ -236,9 +236,9 @@ constexpr Register kScratchRegister = r10;
constexpr XMMRegister kScratchDoubleReg = xmm15;
constexpr Register kRootRegister = r13; // callee save
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
-constexpr Register kPointerCageBaseRegister = r14; // callee save
+constexpr Register kPtrComprCageBaseRegister = r14; // callee save
#else
-constexpr Register kPointerCageBaseRegister = kRootRegister;
+constexpr Register kPtrComprCageBaseRegister = kRootRegister;
#endif
constexpr Register kOffHeapTrampolineRegister = kScratchRegister;
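The renamed kPtrComprCageBaseRegister (r14 when a shared cage is configured) holds the base address that compressed tagged pointers are relative to; decompression is a 32-bit offset added to that base. A sketch under that assumption, not V8's actual decompression code:

#include <cstdint>

// Model of pointer decompression against the cage base held in r14:
// a compressed tagged value is zero-extended and added to the base.
uintptr_t DecompressTagged(uintptr_t cage_base, uint32_t compressed) {
  return cage_base + compressed;
}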