Diffstat (limited to 'deps/v8/src/codegen/arm')
 deps/v8/src/codegen/arm/assembler-arm-inl.h             |   2
 deps/v8/src/codegen/arm/assembler-arm.cc                | 148
 deps/v8/src/codegen/arm/assembler-arm.h                 |  77
 deps/v8/src/codegen/arm/cpu-arm.cc                      |   2
 deps/v8/src/codegen/arm/interface-descriptors-arm-inl.h | 256
 deps/v8/src/codegen/arm/interface-descriptors-arm.cc    | 306
 deps/v8/src/codegen/arm/macro-assembler-arm.cc          |  98
 deps/v8/src/codegen/arm/macro-assembler-arm.h           |  27
 8 files changed, 460 insertions(+), 456 deletions(-)
diff --git a/deps/v8/src/codegen/arm/assembler-arm-inl.h b/deps/v8/src/codegen/arm/assembler-arm-inl.h
index 7035fa2492..f72e27703e 100644
--- a/deps/v8/src/codegen/arm/assembler-arm-inl.h
+++ b/deps/v8/src/codegen/arm/assembler-arm-inl.h
@@ -206,7 +206,7 @@ Operand::Operand(Smi value) : rmode_(RelocInfo::NONE) {
Operand::Operand(Register rm) : rm_(rm), shift_op_(LSL), shift_imm_(0) {}
void Assembler::CheckBuffer() {
- if (buffer_space() <= kGap) {
+ if (V8_UNLIKELY(buffer_space() <= kGap)) {
GrowBuffer();
}
MaybeCheckConstPool();
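For reference, the V8_UNLIKELY hint used above comes from include/v8config.h. Simplified (the real definition sits behind feature-detection macros), it expands to a __builtin_expect branch hint where the compiler supports one:

```cpp
// Simplified sketch of V8's hint macros (see include/v8config.h); compilers
// without __builtin_expect get plain pass-through definitions.
#if defined(__GNUC__) || defined(__clang__)
#define V8_UNLIKELY(condition) (__builtin_expect(!!(condition), 0))
#define V8_LIKELY(condition) (__builtin_expect(!!(condition), 1))
#else
#define V8_UNLIKELY(condition) (condition)
#define V8_LIKELY(condition) (condition)
#endif
```

The hint lets the compiler lay out the GrowBuffer() call out of line, keeping the common no-grow path fall-through.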
diff --git a/deps/v8/src/codegen/arm/assembler-arm.cc b/deps/v8/src/codegen/arm/assembler-arm.cc
index 17a20a6f97..09c57928ff 100644
--- a/deps/v8/src/codegen/arm/assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/assembler-arm.cc
@@ -534,9 +534,8 @@ Assembler::Assembler(const AssemblerOptions& options,
: AssemblerBase(options, std::move(buffer)),
pending_32_bit_constants_(),
scratch_register_list_(ip.bit()) {
- pending_32_bit_constants_.reserve(kMinNumPendingConstants);
reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
- next_buffer_check_ = 0;
+ constant_pool_deadline_ = kMaxInt;
const_pool_blocked_nesting_ = 0;
no_const_pool_before_ = 0;
first_const_pool_32_use_ = -1;
@@ -556,7 +555,10 @@ Assembler::Assembler(const AssemblerOptions& options,
}
}
-Assembler::~Assembler() { DCHECK_EQ(const_pool_blocked_nesting_, 0); }
+Assembler::~Assembler() {
+ DCHECK_EQ(const_pool_blocked_nesting_, 0);
+ DCHECK_EQ(first_const_pool_32_use_, -1);
+}
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
SafepointTableBuilder* safepoint_table_builder,
@@ -841,7 +843,7 @@ void Assembler::target_at_put(int pos, int target_pos) {
// orr dst, dst, #target8_2 << 16
uint32_t target24 = target_pos + (Code::kHeaderSize - kHeapObjectTag);
- DCHECK(is_uint24(target24));
+ CHECK(is_uint24(target24));
if (is_uint8(target24)) {
// If the target fits in a byte then only patch with a mov
// instruction.
@@ -897,7 +899,7 @@ void Assembler::target_at_put(int pos, int target_pos) {
instr &= ~kImm24Mask;
}
int imm24 = imm26 >> 2;
- DCHECK(is_int24(imm24));
+ CHECK(is_int24(imm24));
instr_at_put(pos, instr | (imm24 & kImm24Mask));
}
@@ -1030,10 +1032,53 @@ namespace {
bool FitsShifter(uint32_t imm32, uint32_t* rotate_imm, uint32_t* immed_8,
Instr* instr) {
// imm32 must be unsigned.
- for (int rot = 0; rot < 16; rot++) {
- uint32_t imm8 = base::bits::RotateLeft32(imm32, 2 * rot);
- if ((imm8 <= 0xFF)) {
- *rotate_imm = rot;
+ {
+ // 32-bit immediates can be encoded as:
+ // (8-bit value, 2*N bit left rotation)
+ // e.g. 0xab00 can be encoded as 0xab shifted left by 8 == 2*4, i.e.
+ // (0xab, 4)
+ //
+ // Check three categories which cover all possible shifter fits:
+ // 1. 0x000000FF: The value is already 8-bit (no shifting necessary),
+ // 2. 0x000FF000: The 8-bit value is somewhere in the middle of the 32-bit
+ // value, and
+ // 3. 0xF000000F: The 8-bit value is split over the beginning and end of
+ // the 32-bit value.
+
+ // For 0x000000FF.
+ if (imm32 <= 0xFF) {
+ *rotate_imm = 0;
+ *immed_8 = imm32;
+ return true;
+ }
+ // For 0x000FF000, count trailing zeros and shift down to 0x000000FF. Note
+ // that we have to round the trailing zeros down to the nearest multiple of
+ // two, since we can only encode shifts of 2*N. Note also that we know that
+ // imm32 isn't zero, since we already returned for values <= 0xFF.
+ int half_trailing_zeros = base::bits::CountTrailingZerosNonZero(imm32) / 2;
+ uint32_t imm8 = imm32 >> (half_trailing_zeros * 2);
+ if (imm8 <= 0xFF) {
+ DCHECK_GT(half_trailing_zeros, 0);
+ // Rotating right by trailing_zeros is equivalent to rotating left by
+ // 32 - trailing_zeros. We return rotate_right / 2, so calculate
+ // (32 - trailing_zeros)/2 == 16 - trailing_zeros/2.
+ *rotate_imm = (16 - half_trailing_zeros);
+ *immed_8 = imm8;
+ return true;
+ }
+ // For 0xF000000F, rotate by 16 to get 0x000FF000 and continue as if it
+ // were that case.
+ uint32_t imm32_rot16 = base::bits::RotateLeft32(imm32, 16);
+ half_trailing_zeros =
+ base::bits::CountTrailingZerosNonZero(imm32_rot16) / 2;
+ imm8 = imm32_rot16 >> (half_trailing_zeros * 2);
+ if (imm8 <= 0xFF) {
+ // We've rotated left by 2*8, so we can't have more than that many
+ // trailing zeroes.
+ DCHECK_LT(half_trailing_zeros, 8);
+ // We've already rotated by 2*8, before calculating trailing_zeros/2,
+ // so we need (32 - (16 + trailing_zeros))/2 == 8 - trailing_zeros/2.
+ *rotate_imm = 8 - half_trailing_zeros;
*immed_8 = imm8;
return true;
}
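The three fast-path categories above replace the removed 16-iteration rotation loop. As a sanity check, a standalone brute-force reference mirroring that old loop can be run against a sample value from each category; this is an illustrative sketch, not V8 code:

```cpp
#include <cassert>
#include <cstdint>
#include <cstdio>

// Brute-force reference mirroring the removed loop: an immediate fits if it
// equals an 8-bit value rotated right by 2*rot for some rot in [0, 16).
// Rotating *left* by 2*rot here undoes that right rotation.
bool FitsShifterReference(uint32_t imm32, uint32_t* rotate_imm,
                          uint32_t* immed_8) {
  for (uint32_t rot = 0; rot < 16; rot++) {
    uint32_t imm8 = (imm32 << (2 * rot)) | (imm32 >> ((32 - 2 * rot) & 31));
    if (imm8 <= 0xFF) {
      *rotate_imm = rot;  // Encoded rotation: rotate right by 2*rot.
      *immed_8 = imm8;
      return true;
    }
  }
  return false;
}

int main() {
  uint32_t rot, imm8;
  // Category 1: already 8-bit, no rotation.
  assert(FitsShifterReference(0xFF, &rot, &imm8) && rot == 0 && imm8 == 0xFF);
  // Category 2: 0xAB00 == 0xAB rotated right by 24, so rotate_imm == 12.
  assert(FitsShifterReference(0xAB00, &rot, &imm8) && rot == 12);
  // Category 3: 0xF000000F == 0xFF rotated right by 4, so rotate_imm == 2.
  assert(FitsShifterReference(0xF000000F, &rot, &imm8) && rot == 2);
  // Two set bits 8 apart can never fit in an 8-bit window.
  assert(!FitsShifterReference(0x101, &rot, &imm8));
  printf("all shifter-fit samples behave as expected\n");
  return 0;
}
```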
@@ -2258,7 +2303,7 @@ void Assembler::bkpt(uint32_t imm16) {
}
void Assembler::svc(uint32_t imm24, Condition cond) {
- DCHECK(is_uint24(imm24));
+ CHECK(is_uint24(imm24));
emit(cond | 15 * B24 | imm24);
}
@@ -5204,8 +5249,13 @@ void Assembler::ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
(rmode == RelocInfo::CODE_TARGET && value != 0) ||
(RelocInfo::IsEmbeddedObjectMode(rmode) && value != 0);
DCHECK_LT(pending_32_bit_constants_.size(), kMaxNumPending32Constants);
- if (pending_32_bit_constants_.empty()) {
+ if (first_const_pool_32_use_ < 0) {
+ DCHECK(pending_32_bit_constants_.empty());
+ DCHECK_EQ(constant_pool_deadline_, kMaxInt);
first_const_pool_32_use_ = position;
+ constant_pool_deadline_ = position + kCheckPoolDeadline;
+ } else {
+ DCHECK(!pending_32_bit_constants_.empty());
}
ConstantPoolEntry entry(position, value, sharing_ok, rmode);
@@ -5224,7 +5274,7 @@ void Assembler::ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
}
}
- pending_32_bit_constants_.push_back(entry);
+ pending_32_bit_constants_.emplace_back(entry);
// Make sure the constant pool is not emitted in place of the next
// instruction for which we just recorded relocation info.
@@ -5239,17 +5289,17 @@ void Assembler::ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
void Assembler::BlockConstPoolFor(int instructions) {
int pc_limit = pc_offset() + instructions * kInstrSize;
if (no_const_pool_before_ < pc_limit) {
- // Max pool start (if we need a jump and an alignment).
-#ifdef DEBUG
- int start = pc_limit + kInstrSize + 2 * kPointerSize;
- DCHECK(pending_32_bit_constants_.empty() ||
- (start < first_const_pool_32_use_ + kMaxDistToIntPool));
-#endif
no_const_pool_before_ = pc_limit;
}
- if (next_buffer_check_ < no_const_pool_before_) {
- next_buffer_check_ = no_const_pool_before_;
+ // If we're due a const pool check before the block finishes, move it to just
+ // after the block.
+ if (constant_pool_deadline_ < no_const_pool_before_) {
+ // Make sure that the new deadline isn't too late (including a jump and the
+ // constant pool marker).
+ DCHECK_LE(no_const_pool_before_,
+ first_const_pool_32_use_ + kMaxDistToIntPool);
+ constant_pool_deadline_ = no_const_pool_before_;
}
}
@@ -5265,49 +5315,44 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// There is nothing to do if there are no pending constant pool entries.
if (pending_32_bit_constants_.empty()) {
- // Calculate the offset of the next check.
- next_buffer_check_ = pc_offset() + kCheckPoolInterval;
+ // We should only fall into this case if we're either trying to force
+ // emission or opportunistically checking after a jump.
+ DCHECK(force_emit || !require_jump);
return;
}
- // Check that the code buffer is large enough before emitting the constant
- // pool (include the jump over the pool and the constant pool marker and
- // the gap to the relocation information).
- int jump_instr = require_jump ? kInstrSize : 0;
- int size_up_to_marker = jump_instr + kInstrSize;
- int estimated_size_after_marker =
- pending_32_bit_constants_.size() * kPointerSize;
- int estimated_size = size_up_to_marker + estimated_size_after_marker;
-
// We emit a constant pool when:
// * requested to do so by parameter force_emit (e.g. after each function).
// * the distance from the first instruction accessing the constant pool to
- // any of the constant pool entries will exceed its limit the next
- // time the pool is checked. This is overly restrictive, but we don't emit
- // constant pool entries in-order so it's conservatively correct.
+ // the first constant pool entry will exceed its limit the next time the
+ // pool is checked.
// * the instruction doesn't require a jump after itself to jump over the
// constant pool, and we're getting close to running out of range.
if (!force_emit) {
- DCHECK(!pending_32_bit_constants_.empty());
- bool need_emit = false;
- int dist32 = pc_offset() + estimated_size - first_const_pool_32_use_;
- if ((dist32 >= kMaxDistToIntPool - kCheckPoolInterval) ||
- (!require_jump && (dist32 >= kMaxDistToIntPool / 2))) {
- need_emit = true;
+ DCHECK_NE(first_const_pool_32_use_, -1);
+ int dist32 = pc_offset() - first_const_pool_32_use_;
+ if (require_jump) {
+ // We should only be on this path if we've exceeded our deadline.
+ DCHECK_GE(dist32, kCheckPoolDeadline);
+ } else if (dist32 < kCheckPoolDeadline / 2) {
+ return;
}
- if (!need_emit) return;
}
- // Deduplicate constants.
- int size_after_marker = estimated_size_after_marker;
+ int size_after_marker = pending_32_bit_constants_.size() * kPointerSize;
+ // Deduplicate constants.
for (size_t i = 0; i < pending_32_bit_constants_.size(); i++) {
ConstantPoolEntry& entry = pending_32_bit_constants_[i];
if (entry.is_merged()) size_after_marker -= kPointerSize;
}
+ // Check that the code buffer is large enough before emitting the constant
+ // pool (this includes the jump over the pool, the constant pool marker,
+ // and the gap to the relocation information).
+ int jump_instr = require_jump ? kInstrSize : 0;
+ int size_up_to_marker = jump_instr + kInstrSize;
int size = size_up_to_marker + size_after_marker;
-
int needed_space = size + kGap;
while (buffer_space() <= needed_space) GrowBuffer();
@@ -5331,6 +5376,14 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
emit(kConstantPoolMarker |
EncodeConstantPoolLength(size_after_marker / kPointerSize));
+ // The first entry in the constant pool should also be the first use of
+ // the constant pool.
+ CHECK_EQ(first_const_pool_32_use_, pending_32_bit_constants_[0].position());
+ CHECK(!pending_32_bit_constants_[0].is_merged());
+
+ // Make sure we're not emitting the constant too late.
+ CHECK_LE(pc_offset(),
+ first_const_pool_32_use_ + kMaxDistToPcRelativeConstant);
+
// Emit 32-bit constant pool entries.
for (size_t i = 0; i < pending_32_bit_constants_.size(); i++) {
ConstantPoolEntry& entry = pending_32_bit_constants_[i];
@@ -5354,6 +5407,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
ConstantPoolEntry& merged =
pending_32_bit_constants_[entry.merged_index()];
DCHECK(entry.value() == merged.value());
+ DCHECK_LT(merged.position(), entry.position());
Instr merged_instr = instr_at(merged.position());
DCHECK(IsLdrPcImmediateOffset(merged_instr));
delta = GetLdrRegisterImmediateOffset(merged_instr);
@@ -5379,9 +5433,9 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
}
}
- // Since a constant pool was just emitted, move the check offset forward by
- // the standard interval.
- next_buffer_check_ = pc_offset() + kCheckPoolInterval;
+ // Since a constant pool was just emitted, we don't need another check until
+ // the next constant pool entry is added.
+ constant_pool_deadline_ = kMaxInt;
}
PatchingAssembler::PatchingAssembler(const AssemblerOptions& options,
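Taken together, ConstantPoolAddEntry, BlockConstPoolFor and CheckConstPool now maintain one invariant: constant_pool_deadline_ is kMaxInt while nothing is pending, and first_const_pool_32_use_ + kCheckPoolDeadline otherwise. A toy model of that lifecycle, illustrative only (4031 is an assumed value of kCheckPoolDeadline; see the header changes below):

```cpp
#include <cassert>
#include <climits>

// Toy model of the deadline bookkeeping in this patch. kCheckPoolDeadline is
// passed as a parameter; in the real assembler it is a compile-time constant.
struct ConstPoolState {
  int first_use = -1;
  int deadline = INT_MAX;

  void AddEntry(int position, int check_pool_deadline) {
    if (first_use < 0) {  // The first pending constant arms the deadline.
      first_use = position;
      deadline = position + check_pool_deadline;
    }
  }
  // MaybeCheckConstPool only does work once the deadline is reached.
  bool CheckDue(int pc_offset) const { return pc_offset >= deadline; }
  // After emission (or aborted codegen), everything resets.
  void Reset() {
    first_use = -1;
    deadline = INT_MAX;
  }
};

int main() {
  ConstPoolState state;
  assert(!state.CheckDue(/*pc_offset=*/100));  // Nothing pending yet.
  state.AddEntry(/*position=*/100, /*check_pool_deadline=*/4031);
  assert(!state.CheckDue(4130));  // Still before 100 + 4031.
  assert(state.CheckDue(4131));   // Deadline hit.
  state.Reset();
  assert(!state.CheckDue(1 << 20));  // Disarmed until the next entry.
  return 0;
}
```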
diff --git a/deps/v8/src/codegen/arm/assembler-arm.h b/deps/v8/src/codegen/arm/assembler-arm.h
index e0490a6853..04d5eef054 100644
--- a/deps/v8/src/codegen/arm/assembler-arm.h
+++ b/deps/v8/src/codegen/arm/assembler-arm.h
@@ -45,6 +45,7 @@
#include <memory>
#include <vector>
+#include "src/base/small-vector.h"
#include "src/codegen/arm/constants-arm.h"
#include "src/codegen/arm/register-arm.h"
#include "src/codegen/assembler.h"
@@ -310,7 +311,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
~Assembler() override;
- void AbortedCodeGeneration() override { pending_32_bit_constants_.clear(); }
+ void AbortedCodeGeneration() override {
+ pending_32_bit_constants_.clear();
+ first_const_pool_32_use_ = -1;
+ }
// GetCode emits any pending (non-emitted) code and fills the descriptor desc.
static constexpr int kNoHandlerTable = 0;
@@ -1148,13 +1152,24 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static int DecodeShiftImm(Instr instr);
static Instr PatchShiftImm(Instr instr, int immed);
- // Constants in pools are accessed via pc relative addressing, which can
- // reach +/-4KB for integer PC-relative loads and +/-1KB for floating-point
+ // Constants are accessed via PC-relative addressing, which can reach -4095
+ // to 4095 for integer PC-relative loads, and -1020 to 1020 for floating-point
// PC-relative loads, thereby defining a maximum distance between the
- // instruction and the accessed constant.
- static constexpr int kMaxDistToIntPool = 4 * KB;
- // All relocations could be integer, it therefore acts as the limit.
- static constexpr int kMinNumPendingConstants = 4;
+ // instruction and the accessed constant. Additionally, PC-relative loads
+ // start at a delta from the actual load instruction's PC, so we can add this
+ // on to the (positive) distance.
+ static constexpr int kMaxDistToPcRelativeConstant =
+ 4095 + Instruction::kPcLoadDelta;
+ // The constant pool needs to be jumped over, and has a marker, so the
+ // actual distance between the instruction and the start of the constant
+ // pool has to include space for these two instructions.
+ static constexpr int kMaxDistToIntPool =
+ kMaxDistToPcRelativeConstant - 2 * kInstrSize;
+ // Experimentally derived as sufficient for ~95% of compiles.
+ static constexpr int kTypicalNumPending32Constants = 32;
+ // The maximum number of pending constants is reached by a sequence of only
+ // constant loads, which bounds it by the number of constant loads that can
+ // fit within the maximum distance to the constant pool.
static constexpr int kMaxNumPending32Constants =
kMaxDistToIntPool / kInstrSize;
@@ -1165,8 +1180,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Check if is time to emit a constant pool.
void CheckConstPool(bool force_emit, bool require_jump);
- void MaybeCheckConstPool() {
- if (pc_offset() >= next_buffer_check_) {
+ V8_INLINE void MaybeCheckConstPool() {
+ if (V8_UNLIKELY(pc_offset() >= constant_pool_deadline_)) {
CheckConstPool(false, true);
}
}
@@ -1192,9 +1207,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// number of call to EndBlockConstpool.
void StartBlockConstPool() {
if (const_pool_blocked_nesting_++ == 0) {
- // Prevent constant pool checks happening by setting the next check to
- // the biggest possible offset.
- next_buffer_check_ = kMaxInt;
+ // Prevent constant pool checks happening by resetting the deadline.
+ constant_pool_deadline_ = kMaxInt;
}
}
@@ -1202,19 +1216,14 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// StartBlockConstPool to have an effect.
void EndBlockConstPool() {
if (--const_pool_blocked_nesting_ == 0) {
+ if (first_const_pool_32_use_ >= 0) {
#ifdef DEBUG
- // Max pool start (if we need a jump and an alignment).
- int start = pc_offset() + kInstrSize + 2 * kPointerSize;
- // Check the constant pool hasn't been blocked for too long.
- DCHECK(pending_32_bit_constants_.empty() ||
- (start < first_const_pool_32_use_ + kMaxDistToIntPool));
+ // Check the constant pool hasn't been blocked for too long.
+ DCHECK_LE(pc_offset(), first_const_pool_32_use_ + kMaxDistToIntPool);
#endif
- // Two cases:
- // * no_const_pool_before_ >= next_buffer_check_ and the emission is
- // still blocked
- // * no_const_pool_before_ < next_buffer_check_ and the next emit will
- // trigger a check.
- next_buffer_check_ = no_const_pool_before_;
+ // Reset the constant pool check back to the deadline.
+ constant_pool_deadline_ = first_const_pool_32_use_ + kCheckPoolDeadline;
+ }
}
}
@@ -1258,7 +1267,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// pending relocation entry per instruction.
// The buffers of pending constant pool entries.
- std::vector<ConstantPoolEntry> pending_32_bit_constants_;
+ base::SmallVector<ConstantPoolEntry, kTypicalNumPending32Constants>
+ pending_32_bit_constants_;
// Scratch registers available for use by the Assembler.
RegList scratch_register_list_;
@@ -1268,8 +1278,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Avoid overflows for displacements etc.
static const int kMaximalBufferSize = 512 * MB;
- int next_buffer_check_; // pc offset of next buffer check
-
// Constant pool generation
// Pools are emitted in the instruction stream, preferably after unconditional
// jumps or after returns from functions (in dead code locations).
@@ -1281,11 +1289,16 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// if so, a relocation info entry is associated to the constant pool entry.
// Repeated checking whether the constant pool should be emitted is rather
- // expensive. By default we only check again once a number of instructions
- // has been generated. That also means that the sizing of the buffers is not
- // an exact science, and that we rely on some slop to not overrun buffers.
- static constexpr int kCheckPoolIntervalInst = 32;
- static constexpr int kCheckPoolInterval = kCheckPoolIntervalInst * kInstrSize;
+ // expensive. Instead, we check once a deadline is hit; the deadline being
+ // the point at which MaybeCheckConstPool might not be called again before
+ // kMaxDistToIntPool is exceeded. Since MaybeCheckConstPool is called from
+ // CheckBuffer, the check can land at most kGap bytes past the deadline. Use
+ // 2 * kGap to give some extra slack around BlockConstPoolScopes.
+ static constexpr int kCheckPoolDeadline = kMaxDistToIntPool - 2 * kGap;
+
+ // pc offset of the upcoming constant pool deadline. Equivalent to
+ // first_const_pool_32_use_ + kCheckPoolDeadline.
+ int constant_pool_deadline_;
// Emission of the constant pool may be blocked in some code sequences.
int const_pool_blocked_nesting_; // Block emission if this is not zero.
@@ -1298,7 +1311,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
- inline void CheckBuffer();
+ V8_INLINE void CheckBuffer();
void GrowBuffer();
// Instruction generation
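Plugging in the ARM values (assuming kPcLoadDelta == 8, kInstrSize == 4 and kGap == 32, their values in this version of the assembler) gives concrete numbers for the limits above:

```cpp
#include <cstdio>

int main() {
  // Assumed platform constants; see the hedge in the lead-in above.
  const int kPcLoadDelta = 8;  // ARM PC reads as current instruction + 8.
  const int kInstrSize = 4;
  const int kGap = 32;

  const int kMaxDistToPcRelativeConstant = 4095 + kPcLoadDelta;       // 4103
  const int kMaxDistToIntPool =
      kMaxDistToPcRelativeConstant - 2 * kInstrSize;                  // 4095
  const int kMaxNumPending32Constants = kMaxDistToIntPool / kInstrSize;  // 1023
  const int kCheckPoolDeadline = kMaxDistToIntPool - 2 * kGap;        // 4031

  printf("max dist to constant:  %d\n", kMaxDistToPcRelativeConstant);
  printf("max dist to pool:      %d\n", kMaxDistToIntPool);
  printf("max pending constants: %d\n", kMaxNumPending32Constants);
  printf("check deadline:        %d\n", kCheckPoolDeadline);
  return 0;
}
```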
diff --git a/deps/v8/src/codegen/arm/cpu-arm.cc b/deps/v8/src/codegen/arm/cpu-arm.cc
index 47fe4bdb74..88491c5e51 100644
--- a/deps/v8/src/codegen/arm/cpu-arm.cc
+++ b/deps/v8/src/codegen/arm/cpu-arm.cc
@@ -6,7 +6,7 @@
#ifdef __arm__
#ifdef __QNXNTO__
#include <sys/mman.h> // for cache flushing.
-#undef MAP_TYPE // NOLINT
+#undef MAP_TYPE
#elif V8_OS_FREEBSD
#include <machine/sysarch.h> // for cache flushing
#include <sys/types.h>
diff --git a/deps/v8/src/codegen/arm/interface-descriptors-arm-inl.h b/deps/v8/src/codegen/arm/interface-descriptors-arm-inl.h
new file mode 100644
index 0000000000..296f72d157
--- /dev/null
+++ b/deps/v8/src/codegen/arm/interface-descriptors-arm-inl.h
@@ -0,0 +1,256 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_ARM_INTERFACE_DESCRIPTORS_ARM_INL_H_
+#define V8_CODEGEN_ARM_INTERFACE_DESCRIPTORS_ARM_INL_H_
+
+#if V8_TARGET_ARCH_ARM
+
+#include "src/codegen/interface-descriptors.h"
+#include "src/execution/frames.h"
+
+namespace v8 {
+namespace internal {
+
+constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() {
+ auto registers = RegisterArray(r0, r1, r2, r3, r4);
+ STATIC_ASSERT(registers.size() == kMaxBuiltinRegisterParams);
+ return registers;
+}
+
+// static
+constexpr auto RecordWriteDescriptor::registers() {
+ return RegisterArray(r0, r1, r2, r3, r4, kReturnRegister0);
+}
+
+// static
+constexpr auto DynamicCheckMapsDescriptor::registers() {
+ return RegisterArray(r0, r1, r2, r3, cp);
+}
+
+// static
+constexpr auto EphemeronKeyBarrierDescriptor::registers() {
+ return RegisterArray(r0, r1, r2, r3, r4, kReturnRegister0);
+}
+
+// static
+constexpr Register LoadDescriptor::ReceiverRegister() { return r1; }
+// static
+constexpr Register LoadDescriptor::NameRegister() { return r2; }
+// static
+constexpr Register LoadDescriptor::SlotRegister() { return r0; }
+
+// static
+constexpr Register LoadWithVectorDescriptor::VectorRegister() { return r3; }
+
+// static
+constexpr Register
+LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
+ return r4;
+}
+
+// static
+constexpr Register StoreDescriptor::ReceiverRegister() { return r1; }
+// static
+constexpr Register StoreDescriptor::NameRegister() { return r2; }
+// static
+constexpr Register StoreDescriptor::ValueRegister() { return r0; }
+// static
+constexpr Register StoreDescriptor::SlotRegister() { return r4; }
+
+// static
+constexpr Register StoreWithVectorDescriptor::VectorRegister() { return r3; }
+
+// static
+constexpr Register StoreTransitionDescriptor::MapRegister() { return r5; }
+
+// static
+constexpr Register ApiGetterDescriptor::HolderRegister() { return r0; }
+// static
+constexpr Register ApiGetterDescriptor::CallbackRegister() { return r3; }
+
+// static
+constexpr Register GrowArrayElementsDescriptor::ObjectRegister() { return r0; }
+// static
+constexpr Register GrowArrayElementsDescriptor::KeyRegister() { return r3; }
+
+// static
+constexpr Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
+ return r3;
+}
+// static
+constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() { return r4; }
+
+// static
+constexpr Register TypeConversionDescriptor::ArgumentRegister() { return r0; }
+
+// static
+constexpr auto TypeofDescriptor::registers() { return RegisterArray(r3); }
+
+// static
+constexpr auto CallTrampolineDescriptor::registers() {
+ // r0 : number of arguments
+ // r1 : the target to call
+ return RegisterArray(r1, r0);
+}
+
+// static
+constexpr auto CallVarargsDescriptor::registers() {
+ // r0 : number of arguments (on the stack, not including receiver)
+ // r1 : the target to call
+ // r4 : arguments list length (untagged)
+ // r2 : arguments list (FixedArray)
+ return RegisterArray(r1, r0, r4, r2);
+}
+
+// static
+constexpr auto CallForwardVarargsDescriptor::registers() {
+ // r0 : number of arguments
+ // r2 : start index (to support rest parameters)
+ // r1 : the target to call
+ return RegisterArray(r1, r0, r2);
+}
+
+// static
+constexpr auto CallFunctionTemplateDescriptor::registers() {
+ // r1 : function template info
+ // r2 : number of arguments (on the stack, not including receiver)
+ return RegisterArray(r1, r2);
+}
+
+// static
+constexpr auto CallWithSpreadDescriptor::registers() {
+ // r0 : number of arguments (on the stack, not including receiver)
+ // r1 : the target to call
+ // r2 : the object to spread
+ return RegisterArray(r1, r0, r2);
+}
+
+// static
+constexpr auto CallWithArrayLikeDescriptor::registers() {
+ // r1 : the target to call
+ // r2 : the arguments list
+ return RegisterArray(r1, r2);
+}
+
+// static
+constexpr auto ConstructVarargsDescriptor::registers() {
+ // r0 : number of arguments (on the stack, not including receiver)
+ // r1 : the target to call
+ // r3 : the new target
+ // r4 : arguments list length (untagged)
+ // r2 : arguments list (FixedArray)
+ return RegisterArray(r1, r3, r0, r4, r2);
+}
+
+// static
+constexpr auto ConstructForwardVarargsDescriptor::registers() {
+ // r0 : number of arguments
+ // r3 : the new target
+ // r2 : start index (to support rest parameters)
+ // r1 : the target to call
+ return RegisterArray(r1, r3, r0, r2);
+}
+
+// static
+constexpr auto ConstructWithSpreadDescriptor::registers() {
+ // r0 : number of arguments (on the stack, not including receiver)
+ // r1 : the target to call
+ // r3 : the new target
+ // r2 : the object to spread
+ return RegisterArray(r1, r3, r0, r2);
+}
+
+// static
+constexpr auto ConstructWithArrayLikeDescriptor::registers() {
+ // r1 : the target to call
+ // r3 : the new target
+ // r2 : the arguments list
+ return RegisterArray(r1, r3, r2);
+}
+
+// static
+constexpr auto ConstructStubDescriptor::registers() {
+ // r0 : number of arguments
+ // r1 : the target to call
+ // r3 : the new target
+ // r2 : allocation site or undefined
+ return RegisterArray(r1, r3, r0, r2);
+}
+
+// static
+constexpr auto AbortDescriptor::registers() { return RegisterArray(r1); }
+
+// static
+constexpr auto CompareDescriptor::registers() { return RegisterArray(r1, r0); }
+
+// static
+constexpr auto Compare_BaselineDescriptor::registers() {
+ // r1: left operand
+ // r0: right operand
+ // r2: feedback slot
+ return RegisterArray(r1, r0, r2);
+}
+
+// static
+constexpr auto BinaryOpDescriptor::registers() { return RegisterArray(r1, r0); }
+
+// static
+constexpr auto BinaryOp_BaselineDescriptor::registers() {
+ // r1: left operand
+ // r0: right operand
+ // r2: feedback slot
+ return RegisterArray(r1, r0, r2);
+}
+
+// static
+constexpr auto ApiCallbackDescriptor::registers() {
+ return RegisterArray(r1, // kApiFunctionAddress
+ r2, // kArgc
+ r3, // kCallData
+ r0); // kHolder
+}
+
+// static
+constexpr auto InterpreterDispatchDescriptor::registers() {
+ return RegisterArray(
+ kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister);
+}
+
+// static
+constexpr auto InterpreterPushArgsThenCallDescriptor::registers() {
+ return RegisterArray(r0, // argument count (not including receiver)
+ r2, // address of first argument
+ r1); // the target callable to be called
+}
+
+// static
+constexpr auto InterpreterPushArgsThenConstructDescriptor::registers() {
+ return RegisterArray(
+ r0, // argument count (not including receiver)
+ r4, // address of the first argument
+ r1, // constructor to call
+ r3, // new target
+ r2); // allocation site feedback if available, undefined otherwise
+}
+
+// static
+constexpr auto ResumeGeneratorDescriptor::registers() {
+ return RegisterArray(r0, // the value to pass to the generator
+ r1); // the JSGeneratorObject to resume
+}
+
+// static
+constexpr auto RunMicrotasksEntryDescriptor::registers() {
+ return RegisterArray(r0, r1);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_ARM
+
+#endif // V8_CODEGEN_ARM_INTERFACE_DESCRIPTORS_ARM_INL_H_
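The new inline header relies on a constexpr RegisterArray helper so each descriptor's register set becomes a compile-time constant rather than runtime-initialized data. A standalone analogue of the pattern, with Register stubbed out (in V8 it comes from register-arm.h and the shared descriptor machinery):

```cpp
#include <array>

struct Register { int code; };  // Stand-in for V8's ARM Register type.

// Variadic constexpr helper: returns a fixed-size std::array, so sizes and
// contents are usable in static_asserts, mirroring RegisterArray above.
template <typename... Regs>
constexpr std::array<Register, sizeof...(Regs)> RegisterArray(Regs... regs) {
  return {{regs...}};
}

constexpr Register r0{0}, r1{1}, r2{2}, r3{3}, r4{4};

constexpr auto kDefaultRegisters = RegisterArray(r0, r1, r2, r3, r4);
static_assert(kDefaultRegisters.size() == 5,
              "register count is checked at compile time");
```

This is what lets the STATIC_ASSERT against kMaxBuiltinRegisterParams in DefaultRegisterArray() fire at compile time instead of relying on the runtime CHECK_LE calls in the deleted .cc file.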
diff --git a/deps/v8/src/codegen/arm/interface-descriptors-arm.cc b/deps/v8/src/codegen/arm/interface-descriptors-arm.cc
deleted file mode 100644
index 53992227ab..0000000000
--- a/deps/v8/src/codegen/arm/interface-descriptors-arm.cc
+++ /dev/null
@@ -1,306 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_ARM
-
-#include "src/codegen/interface-descriptors.h"
-
-#include "src/execution/frames.h"
-
-namespace v8 {
-namespace internal {
-
-const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
-
-void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
- CallInterfaceDescriptorData* data, int register_parameter_count) {
- const Register default_stub_registers[] = {r0, r1, r2, r3, r4};
- CHECK_LE(static_cast<size_t>(register_parameter_count),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(register_parameter_count,
- default_stub_registers);
-}
-
-void RecordWriteDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {r0, r1, r2, r3, r4};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void DynamicCheckMapsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register default_stub_registers[] = {r0, r1, r2, r3, cp};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void EphemeronKeyBarrierDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {r0, r1, r2, r3, r4};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-const Register LoadDescriptor::ReceiverRegister() { return r1; }
-const Register LoadDescriptor::NameRegister() { return r2; }
-const Register LoadDescriptor::SlotRegister() { return r0; }
-
-const Register LoadWithVectorDescriptor::VectorRegister() { return r3; }
-
-const Register
-LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
- return r4;
-}
-
-const Register StoreDescriptor::ReceiverRegister() { return r1; }
-const Register StoreDescriptor::NameRegister() { return r2; }
-const Register StoreDescriptor::ValueRegister() { return r0; }
-const Register StoreDescriptor::SlotRegister() { return r4; }
-
-const Register StoreWithVectorDescriptor::VectorRegister() { return r3; }
-
-const Register StoreTransitionDescriptor::SlotRegister() { return r4; }
-const Register StoreTransitionDescriptor::VectorRegister() { return r3; }
-const Register StoreTransitionDescriptor::MapRegister() { return r5; }
-
-const Register ApiGetterDescriptor::HolderRegister() { return r0; }
-const Register ApiGetterDescriptor::CallbackRegister() { return r3; }
-
-const Register GrowArrayElementsDescriptor::ObjectRegister() { return r0; }
-const Register GrowArrayElementsDescriptor::KeyRegister() { return r3; }
-
-const Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() { return r3; }
-const Register BaselineLeaveFrameDescriptor::WeightRegister() { return r4; }
-
-// static
-const Register TypeConversionDescriptor::ArgumentRegister() { return r0; }
-
-void TypeofDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r0 : number of arguments
- // r1 : the target to call
- Register registers[] = {r1, r0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r0 : number of arguments (on the stack, not including receiver)
- // r1 : the target to call
- // r4 : arguments list length (untagged)
- // r2 : arguments list (FixedArray)
- Register registers[] = {r1, r0, r4, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallForwardVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r0 : number of arguments
- // r2 : start index (to support rest parameters)
- // r1 : the target to call
- Register registers[] = {r1, r0, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallFunctionTemplateDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r1 : function template info
- // r2 : number of arguments (on the stack, not including receiver)
- Register registers[] = {r1, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallWithSpreadDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r0 : number of arguments (on the stack, not including receiver)
- // r1 : the target to call
- // r2 : the object to spread
- Register registers[] = {r1, r0, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r1 : the target to call
- // r2 : the arguments list
- Register registers[] = {r1, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r0 : number of arguments (on the stack, not including receiver)
- // r1 : the target to call
- // r3 : the new target
- // r4 : arguments list length (untagged)
- // r2 : arguments list (FixedArray)
- Register registers[] = {r1, r3, r0, r4, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r0 : number of arguments
- // r3 : the new target
- // r2 : start index (to support rest parameters)
- // r1 : the target to call
- Register registers[] = {r1, r3, r0, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructWithSpreadDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r0 : number of arguments (on the stack, not including receiver)
- // r1 : the target to call
- // r3 : the new target
- // r2 : the object to spread
- Register registers[] = {r1, r3, r0, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r1 : the target to call
- // r3 : the new target
- // r2 : the arguments list
- Register registers[] = {r1, r3, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructStubDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r0 : number of arguments
- // r1 : the target to call
- // r3 : the new target
- // r2 : allocation site or undefined
- Register registers[] = {r1, r3, r0, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void AbortDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r1};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CompareDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r1, r0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void Compare_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r1: left operand
- // r0: right operand
- // r2: feedback slot
- Register registers[] = {r1, r0, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void BinaryOpDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r1, r0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void BinaryOp_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r1: left operand
- // r0: right operand
- // r2: feedback slot
- Register registers[] = {r1, r0, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ApiCallbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r1, // kApiFunctionAddress
- r2, // kArgc
- r3, // kCallData
- r0, // kHolder
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterDispatchDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
- kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterPushArgsThenCallDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r0, // argument count (not including receiver)
- r2, // address of first argument
- r1 // the target callable to be call
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r0, // argument count (not including receiver)
- r4, // address of the first argument
- r1, // constructor to call
- r3, // new target
- r2, // allocation site feedback if available, undefined otherwise
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ResumeGeneratorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r0, // the value to pass to the generator
- r1 // the JSGeneratorObject to resume
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r1, // loaded new FP
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r0, r1};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.cc b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
index f83eee4a91..d4e12f3092 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
@@ -13,6 +13,7 @@
#include "src/codegen/callable.h"
#include "src/codegen/code-factory.h"
#include "src/codegen/external-reference-table.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
@@ -59,7 +60,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
bytes += NumRegs(list) * kPointerSize;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
bytes += DwVfpRegister::kNumRegisters * DwVfpRegister::kSizeInBytes;
}
@@ -85,7 +86,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
bytes += NumRegs(list) * kPointerSize;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
SaveFPRegs(sp, lr);
bytes += DwVfpRegister::kNumRegisters * DwVfpRegister::kSizeInBytes;
}
@@ -96,7 +97,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
int bytes = 0;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
RestoreFPRegs(sp, lr);
bytes += DwVfpRegister::kNumRegisters * DwVfpRegister::kSizeInBytes;
}
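These hunks are part of the tree-wide migration from unscoped enums (kSaveFPRegs, EMIT_REMEMBERED_SET, INLINE_SMI_CHECK, CALL_FUNCTION) to scoped enum classes. A condensed before/after sketch (enumerator order illustrative; the real definitions live in the shared codegen headers):

```cpp
// Before: unscoped enumerators leaked into the enclosing namespace and could
// be compared across unrelated enums without a diagnostic.
// enum SaveFPRegsMode { kDontSaveFPRegs, kSaveFPRegs };

// After: scoped equivalents, as used throughout this patch.
enum class SaveFPRegsMode { kIgnore, kSave };
enum class RememberedSetAction { kOmit, kEmit };
enum class SmiCheck { kInline, kOmit };
enum class InvokeType { kCall, kJump };

// Call sites must name the enum, and switch statements over the scoped type
// (as in InvokeFunctionCode below) get exhaustiveness checking:
int Dispatch(InvokeType type) {
  switch (type) {
    case InvokeType::kCall:
      return 0;
    case InvokeType::kJump:
      return 1;
  }
  return -1;  // Unreachable; keeps -Wreturn-type quiet.
}
```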
@@ -660,7 +661,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
Label done;
// Skip barrier if writing a smi.
- if (smi_check == INLINE_SMI_CHECK) {
+ if (smi_check == SmiCheck::kInline) {
JumpIfSmi(value, &done);
}
@@ -668,7 +669,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
// of the object, so offset must be a multiple of kPointerSize.
DCHECK(IsAligned(offset, kPointerSize));
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Label ok;
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -680,7 +681,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
RecordWrite(object, Operand(offset - kHeapObjectTag), value, lr_status,
- save_fp, remembered_set_action, OMIT_SMI_CHECK);
+ save_fp, remembered_set_action, SmiCheck::kOmit);
bind(&done);
}
@@ -826,7 +827,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
DCHECK_NE(object, value);
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
{
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -837,7 +838,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
}
- if ((remembered_set_action == OMIT_REMEMBERED_SET &&
+ if ((remembered_set_action == RememberedSetAction::kOmit &&
!FLAG_incremental_marking) ||
FLAG_disable_write_barriers) {
return;
@@ -847,7 +848,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
// catch stores of smis and stores into the young generation.
Label done;
- if (smi_check == INLINE_SMI_CHECK) {
+ if (smi_check == SmiCheck::kInline) {
JumpIfSmi(value, &done);
}
@@ -1435,7 +1436,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
PushCommonFrame(scratch);
// Reserve room for saved entry sp.
sub(sp, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp));
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
mov(scratch, Operand::Zero());
str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
@@ -1622,7 +1623,7 @@ void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch,
void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count,
- Label* done, InvokeFlag flag) {
+ Label* done, InvokeType type) {
Label regular_invoke;
// r0: actual arguments count
// r1: function (passed through to callee)
@@ -1722,9 +1723,9 @@ void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count,
Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
DCHECK_EQ(function, r1);
DCHECK_IMPLIES(new_target.is_valid(), new_target == r3);
@@ -1746,17 +1747,19 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
}
Label done;
- InvokePrologue(expected_parameter_count, actual_parameter_count, &done, flag);
+ InvokePrologue(expected_parameter_count, actual_parameter_count, &done, type);
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
Register code = kJavaScriptCallCodeStartRegister;
ldr(code, FieldMemOperand(function, JSFunction::kCodeOffset));
- if (flag == CALL_FUNCTION) {
- CallCodeObject(code);
- } else {
- DCHECK(flag == JUMP_FUNCTION);
- JumpCodeObject(code);
+ switch (type) {
+ case InvokeType::kCall:
+ CallCodeObject(code);
+ break;
+ case InvokeType::kJump:
+ JumpCodeObject(code);
+ break;
}
b(&done);
@@ -1773,9 +1776,9 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
void MacroAssembler::InvokeFunctionWithNewTarget(
Register fun, Register new_target, Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
// Contract with called JS functions requires that function is passed in r1.
DCHECK_EQ(fun, r1);
@@ -1790,15 +1793,15 @@ void MacroAssembler::InvokeFunctionWithNewTarget(
SharedFunctionInfo::kFormalParameterCountOffset));
InvokeFunctionCode(fun, new_target, expected_reg, actual_parameter_count,
- flag);
+ type);
}
void MacroAssembler::InvokeFunction(Register function,
Register expected_parameter_count,
Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
// Contract with called JS functions requires that function is passed in r1.
DCHECK_EQ(function, r1);
@@ -1807,18 +1810,7 @@ void MacroAssembler::InvokeFunction(Register function,
ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
InvokeFunctionCode(r1, no_reg, expected_parameter_count,
- actual_parameter_count, flag);
-}
-
-void MacroAssembler::MaybeDropFrames() {
- // Check whether we need to drop frames to restart a function on the stack.
- ExternalReference restart_fp =
- ExternalReference::debug_restart_fp_address(isolate());
- Move(r1, restart_fp);
- ldr(r1, MemOperand(r1));
- tst(r1, r1);
- Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET,
- ne);
+ actual_parameter_count, type);
}
void MacroAssembler::PushStackHandler() {
@@ -1993,8 +1985,8 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
DCHECK_EQ(builtin.address() & 1, 1);
#endif
Move(r1, builtin);
- Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
- kArgvOnStack, builtin_exit_frame);
+ Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
+ ArgvMode::kStack, builtin_exit_frame);
Jump(code, RelocInfo::CODE_TARGET);
}
@@ -2034,11 +2026,11 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
}
void TurboAssembler::Assert(Condition cond, AbortReason reason) {
- if (emit_debug_code()) Check(cond, reason);
+ if (FLAG_debug_code) Check(cond, reason);
}
void TurboAssembler::AssertUnreachable(AbortReason reason) {
- if (emit_debug_code()) Abort(reason);
+ if (FLAG_debug_code) Abort(reason);
}
void TurboAssembler::Check(Condition cond, AbortReason reason) {
@@ -2052,11 +2044,11 @@ void TurboAssembler::Check(Condition cond, AbortReason reason) {
void TurboAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
-#ifdef DEBUG
- const char* msg = GetAbortReason(reason);
- RecordComment("Abort message: ");
- RecordComment(msg);
-#endif
+ if (FLAG_code_comments) {
+ const char* msg = GetAbortReason(reason);
+ RecordComment("Abort message: ");
+ RecordComment(msg);
+ }
// Avoid emitting call to builtin if requested.
if (trap_on_abort()) {
@@ -2143,7 +2135,7 @@ void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
}
void MacroAssembler::AssertNotSmi(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
Check(ne, AbortReason::kOperandIsASmi);
@@ -2151,7 +2143,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
void MacroAssembler::AssertSmi(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
Check(eq, AbortReason::kOperandIsNotASmi);
@@ -2159,7 +2151,7 @@ void MacroAssembler::AssertSmi(Register object) {
}
void MacroAssembler::AssertConstructor(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor);
@@ -2173,7 +2165,7 @@ void MacroAssembler::AssertConstructor(Register object) {
}
void MacroAssembler::AssertFunction(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
Check(ne, AbortReason::kOperandIsASmiAndNotAFunction);
@@ -2187,7 +2179,7 @@ void MacroAssembler::AssertFunction(Register object) {
}
void MacroAssembler::AssertBoundFunction(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction);
@@ -2199,7 +2191,7 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
void MacroAssembler::AssertGeneratorObject(Register object) {
- if (!emit_debug_code()) return;
+ if (!FLAG_debug_code) return;
tst(object, Operand(kSmiTagMask));
Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject);
@@ -2229,7 +2221,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Label done_checking;
AssertNotSmi(object);
CompareRoot(object, RootIndex::kUndefinedValue);
@@ -2520,7 +2512,7 @@ void TurboAssembler::CallCFunctionHelper(Register function,
// running in the simulator. The simulator has its own alignment check which
// provides more information.
#if V8_HOST_ARCH_ARM
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.h b/deps/v8/src/codegen/arm/macro-assembler-arm.h
index e622d4aa17..3a54f6c45f 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.h
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.h
@@ -29,8 +29,6 @@ inline MemOperand FieldMemOperand(Register object, int offset) {
return MemOperand(object, offset - kHeapObjectTag);
}
-enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
-enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
@@ -656,16 +654,16 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void RecordWriteField(
Register object, int offset, Register value, LinkRegisterStatus lr_status,
SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
// For a given |object| notify the garbage collector that the slot at |offset|
// has been written. |value| is the object being stored.
void RecordWrite(
Register object, Operand offset, Register value,
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
// Enter exit frame.
// stack_space - extra stack space, used for alignment before call to C.
@@ -689,7 +687,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Invoke the JavaScript function code by either calling or jumping.
void InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count,
- Register actual_parameter_count, InvokeFlag flag);
+ Register actual_parameter_count, InvokeType type);
// On function call, call into the debugger.
void CallDebugOnFunctionCall(Register fun, Register new_target,
@@ -700,13 +698,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// current context to the context in the function before invoking.
void InvokeFunctionWithNewTarget(Register function, Register new_target,
Register actual_parameter_count,
- InvokeFlag flag);
+ InvokeType type);
void InvokeFunction(Register function, Register expected_parameter_count,
- Register actual_parameter_count, InvokeFlag flag);
-
- // Frame restart support
- void MaybeDropFrames();
+ Register actual_parameter_count, InvokeType type);
// Exception handling
@@ -784,18 +779,18 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Call a runtime routine.
void CallRuntime(const Runtime::Function* f, int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore);
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, save_doubles);
}
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid, int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
}
@@ -874,7 +869,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Helper functions for generating invokes.
void InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count, Label* done,
- InvokeFlag flag);
+ InvokeType type);
DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};