Diffstat (limited to 'deps/v8/src/codegen/arm64')
-rw-r--r--  deps/v8/src/codegen/arm64/assembler-arm64-inl.h       |  20
-rw-r--r--  deps/v8/src/codegen/arm64/assembler-arm64.cc          |  24
-rw-r--r--  deps/v8/src/codegen/arm64/assembler-arm64.h           |  10
-rw-r--r--  deps/v8/src/codegen/arm64/constants-arm64.h           |   4
-rw-r--r--  deps/v8/src/codegen/arm64/instructions-arm64.cc       |   7
-rw-r--r--  deps/v8/src/codegen/arm64/instructions-arm64.h        |  16
-rw-r--r--  deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h |  56
-rw-r--r--  deps/v8/src/codegen/arm64/macro-assembler-arm64.cc    | 208
-rw-r--r--  deps/v8/src/codegen/arm64/macro-assembler-arm64.h     |  25
-rw-r--r--  deps/v8/src/codegen/arm64/register-arm64.h            |  13
-rw-r--r--  deps/v8/src/codegen/arm64/utils-arm64.cc              |  17
-rw-r--r--  deps/v8/src/codegen/arm64/utils-arm64.h               |   3
12 files changed, 208 insertions(+), 195 deletions(-)
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64-inl.h b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h
index baae106c1c..ce34da7dc2 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h
@@ -54,14 +54,12 @@ inline bool CPURegister::IsSP() const {
}
inline void CPURegList::Combine(const CPURegList& other) {
- DCHECK(IsValid());
DCHECK(other.type() == type_);
DCHECK(other.RegisterSizeInBits() == size_);
list_ |= other.list();
}
inline void CPURegList::Remove(const CPURegList& other) {
- DCHECK(IsValid());
if (other.type() == type_) {
list_ &= ~other.list();
}
@@ -84,13 +82,12 @@ inline void CPURegList::Remove(const CPURegister& other1,
}
inline void CPURegList::Combine(int code) {
- DCHECK(IsValid());
DCHECK(CPURegister::Create(code, size_, type_).IsValid());
list_ |= (1ULL << code);
+ DCHECK(IsValid());
}
inline void CPURegList::Remove(int code) {
- DCHECK(IsValid());
DCHECK(CPURegister::Create(code, size_, type_).IsValid());
list_ &= ~(1ULL << code);
}
@@ -311,6 +308,18 @@ Operand Operand::ToExtendedRegister() const {
return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_);
}
+Operand Operand::ToW() const {
+ if (IsShiftedRegister()) {
+ DCHECK(reg_.Is64Bits());
+ return Operand(reg_.W(), shift(), shift_amount());
+ } else if (IsExtendedRegister()) {
+ DCHECK(reg_.Is64Bits());
+ return Operand(reg_.W(), extend(), shift_amount());
+ }
+ DCHECK(IsImmediate());
+ return *this;
+}
+
Immediate Operand::immediate_for_heap_object_request() const {
DCHECK((heap_object_request().kind() == HeapObjectRequest::kHeapNumber &&
immediate_.rmode() == RelocInfo::FULL_EMBEDDED_OBJECT) ||
@@ -711,7 +720,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target,
Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
icache_flush_mode);
}
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) {
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
+ !FLAG_disable_write_barriers) {
WriteBarrierForCode(host(), this, target);
}
}
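Note on the new Operand::ToW(): with pointer compression, a tagged value lives in the low 32 bits of a 64-bit register, so tagged comparisons can be done entirely on W (32-bit) views of both operands; ToW() supplies the operand-side narrowing that Register::W() supplies for registers. A minimal standalone model of that idea (plain C++, hypothetical values, not V8's actual types):

    #include <cassert>
    #include <cstdint>

    // Under pointer compression, the meaningful part of a tagged value is
    // its low 32 bits; equality can be decided on those bits alone, which
    // is what Cmp(rn.W(), operand.ToW()) exploits.
    inline uint32_t CompressedView(uint64_t tagged) {
      return static_cast<uint32_t>(tagged);
    }

    int main() {
      uint64_t a = 0x0000'1111'0004'0001ULL;  // same compressed payload...
      uint64_t b = 0x0000'2222'0004'0001ULL;  // ...different upper bits
      assert(CompressedView(a) == CompressedView(b));
      return 0;
    }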
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.cc b/deps/v8/src/codegen/arm64/assembler-arm64.cc
index c798d3a8a0..ea2f4696bd 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/assembler-arm64.cc
@@ -63,18 +63,16 @@ void CpuFeatures::PrintFeatures() {}
// CPURegList utilities.
CPURegister CPURegList::PopLowestIndex() {
- DCHECK(IsValid());
if (IsEmpty()) {
return NoCPUReg;
}
- int index = CountTrailingZeros(list_, kRegListSizeInBits);
+ int index = base::bits::CountTrailingZeros(list_);
DCHECK((1LL << index) & list_);
Remove(index);
return CPURegister::Create(index, size_, type_);
}
CPURegister CPURegList::PopHighestIndex() {
- DCHECK(IsValid());
if (IsEmpty()) {
return NoCPUReg;
}
@@ -369,8 +367,9 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset();
switch (request.kind()) {
case HeapObjectRequest::kHeapNumber: {
- Handle<HeapObject> object = isolate->factory()->NewHeapNumber(
- request.heap_number(), AllocationType::kOld);
+ Handle<HeapObject> object =
+ isolate->factory()->NewHeapNumber<AllocationType::kOld>(
+ request.heap_number());
EmbeddedObjectIndex index = AddEmbeddedObject(object);
set_embedded_object_index_referenced_from(pc, index);
break;
@@ -3967,19 +3966,24 @@ void Assembler::LoadStore(const CPURegister& rt, const MemOperand& addr,
bool Assembler::IsImmLSUnscaled(int64_t offset) { return is_int9(offset); }
bool Assembler::IsImmLSScaled(int64_t offset, unsigned size) {
- bool offset_is_size_multiple = (((offset >> size) << size) == offset);
+ bool offset_is_size_multiple =
+ (static_cast<int64_t>(static_cast<uint64_t>(offset >> size) << size) ==
+ offset);
return offset_is_size_multiple && is_uint12(offset >> size);
}
bool Assembler::IsImmLSPair(int64_t offset, unsigned size) {
- bool offset_is_size_multiple = (((offset >> size) << size) == offset);
+ bool offset_is_size_multiple =
+ (static_cast<int64_t>(static_cast<uint64_t>(offset >> size) << size) ==
+ offset);
return offset_is_size_multiple && is_int7(offset >> size);
}
bool Assembler::IsImmLLiteral(int64_t offset) {
int inst_size = static_cast<int>(kInstrSizeLog2);
bool offset_is_inst_multiple =
- (((offset >> inst_size) << inst_size) == offset);
+ (static_cast<int64_t>(static_cast<uint64_t>(offset >> inst_size)
+ << inst_size) == offset);
DCHECK_GT(offset, 0);
offset >>= kLoadLiteralScaleLog2;
return offset_is_inst_multiple && is_intn(offset, ImmLLiteral_width);
@@ -4178,9 +4182,9 @@ bool Assembler::IsImmLogical(uint64_t value, unsigned width, unsigned* n,
// 1110ss 4 UInt(ss)
// 11110s 2 UInt(s)
//
- // So we 'or' (-d << 1) with our computed s to form imms.
+ // So we 'or' (-d * 2) with our computed s to form imms.
*n = out_n;
- *imm_s = ((-d << 1) | (s - 1)) & 0x3F;
+ *imm_s = ((-d * 2) | (s - 1)) & 0x3F;
*imm_r = r;
return true;
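The IsImmLSScaled/IsImmLSPair/IsImmLLiteral rewrites (and the `-d << 1` to `-d * 2` change in IsImmLogical) fix undefined behavior, not logic: before C++20, left-shifting a negative signed value is UB, so the round trip `((offset >> size) << size) == offset` is now routed through uint64_t, where shifts are fully defined. A sketch of the scaled-offset predicate under that discipline (is_uint12 spelled out inline; assumes arithmetic right shift of negative int64_t, as V8 does):

    #include <cassert>
    #include <cstdint>

    // True if |offset| is a multiple of (1 << size) and the scaled value
    // fits the unsigned 12-bit immediate of the scaled LDR/STR forms.
    bool IsImmLSScaled(int64_t offset, unsigned size) {
      // Shift left in uint64_t: shifting a negative int64_t is UB pre-C++20.
      bool offset_is_size_multiple =
          static_cast<int64_t>(static_cast<uint64_t>(offset >> size) << size) ==
          offset;
      int64_t scaled = offset >> size;
      bool fits_uint12 = scaled >= 0 && scaled < (1 << 12);
      return offset_is_size_multiple && fits_uint12;
    }

    int main() {
      assert(IsImmLSScaled(8, 3));    // multiple of 8, scaled offset 1
      assert(!IsImmLSScaled(4, 3));   // not a multiple of 8
      assert(!IsImmLSScaled(-8, 3));  // negative: needs the unscaled form
      return 0;
    }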
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.h b/deps/v8/src/codegen/arm64/assembler-arm64.h
index 04ee6d8b75..23e8acb1f9 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64.h
+++ b/deps/v8/src/codegen/arm64/assembler-arm64.h
@@ -8,6 +8,7 @@
#include <deque>
#include <list>
#include <map>
+#include <memory>
#include <vector>
#include "src/base/optional.h"
@@ -105,6 +106,9 @@ class Operand {
// which helps in the encoding of instructions that use the stack pointer.
inline Operand ToExtendedRegister() const;
+ // Returns new Operand adapted for using with W registers.
+ inline Operand ToW() const;
+
inline Immediate immediate() const;
inline int64_t ImmediateValue() const;
inline RelocInfo::Mode ImmediateRMode() const;
@@ -189,9 +193,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
explicit Assembler(const AssemblerOptions&,
std::unique_ptr<AssemblerBuffer> = {});
- virtual ~Assembler();
+ ~Assembler() override;
- virtual void AbortedCodeGeneration();
+ void AbortedCodeGeneration() override;
// System functions ---------------------------------------------------------
// Start generating code from the beginning of the buffer, discarding any code
@@ -375,7 +379,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Instruction set functions ------------------------------------------------
// Branch / Jump instructions.
- // For branches offsets are scaled, i.e. they in instrcutions not in bytes.
+ // For branches offsets are scaled, i.e. in instructions not in bytes.
// Branch to register.
void br(const Register& xn);
diff --git a/deps/v8/src/codegen/arm64/constants-arm64.h b/deps/v8/src/codegen/arm64/constants-arm64.h
index 914268644a..ccafae5e14 100644
--- a/deps/v8/src/codegen/arm64/constants-arm64.h
+++ b/deps/v8/src/codegen/arm64/constants-arm64.h
@@ -33,6 +33,7 @@ constexpr size_t kMaxPCRelativeCodeRangeInMB = 128;
constexpr uint8_t kInstrSize = 4;
constexpr uint8_t kInstrSizeLog2 = 2;
constexpr uint8_t kLoadLiteralScaleLog2 = 2;
+constexpr uint8_t kLoadLiteralScale = 1 << kLoadLiteralScaleLog2;
constexpr int kMaxLoadLiteralRange = 1 * MB;
const int kNumberOfRegisters = 32;
@@ -146,7 +147,8 @@ const unsigned kFloat16ExponentBias = 15;
// Actual value of root register is offset from the root array's start
// to take advantage of negative displacement values.
// TODO(sigurds): Choose best value.
-constexpr int kRootRegisterBias = 256;
+// TODO(ishell): Choose best value for ptr-compr.
+constexpr int kRootRegisterBias = kSystemPointerSize == kTaggedSize ? 256 : 0;
using float16 = uint16_t;
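The kRootRegisterBias change plausibly follows from how decompression works later in this commit: when kSystemPointerSize != kTaggedSize (a pointer-compression build), compressed values are decompressed by a plain add of the root register, so any bias in that register would need compensating on every decompression; setting it to 0 sidesteps that, and the TODO signals the value is still open to tuning. As a sketch of the selection:

    #include <cstdint>

    // Assumption: in a compressed build kTaggedSize is 4 while pointers
    // stay 8 bytes wide, so the conditional selects the 0 bias.
    constexpr int kSystemPointerSize = 8;
    constexpr int kTaggedSize = 4;
    constexpr int kRootRegisterBias =
        kSystemPointerSize == kTaggedSize ? 256 : 0;

    static_assert(kRootRegisterBias == 0,
                  "no root register bias under pointer compression");

    int main() { return 0; }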
diff --git a/deps/v8/src/codegen/arm64/instructions-arm64.cc b/deps/v8/src/codegen/arm64/instructions-arm64.cc
index 05f3654da9..ab022affdd 100644
--- a/deps/v8/src/codegen/arm64/instructions-arm64.cc
+++ b/deps/v8/src/codegen/arm64/instructions-arm64.cc
@@ -71,6 +71,7 @@ static uint64_t RotateRight(uint64_t value, unsigned int rotate,
unsigned int width) {
DCHECK_LE(width, 64);
rotate &= 63;
+ if (rotate == 0) return value;
return ((value & ((1ULL << rotate) - 1ULL)) << (width - rotate)) |
(value >> rotate);
}
@@ -191,16 +192,16 @@ int64_t Instruction::ImmPCOffset() {
} else if (BranchType() != UnknownBranchType) {
// All PC-relative branches.
// Relative branch offsets are instruction-size-aligned.
- offset = ImmBranch() << kInstrSizeLog2;
+ offset = ImmBranch() * kInstrSize;
} else if (IsUnresolvedInternalReference()) {
// Internal references are always word-aligned.
- offset = ImmUnresolvedInternalReference() << kInstrSizeLog2;
+ offset = ImmUnresolvedInternalReference() * kInstrSize;
} else {
// Load literal (offset from PC).
DCHECK(IsLdrLiteral());
// The offset is always shifted by 2 bits, even for loads to 64-bits
// registers.
- offset = ImmLLiteral() << kInstrSizeLog2;
+ offset = ImmLLiteral() * kInstrSize;
}
return offset;
}
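Both changes in instructions-arm64.cc are UB repairs of the same kind: the branch/literal immediates are signed, and `offset << kInstrSizeLog2` on a negative offset is UB, while `offset * kInstrSize` is well-defined signed multiplication; likewise RotateRight's early return avoids shifting by the full register width when rotate == 0 (value << 64 is UB). A self-contained version of the guarded rotate:

    #include <cassert>
    #include <cstdint>

    // Rotate |value| right by |rotate| bits within a |width|-bit field.
    // Without the early return, rotate == 0 would shift by |width|
    // (e.g. value << 64), which is undefined behavior in C++.
    uint64_t RotateRight(uint64_t value, unsigned rotate, unsigned width) {
      rotate &= 63;
      if (rotate == 0) return value;
      return ((value & ((1ULL << rotate) - 1ULL)) << (width - rotate)) |
             (value >> rotate);
    }

    int main() {
      assert(RotateRight(0x1, 1, 64) == 0x8000000000000000ULL);
      assert(RotateRight(0xF0, 0, 64) == 0xF0);  // formerly the UB path
      return 0;
    }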
diff --git a/deps/v8/src/codegen/arm64/instructions-arm64.h b/deps/v8/src/codegen/arm64/instructions-arm64.h
index 1132ba39db..7fe732e2ba 100644
--- a/deps/v8/src/codegen/arm64/instructions-arm64.h
+++ b/deps/v8/src/codegen/arm64/instructions-arm64.h
@@ -5,6 +5,7 @@
#ifndef V8_CODEGEN_ARM64_INSTRUCTIONS_ARM64_H_
#define V8_CODEGEN_ARM64_INSTRUCTIONS_ARM64_H_
+#include "src/base/memory.h"
#include "src/codegen/arm64/constants-arm64.h"
#include "src/codegen/arm64/register-arm64.h"
#include "src/codegen/arm64/utils-arm64.h"
@@ -82,11 +83,13 @@ enum Reg31Mode { Reg31IsStackPointer, Reg31IsZeroRegister };
class Instruction {
public:
V8_INLINE Instr InstructionBits() const {
- return *reinterpret_cast<const Instr*>(this);
+ // Usually this is aligned, but when de/serializing that's not guaranteed.
+ return base::ReadUnalignedValue<Instr>(reinterpret_cast<Address>(this));
}
V8_INLINE void SetInstructionBits(Instr new_instr) {
- *reinterpret_cast<Instr*>(this) = new_instr;
+ // Usually this is aligned, but when de/serializing that's not guaranteed.
+ base::WriteUnalignedValue(reinterpret_cast<Address>(this), new_instr);
}
int Bit(int pos) const { return (InstructionBits() >> pos) & 1; }
@@ -96,7 +99,9 @@ class Instruction {
}
int32_t SignedBits(int msb, int lsb) const {
- int32_t bits = *(reinterpret_cast<const int32_t*>(this));
+ // Usually this is aligned, but when de/serializing that's not guaranteed.
+ int32_t bits =
+ base::ReadUnalignedValue<int32_t>(reinterpret_cast<Address>(this));
return signed_bitextract_32(msb, lsb, bits);
}
@@ -125,7 +130,8 @@ class Instruction {
// formed from ImmPCRelLo and ImmPCRelHi.
int ImmPCRel() const {
DCHECK(IsPCRelAddressing());
- int offset = ((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo());
+ int offset = (static_cast<uint32_t>(ImmPCRelHi()) << ImmPCRelLo_width) |
+ ImmPCRelLo();
int width = ImmPCRelLo_width + ImmPCRelHi_width;
return signed_bitextract_32(width - 1, 0, offset);
}
@@ -404,7 +410,7 @@ class Instruction {
void SetImmLLiteral(Instruction* source);
uintptr_t LiteralAddress() {
- int offset = ImmLLiteral() << kLoadLiteralScaleLog2;
+ int offset = ImmLLiteral() * kLoadLiteralScale;
return reinterpret_cast<uintptr_t>(this) + offset;
}
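The unaligned accessors matter because an Instruction* is not guaranteed 4-byte aligned during de/serialization, and dereferencing a misaligned Instr* is UB (and traps on some targets). V8's base::ReadUnalignedValue / WriteUnalignedValue are memcpy-based; a minimal portable equivalent:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // memcpy is the portable way to access a possibly misaligned address;
    // compilers lower it to a single load/store where that is legal.
    template <typename T>
    T ReadUnalignedValue(const void* address) {
      T result;
      std::memcpy(&result, address, sizeof(T));
      return result;
    }

    template <typename T>
    void WriteUnalignedValue(void* address, T value) {
      std::memcpy(address, &value, sizeof(T));
    }

    int main() {
      uint8_t buffer[8] = {};
      // Offset 1 is misaligned for a 4-byte instruction word.
      WriteUnalignedValue<uint32_t>(buffer + 1, 0xD503201F);  // arm64 NOP
      assert(ReadUnalignedValue<uint32_t>(buffer + 1) == 0xD503201F);
      return 0;
    }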
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
index 62bd9c26bf..261fd1e564 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
@@ -93,6 +93,15 @@ void TurboAssembler::Ccmp(const Register& rn, const Operand& operand,
}
}
+void TurboAssembler::CcmpTagged(const Register& rn, const Operand& operand,
+ StatusFlags nzcv, Condition cond) {
+ if (COMPRESS_POINTERS_BOOL) {
+ Ccmp(rn.W(), operand.ToW(), nzcv, cond);
+ } else {
+ Ccmp(rn, operand, nzcv, cond);
+ }
+}
+
void MacroAssembler::Ccmn(const Register& rn, const Operand& operand,
StatusFlags nzcv, Condition cond) {
DCHECK(allow_macro_instructions());
@@ -157,6 +166,14 @@ void TurboAssembler::Cmp(const Register& rn, const Operand& operand) {
Subs(AppropriateZeroRegFor(rn), rn, operand);
}
+void TurboAssembler::CmpTagged(const Register& rn, const Operand& operand) {
+ if (COMPRESS_POINTERS_BOOL) {
+ Cmp(rn.W(), operand.ToW());
+ } else {
+ Cmp(rn, operand);
+ }
+}
+
void TurboAssembler::Neg(const Register& rd, const Operand& operand) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
@@ -982,7 +999,12 @@ void TurboAssembler::SmiUntag(Register dst, Register src) {
AssertSmi(src);
}
DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
- Asr(dst, src, kSmiShift);
+ if (COMPRESS_POINTERS_BOOL) {
+ Asr(dst.W(), src.W(), kSmiShift);
+ Sxtw(dst, dst);
+ } else {
+ Asr(dst, src, kSmiShift);
+ }
}
void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
@@ -1002,11 +1024,11 @@ void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
}
} else {
DCHECK(SmiValuesAre31Bits());
-#ifdef V8_COMPRESS_POINTERS
- Ldrsw(dst, src);
-#else
- Ldr(dst, src);
-#endif
+ if (COMPRESS_POINTERS_BOOL) {
+ Ldr(dst.W(), src);
+ } else {
+ Ldr(dst, src);
+ }
SmiUntag(dst);
}
}
@@ -1029,13 +1051,11 @@ void TurboAssembler::JumpIfSmi(Register value, Label* smi_label,
}
void TurboAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) {
- Cmp(x, y);
- B(eq, dest);
+ CompareAndBranch(x, y, eq, dest);
}
void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
- Cmp(x, y);
- B(lt, dest);
+ CompareAndBranch(x, y, lt, dest);
}
void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
@@ -1083,7 +1103,7 @@ void TurboAssembler::Claim(const Register& count, uint64_t unit_size) {
if (unit_size == 0) return;
DCHECK(base::bits::IsPowerOfTwo(unit_size));
- const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits);
+ const int shift = base::bits::CountTrailingZeros(unit_size);
const Operand size(count, LSL, shift);
if (size.IsZero()) {
@@ -1136,7 +1156,7 @@ void TurboAssembler::Drop(const Register& count, uint64_t unit_size) {
if (unit_size == 0) return;
DCHECK(base::bits::IsPowerOfTwo(unit_size));
- const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits);
+ const int shift = base::bits::CountTrailingZeros(unit_size);
const Operand size(count, LSL, shift);
if (size.IsZero()) {
@@ -1175,7 +1195,7 @@ void TurboAssembler::DropSlots(int64_t count) {
void TurboAssembler::PushArgument(const Register& arg) { Push(padreg, arg); }
-void MacroAssembler::CompareAndBranch(const Register& lhs, const Operand& rhs,
+void TurboAssembler::CompareAndBranch(const Register& lhs, const Operand& rhs,
Condition cond, Label* label) {
if (rhs.IsImmediate() && (rhs.ImmediateValue() == 0) &&
((cond == eq) || (cond == ne))) {
@@ -1190,6 +1210,16 @@ void MacroAssembler::CompareAndBranch(const Register& lhs, const Operand& rhs,
}
}
+void TurboAssembler::CompareTaggedAndBranch(const Register& lhs,
+ const Operand& rhs, Condition cond,
+ Label* label) {
+ if (COMPRESS_POINTERS_BOOL) {
+ CompareAndBranch(lhs.W(), rhs.ToW(), cond, label);
+ } else {
+ CompareAndBranch(lhs, rhs, cond, label);
+ }
+}
+
void TurboAssembler::TestAndBranchIfAnySet(const Register& reg,
const uint64_t bit_pattern,
Label* label) {
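The *Tagged macro variants above all follow one pattern: under COMPRESS_POINTERS_BOOL, operate on the W views, otherwise fall through to the X-width macro. The SmiUntag change is the same idea in arithmetic form: shift on the 32-bit view, then sign-extend, which is what the Asr(dst.W(), src.W(), kSmiShift) + Sxtw pair implements. A sketch of that arithmetic, assuming the 31-bit smi layout (value in the upper bits, tag bit 0 clear):

    #include <cassert>
    #include <cstdint>

    constexpr int kSmiShift = 1;  // 31-bit smis: value << 1, low bit 0

    int64_t SmiUntag(uint32_t tagged) {
      // Arithmetic shift on the 32-bit view, then sign-extension to 64
      // bits: mirrors Asr(dst.W(), src.W(), kSmiShift); Sxtw(dst, dst).
      int32_t value = static_cast<int32_t>(tagged) >> kSmiShift;
      return static_cast<int64_t>(value);
    }

    int main() {
      assert(SmiUntag(42u << kSmiShift) == 42);
      assert(SmiUntag(static_cast<uint32_t>(-7) << kSmiShift) == -7);
      return 0;
    }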
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
index 0a721b0647..892458fe8b 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
@@ -295,7 +295,9 @@ void TurboAssembler::Mov(const Register& rd, const Operand& operand,
} else if (RelocInfo::IsEmbeddedObjectMode(operand.ImmediateRMode())) {
Handle<HeapObject> x(
reinterpret_cast<Address*>(operand.ImmediateValue()));
- IndirectLoadConstant(rd, x);
+ // TODO(v8:9706): Fix-it! This load will always uncompress the value
+ // even when we are loading a compressed embedded object.
+ IndirectLoadConstant(rd.X(), x);
return;
}
}
@@ -650,7 +652,14 @@ Operand TurboAssembler::MoveImmediateForShiftedOp(const Register& dst,
// The move was successful; nothing to do here.
} else {
// Pre-shift the immediate to the least-significant bits of the register.
- int shift_low = CountTrailingZeros(imm, reg_size);
+ int shift_low;
+ if (reg_size == 64) {
+ shift_low = base::bits::CountTrailingZeros(imm);
+ } else {
+ DCHECK_EQ(reg_size, 32);
+ shift_low = base::bits::CountTrailingZeros(static_cast<uint32_t>(imm));
+ }
+
if (mode == kLimitShiftForSP) {
// When applied to the stack pointer, the subsequent arithmetic operation
// can use the extend form to shift left by a maximum of four bits. Right
@@ -1456,15 +1465,6 @@ void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
}
-void MacroAssembler::LoadObject(Register result, Handle<Object> object) {
- AllowDeferredHandleDereference heap_object_check;
- if (object->IsHeapObject()) {
- Mov(result, Handle<HeapObject>::cast(object));
- } else {
- Mov(result, Operand(Smi::cast(*object)));
- }
-}
-
void TurboAssembler::Move(Register dst, Smi src) { Mov(dst, src); }
void TurboAssembler::MovePair(Register dst0, Register src0, Register dst1,
@@ -1923,21 +1923,25 @@ void TurboAssembler::Call(ExternalReference target) {
}
void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
- STATIC_ASSERT(kSystemPointerSize == 8);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
-
// The builtin_index register contains the builtin index as a Smi.
// Untagging is folded into the indexing operand below.
-#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
- STATIC_ASSERT(kSmiShiftSize == 0);
- Lsl(builtin_index, builtin_index, kSystemPointerSizeLog2 - kSmiShift);
-#else
- STATIC_ASSERT(kSmiShiftSize == 31);
- Asr(builtin_index, builtin_index, kSmiShift - kSystemPointerSizeLog2);
-#endif
- Add(builtin_index, builtin_index, IsolateData::builtin_entry_table_offset());
- Ldr(builtin_index, MemOperand(kRootRegister, builtin_index));
+ if (SmiValuesAre32Bits()) {
+ Asr(builtin_index, builtin_index, kSmiShift - kSystemPointerSizeLog2);
+ Add(builtin_index, builtin_index,
+ IsolateData::builtin_entry_table_offset());
+ Ldr(builtin_index, MemOperand(kRootRegister, builtin_index));
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ if (COMPRESS_POINTERS_BOOL) {
+ Add(builtin_index, kRootRegister,
+ Operand(builtin_index.W(), SXTW, kSystemPointerSizeLog2 - kSmiShift));
+ } else {
+ Add(builtin_index, kRootRegister,
+ Operand(builtin_index, LSL, kSystemPointerSizeLog2 - kSmiShift));
+ }
+ Ldr(builtin_index,
+ MemOperand(builtin_index, IsolateData::builtin_entry_table_offset()));
+ }
}
void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
@@ -2207,43 +2211,34 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
Bind(&regular_invoke);
}
-void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual) {
- Label skip_hook;
-
- Mov(x4, ExternalReference::debug_hook_on_function_call_address(isolate()));
- Ldrsb(x4, MemOperand(x4));
- Cbz(x4, &skip_hook);
-
- {
- // Load receiver to pass it later to DebugOnFunctionCall hook.
- Operand actual_op = actual.is_immediate() ? Operand(actual.immediate())
- : Operand(actual.reg());
- Mov(x4, actual_op);
- Ldr(x4, MemOperand(sp, x4, LSL, kSystemPointerSizeLog2));
- FrameScope frame(this,
- has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual) {
+ // Load receiver to pass it later to DebugOnFunctionCall hook.
+ if (actual.is_reg()) {
+ Ldr(x4, MemOperand(sp, actual.reg(), LSL, kSystemPointerSizeLog2));
+ } else {
+ Ldr(x4, MemOperand(sp, actual.immediate() << kSystemPointerSizeLog2));
+ }
+ FrameScope frame(this, has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
- Register expected_reg = padreg;
- Register actual_reg = padreg;
- if (expected.is_reg()) expected_reg = expected.reg();
- if (actual.is_reg()) actual_reg = actual.reg();
- if (!new_target.is_valid()) new_target = padreg;
+ Register expected_reg = padreg;
+ Register actual_reg = padreg;
+ if (expected.is_reg()) expected_reg = expected.reg();
+ if (actual.is_reg()) actual_reg = actual.reg();
+ if (!new_target.is_valid()) new_target = padreg;
- // Save values on stack.
- SmiTag(expected_reg);
- SmiTag(actual_reg);
- Push(expected_reg, actual_reg, new_target, fun);
- Push(fun, x4);
- CallRuntime(Runtime::kDebugOnFunctionCall);
+ // Save values on stack.
+ SmiTag(expected_reg);
+ SmiTag(actual_reg);
+ Push(expected_reg, actual_reg, new_target, fun);
+ Push(fun, x4);
+ CallRuntime(Runtime::kDebugOnFunctionCall);
- // Restore values from stack.
- Pop(fun, new_target, actual_reg, expected_reg);
- SmiUntag(actual_reg);
- SmiUntag(expected_reg);
- }
- Bind(&skip_hook);
+ // Restore values from stack.
+ Pop(fun, new_target, actual_reg, expected_reg);
+ SmiUntag(actual_reg);
+ SmiUntag(expected_reg);
}
void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
@@ -2256,7 +2251,13 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
DCHECK_IMPLIES(new_target.is_valid(), new_target.is(x3));
// On function call, call into the debugger if necessary.
- CheckDebugHook(function, new_target, expected, actual);
+ Label debug_hook, continue_after_hook;
+ {
+ Mov(x4, ExternalReference::debug_hook_on_function_call_address(isolate()));
+ Ldrsb(x4, MemOperand(x4));
+ Cbnz(x4, &debug_hook);
+ }
+ bind(&continue_after_hook);
// Clear the new.target register if not given.
if (!new_target.is_valid()) {
@@ -2284,6 +2285,12 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
JumpCodeObject(code);
}
}
+ B(&done);
+
+ // Deferred debug hook.
+ bind(&debug_hook);
+ CallDebugOnFunctionCall(function, new_target, expected, actual);
+ B(&continue_after_hook);
// Continue here if InvokePrologue does handle the invocation due to
// mismatched parameter counts.
@@ -2636,7 +2643,7 @@ void MacroAssembler::CompareRoot(const Register& obj, RootIndex index) {
Register temp = temps.AcquireX();
DCHECK(!AreAliased(obj, temp));
LoadRoot(temp, index);
- Cmp(obj, temp);
+ CmpTagged(obj, temp);
}
void MacroAssembler::JumpIfRoot(const Register& obj, RootIndex index,
@@ -2669,20 +2676,20 @@ void MacroAssembler::JumpIfIsInRange(const Register& value,
void TurboAssembler::LoadTaggedPointerField(const Register& destination,
const MemOperand& field_operand) {
-#ifdef V8_COMPRESS_POINTERS
- DecompressTaggedPointer(destination, field_operand);
-#else
- Ldr(destination, field_operand);
-#endif
+ if (COMPRESS_POINTERS_BOOL) {
+ DecompressTaggedPointer(destination, field_operand);
+ } else {
+ Ldr(destination, field_operand);
+ }
}
void TurboAssembler::LoadAnyTaggedField(const Register& destination,
const MemOperand& field_operand) {
-#ifdef V8_COMPRESS_POINTERS
- DecompressAnyTagged(destination, field_operand);
-#else
- Ldr(destination, field_operand);
-#endif
+ if (COMPRESS_POINTERS_BOOL) {
+ DecompressAnyTagged(destination, field_operand);
+ } else {
+ Ldr(destination, field_operand);
+ }
}
void TurboAssembler::SmiUntagField(Register dst, const MemOperand& src) {
@@ -2691,33 +2698,31 @@ void TurboAssembler::SmiUntagField(Register dst, const MemOperand& src) {
void TurboAssembler::StoreTaggedField(const Register& value,
const MemOperand& dst_field_operand) {
-#ifdef V8_COMPRESS_POINTERS
- RecordComment("[ StoreTagged");
- Str(value.W(), dst_field_operand);
- RecordComment("]");
-#else
- Str(value, dst_field_operand);
-#endif
+ if (COMPRESS_POINTERS_BOOL) {
+ Str(value.W(), dst_field_operand);
+ } else {
+ Str(value, dst_field_operand);
+ }
}
void TurboAssembler::DecompressTaggedSigned(const Register& destination,
const MemOperand& field_operand) {
RecordComment("[ DecompressTaggedSigned");
- Ldrsw(destination, field_operand);
+ Ldr(destination.W(), field_operand);
RecordComment("]");
}
void TurboAssembler::DecompressTaggedSigned(const Register& destination,
const Register& source) {
RecordComment("[ DecompressTaggedSigned");
- Sxtw(destination, source);
+ Mov(destination.W(), source.W());
RecordComment("]");
}
void TurboAssembler::DecompressTaggedPointer(const Register& destination,
const MemOperand& field_operand) {
RecordComment("[ DecompressTaggedPointer");
- Ldrsw(destination, field_operand);
+ Ldr(destination.W(), field_operand);
Add(destination, kRootRegister, destination);
RecordComment("]");
}
@@ -2725,57 +2730,22 @@ void TurboAssembler::DecompressTaggedPointer(const Register& destination,
void TurboAssembler::DecompressTaggedPointer(const Register& destination,
const Register& source) {
RecordComment("[ DecompressTaggedPointer");
- Add(destination, kRootRegister, Operand(source, SXTW));
+ Add(destination, kRootRegister, Operand(source, UXTW));
RecordComment("]");
}
void TurboAssembler::DecompressAnyTagged(const Register& destination,
const MemOperand& field_operand) {
RecordComment("[ DecompressAnyTagged");
- Ldrsw(destination, field_operand);
- if (kUseBranchlessPtrDecompressionInGeneratedCode) {
- UseScratchRegisterScope temps(this);
- // Branchlessly compute |masked_root|:
- // masked_root = HAS_SMI_TAG(destination) ? 0 : kRootRegister;
- STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
- Register masked_root = temps.AcquireX();
- // Sign extend tag bit to entire register.
- Sbfx(masked_root, destination, 0, kSmiTagSize);
- And(masked_root, masked_root, kRootRegister);
- // Now this add operation will either leave the value unchanged if it is a
- // smi or add the isolate root if it is a heap object.
- Add(destination, masked_root, destination);
- } else {
- Label done;
- JumpIfSmi(destination, &done);
- Add(destination, kRootRegister, destination);
- bind(&done);
- }
+ Ldr(destination.W(), field_operand);
+ Add(destination, kRootRegister, destination);
RecordComment("]");
}
void TurboAssembler::DecompressAnyTagged(const Register& destination,
const Register& source) {
RecordComment("[ DecompressAnyTagged");
- if (kUseBranchlessPtrDecompressionInGeneratedCode) {
- UseScratchRegisterScope temps(this);
- // Branchlessly compute |masked_root|:
- // masked_root = HAS_SMI_TAG(destination) ? 0 : kRootRegister;
- STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
- Register masked_root = temps.AcquireX();
- // Sign extend tag bit to entire register.
- Sbfx(masked_root, source, 0, kSmiTagSize);
- And(masked_root, masked_root, kRootRegister);
- // Now this add operation will either leave the value unchanged if it is a
- // smi or add the isolate root if it is a heap object.
- Add(destination, masked_root, Operand(source, SXTW));
- } else {
- Label done;
- Sxtw(destination, source);
- JumpIfSmi(destination, &done);
- Add(destination, kRootRegister, destination);
- bind(&done);
- }
+ Add(destination, kRootRegister, Operand(source, UXTW));
RecordComment("]");
}
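The decompression rewrites all reduce to one rule: load or zero-extend the low 32 bits, then add the root register (an Ldr to a W register zero-extends on arm64, as does the UXTW operand extend). The old branchless smi-masking path existed because smis were sign-extended rather than rebased; under the scheme this commit targets, one unconditional add is valid for smis and heap objects alike. A model of the simplified rule (the cage base with zero low bits is an assumption of this sketch):

    #include <cassert>
    #include <cstdint>

    // Model: the isolate root (cage base) has zero low 32 bits, so
    // root + compressed decompresses smis and heap objects uniformly --
    // Add(destination, kRootRegister, Operand(source, UXTW)).
    uint64_t DecompressTagged(uint64_t root, uint32_t compressed) {
      return root + compressed;
    }

    int main() {
      const uint64_t root = 0x0000'1234'0000'0000ULL;  // hypothetical base
      uint32_t heap_object = 0x00040001;  // low bit set: heap object
      uint32_t smi = 42u << 1;            // low bit clear: smi
      assert(DecompressTagged(root, heap_object) == 0x0000'1234'0004'0001ULL);
      // Only the low 32 bits of a decompressed smi carry its payload.
      assert((DecompressTagged(root, smi) & 0xFFFFFFFFu) == (42u << 1));
      return 0;
    }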
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
index 94091e8624..cb3b51eb52 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
@@ -652,6 +652,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
const Operand& operand);
inline void Blr(const Register& xn);
inline void Cmp(const Register& rn, const Operand& operand);
+ inline void CmpTagged(const Register& rn, const Operand& operand);
inline void Subs(const Register& rd, const Register& rn,
const Operand& operand);
void Csel(const Register& rd, const Register& rn, const Operand& operand,
@@ -843,6 +844,13 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CheckPageFlag(const Register& object, int mask, Condition cc,
Label* condition_met);
+ // Compare a register with an operand, and branch to label depending on the
+ // condition. May corrupt the status flags.
+ inline void CompareAndBranch(const Register& lhs, const Operand& rhs,
+ Condition cond, Label* label);
+ inline void CompareTaggedAndBranch(const Register& lhs, const Operand& rhs,
+ Condition cond, Label* label);
+
// Test the bits of register defined by bit_pattern, and branch if ANY of
// those bits are set. May corrupt the status flags.
inline void TestAndBranchIfAnySet(const Register& reg,
@@ -1006,6 +1014,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Conditional macros.
inline void Ccmp(const Register& rn, const Operand& operand, StatusFlags nzcv,
Condition cond);
+ inline void CcmpTagged(const Register& rn, const Operand& operand,
+ StatusFlags nzcv, Condition cond);
inline void Clz(const Register& rd, const Register& rn);
@@ -1597,8 +1607,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
tbx(vd, vn, vn2, vn3, vn4, vm);
}
- void LoadObject(Register result, Handle<Object> object);
-
inline void PushSizeRegList(
RegList registers, unsigned reg_size,
CPURegister::RegisterType type = CPURegister::kRegister) {
@@ -1643,11 +1651,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// be aligned to 16 bytes.
void PeekPair(const CPURegister& dst1, const CPURegister& dst2, int offset);
- // Compare a register with an operand, and branch to label depending on the
- // condition. May corrupt the status flags.
- inline void CompareAndBranch(const Register& lhs, const Operand& rhs,
- Condition cond, Label* label);
-
// Insert one or more instructions into the instruction stream that encode
// some caller-defined data. The instructions used will be executable with no
// side effects.
@@ -1767,10 +1770,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
const ParameterCount& actual, Label* done,
InvokeFlag flag, bool* definitely_mismatches);
- // On function call, call into the debugger if necessary.
- void CheckDebugHook(Register fun, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual);
+ // On function call, call into the debugger.
+ void CallDebugOnFunctionCall(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual);
void InvokeFunctionCode(Register function, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag);
diff --git a/deps/v8/src/codegen/arm64/register-arm64.h b/deps/v8/src/codegen/arm64/register-arm64.h
index a782bf9cd8..2bdf0ceea0 100644
--- a/deps/v8/src/codegen/arm64/register-arm64.h
+++ b/deps/v8/src/codegen/arm64/register-arm64.h
@@ -105,7 +105,7 @@ class CPURegister : public RegisterBase<CPURegister, kRegAfterLast> {
enum RegisterType { kRegister, kVRegister, kNoRegister };
static constexpr CPURegister no_reg() {
- return CPURegister{0, 0, kNoRegister};
+ return CPURegister{kCode_no_reg, 0, kNoRegister};
}
template <int code, int size, RegisterType type>
@@ -567,8 +567,6 @@ using Simd128Register = VRegister;
// Lists of registers.
class V8_EXPORT_PRIVATE CPURegList {
public:
- CPURegList() = default;
-
template <typename... CPURegisters>
explicit CPURegList(CPURegister reg0, CPURegisters... regs)
: list_(CPURegister::ListOf(reg0, regs...)),
@@ -597,18 +595,16 @@ class V8_EXPORT_PRIVATE CPURegList {
}
CPURegister::RegisterType type() const {
- DCHECK(IsValid());
return type_;
}
RegList list() const {
- DCHECK(IsValid());
return list_;
}
inline void set_list(RegList new_list) {
- DCHECK(IsValid());
list_ = new_list;
+ DCHECK(IsValid());
}
// Combine another CPURegList into this one. Registers that already exist in
@@ -656,7 +652,6 @@ class V8_EXPORT_PRIVATE CPURegList {
static CPURegList GetSafepointSavedRegisters();
bool IsEmpty() const {
- DCHECK(IsValid());
return list_ == 0;
}
@@ -664,7 +659,6 @@ class V8_EXPORT_PRIVATE CPURegList {
const CPURegister& other2 = NoCPUReg,
const CPURegister& other3 = NoCPUReg,
const CPURegister& other4 = NoCPUReg) const {
- DCHECK(IsValid());
RegList list = 0;
if (!other1.IsNone() && (other1.type() == type_)) list |= other1.bit();
if (!other2.IsNone() && (other2.type() == type_)) list |= other2.bit();
@@ -674,12 +668,10 @@ class V8_EXPORT_PRIVATE CPURegList {
}
int Count() const {
- DCHECK(IsValid());
return CountSetBits(list_, kRegListSizeInBits);
}
int RegisterSizeInBits() const {
- DCHECK(IsValid());
return size_;
}
@@ -690,7 +682,6 @@ class V8_EXPORT_PRIVATE CPURegList {
}
int TotalSizeInBytes() const {
- DCHECK(IsValid());
return RegisterSizeInBytes() * Count();
}
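The CPURegList edits move DCHECK(IsValid()) off the read accessors and onto the mutating operations (Combine, Remove, set_list): establish the invariant at construction, re-check it after every mutation, and let reads trust it. A hedged sketch of that check-on-write pattern (the validity rule here is illustrative only):

    #include <cassert>
    #include <cstdint>

    class RegList {
     public:
      explicit RegList(uint64_t bits) : bits_(bits) { assert(IsValid()); }

      // Mutators re-establish the invariant...
      void Combine(uint64_t other) {
        bits_ |= other;
        assert(IsValid());
      }

      // ...so read-only accessors can rely on it without re-checking.
      uint64_t bits() const { return bits_; }
      bool IsEmpty() const { return bits_ == 0; }

     private:
      // Illustrative constraint: this model only has 32 registers.
      bool IsValid() const { return (bits_ >> 32) == 0; }
      uint64_t bits_;
    };

    int main() {
      RegList list(0b1010);
      list.Combine(0b0101);
      assert(list.bits() == 0b1111);
      return 0;
    }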
diff --git a/deps/v8/src/codegen/arm64/utils-arm64.cc b/deps/v8/src/codegen/arm64/utils-arm64.cc
index 2f972ce502..dba2eeb7e1 100644
--- a/deps/v8/src/codegen/arm64/utils-arm64.cc
+++ b/deps/v8/src/codegen/arm64/utils-arm64.cc
@@ -89,15 +89,6 @@ int CountLeadingSignBits(int64_t value, int width) {
}
}
-int CountTrailingZeros(uint64_t value, int width) {
- DCHECK((width == 32) || (width == 64));
- if (width == 64) {
- return static_cast<int>(base::bits::CountTrailingZeros64(value));
- }
- return static_cast<int>(base::bits::CountTrailingZeros32(
- static_cast<uint32_t>(value & 0xFFFFFFFFF)));
-}
-
int CountSetBits(uint64_t value, int width) {
DCHECK((width == 32) || (width == 64));
if (width == 64) {
@@ -109,7 +100,7 @@ int CountSetBits(uint64_t value, int width) {
int LowestSetBitPosition(uint64_t value) {
DCHECK_NE(value, 0U);
- return CountTrailingZeros(value, 64) + 1;
+ return base::bits::CountTrailingZeros(value) + 1;
}
int HighestSetBitPosition(uint64_t value) {
@@ -118,12 +109,14 @@ int HighestSetBitPosition(uint64_t value) {
}
uint64_t LargestPowerOf2Divisor(uint64_t value) {
- return value & (-(int64_t)value);
+ // Simulate two's complement (instead of casting to signed and negating) to
+ // avoid undefined behavior on signed overflow.
+ return value & ((~value) + 1);
}
int MaskToBit(uint64_t mask) {
DCHECK_EQ(CountSetBits(mask, 64), 1);
- return CountTrailingZeros(mask, 64);
+ return base::bits::CountTrailingZeros(mask);
}
#undef __
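The LargestPowerOf2Divisor rewrite is another overflow fix: value & -(int64_t)value negates a signed value, which overflows (UB) when value is 1 << 63, while value & (~value + 1) performs the same two's-complement negation in unsigned arithmetic, where wraparound is defined. Quick check of the identity:

    #include <cassert>
    #include <cstdint>

    uint64_t LargestPowerOf2Divisor(uint64_t value) {
      // (~value + 1) is two's-complement negation in well-defined unsigned
      // arithmetic; ANDing with value isolates the lowest set bit.
      return value & (~value + 1);
    }

    int main() {
      assert(LargestPowerOf2Divisor(12) == 4);  // 0b1100 -> 0b0100
      assert(LargestPowerOf2Divisor(1) == 1);
      assert(LargestPowerOf2Divisor(1ULL << 63) == 1ULL << 63);  // ex-UB case
      return 0;
    }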
diff --git a/deps/v8/src/codegen/arm64/utils-arm64.h b/deps/v8/src/codegen/arm64/utils-arm64.h
index 6bddce6fff..182d781d55 100644
--- a/deps/v8/src/codegen/arm64/utils-arm64.h
+++ b/deps/v8/src/codegen/arm64/utils-arm64.h
@@ -33,7 +33,6 @@ int float16classify(float16 value);
// Bit counting.
int CountLeadingZeros(uint64_t value, int width);
int CountLeadingSignBits(int64_t value, int width);
-V8_EXPORT_PRIVATE int CountTrailingZeros(uint64_t value, int width);
V8_EXPORT_PRIVATE int CountSetBits(uint64_t value, int width);
int LowestSetBitPosition(uint64_t value);
int HighestSetBitPosition(uint64_t value);
@@ -61,7 +60,7 @@ T ReverseBytes(T value, int block_bytes_log2) {
static const uint8_t permute_table[3][8] = {{6, 7, 4, 5, 2, 3, 0, 1},
{4, 5, 6, 7, 0, 1, 2, 3},
{0, 1, 2, 3, 4, 5, 6, 7}};
- T result = 0;
+ typename std::make_unsigned<T>::type result = 0;
for (int i = 0; i < 8; i++) {
result <<= 8;
result |= bytes[permute_table[block_bytes_log2 - 1][i]];