Diffstat (limited to 'deps/v8/src/codegen')
-rw-r--r--  deps/v8/src/codegen/OWNERS | 2
-rw-r--r--  deps/v8/src/codegen/arm/assembler-arm-inl.h | 3
-rw-r--r--  deps/v8/src/codegen/arm/assembler-arm.cc | 21
-rw-r--r--  deps/v8/src/codegen/arm/assembler-arm.h | 5
-rw-r--r--  deps/v8/src/codegen/arm/macro-assembler-arm.cc | 113
-rw-r--r--  deps/v8/src/codegen/arm/macro-assembler-arm.h | 8
-rw-r--r--  deps/v8/src/codegen/arm64/assembler-arm64-inl.h | 20
-rw-r--r--  deps/v8/src/codegen/arm64/assembler-arm64.cc | 24
-rw-r--r--  deps/v8/src/codegen/arm64/assembler-arm64.h | 10
-rw-r--r--  deps/v8/src/codegen/arm64/constants-arm64.h | 4
-rw-r--r--  deps/v8/src/codegen/arm64/instructions-arm64.cc | 7
-rw-r--r--  deps/v8/src/codegen/arm64/instructions-arm64.h | 16
-rw-r--r--  deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h | 56
-rw-r--r--  deps/v8/src/codegen/arm64/macro-assembler-arm64.cc | 208
-rw-r--r--  deps/v8/src/codegen/arm64/macro-assembler-arm64.h | 25
-rw-r--r--  deps/v8/src/codegen/arm64/register-arm64.h | 13
-rw-r--r--  deps/v8/src/codegen/arm64/utils-arm64.cc | 17
-rw-r--r--  deps/v8/src/codegen/arm64/utils-arm64.h | 3
-rw-r--r--  deps/v8/src/codegen/assembler.cc | 6
-rw-r--r--  deps/v8/src/codegen/assembler.h | 1
-rw-r--r--  deps/v8/src/codegen/code-stub-assembler.cc | 2529
-rw-r--r--  deps/v8/src/codegen/code-stub-assembler.h | 738
-rw-r--r--  deps/v8/src/codegen/compilation-cache.cc | 24
-rw-r--r--  deps/v8/src/codegen/compilation-cache.h | 19
-rw-r--r--  deps/v8/src/codegen/compiler.cc | 48
-rw-r--r--  deps/v8/src/codegen/constant-pool.cc | 18
-rw-r--r--  deps/v8/src/codegen/constant-pool.h | 9
-rw-r--r--  deps/v8/src/codegen/cpu-features.h | 31
-rw-r--r--  deps/v8/src/codegen/external-reference.cc | 20
-rw-r--r--  deps/v8/src/codegen/external-reference.h | 3
-rw-r--r--  deps/v8/src/codegen/ia32/assembler-ia32-inl.h | 14
-rw-r--r--  deps/v8/src/codegen/ia32/assembler-ia32.cc | 151
-rw-r--r--  deps/v8/src/codegen/ia32/assembler-ia32.h | 144
-rw-r--r--  deps/v8/src/codegen/ia32/macro-assembler-ia32.cc | 116
-rw-r--r--  deps/v8/src/codegen/ia32/macro-assembler-ia32.h | 44
-rw-r--r--  deps/v8/src/codegen/interface-descriptors.cc | 5
-rw-r--r--  deps/v8/src/codegen/interface-descriptors.h | 66
-rw-r--r--  deps/v8/src/codegen/machine-type.h | 114
-rw-r--r--  deps/v8/src/codegen/mips/assembler-mips-inl.h | 5
-rw-r--r--  deps/v8/src/codegen/mips/assembler-mips.cc | 117
-rw-r--r--  deps/v8/src/codegen/mips/assembler-mips.h | 15
-rw-r--r--  deps/v8/src/codegen/mips/macro-assembler-mips.cc | 135
-rw-r--r--  deps/v8/src/codegen/mips/macro-assembler-mips.h | 10
-rw-r--r--  deps/v8/src/codegen/mips64/assembler-mips64-inl.h | 3
-rw-r--r--  deps/v8/src/codegen/mips64/assembler-mips64.cc | 68
-rw-r--r--  deps/v8/src/codegen/mips64/assembler-mips64.h | 4
-rw-r--r--  deps/v8/src/codegen/mips64/macro-assembler-mips64.cc | 153
-rw-r--r--  deps/v8/src/codegen/mips64/macro-assembler-mips64.h | 10
-rw-r--r--  deps/v8/src/codegen/optimized-compilation-info.cc | 11
-rw-r--r--  deps/v8/src/codegen/optimized-compilation-info.h | 8
-rw-r--r--  deps/v8/src/codegen/pending-optimization-table.cc | 2
-rw-r--r--  deps/v8/src/codegen/ppc/assembler-ppc-inl.h | 3
-rw-r--r--  deps/v8/src/codegen/ppc/assembler-ppc.cc | 4
-rw-r--r--  deps/v8/src/codegen/ppc/assembler-ppc.h | 1
-rw-r--r--  deps/v8/src/codegen/ppc/macro-assembler-ppc.cc | 43
-rw-r--r--  deps/v8/src/codegen/ppc/macro-assembler-ppc.h | 10
-rw-r--r--  deps/v8/src/codegen/reglist.h | 16
-rw-r--r--  deps/v8/src/codegen/reloc-info.cc | 2
-rw-r--r--  deps/v8/src/codegen/s390/assembler-s390-inl.h | 3
-rw-r--r--  deps/v8/src/codegen/s390/assembler-s390.cc | 4
-rw-r--r--  deps/v8/src/codegen/s390/assembler-s390.h | 1
-rw-r--r--  deps/v8/src/codegen/s390/macro-assembler-s390.cc | 137
-rw-r--r--  deps/v8/src/codegen/s390/macro-assembler-s390.h | 80
-rw-r--r--  deps/v8/src/codegen/source-position-table.cc | 45
-rw-r--r--  deps/v8/src/codegen/tnode.h | 374
-rw-r--r--  deps/v8/src/codegen/turbo-assembler.h | 4
-rw-r--r--  deps/v8/src/codegen/x64/assembler-x64-inl.h | 4
-rw-r--r--  deps/v8/src/codegen/x64/assembler-x64.cc | 55
-rw-r--r--  deps/v8/src/codegen/x64/assembler-x64.h | 49
-rw-r--r--  deps/v8/src/codegen/x64/macro-assembler-x64.cc | 409
-rw-r--r--  deps/v8/src/codegen/x64/macro-assembler-x64.h | 42
-rw-r--r--  deps/v8/src/codegen/x64/register-x64.h | 4
-rw-r--r--  deps/v8/src/codegen/x64/sse-instr.h | 1
73 files changed, 3445 insertions(+), 3042 deletions(-)
diff --git a/deps/v8/src/codegen/OWNERS b/deps/v8/src/codegen/OWNERS
index feb2f62f78..64d2d7b97d 100644
--- a/deps/v8/src/codegen/OWNERS
+++ b/deps/v8/src/codegen/OWNERS
@@ -1,6 +1,6 @@
bbudge@chromium.org
bmeurer@chromium.org
-clemensh@chromium.org
+clemensb@chromium.org
gdeepti@chromium.org
ishell@chromium.org
jarin@chromium.org
diff --git a/deps/v8/src/codegen/arm/assembler-arm-inl.h b/deps/v8/src/codegen/arm/assembler-arm-inl.h
index 3fbd679104..45ec07a382 100644
--- a/deps/v8/src/codegen/arm/assembler-arm-inl.h
+++ b/deps/v8/src/codegen/arm/assembler-arm-inl.h
@@ -118,7 +118,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target,
DCHECK(IsCodeTarget(rmode_) || rmode_ == FULL_EMBEDDED_OBJECT);
Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
icache_flush_mode);
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) {
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
+ !FLAG_disable_write_barriers) {
WriteBarrierForCode(host(), this, target);
}
}
diff --git a/deps/v8/src/codegen/arm/assembler-arm.cc b/deps/v8/src/codegen/arm/assembler-arm.cc
index 9c46063537..6659960bb8 100644
--- a/deps/v8/src/codegen/arm/assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/assembler-arm.cc
@@ -40,6 +40,7 @@
#include "src/base/bits.h"
#include "src/base/cpu.h"
+#include "src/base/overflowing-math.h"
#include "src/codegen/arm/assembler-arm-inl.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/macro-assembler.h"
@@ -452,8 +453,8 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
Handle<HeapObject> object;
switch (request.kind()) {
case HeapObjectRequest::kHeapNumber:
- object = isolate->factory()->NewHeapNumber(request.heap_number(),
- AllocationType::kOld);
+ object = isolate->factory()->NewHeapNumber<AllocationType::kOld>(
+ request.heap_number());
break;
case HeapObjectRequest::kStringConstant: {
const StringConstantBase* str = request.string();
@@ -4802,15 +4803,17 @@ void Assembler::GrowBuffer() {
int rc_delta = (new_start + new_size) - (buffer_start_ + old_size);
size_t reloc_size = (buffer_start_ + old_size) - reloc_info_writer.pos();
MemMove(new_start, buffer_start_, pc_offset());
- MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
- reloc_size);
+ byte* new_reloc_start = reinterpret_cast<byte*>(
+ reinterpret_cast<Address>(reloc_info_writer.pos()) + rc_delta);
+ MemMove(new_reloc_start, reloc_info_writer.pos(), reloc_size);
// Switch buffers.
buffer_ = std::move(new_buffer);
buffer_start_ = new_start;
- pc_ += pc_delta;
- reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
- reloc_info_writer.last_pc() + pc_delta);
+ pc_ = reinterpret_cast<byte*>(reinterpret_cast<Address>(pc_) + pc_delta);
+ byte* new_last_pc = reinterpret_cast<byte*>(
+ reinterpret_cast<Address>(reloc_info_writer.last_pc()) + pc_delta);
+ reloc_info_writer.Reposition(new_reloc_start, new_last_pc);
// None of our relocation types are pc relative pointing outside the code
// buffer nor pc absolute pointing inside the code buffer, so there is no need
@@ -4831,7 +4834,7 @@ void Assembler::dd(uint32_t data) {
// blocked before using dd.
DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty());
CheckBuffer();
- *reinterpret_cast<uint32_t*>(pc_) = data;
+ base::WriteUnalignedValue(reinterpret_cast<Address>(pc_), data);
pc_ += sizeof(uint32_t);
}
@@ -4840,7 +4843,7 @@ void Assembler::dq(uint64_t value) {
// blocked before using dq.
DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty());
CheckBuffer();
- *reinterpret_cast<uint64_t*>(pc_) = value;
+ base::WriteUnalignedValue(reinterpret_cast<Address>(pc_), value);
pc_ += sizeof(uint64_t);
}
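
The dd/dq hunks above route data emission through base::WriteUnalignedValue because pc_ is not guaranteed to be aligned for the value being emitted; storing through a reinterpret_cast'ed uint32_t*/uint64_t* pointer would be undefined behaviour on a misaligned address. A minimal memcpy-based sketch of that helper's contract (the real one is declared in src/base/memory.h and takes a V8 Address rather than a raw byte pointer):

    #include <cstdint>
    #include <cstring>

    // Sketch only: memcpy lets the compiler emit an unaligned-safe store.
    template <typename V>
    void WriteUnalignedValue(uint8_t* p, V value) {
      std::memcpy(p, &value, sizeof(V));
    }

    // Usage mirroring Assembler::dd / Assembler::dq:
    //   WriteUnalignedValue(pc_, data);   pc_ += sizeof(uint32_t);
    //   WriteUnalignedValue(pc_, value);  pc_ += sizeof(uint64_t);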
diff --git a/deps/v8/src/codegen/arm/assembler-arm.h b/deps/v8/src/codegen/arm/assembler-arm.h
index f669943f34..1d280e5555 100644
--- a/deps/v8/src/codegen/arm/assembler-arm.h
+++ b/deps/v8/src/codegen/arm/assembler-arm.h
@@ -41,6 +41,7 @@
#define V8_CODEGEN_ARM_ASSEMBLER_ARM_H_
#include <stdio.h>
+#include <memory>
#include <vector>
#include "src/codegen/arm/constants-arm.h"
@@ -305,9 +306,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
explicit Assembler(const AssemblerOptions&,
std::unique_ptr<AssemblerBuffer> = {});
- virtual ~Assembler();
+ ~Assembler() override;
- virtual void AbortedCodeGeneration() { pending_32_bit_constants_.clear(); }
+ void AbortedCodeGeneration() override { pending_32_bit_constants_.clear(); }
// GetCode emits any pending (non-emitted) code and fills the descriptor desc.
static constexpr int kNoHandlerTable = 0;
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.cc b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
index 7f6d82518e..6f1adfead2 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
@@ -573,7 +573,7 @@ void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
Condition cond) {
DCHECK_LT(lsb, 32);
if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
- int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
+ int mask = (1u << (width + lsb)) - 1u - ((1u << lsb) - 1u);
and_(dst, src1, Operand(mask), LeaveCC, cond);
if (lsb != 0) {
mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
@@ -1602,57 +1602,43 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
}
-void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual) {
- Label skip_hook;
-
- ExternalReference debug_hook_active =
- ExternalReference::debug_hook_on_function_call_address(isolate());
- Move(r4, debug_hook_active);
- ldrsb(r4, MemOperand(r4));
- cmp(r4, Operand(0));
- b(eq, &skip_hook);
-
- {
- // Load receiver to pass it later to DebugOnFunctionCall hook.
- if (actual.is_reg()) {
- mov(r4, actual.reg());
- } else {
- mov(r4, Operand(actual.immediate()));
- }
- ldr(r4, MemOperand(sp, r4, LSL, kPointerSizeLog2));
- FrameScope frame(this,
- has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
- if (expected.is_reg()) {
- SmiTag(expected.reg());
- Push(expected.reg());
- }
- if (actual.is_reg()) {
- SmiTag(actual.reg());
- Push(actual.reg());
- }
- if (new_target.is_valid()) {
- Push(new_target);
- }
- Push(fun);
- Push(fun);
- Push(r4);
- CallRuntime(Runtime::kDebugOnFunctionCall);
- Pop(fun);
- if (new_target.is_valid()) {
- Pop(new_target);
- }
- if (actual.is_reg()) {
- Pop(actual.reg());
- SmiUntag(actual.reg());
- }
- if (expected.is_reg()) {
- Pop(expected.reg());
- SmiUntag(expected.reg());
- }
+void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual) {
+ // Load receiver to pass it later to DebugOnFunctionCall hook.
+ if (actual.is_reg()) {
+ ldr(r4, MemOperand(sp, actual.reg(), LSL, kPointerSizeLog2));
+ } else {
+ ldr(r4, MemOperand(sp, actual.immediate() << kPointerSizeLog2));
+ }
+ FrameScope frame(this, has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ if (expected.is_reg()) {
+ SmiTag(expected.reg());
+ Push(expected.reg());
+ }
+ if (actual.is_reg()) {
+ SmiTag(actual.reg());
+ Push(actual.reg());
+ }
+ if (new_target.is_valid()) {
+ Push(new_target);
+ }
+ Push(fun);
+ Push(fun);
+ Push(r4);
+ CallRuntime(Runtime::kDebugOnFunctionCall);
+ Pop(fun);
+ if (new_target.is_valid()) {
+ Pop(new_target);
+ }
+ if (actual.is_reg()) {
+ Pop(actual.reg());
+ SmiUntag(actual.reg());
+ }
+ if (expected.is_reg()) {
+ Pop(expected.reg());
+ SmiUntag(expected.reg());
}
- bind(&skip_hook);
}
void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
@@ -1665,7 +1651,16 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
DCHECK_IMPLIES(new_target.is_valid(), new_target == r3);
// On function call, call into the debugger if necessary.
- CheckDebugHook(function, new_target, expected, actual);
+ Label debug_hook, continue_after_hook;
+ {
+ ExternalReference debug_hook_active =
+ ExternalReference::debug_hook_on_function_call_address(isolate());
+ Move(r4, debug_hook_active);
+ ldrsb(r4, MemOperand(r4));
+ cmp(r4, Operand(0));
+ b(ne, &debug_hook);
+ }
+ bind(&continue_after_hook);
// Clear the new.target register if not given.
if (!new_target.is_valid()) {
@@ -1687,11 +1682,17 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
DCHECK(flag == JUMP_FUNCTION);
JumpCodeObject(code);
}
-
- // Continue here if InvokePrologue does handle the invocation due to
- // mismatched parameter counts.
- bind(&done);
}
+ b(&done);
+
+ // Deferred debug hook.
+ bind(&debug_hook);
+ CallDebugOnFunctionCall(function, new_target, expected, actual);
+ b(&continue_after_hook);
+
+ // Continue here if InvokePrologue does handle the invocation due to
+ // mismatched parameter counts.
+ bind(&done);
}
void MacroAssembler::InvokeFunction(Register fun, Register new_target,
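
The InvokeFunctionCode hunk above keeps only the flag load and compare on the straight-line path and emits the hook invocation behind a deferred label that branches back. A loose C++ sketch of the resulting control flow, with hypothetical names standing in for the generated code (the real sequence is emitted by the macro assembler, not written like this):

    #include <cstdint>

    extern volatile int8_t debug_hook_on_function_call;  // assumed flag byte
    void CallDebugOnFunctionCall();  // out-of-line helper, as added above
    void ContinueInvoke();           // hypothetical rest of the invoke sequence

    void InvokeFunctionCode() {
      // Common case: one byte load and compare, then fall through.
      if (debug_hook_on_function_call != 0) {
        CallDebugOnFunctionCall();  // rarely taken, kept out of the hot path
      }
      ContinueInvoke();
    }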
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.h b/deps/v8/src/codegen/arm/macro-assembler-arm.h
index bbea40b9a6..4807a6d20d 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.h
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.h
@@ -633,10 +633,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag);
- // On function call, call into the debugger if necessary.
- void CheckDebugHook(Register fun, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual);
+ // On function call, call into the debugger.
+ void CallDebugOnFunctionCall(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64-inl.h b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h
index baae106c1c..ce34da7dc2 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h
@@ -54,14 +54,12 @@ inline bool CPURegister::IsSP() const {
}
inline void CPURegList::Combine(const CPURegList& other) {
- DCHECK(IsValid());
DCHECK(other.type() == type_);
DCHECK(other.RegisterSizeInBits() == size_);
list_ |= other.list();
}
inline void CPURegList::Remove(const CPURegList& other) {
- DCHECK(IsValid());
if (other.type() == type_) {
list_ &= ~other.list();
}
@@ -84,13 +82,12 @@ inline void CPURegList::Remove(const CPURegister& other1,
}
inline void CPURegList::Combine(int code) {
- DCHECK(IsValid());
DCHECK(CPURegister::Create(code, size_, type_).IsValid());
list_ |= (1ULL << code);
+ DCHECK(IsValid());
}
inline void CPURegList::Remove(int code) {
- DCHECK(IsValid());
DCHECK(CPURegister::Create(code, size_, type_).IsValid());
list_ &= ~(1ULL << code);
}
@@ -311,6 +308,18 @@ Operand Operand::ToExtendedRegister() const {
return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_);
}
+Operand Operand::ToW() const {
+ if (IsShiftedRegister()) {
+ DCHECK(reg_.Is64Bits());
+ return Operand(reg_.W(), shift(), shift_amount());
+ } else if (IsExtendedRegister()) {
+ DCHECK(reg_.Is64Bits());
+ return Operand(reg_.W(), extend(), shift_amount());
+ }
+ DCHECK(IsImmediate());
+ return *this;
+}
+
Immediate Operand::immediate_for_heap_object_request() const {
DCHECK((heap_object_request().kind() == HeapObjectRequest::kHeapNumber &&
immediate_.rmode() == RelocInfo::FULL_EMBEDDED_OBJECT) ||
@@ -711,7 +720,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target,
Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
icache_flush_mode);
}
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) {
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
+ !FLAG_disable_write_barriers) {
WriteBarrierForCode(host(), this, target);
}
}
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.cc b/deps/v8/src/codegen/arm64/assembler-arm64.cc
index c798d3a8a0..ea2f4696bd 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/assembler-arm64.cc
@@ -63,18 +63,16 @@ void CpuFeatures::PrintFeatures() {}
// CPURegList utilities.
CPURegister CPURegList::PopLowestIndex() {
- DCHECK(IsValid());
if (IsEmpty()) {
return NoCPUReg;
}
- int index = CountTrailingZeros(list_, kRegListSizeInBits);
+ int index = base::bits::CountTrailingZeros(list_);
DCHECK((1LL << index) & list_);
Remove(index);
return CPURegister::Create(index, size_, type_);
}
CPURegister CPURegList::PopHighestIndex() {
- DCHECK(IsValid());
if (IsEmpty()) {
return NoCPUReg;
}
@@ -369,8 +367,9 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset();
switch (request.kind()) {
case HeapObjectRequest::kHeapNumber: {
- Handle<HeapObject> object = isolate->factory()->NewHeapNumber(
- request.heap_number(), AllocationType::kOld);
+ Handle<HeapObject> object =
+ isolate->factory()->NewHeapNumber<AllocationType::kOld>(
+ request.heap_number());
EmbeddedObjectIndex index = AddEmbeddedObject(object);
set_embedded_object_index_referenced_from(pc, index);
break;
@@ -3967,19 +3966,24 @@ void Assembler::LoadStore(const CPURegister& rt, const MemOperand& addr,
bool Assembler::IsImmLSUnscaled(int64_t offset) { return is_int9(offset); }
bool Assembler::IsImmLSScaled(int64_t offset, unsigned size) {
- bool offset_is_size_multiple = (((offset >> size) << size) == offset);
+ bool offset_is_size_multiple =
+ (static_cast<int64_t>(static_cast<uint64_t>(offset >> size) << size) ==
+ offset);
return offset_is_size_multiple && is_uint12(offset >> size);
}
bool Assembler::IsImmLSPair(int64_t offset, unsigned size) {
- bool offset_is_size_multiple = (((offset >> size) << size) == offset);
+ bool offset_is_size_multiple =
+ (static_cast<int64_t>(static_cast<uint64_t>(offset >> size) << size) ==
+ offset);
return offset_is_size_multiple && is_int7(offset >> size);
}
bool Assembler::IsImmLLiteral(int64_t offset) {
int inst_size = static_cast<int>(kInstrSizeLog2);
bool offset_is_inst_multiple =
- (((offset >> inst_size) << inst_size) == offset);
+ (static_cast<int64_t>(static_cast<uint64_t>(offset >> inst_size)
+ << inst_size) == offset);
DCHECK_GT(offset, 0);
offset >>= kLoadLiteralScaleLog2;
return offset_is_inst_multiple && is_intn(offset, ImmLLiteral_width);
@@ -4178,9 +4182,9 @@ bool Assembler::IsImmLogical(uint64_t value, unsigned width, unsigned* n,
// 1110ss 4 UInt(ss)
// 11110s 2 UInt(s)
//
- // So we 'or' (-d << 1) with our computed s to form imms.
+ // So we 'or' (-d * 2) with our computed s to form imms.
*n = out_n;
- *imm_s = ((-d << 1) | (s - 1)) & 0x3F;
+ *imm_s = ((-d * 2) | (s - 1)) & 0x3F;
*imm_r = r;
return true;
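
The IsImmLSScaled/IsImmLSPair/IsImmLLiteral hunks above, like the `-d << 1` to `-d * 2` change in IsImmLogical, avoid left-shifting a possibly negative signed quantity, which is undefined behaviour. A self-contained sketch of the multiple-of-2^size test they share:

    #include <cstdint>

    // The left shift is done on a uint64_t so a negative offset never feeds a
    // signed shift; the arithmetic right shift is unchanged from the original.
    bool IsMultipleOfSize(int64_t offset, unsigned size) {
      return static_cast<int64_t>(static_cast<uint64_t>(offset >> size)
                                  << size) == offset;
    }
    // E.g. IsMultipleOfSize(-24, 3) is true, IsMultipleOfSize(-20, 3) is false.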
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.h b/deps/v8/src/codegen/arm64/assembler-arm64.h
index 04ee6d8b75..23e8acb1f9 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64.h
+++ b/deps/v8/src/codegen/arm64/assembler-arm64.h
@@ -8,6 +8,7 @@
#include <deque>
#include <list>
#include <map>
+#include <memory>
#include <vector>
#include "src/base/optional.h"
@@ -105,6 +106,9 @@ class Operand {
// which helps in the encoding of instructions that use the stack pointer.
inline Operand ToExtendedRegister() const;
+ // Returns new Operand adapted for using with W registers.
+ inline Operand ToW() const;
+
inline Immediate immediate() const;
inline int64_t ImmediateValue() const;
inline RelocInfo::Mode ImmediateRMode() const;
@@ -189,9 +193,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
explicit Assembler(const AssemblerOptions&,
std::unique_ptr<AssemblerBuffer> = {});
- virtual ~Assembler();
+ ~Assembler() override;
- virtual void AbortedCodeGeneration();
+ void AbortedCodeGeneration() override;
// System functions ---------------------------------------------------------
// Start generating code from the beginning of the buffer, discarding any code
@@ -375,7 +379,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Instruction set functions ------------------------------------------------
// Branch / Jump instructions.
- // For branches offsets are scaled, i.e. they in instrcutions not in bytes.
+ // For branches offsets are scaled, i.e. in instructions not in bytes.
// Branch to register.
void br(const Register& xn);
diff --git a/deps/v8/src/codegen/arm64/constants-arm64.h b/deps/v8/src/codegen/arm64/constants-arm64.h
index 914268644a..ccafae5e14 100644
--- a/deps/v8/src/codegen/arm64/constants-arm64.h
+++ b/deps/v8/src/codegen/arm64/constants-arm64.h
@@ -33,6 +33,7 @@ constexpr size_t kMaxPCRelativeCodeRangeInMB = 128;
constexpr uint8_t kInstrSize = 4;
constexpr uint8_t kInstrSizeLog2 = 2;
constexpr uint8_t kLoadLiteralScaleLog2 = 2;
+constexpr uint8_t kLoadLiteralScale = 1 << kLoadLiteralScaleLog2;
constexpr int kMaxLoadLiteralRange = 1 * MB;
const int kNumberOfRegisters = 32;
@@ -146,7 +147,8 @@ const unsigned kFloat16ExponentBias = 15;
// Actual value of root register is offset from the root array's start
// to take advantage of negative displacement values.
// TODO(sigurds): Choose best value.
-constexpr int kRootRegisterBias = 256;
+// TODO(ishell): Choose best value for ptr-compr.
+constexpr int kRootRegisterBias = kSystemPointerSize == kTaggedSize ? 256 : 0;
using float16 = uint16_t;
diff --git a/deps/v8/src/codegen/arm64/instructions-arm64.cc b/deps/v8/src/codegen/arm64/instructions-arm64.cc
index 05f3654da9..ab022affdd 100644
--- a/deps/v8/src/codegen/arm64/instructions-arm64.cc
+++ b/deps/v8/src/codegen/arm64/instructions-arm64.cc
@@ -71,6 +71,7 @@ static uint64_t RotateRight(uint64_t value, unsigned int rotate,
unsigned int width) {
DCHECK_LE(width, 64);
rotate &= 63;
+ if (rotate == 0) return value;
return ((value & ((1ULL << rotate) - 1ULL)) << (width - rotate)) |
(value >> rotate);
}
@@ -191,16 +192,16 @@ int64_t Instruction::ImmPCOffset() {
} else if (BranchType() != UnknownBranchType) {
// All PC-relative branches.
// Relative branch offsets are instruction-size-aligned.
- offset = ImmBranch() << kInstrSizeLog2;
+ offset = ImmBranch() * kInstrSize;
} else if (IsUnresolvedInternalReference()) {
// Internal references are always word-aligned.
- offset = ImmUnresolvedInternalReference() << kInstrSizeLog2;
+ offset = ImmUnresolvedInternalReference() * kInstrSize;
} else {
// Load literal (offset from PC).
DCHECK(IsLdrLiteral());
// The offset is always shifted by 2 bits, even for loads to 64-bits
// registers.
- offset = ImmLLiteral() << kInstrSizeLog2;
+ offset = ImmLLiteral() * kInstrSize;
}
return offset;
}
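
The RotateRight hunk above adds an early return for rotate == 0: otherwise `value << (width - rotate)` would shift a 64-bit value by 64, which is undefined. The neighbouring `ImmBranch() << kInstrSizeLog2` to `ImmBranch() * kInstrSize` changes sidestep the same class of problem for negative (backwards) offsets. A self-contained copy of the fixed rotate helper:

    #include <cassert>
    #include <cstdint>

    uint64_t RotateRight(uint64_t value, unsigned rotate, unsigned width) {
      assert(width <= 64);
      rotate &= 63;
      // With rotate == 0, (width - rotate) == width; shifting a 64-bit value
      // by 64 is undefined behaviour, so the case is handled up front.
      if (rotate == 0) return value;
      return ((value & ((1ULL << rotate) - 1ULL)) << (width - rotate)) |
             (value >> rotate);
    }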
diff --git a/deps/v8/src/codegen/arm64/instructions-arm64.h b/deps/v8/src/codegen/arm64/instructions-arm64.h
index 1132ba39db..7fe732e2ba 100644
--- a/deps/v8/src/codegen/arm64/instructions-arm64.h
+++ b/deps/v8/src/codegen/arm64/instructions-arm64.h
@@ -5,6 +5,7 @@
#ifndef V8_CODEGEN_ARM64_INSTRUCTIONS_ARM64_H_
#define V8_CODEGEN_ARM64_INSTRUCTIONS_ARM64_H_
+#include "src/base/memory.h"
#include "src/codegen/arm64/constants-arm64.h"
#include "src/codegen/arm64/register-arm64.h"
#include "src/codegen/arm64/utils-arm64.h"
@@ -82,11 +83,13 @@ enum Reg31Mode { Reg31IsStackPointer, Reg31IsZeroRegister };
class Instruction {
public:
V8_INLINE Instr InstructionBits() const {
- return *reinterpret_cast<const Instr*>(this);
+ // Usually this is aligned, but when de/serializing that's not guaranteed.
+ return base::ReadUnalignedValue<Instr>(reinterpret_cast<Address>(this));
}
V8_INLINE void SetInstructionBits(Instr new_instr) {
- *reinterpret_cast<Instr*>(this) = new_instr;
+ // Usually this is aligned, but when de/serializing that's not guaranteed.
+ base::WriteUnalignedValue(reinterpret_cast<Address>(this), new_instr);
}
int Bit(int pos) const { return (InstructionBits() >> pos) & 1; }
@@ -96,7 +99,9 @@ class Instruction {
}
int32_t SignedBits(int msb, int lsb) const {
- int32_t bits = *(reinterpret_cast<const int32_t*>(this));
+ // Usually this is aligned, but when de/serializing that's not guaranteed.
+ int32_t bits =
+ base::ReadUnalignedValue<int32_t>(reinterpret_cast<Address>(this));
return signed_bitextract_32(msb, lsb, bits);
}
@@ -125,7 +130,8 @@ class Instruction {
// formed from ImmPCRelLo and ImmPCRelHi.
int ImmPCRel() const {
DCHECK(IsPCRelAddressing());
- int offset = ((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo());
+ int offset = (static_cast<uint32_t>(ImmPCRelHi()) << ImmPCRelLo_width) |
+ ImmPCRelLo();
int width = ImmPCRelLo_width + ImmPCRelHi_width;
return signed_bitextract_32(width - 1, 0, offset);
}
@@ -404,7 +410,7 @@ class Instruction {
void SetImmLLiteral(Instruction* source);
uintptr_t LiteralAddress() {
- int offset = ImmLLiteral() << kLoadLiteralScaleLog2;
+ int offset = ImmLLiteral() * kLoadLiteralScale;
return reinterpret_cast<uintptr_t>(this) + offset;
}
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
index 62bd9c26bf..261fd1e564 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
@@ -93,6 +93,15 @@ void TurboAssembler::Ccmp(const Register& rn, const Operand& operand,
}
}
+void TurboAssembler::CcmpTagged(const Register& rn, const Operand& operand,
+ StatusFlags nzcv, Condition cond) {
+ if (COMPRESS_POINTERS_BOOL) {
+ Ccmp(rn.W(), operand.ToW(), nzcv, cond);
+ } else {
+ Ccmp(rn, operand, nzcv, cond);
+ }
+}
+
void MacroAssembler::Ccmn(const Register& rn, const Operand& operand,
StatusFlags nzcv, Condition cond) {
DCHECK(allow_macro_instructions());
@@ -157,6 +166,14 @@ void TurboAssembler::Cmp(const Register& rn, const Operand& operand) {
Subs(AppropriateZeroRegFor(rn), rn, operand);
}
+void TurboAssembler::CmpTagged(const Register& rn, const Operand& operand) {
+ if (COMPRESS_POINTERS_BOOL) {
+ Cmp(rn.W(), operand.ToW());
+ } else {
+ Cmp(rn, operand);
+ }
+}
+
void TurboAssembler::Neg(const Register& rd, const Operand& operand) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
@@ -982,7 +999,12 @@ void TurboAssembler::SmiUntag(Register dst, Register src) {
AssertSmi(src);
}
DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
- Asr(dst, src, kSmiShift);
+ if (COMPRESS_POINTERS_BOOL) {
+ Asr(dst.W(), src.W(), kSmiShift);
+ Sxtw(dst, dst);
+ } else {
+ Asr(dst, src, kSmiShift);
+ }
}
void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
@@ -1002,11 +1024,11 @@ void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
}
} else {
DCHECK(SmiValuesAre31Bits());
-#ifdef V8_COMPRESS_POINTERS
- Ldrsw(dst, src);
-#else
- Ldr(dst, src);
-#endif
+ if (COMPRESS_POINTERS_BOOL) {
+ Ldr(dst.W(), src);
+ } else {
+ Ldr(dst, src);
+ }
SmiUntag(dst);
}
}
@@ -1029,13 +1051,11 @@ void TurboAssembler::JumpIfSmi(Register value, Label* smi_label,
}
void TurboAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) {
- Cmp(x, y);
- B(eq, dest);
+ CompareAndBranch(x, y, eq, dest);
}
void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
- Cmp(x, y);
- B(lt, dest);
+ CompareAndBranch(x, y, lt, dest);
}
void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
@@ -1083,7 +1103,7 @@ void TurboAssembler::Claim(const Register& count, uint64_t unit_size) {
if (unit_size == 0) return;
DCHECK(base::bits::IsPowerOfTwo(unit_size));
- const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits);
+ const int shift = base::bits::CountTrailingZeros(unit_size);
const Operand size(count, LSL, shift);
if (size.IsZero()) {
@@ -1136,7 +1156,7 @@ void TurboAssembler::Drop(const Register& count, uint64_t unit_size) {
if (unit_size == 0) return;
DCHECK(base::bits::IsPowerOfTwo(unit_size));
- const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits);
+ const int shift = base::bits::CountTrailingZeros(unit_size);
const Operand size(count, LSL, shift);
if (size.IsZero()) {
@@ -1175,7 +1195,7 @@ void TurboAssembler::DropSlots(int64_t count) {
void TurboAssembler::PushArgument(const Register& arg) { Push(padreg, arg); }
-void MacroAssembler::CompareAndBranch(const Register& lhs, const Operand& rhs,
+void TurboAssembler::CompareAndBranch(const Register& lhs, const Operand& rhs,
Condition cond, Label* label) {
if (rhs.IsImmediate() && (rhs.ImmediateValue() == 0) &&
((cond == eq) || (cond == ne))) {
@@ -1190,6 +1210,16 @@ void MacroAssembler::CompareAndBranch(const Register& lhs, const Operand& rhs,
}
}
+void TurboAssembler::CompareTaggedAndBranch(const Register& lhs,
+ const Operand& rhs, Condition cond,
+ Label* label) {
+ if (COMPRESS_POINTERS_BOOL) {
+ CompareAndBranch(lhs.W(), rhs.ToW(), cond, label);
+ } else {
+ CompareAndBranch(lhs, rhs, cond, label);
+ }
+}
+
void TurboAssembler::TestAndBranchIfAnySet(const Register& reg,
const uint64_t bit_pattern,
Label* label) {
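
The CmpTagged/CcmpTagged/CompareTaggedAndBranch helpers added above compare only the W (32-bit) views of their operands when pointer compression is enabled, since compressed tagged values live in the low 32 bits of a register. A plain C++ sketch of the comparison they encode (kCompressPointers is an assumed stand-in for COMPRESS_POINTERS_BOOL):

    #include <cstdint>

    constexpr bool kCompressPointers = true;  // assumed build configuration

    bool TaggedEqual(uint64_t lhs, uint64_t rhs) {
      if (kCompressPointers) {
        // Only the low 32 bits are meaningful for compressed tagged values.
        return static_cast<uint32_t>(lhs) == static_cast<uint32_t>(rhs);
      }
      return lhs == rhs;
    }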
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
index 0a721b0647..892458fe8b 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
@@ -295,7 +295,9 @@ void TurboAssembler::Mov(const Register& rd, const Operand& operand,
} else if (RelocInfo::IsEmbeddedObjectMode(operand.ImmediateRMode())) {
Handle<HeapObject> x(
reinterpret_cast<Address*>(operand.ImmediateValue()));
- IndirectLoadConstant(rd, x);
+ // TODO(v8:9706): Fix-it! This load will always uncompress the value
+ // even when we are loading a compressed embedded object.
+ IndirectLoadConstant(rd.X(), x);
return;
}
}
@@ -650,7 +652,14 @@ Operand TurboAssembler::MoveImmediateForShiftedOp(const Register& dst,
// The move was successful; nothing to do here.
} else {
// Pre-shift the immediate to the least-significant bits of the register.
- int shift_low = CountTrailingZeros(imm, reg_size);
+ int shift_low;
+ if (reg_size == 64) {
+ shift_low = base::bits::CountTrailingZeros(imm);
+ } else {
+ DCHECK_EQ(reg_size, 32);
+ shift_low = base::bits::CountTrailingZeros(static_cast<uint32_t>(imm));
+ }
+
if (mode == kLimitShiftForSP) {
// When applied to the stack pointer, the subsequent arithmetic operation
// can use the extend form to shift left by a maximum of four bits. Right
@@ -1456,15 +1465,6 @@ void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
}
-void MacroAssembler::LoadObject(Register result, Handle<Object> object) {
- AllowDeferredHandleDereference heap_object_check;
- if (object->IsHeapObject()) {
- Mov(result, Handle<HeapObject>::cast(object));
- } else {
- Mov(result, Operand(Smi::cast(*object)));
- }
-}
-
void TurboAssembler::Move(Register dst, Smi src) { Mov(dst, src); }
void TurboAssembler::MovePair(Register dst0, Register src0, Register dst1,
@@ -1923,21 +1923,25 @@ void TurboAssembler::Call(ExternalReference target) {
}
void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
- STATIC_ASSERT(kSystemPointerSize == 8);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
-
// The builtin_index register contains the builtin index as a Smi.
// Untagging is folded into the indexing operand below.
-#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
- STATIC_ASSERT(kSmiShiftSize == 0);
- Lsl(builtin_index, builtin_index, kSystemPointerSizeLog2 - kSmiShift);
-#else
- STATIC_ASSERT(kSmiShiftSize == 31);
- Asr(builtin_index, builtin_index, kSmiShift - kSystemPointerSizeLog2);
-#endif
- Add(builtin_index, builtin_index, IsolateData::builtin_entry_table_offset());
- Ldr(builtin_index, MemOperand(kRootRegister, builtin_index));
+ if (SmiValuesAre32Bits()) {
+ Asr(builtin_index, builtin_index, kSmiShift - kSystemPointerSizeLog2);
+ Add(builtin_index, builtin_index,
+ IsolateData::builtin_entry_table_offset());
+ Ldr(builtin_index, MemOperand(kRootRegister, builtin_index));
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ if (COMPRESS_POINTERS_BOOL) {
+ Add(builtin_index, kRootRegister,
+ Operand(builtin_index.W(), SXTW, kSystemPointerSizeLog2 - kSmiShift));
+ } else {
+ Add(builtin_index, kRootRegister,
+ Operand(builtin_index, LSL, kSystemPointerSizeLog2 - kSmiShift));
+ }
+ Ldr(builtin_index,
+ MemOperand(builtin_index, IsolateData::builtin_entry_table_offset()));
+ }
}
void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
@@ -2207,43 +2211,34 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
Bind(&regular_invoke);
}
-void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual) {
- Label skip_hook;
-
- Mov(x4, ExternalReference::debug_hook_on_function_call_address(isolate()));
- Ldrsb(x4, MemOperand(x4));
- Cbz(x4, &skip_hook);
-
- {
- // Load receiver to pass it later to DebugOnFunctionCall hook.
- Operand actual_op = actual.is_immediate() ? Operand(actual.immediate())
- : Operand(actual.reg());
- Mov(x4, actual_op);
- Ldr(x4, MemOperand(sp, x4, LSL, kSystemPointerSizeLog2));
- FrameScope frame(this,
- has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual) {
+ // Load receiver to pass it later to DebugOnFunctionCall hook.
+ if (actual.is_reg()) {
+ Ldr(x4, MemOperand(sp, actual.reg(), LSL, kSystemPointerSizeLog2));
+ } else {
+ Ldr(x4, MemOperand(sp, actual.immediate() << kSystemPointerSizeLog2));
+ }
+ FrameScope frame(this, has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
- Register expected_reg = padreg;
- Register actual_reg = padreg;
- if (expected.is_reg()) expected_reg = expected.reg();
- if (actual.is_reg()) actual_reg = actual.reg();
- if (!new_target.is_valid()) new_target = padreg;
+ Register expected_reg = padreg;
+ Register actual_reg = padreg;
+ if (expected.is_reg()) expected_reg = expected.reg();
+ if (actual.is_reg()) actual_reg = actual.reg();
+ if (!new_target.is_valid()) new_target = padreg;
- // Save values on stack.
- SmiTag(expected_reg);
- SmiTag(actual_reg);
- Push(expected_reg, actual_reg, new_target, fun);
- Push(fun, x4);
- CallRuntime(Runtime::kDebugOnFunctionCall);
+ // Save values on stack.
+ SmiTag(expected_reg);
+ SmiTag(actual_reg);
+ Push(expected_reg, actual_reg, new_target, fun);
+ Push(fun, x4);
+ CallRuntime(Runtime::kDebugOnFunctionCall);
- // Restore values from stack.
- Pop(fun, new_target, actual_reg, expected_reg);
- SmiUntag(actual_reg);
- SmiUntag(expected_reg);
- }
- Bind(&skip_hook);
+ // Restore values from stack.
+ Pop(fun, new_target, actual_reg, expected_reg);
+ SmiUntag(actual_reg);
+ SmiUntag(expected_reg);
}
void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
@@ -2256,7 +2251,13 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
DCHECK_IMPLIES(new_target.is_valid(), new_target.is(x3));
// On function call, call into the debugger if necessary.
- CheckDebugHook(function, new_target, expected, actual);
+ Label debug_hook, continue_after_hook;
+ {
+ Mov(x4, ExternalReference::debug_hook_on_function_call_address(isolate()));
+ Ldrsb(x4, MemOperand(x4));
+ Cbnz(x4, &debug_hook);
+ }
+ bind(&continue_after_hook);
// Clear the new.target register if not given.
if (!new_target.is_valid()) {
@@ -2284,6 +2285,12 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
JumpCodeObject(code);
}
}
+ B(&done);
+
+ // Deferred debug hook.
+ bind(&debug_hook);
+ CallDebugOnFunctionCall(function, new_target, expected, actual);
+ B(&continue_after_hook);
// Continue here if InvokePrologue does handle the invocation due to
// mismatched parameter counts.
@@ -2636,7 +2643,7 @@ void MacroAssembler::CompareRoot(const Register& obj, RootIndex index) {
Register temp = temps.AcquireX();
DCHECK(!AreAliased(obj, temp));
LoadRoot(temp, index);
- Cmp(obj, temp);
+ CmpTagged(obj, temp);
}
void MacroAssembler::JumpIfRoot(const Register& obj, RootIndex index,
@@ -2669,20 +2676,20 @@ void MacroAssembler::JumpIfIsInRange(const Register& value,
void TurboAssembler::LoadTaggedPointerField(const Register& destination,
const MemOperand& field_operand) {
-#ifdef V8_COMPRESS_POINTERS
- DecompressTaggedPointer(destination, field_operand);
-#else
- Ldr(destination, field_operand);
-#endif
+ if (COMPRESS_POINTERS_BOOL) {
+ DecompressTaggedPointer(destination, field_operand);
+ } else {
+ Ldr(destination, field_operand);
+ }
}
void TurboAssembler::LoadAnyTaggedField(const Register& destination,
const MemOperand& field_operand) {
-#ifdef V8_COMPRESS_POINTERS
- DecompressAnyTagged(destination, field_operand);
-#else
- Ldr(destination, field_operand);
-#endif
+ if (COMPRESS_POINTERS_BOOL) {
+ DecompressAnyTagged(destination, field_operand);
+ } else {
+ Ldr(destination, field_operand);
+ }
}
void TurboAssembler::SmiUntagField(Register dst, const MemOperand& src) {
@@ -2691,33 +2698,31 @@ void TurboAssembler::SmiUntagField(Register dst, const MemOperand& src) {
void TurboAssembler::StoreTaggedField(const Register& value,
const MemOperand& dst_field_operand) {
-#ifdef V8_COMPRESS_POINTERS
- RecordComment("[ StoreTagged");
- Str(value.W(), dst_field_operand);
- RecordComment("]");
-#else
- Str(value, dst_field_operand);
-#endif
+ if (COMPRESS_POINTERS_BOOL) {
+ Str(value.W(), dst_field_operand);
+ } else {
+ Str(value, dst_field_operand);
+ }
}
void TurboAssembler::DecompressTaggedSigned(const Register& destination,
const MemOperand& field_operand) {
RecordComment("[ DecompressTaggedSigned");
- Ldrsw(destination, field_operand);
+ Ldr(destination.W(), field_operand);
RecordComment("]");
}
void TurboAssembler::DecompressTaggedSigned(const Register& destination,
const Register& source) {
RecordComment("[ DecompressTaggedSigned");
- Sxtw(destination, source);
+ Mov(destination.W(), source.W());
RecordComment("]");
}
void TurboAssembler::DecompressTaggedPointer(const Register& destination,
const MemOperand& field_operand) {
RecordComment("[ DecompressTaggedPointer");
- Ldrsw(destination, field_operand);
+ Ldr(destination.W(), field_operand);
Add(destination, kRootRegister, destination);
RecordComment("]");
}
@@ -2725,57 +2730,22 @@ void TurboAssembler::DecompressTaggedPointer(const Register& destination,
void TurboAssembler::DecompressTaggedPointer(const Register& destination,
const Register& source) {
RecordComment("[ DecompressTaggedPointer");
- Add(destination, kRootRegister, Operand(source, SXTW));
+ Add(destination, kRootRegister, Operand(source, UXTW));
RecordComment("]");
}
void TurboAssembler::DecompressAnyTagged(const Register& destination,
const MemOperand& field_operand) {
RecordComment("[ DecompressAnyTagged");
- Ldrsw(destination, field_operand);
- if (kUseBranchlessPtrDecompressionInGeneratedCode) {
- UseScratchRegisterScope temps(this);
- // Branchlessly compute |masked_root|:
- // masked_root = HAS_SMI_TAG(destination) ? 0 : kRootRegister;
- STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
- Register masked_root = temps.AcquireX();
- // Sign extend tag bit to entire register.
- Sbfx(masked_root, destination, 0, kSmiTagSize);
- And(masked_root, masked_root, kRootRegister);
- // Now this add operation will either leave the value unchanged if it is a
- // smi or add the isolate root if it is a heap object.
- Add(destination, masked_root, destination);
- } else {
- Label done;
- JumpIfSmi(destination, &done);
- Add(destination, kRootRegister, destination);
- bind(&done);
- }
+ Ldr(destination.W(), field_operand);
+ Add(destination, kRootRegister, destination);
RecordComment("]");
}
void TurboAssembler::DecompressAnyTagged(const Register& destination,
const Register& source) {
RecordComment("[ DecompressAnyTagged");
- if (kUseBranchlessPtrDecompressionInGeneratedCode) {
- UseScratchRegisterScope temps(this);
- // Branchlessly compute |masked_root|:
- // masked_root = HAS_SMI_TAG(destination) ? 0 : kRootRegister;
- STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
- Register masked_root = temps.AcquireX();
- // Sign extend tag bit to entire register.
- Sbfx(masked_root, source, 0, kSmiTagSize);
- And(masked_root, masked_root, kRootRegister);
- // Now this add operation will either leave the value unchanged if it is a
- // smi or add the isolate root if it is a heap object.
- Add(destination, masked_root, Operand(source, SXTW));
- } else {
- Label done;
- Sxtw(destination, source);
- JumpIfSmi(destination, &done);
- Add(destination, kRootRegister, destination);
- bind(&done);
- }
+ Add(destination, kRootRegister, Operand(source, UXTW));
RecordComment("]");
}
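
The Decompress* hunks above drop the sign-extending (Ldrsw/SXTW), smi-aware decompression in favour of an unconditional zero-extend-and-add: a compressed tagged value is treated as a 32-bit offset from the isolate root. A minimal sketch of that arithmetic:

    #include <cstdint>

    // Zero-extend the compressed value (Ldr into a W register / UXTW above)
    // and add the isolate root; no smi check or branch is required.
    uintptr_t DecompressTagged(uintptr_t isolate_root, uint32_t compressed) {
      return isolate_root + compressed;
    }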
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
index 94091e8624..cb3b51eb52 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
@@ -652,6 +652,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
const Operand& operand);
inline void Blr(const Register& xn);
inline void Cmp(const Register& rn, const Operand& operand);
+ inline void CmpTagged(const Register& rn, const Operand& operand);
inline void Subs(const Register& rd, const Register& rn,
const Operand& operand);
void Csel(const Register& rd, const Register& rn, const Operand& operand,
@@ -843,6 +844,13 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CheckPageFlag(const Register& object, int mask, Condition cc,
Label* condition_met);
+ // Compare a register with an operand, and branch to label depending on the
+ // condition. May corrupt the status flags.
+ inline void CompareAndBranch(const Register& lhs, const Operand& rhs,
+ Condition cond, Label* label);
+ inline void CompareTaggedAndBranch(const Register& lhs, const Operand& rhs,
+ Condition cond, Label* label);
+
// Test the bits of register defined by bit_pattern, and branch if ANY of
// those bits are set. May corrupt the status flags.
inline void TestAndBranchIfAnySet(const Register& reg,
@@ -1006,6 +1014,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Conditional macros.
inline void Ccmp(const Register& rn, const Operand& operand, StatusFlags nzcv,
Condition cond);
+ inline void CcmpTagged(const Register& rn, const Operand& operand,
+ StatusFlags nzcv, Condition cond);
inline void Clz(const Register& rd, const Register& rn);
@@ -1597,8 +1607,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
tbx(vd, vn, vn2, vn3, vn4, vm);
}
- void LoadObject(Register result, Handle<Object> object);
-
inline void PushSizeRegList(
RegList registers, unsigned reg_size,
CPURegister::RegisterType type = CPURegister::kRegister) {
@@ -1643,11 +1651,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// be aligned to 16 bytes.
void PeekPair(const CPURegister& dst1, const CPURegister& dst2, int offset);
- // Compare a register with an operand, and branch to label depending on the
- // condition. May corrupt the status flags.
- inline void CompareAndBranch(const Register& lhs, const Operand& rhs,
- Condition cond, Label* label);
-
// Insert one or more instructions into the instruction stream that encode
// some caller-defined data. The instructions used will be executable with no
// side effects.
@@ -1767,10 +1770,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
const ParameterCount& actual, Label* done,
InvokeFlag flag, bool* definitely_mismatches);
- // On function call, call into the debugger if necessary.
- void CheckDebugHook(Register fun, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual);
+ // On function call, call into the debugger.
+ void CallDebugOnFunctionCall(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual);
void InvokeFunctionCode(Register function, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag);
diff --git a/deps/v8/src/codegen/arm64/register-arm64.h b/deps/v8/src/codegen/arm64/register-arm64.h
index a782bf9cd8..2bdf0ceea0 100644
--- a/deps/v8/src/codegen/arm64/register-arm64.h
+++ b/deps/v8/src/codegen/arm64/register-arm64.h
@@ -105,7 +105,7 @@ class CPURegister : public RegisterBase<CPURegister, kRegAfterLast> {
enum RegisterType { kRegister, kVRegister, kNoRegister };
static constexpr CPURegister no_reg() {
- return CPURegister{0, 0, kNoRegister};
+ return CPURegister{kCode_no_reg, 0, kNoRegister};
}
template <int code, int size, RegisterType type>
@@ -567,8 +567,6 @@ using Simd128Register = VRegister;
// Lists of registers.
class V8_EXPORT_PRIVATE CPURegList {
public:
- CPURegList() = default;
-
template <typename... CPURegisters>
explicit CPURegList(CPURegister reg0, CPURegisters... regs)
: list_(CPURegister::ListOf(reg0, regs...)),
@@ -597,18 +595,16 @@ class V8_EXPORT_PRIVATE CPURegList {
}
CPURegister::RegisterType type() const {
- DCHECK(IsValid());
return type_;
}
RegList list() const {
- DCHECK(IsValid());
return list_;
}
inline void set_list(RegList new_list) {
- DCHECK(IsValid());
list_ = new_list;
+ DCHECK(IsValid());
}
// Combine another CPURegList into this one. Registers that already exist in
@@ -656,7 +652,6 @@ class V8_EXPORT_PRIVATE CPURegList {
static CPURegList GetSafepointSavedRegisters();
bool IsEmpty() const {
- DCHECK(IsValid());
return list_ == 0;
}
@@ -664,7 +659,6 @@ class V8_EXPORT_PRIVATE CPURegList {
const CPURegister& other2 = NoCPUReg,
const CPURegister& other3 = NoCPUReg,
const CPURegister& other4 = NoCPUReg) const {
- DCHECK(IsValid());
RegList list = 0;
if (!other1.IsNone() && (other1.type() == type_)) list |= other1.bit();
if (!other2.IsNone() && (other2.type() == type_)) list |= other2.bit();
@@ -674,12 +668,10 @@ class V8_EXPORT_PRIVATE CPURegList {
}
int Count() const {
- DCHECK(IsValid());
return CountSetBits(list_, kRegListSizeInBits);
}
int RegisterSizeInBits() const {
- DCHECK(IsValid());
return size_;
}
@@ -690,7 +682,6 @@ class V8_EXPORT_PRIVATE CPURegList {
}
int TotalSizeInBytes() const {
- DCHECK(IsValid());
return RegisterSizeInBytes() * Count();
}
diff --git a/deps/v8/src/codegen/arm64/utils-arm64.cc b/deps/v8/src/codegen/arm64/utils-arm64.cc
index 2f972ce502..dba2eeb7e1 100644
--- a/deps/v8/src/codegen/arm64/utils-arm64.cc
+++ b/deps/v8/src/codegen/arm64/utils-arm64.cc
@@ -89,15 +89,6 @@ int CountLeadingSignBits(int64_t value, int width) {
}
}
-int CountTrailingZeros(uint64_t value, int width) {
- DCHECK((width == 32) || (width == 64));
- if (width == 64) {
- return static_cast<int>(base::bits::CountTrailingZeros64(value));
- }
- return static_cast<int>(base::bits::CountTrailingZeros32(
- static_cast<uint32_t>(value & 0xFFFFFFFFF)));
-}
-
int CountSetBits(uint64_t value, int width) {
DCHECK((width == 32) || (width == 64));
if (width == 64) {
@@ -109,7 +100,7 @@ int CountSetBits(uint64_t value, int width) {
int LowestSetBitPosition(uint64_t value) {
DCHECK_NE(value, 0U);
- return CountTrailingZeros(value, 64) + 1;
+ return base::bits::CountTrailingZeros(value) + 1;
}
int HighestSetBitPosition(uint64_t value) {
@@ -118,12 +109,14 @@ int HighestSetBitPosition(uint64_t value) {
}
uint64_t LargestPowerOf2Divisor(uint64_t value) {
- return value & (-(int64_t)value);
+ // Simulate two's complement (instead of casting to signed and negating) to
+ // avoid undefined behavior on signed overflow.
+ return value & ((~value) + 1);
}
int MaskToBit(uint64_t mask) {
DCHECK_EQ(CountSetBits(mask, 64), 1);
- return CountTrailingZeros(mask, 64);
+ return base::bits::CountTrailingZeros(mask);
}
#undef __
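
The LargestPowerOf2Divisor hunk above replaces `value & (-(int64_t)value)` with an explicit two's complement computed in unsigned arithmetic, since negating the signed value overflows for value == 2^63. A self-contained version with a worked example:

    #include <cstdint>

    // (~value) + 1 is the two's complement of value, so the AND isolates the
    // lowest set bit without ever forming -INT64_MIN.
    uint64_t LargestPowerOf2Divisor(uint64_t value) {
      return value & ((~value) + 1);
    }
    // Example: value = 0b10110000 (176); (~value) + 1 ends in ...01010000,
    // and the AND keeps only the lowest set bit: 0b00010000 (16).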
diff --git a/deps/v8/src/codegen/arm64/utils-arm64.h b/deps/v8/src/codegen/arm64/utils-arm64.h
index 6bddce6fff..182d781d55 100644
--- a/deps/v8/src/codegen/arm64/utils-arm64.h
+++ b/deps/v8/src/codegen/arm64/utils-arm64.h
@@ -33,7 +33,6 @@ int float16classify(float16 value);
// Bit counting.
int CountLeadingZeros(uint64_t value, int width);
int CountLeadingSignBits(int64_t value, int width);
-V8_EXPORT_PRIVATE int CountTrailingZeros(uint64_t value, int width);
V8_EXPORT_PRIVATE int CountSetBits(uint64_t value, int width);
int LowestSetBitPosition(uint64_t value);
int HighestSetBitPosition(uint64_t value);
@@ -61,7 +60,7 @@ T ReverseBytes(T value, int block_bytes_log2) {
static const uint8_t permute_table[3][8] = {{6, 7, 4, 5, 2, 3, 0, 1},
{4, 5, 6, 7, 0, 1, 2, 3},
{0, 1, 2, 3, 4, 5, 6, 7}};
- T result = 0;
+ typename std::make_unsigned<T>::type result = 0;
for (int i = 0; i < 8; i++) {
result <<= 8;
result |= bytes[permute_table[block_bytes_log2 - 1][i]];
diff --git a/deps/v8/src/codegen/assembler.cc b/deps/v8/src/codegen/assembler.cc
index 498afb0320..4e354d9e54 100644
--- a/deps/v8/src/codegen/assembler.cc
+++ b/deps/v8/src/codegen/assembler.cc
@@ -92,7 +92,7 @@ class DefaultAssemblerBuffer : public AssemblerBuffer {
std::unique_ptr<AssemblerBuffer> Grow(int new_size) override {
DCHECK_LT(size(), new_size);
- return base::make_unique<DefaultAssemblerBuffer>(new_size);
+ return std::make_unique<DefaultAssemblerBuffer>(new_size);
}
private:
@@ -121,12 +121,12 @@ class ExternalAssemblerBufferImpl : public AssemblerBuffer {
std::unique_ptr<AssemblerBuffer> ExternalAssemblerBuffer(void* start,
int size) {
- return base::make_unique<ExternalAssemblerBufferImpl>(
+ return std::make_unique<ExternalAssemblerBufferImpl>(
reinterpret_cast<byte*>(start), size);
}
std::unique_ptr<AssemblerBuffer> NewAssemblerBuffer(int size) {
- return base::make_unique<DefaultAssemblerBuffer>(size);
+ return std::make_unique<DefaultAssemblerBuffer>(size);
}
// -----------------------------------------------------------------------------
diff --git a/deps/v8/src/codegen/assembler.h b/deps/v8/src/codegen/assembler.h
index 98639583d8..af70c4a48f 100644
--- a/deps/v8/src/codegen/assembler.h
+++ b/deps/v8/src/codegen/assembler.h
@@ -36,6 +36,7 @@
#define V8_CODEGEN_ASSEMBLER_H_
#include <forward_list>
+#include <memory>
#include <unordered_map>
#include "src/base/memory.h"
diff --git a/deps/v8/src/codegen/code-stub-assembler.cc b/deps/v8/src/codegen/code-stub-assembler.cc
index 7dad8cb95e..3051ce3662 100644
--- a/deps/v8/src/codegen/code-stub-assembler.cc
+++ b/deps/v8/src/codegen/code-stub-assembler.cc
@@ -7,9 +7,11 @@
#include "include/v8-internal.h"
#include "src/base/macros.h"
#include "src/codegen/code-factory.h"
+#include "src/codegen/tnode.h"
#include "src/common/globals.h"
#include "src/execution/frames-inl.h"
#include "src/execution/frames.h"
+#include "src/execution/protectors.h"
#include "src/heap/heap-inl.h" // For Page/MemoryChunk. TODO(jkummerow): Drop.
#include "src/logging/counters.h"
#include "src/objects/api-callbacks.h"
@@ -17,6 +19,7 @@
#include "src/objects/descriptor-array.h"
#include "src/objects/function-kind.h"
#include "src/objects/heap-number.h"
+#include "src/objects/js-generator.h"
#include "src/objects/oddball.h"
#include "src/objects/ordered-hash-table-inl.h"
#include "src/objects/property-cell.h"
@@ -26,10 +29,6 @@ namespace v8 {
namespace internal {
using compiler::Node;
-template <class T>
-using TNode = compiler::TNode<T>;
-template <class T>
-using SloppyTNode = compiler::SloppyTNode<T>;
CodeStubAssembler::CodeStubAssembler(compiler::CodeAssemblerState* state)
: compiler::CodeAssembler(state),
@@ -135,6 +134,148 @@ void CodeStubAssembler::Check(SloppyTNode<Word32T> condition_node,
Check(branch, message, file, line, extra_nodes);
}
+template <>
+TNode<Smi> CodeStubAssembler::IntPtrToParameter<Smi>(TNode<IntPtrT> value) {
+ return SmiTag(value);
+}
+template <>
+TNode<IntPtrT> CodeStubAssembler::IntPtrToParameter<IntPtrT>(
+ TNode<IntPtrT> value) {
+ return value;
+}
+
+void CodeStubAssembler::CollectCallableFeedback(
+ TNode<Object> maybe_target, TNode<Context> context,
+ TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot_id) {
+ Label extra_checks(this, Label::kDeferred), done(this);
+
+ // Check if we have monomorphic {target} feedback already.
+ TNode<MaybeObject> feedback =
+ LoadFeedbackVectorSlot(feedback_vector, slot_id);
+ Comment("check if monomorphic");
+ TNode<BoolT> is_monomorphic = IsWeakReferenceToObject(feedback, maybe_target);
+ GotoIf(is_monomorphic, &done);
+
+ // Check if it is a megamorphic {target}.
+ Comment("check if megamorphic");
+ TNode<BoolT> is_megamorphic = TaggedEqual(
+ feedback, HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
+ Branch(is_megamorphic, &done, &extra_checks);
+
+ BIND(&extra_checks);
+ {
+ Label initialize(this), mark_megamorphic(this);
+
+ Comment("check if weak reference");
+ TNode<BoolT> is_uninitialized = TaggedEqual(
+ feedback,
+ HeapConstant(FeedbackVector::UninitializedSentinel(isolate())));
+ GotoIf(is_uninitialized, &initialize);
+ CSA_ASSERT(this, IsWeakOrCleared(feedback));
+
+ // If the weak reference is cleared, we have a new chance to become
+ // monomorphic.
+ Comment("check if weak reference is cleared");
+ Branch(IsCleared(feedback), &initialize, &mark_megamorphic);
+
+ BIND(&initialize);
+ {
+ Comment("check if function in same native context");
+ GotoIf(TaggedIsSmi(maybe_target), &mark_megamorphic);
+ TNode<HeapObject> target = CAST(maybe_target);
+ // Check if the {target} is a JSFunction or JSBoundFunction
+ // in the current native context.
+ TVARIABLE(HeapObject, var_current, target);
+ Label loop(this, &var_current), done_loop(this);
+ Goto(&loop);
+ BIND(&loop);
+ {
+ Label if_boundfunction(this), if_function(this);
+ TNode<HeapObject> current = var_current.value();
+ TNode<Uint16T> current_instance_type = LoadInstanceType(current);
+ GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE),
+ &if_boundfunction);
+ Branch(InstanceTypeEqual(current_instance_type, JS_FUNCTION_TYPE),
+ &if_function, &mark_megamorphic);
+
+ BIND(&if_function);
+ {
+ // Check that the JSFunction {current} is in the current native
+ // context.
+ TNode<Context> current_context =
+ CAST(LoadObjectField(current, JSFunction::kContextOffset));
+ TNode<NativeContext> current_native_context =
+ LoadNativeContext(current_context);
+ Branch(
+ TaggedEqual(LoadNativeContext(context), current_native_context),
+ &done_loop, &mark_megamorphic);
+ }
+ BIND(&if_boundfunction);
+ {
+ // Continue with the [[BoundTargetFunction]] of {target}.
+ var_current = LoadObjectField<HeapObject>(
+ current, JSBoundFunction::kBoundTargetFunctionOffset);
+ Goto(&loop);
+ }
+ }
+ BIND(&done_loop);
+ StoreWeakReferenceInFeedbackVector(feedback_vector, slot_id, target);
+ ReportFeedbackUpdate(feedback_vector, slot_id, "Call:Initialize");
+ Goto(&done);
+ }
+
+ BIND(&mark_megamorphic);
+ {
+ // MegamorphicSentinel is an immortal immovable object so
+ // write-barrier is not needed.
+ Comment("transition to megamorphic");
+ DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kmegamorphic_symbol));
+ StoreFeedbackVectorSlot(
+ feedback_vector, slot_id,
+ HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
+ SKIP_WRITE_BARRIER);
+ ReportFeedbackUpdate(feedback_vector, slot_id,
+ "Call:TransitionMegamorphic");
+ Goto(&done);
+ }
+ }
+
+ BIND(&done);
+}
+
+void CodeStubAssembler::CollectCallFeedback(
+ TNode<Object> maybe_target, TNode<Context> context,
+ TNode<HeapObject> maybe_feedback_vector, TNode<UintPtrT> slot_id) {
+ Label feedback_done(this);
+ // If feedback_vector is not valid, then nothing to do.
+ GotoIf(IsUndefined(maybe_feedback_vector), &feedback_done);
+
+ // Increment the call count.
+ TNode<FeedbackVector> feedback_vector = CAST(maybe_feedback_vector);
+ IncrementCallCount(feedback_vector, slot_id);
+
+ // Collect the callable {target} feedback.
+ CollectCallableFeedback(maybe_target, context, feedback_vector, slot_id);
+ Goto(&feedback_done);
+
+ BIND(&feedback_done);
+}
+
+void CodeStubAssembler::IncrementCallCount(
+ TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot_id) {
+ Comment("increment call count");
+ TNode<Smi> call_count =
+ CAST(LoadFeedbackVectorSlot(feedback_vector, slot_id, kTaggedSize));
+ // The lowest {FeedbackNexus::CallCountField::kShift} bits of the call
+ // count are used as flags. To increment the call count by 1 we hence
+ // have to increment by 1 << {FeedbackNexus::CallCountField::kShift}.
+ TNode<Smi> new_count = SmiAdd(
+ call_count, SmiConstant(1 << FeedbackNexus::CallCountField::kShift));
+ // Count is Smi, so we don't need a write barrier.
+ StoreFeedbackVectorSlot(feedback_vector, slot_id, new_count,
+ SKIP_WRITE_BARRIER, kTaggedSize);
+}
+
void CodeStubAssembler::FastCheck(TNode<BoolT> condition) {
Label ok(this), not_ok(this, Label::kDeferred);
Branch(condition, &ok, &not_ok);
@@ -221,7 +362,7 @@ TNode<Object> CodeStubAssembler::NoContextConstant() {
}
#define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \
- compiler::TNode<std::remove_pointer<std::remove_reference<decltype( \
+ TNode<std::remove_pointer<std::remove_reference<decltype( \
std::declval<Heap>().rootAccessorName())>::type>::type> \
CodeStubAssembler::name##Constant() { \
return UncheckedCast<std::remove_pointer<std::remove_reference<decltype( \
@@ -232,7 +373,7 @@ HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR)
#undef HEAP_CONSTANT_ACCESSOR
#define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \
- compiler::TNode<std::remove_pointer<std::remove_reference<decltype( \
+ TNode<std::remove_pointer<std::remove_reference<decltype( \
std::declval<ReadOnlyRoots>().rootAccessorName())>::type>::type> \
CodeStubAssembler::name##Constant() { \
return UncheckedCast<std::remove_pointer<std::remove_reference<decltype( \
@@ -242,14 +383,12 @@ HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR)
HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR)
#undef HEAP_CONSTANT_ACCESSOR
-#define HEAP_CONSTANT_TEST(rootIndexName, rootAccessorName, name) \
- compiler::TNode<BoolT> CodeStubAssembler::Is##name( \
- SloppyTNode<Object> value) { \
- return TaggedEqual(value, name##Constant()); \
- } \
- compiler::TNode<BoolT> CodeStubAssembler::IsNot##name( \
- SloppyTNode<Object> value) { \
- return TaggedNotEqual(value, name##Constant()); \
+#define HEAP_CONSTANT_TEST(rootIndexName, rootAccessorName, name) \
+ TNode<BoolT> CodeStubAssembler::Is##name(SloppyTNode<Object> value) { \
+ return TaggedEqual(value, name##Constant()); \
+ } \
+ TNode<BoolT> CodeStubAssembler::IsNot##name(SloppyTNode<Object> value) { \
+ return TaggedNotEqual(value, name##Constant()); \
}
HEAP_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_TEST)
#undef HEAP_CONSTANT_TEST
@@ -264,6 +403,21 @@ TNode<BInt> CodeStubAssembler::BIntConstant(int value) {
#endif
}
+template <>
+TNode<Smi> CodeStubAssembler::IntPtrOrSmiConstant<Smi>(int value) {
+ return SmiConstant(value);
+}
+
+template <>
+TNode<IntPtrT> CodeStubAssembler::IntPtrOrSmiConstant<IntPtrT>(int value) {
+ return IntPtrConstant(value);
+}
+
+template <>
+TNode<RawPtrT> CodeStubAssembler::IntPtrOrSmiConstant<RawPtrT>(int value) {
+ return ReinterpretCast<RawPtrT>(IntPtrConstant(value));
+}
+
Node* CodeStubAssembler::IntPtrOrSmiConstant(int value, ParameterMode mode) {
if (mode == SMI_PARAMETERS) {
return SmiConstant(value);
@@ -273,41 +427,29 @@ Node* CodeStubAssembler::IntPtrOrSmiConstant(int value, ParameterMode mode) {
}
}
-TNode<BoolT> CodeStubAssembler::IntPtrOrSmiEqual(Node* left, Node* right,
- ParameterMode mode) {
- if (mode == SMI_PARAMETERS) {
- return SmiEqual(CAST(left), CAST(right));
- } else {
- DCHECK_EQ(INTPTR_PARAMETERS, mode);
- return IntPtrEqual(UncheckedCast<IntPtrT>(left),
- UncheckedCast<IntPtrT>(right));
+bool CodeStubAssembler::IsIntPtrOrSmiConstantZero(TNode<Smi> test) {
+ Smi smi_test;
+ if (ToSmiConstant(test, &smi_test) && smi_test.value() == 0) {
+ return true;
}
+ return false;
}
-TNode<BoolT> CodeStubAssembler::IntPtrOrSmiNotEqual(Node* left, Node* right,
- ParameterMode mode) {
- if (mode == SMI_PARAMETERS) {
- return SmiNotEqual(CAST(left), CAST(right));
- } else {
- DCHECK_EQ(INTPTR_PARAMETERS, mode);
- return WordNotEqual(UncheckedCast<IntPtrT>(left),
- UncheckedCast<IntPtrT>(right));
+bool CodeStubAssembler::IsIntPtrOrSmiConstantZero(TNode<IntPtrT> test) {
+ int32_t constant_test;
+ if (ToInt32Constant(test, &constant_test) && constant_test == 0) {
+ return true;
}
+ return false;
}
bool CodeStubAssembler::IsIntPtrOrSmiConstantZero(Node* test,
ParameterMode mode) {
- int32_t constant_test;
- Smi smi_test;
if (mode == INTPTR_PARAMETERS) {
- if (ToInt32Constant(test, &constant_test) && constant_test == 0) {
- return true;
- }
+ return IsIntPtrOrSmiConstantZero(UncheckedCast<IntPtrT>(test));
} else {
DCHECK_EQ(mode, SMI_PARAMETERS);
- if (ToSmiConstant(test, &smi_test) && smi_test.value() == 0) {
- return true;
- }
+ return IsIntPtrOrSmiConstantZero(UncheckedCast<Smi>(test));
}
return false;
}
@@ -352,6 +494,10 @@ Node* CodeStubAssembler::MatchesParameterMode(Node* value, ParameterMode mode) {
}
TNode<BoolT> CodeStubAssembler::WordIsPowerOfTwo(SloppyTNode<IntPtrT> value) {
+ intptr_t constant;
+ if (ToIntPtrConstant(value, &constant)) {
+ return BoolConstant(base::bits::IsPowerOfTwo(constant));
+ }
// value && !(value & (value - 1))
return IntPtrEqual(
Select<IntPtrT>(
@@ -578,21 +724,44 @@ TNode<Float64T> CodeStubAssembler::Float64Trunc(SloppyTNode<Float64T> x) {
TNode<BoolT> CodeStubAssembler::IsValidSmi(TNode<Smi> smi) {
if (SmiValuesAre32Bits() && kSystemPointerSize == kInt64Size) {
// Check that the Smi value is zero in the lower bits.
- TNode<IntPtrT> value = BitcastTaggedSignedToWord(smi);
+ TNode<IntPtrT> value = BitcastTaggedToWordForTagAndSmiBits(smi);
return Word32Equal(Int32Constant(0), TruncateIntPtrToInt32(value));
}
return Int32TrueConstant();
}
-Node* CodeStubAssembler::SmiShiftBitsConstant() {
- return IntPtrConstant(kSmiShiftSize + kSmiTagSize);
+TNode<BoolT> CodeStubAssembler::IsValidSmiIndex(TNode<Smi> smi) {
+ if (COMPRESS_POINTERS_BOOL) {
+ return WordEqual(
+ BitcastTaggedToWordForTagAndSmiBits(smi),
+ BitcastTaggedToWordForTagAndSmiBits(NormalizeSmiIndex(smi)));
+ }
+ return Int32TrueConstant();
+}
+
+TNode<Smi> CodeStubAssembler::NormalizeSmiIndex(TNode<Smi> smi_index) {
+ if (COMPRESS_POINTERS_BOOL) {
+ TNode<Int32T> raw =
+ TruncateWordToInt32(BitcastTaggedToWordForTagAndSmiBits(smi_index));
+ smi_index = BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(raw));
+ }
+ return smi_index;
}
TNode<Smi> CodeStubAssembler::SmiFromInt32(SloppyTNode<Int32T> value) {
- TNode<IntPtrT> value_intptr = ChangeInt32ToIntPtr(value);
- TNode<Smi> smi =
- BitcastWordToTaggedSigned(WordShl(value_intptr, SmiShiftBitsConstant()));
- return smi;
+ if (COMPRESS_POINTERS_BOOL) {
+ static_assert(!COMPRESS_POINTERS_BOOL || (kSmiShiftSize + kSmiTagSize == 1),
+ "Use shifting instead of add");
+ return BitcastWordToTaggedSigned(
+ ChangeUint32ToWord(Int32Add(value, value)));
+ }
+ return SmiTag(ChangeInt32ToIntPtr(value));
+}
+
+TNode<Smi> CodeStubAssembler::SmiFromUint32(TNode<Uint32T> value) {
+ CSA_ASSERT(this, IntPtrLessThan(ChangeUint32ToWord(value),
+ IntPtrConstant(Smi::kMaxValue)));
+ return SmiFromInt32(Signed(value));
}
TNode<BoolT> CodeStubAssembler::IsValidPositiveSmi(TNode<IntPtrT> value) {
@@ -612,6 +781,9 @@ TNode<Smi> CodeStubAssembler::SmiTag(SloppyTNode<IntPtrT> value) {
if (ToInt32Constant(value, &constant_value) && Smi::IsValid(constant_value)) {
return SmiConstant(constant_value);
}
+ if (COMPRESS_POINTERS_BOOL) {
+ return SmiFromInt32(TruncateIntPtrToInt32(value));
+ }
TNode<Smi> smi =
BitcastWordToTaggedSigned(WordShl(value, SmiShiftBitsConstant()));
return smi;
@@ -622,11 +794,19 @@ TNode<IntPtrT> CodeStubAssembler::SmiUntag(SloppyTNode<Smi> value) {
if (ToIntPtrConstant(value, &constant_value)) {
return IntPtrConstant(constant_value >> (kSmiShiftSize + kSmiTagSize));
}
- return Signed(
- WordSar(BitcastTaggedSignedToWord(value), SmiShiftBitsConstant()));
+ if (COMPRESS_POINTERS_BOOL) {
+ return ChangeInt32ToIntPtr(SmiToInt32(value));
+ }
+ return Signed(WordSar(BitcastTaggedToWordForTagAndSmiBits(value),
+ SmiShiftBitsConstant()));
}
TNode<Int32T> CodeStubAssembler::SmiToInt32(SloppyTNode<Smi> value) {
+ if (COMPRESS_POINTERS_BOOL) {
+ return Signed(Word32Sar(
+ TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(value)),
+ SmiShiftBitsConstant32()));
+ }
TNode<IntPtrT> result = SmiUntag(value);
return TruncateIntPtrToInt32(result);
}
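
With pointer compression, the tag and untag helpers above are plain 32-bit arithmetic; a standalone sketch assuming a one-bit Smi shift (kSmiShiftSize + kSmiTagSize == 1, as the static_assert above requires):

  #include <cstdint>

  // Tag: double the 31-bit payload (equivalent to a left shift by one);
  // untag: arithmetic shift of the low 32 bits back down by one.
  inline uint32_t TagSmi31(int32_t value) {
    return static_cast<uint32_t>(value) * 2u;
  }
  inline int32_t UntagSmi31(uint32_t tagged) {
    return static_cast<int32_t>(tagged) >> 1;
  }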
@@ -673,13 +853,13 @@ TNode<Smi> CodeStubAssembler::TrySmiAdd(TNode<Smi> lhs, TNode<Smi> rhs,
Label* if_overflow) {
if (SmiValuesAre32Bits()) {
return BitcastWordToTaggedSigned(
- TryIntPtrAdd(BitcastTaggedSignedToWord(lhs),
- BitcastTaggedSignedToWord(rhs), if_overflow));
+ TryIntPtrAdd(BitcastTaggedToWordForTagAndSmiBits(lhs),
+ BitcastTaggedToWordForTagAndSmiBits(rhs), if_overflow));
} else {
DCHECK(SmiValuesAre31Bits());
TNode<PairT<Int32T, BoolT>> pair = Int32AddWithOverflow(
- TruncateIntPtrToInt32(BitcastTaggedSignedToWord(lhs)),
- TruncateIntPtrToInt32(BitcastTaggedSignedToWord(rhs)));
+ TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(lhs)),
+ TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(rhs)));
TNode<BoolT> overflow = Projection<1>(pair);
GotoIf(overflow, if_overflow);
TNode<Int32T> result = Projection<0>(pair);
@@ -690,8 +870,9 @@ TNode<Smi> CodeStubAssembler::TrySmiAdd(TNode<Smi> lhs, TNode<Smi> rhs,
TNode<Smi> CodeStubAssembler::TrySmiSub(TNode<Smi> lhs, TNode<Smi> rhs,
Label* if_overflow) {
if (SmiValuesAre32Bits()) {
- TNode<PairT<IntPtrT, BoolT>> pair = IntPtrSubWithOverflow(
- BitcastTaggedSignedToWord(lhs), BitcastTaggedSignedToWord(rhs));
+ TNode<PairT<IntPtrT, BoolT>> pair =
+ IntPtrSubWithOverflow(BitcastTaggedToWordForTagAndSmiBits(lhs),
+ BitcastTaggedToWordForTagAndSmiBits(rhs));
TNode<BoolT> overflow = Projection<1>(pair);
GotoIf(overflow, if_overflow);
TNode<IntPtrT> result = Projection<0>(pair);
@@ -699,8 +880,8 @@ TNode<Smi> CodeStubAssembler::TrySmiSub(TNode<Smi> lhs, TNode<Smi> rhs,
} else {
DCHECK(SmiValuesAre31Bits());
TNode<PairT<Int32T, BoolT>> pair = Int32SubWithOverflow(
- TruncateIntPtrToInt32(BitcastTaggedSignedToWord(lhs)),
- TruncateIntPtrToInt32(BitcastTaggedSignedToWord(rhs)));
+ TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(lhs)),
+ TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(rhs)));
TNode<BoolT> overflow = Projection<1>(pair);
GotoIf(overflow, if_overflow);
TNode<Int32T> result = Projection<0>(pair);
@@ -878,7 +1059,7 @@ TNode<Number> CodeStubAssembler::SmiMul(TNode<Smi> a, TNode<Smi> b) {
}
BIND(&answer_zero);
{
- TNode<Word32T> or_result = Word32Or(lhs32, rhs32);
+ TNode<Int32T> or_result = Word32Or(lhs32, rhs32);
Label if_should_be_negative_zero(this), if_should_be_zero(this);
Branch(Int32LessThan(or_result, zero), &if_should_be_negative_zero,
&if_should_be_zero);
@@ -982,41 +1163,27 @@ TNode<Int32T> CodeStubAssembler::TruncateIntPtrToInt32(
return ReinterpretCast<Int32T>(value);
}
-TNode<BoolT> CodeStubAssembler::TaggedIsSmi(SloppyTNode<Object> a) {
- STATIC_ASSERT(kSmiTagMask < kMaxUInt32);
- return Word32Equal(Word32And(TruncateIntPtrToInt32(BitcastTaggedToWord(a)),
- Int32Constant(kSmiTagMask)),
- Int32Constant(0));
-}
-
TNode<BoolT> CodeStubAssembler::TaggedIsSmi(TNode<MaybeObject> a) {
STATIC_ASSERT(kSmiTagMask < kMaxUInt32);
return Word32Equal(
- Word32And(TruncateIntPtrToInt32(BitcastMaybeObjectToWord(a)),
+ Word32And(TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(a)),
Int32Constant(kSmiTagMask)),
Int32Constant(0));
}
-TNode<BoolT> CodeStubAssembler::TaggedIsNotSmi(SloppyTNode<Object> a) {
- // Although BitcastTaggedSignedToWord is generally unsafe on HeapObjects, we
- // can nonetheless use it to inspect the Smi tag. The assumption here is that
- // the GC will not exchange Smis for HeapObjects or vice-versa.
- TNode<IntPtrT> a_bitcast = BitcastTaggedSignedToWord(UncheckedCast<Smi>(a));
- STATIC_ASSERT(kSmiTagMask < kMaxUInt32);
- return Word32NotEqual(
- Word32And(TruncateIntPtrToInt32(a_bitcast), Int32Constant(kSmiTagMask)),
- Int32Constant(0));
+TNode<BoolT> CodeStubAssembler::TaggedIsNotSmi(TNode<MaybeObject> a) {
+ return Word32BinaryNot(TaggedIsSmi(a));
}
TNode<BoolT> CodeStubAssembler::TaggedIsPositiveSmi(SloppyTNode<Object> a) {
#if defined(V8_HOST_ARCH_32_BIT) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
return Word32Equal(
Word32And(
- TruncateIntPtrToInt32(BitcastTaggedToWord(a)),
- Uint32Constant(kSmiTagMask | static_cast<int32_t>(kSmiSignMask))),
+ TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(a)),
+ Uint32Constant(static_cast<uint32_t>(kSmiTagMask | kSmiSignMask))),
Int32Constant(0));
#else
- return WordEqual(WordAnd(BitcastTaggedToWord(a),
+ return WordEqual(WordAnd(BitcastTaggedToWordForTagAndSmiBits(a),
IntPtrConstant(kSmiTagMask | kSmiSignMask)),
IntPtrConstant(0));
#endif
@@ -1052,55 +1219,6 @@ TNode<Float64T> CodeStubAssembler::LoadDoubleWithHoleCheck(
INTPTR_PARAMETERS, if_hole);
}
-void CodeStubAssembler::BranchIfPrototypesHaveNoElements(
- Node* receiver_map, Label* definitely_no_elements,
- Label* possibly_elements) {
- CSA_SLOW_ASSERT(this, IsMap(receiver_map));
- VARIABLE(var_map, MachineRepresentation::kTagged, receiver_map);
- Label loop_body(this, &var_map);
- TNode<FixedArray> empty_fixed_array = EmptyFixedArrayConstant();
- TNode<NumberDictionary> empty_slow_element_dictionary =
- EmptySlowElementDictionaryConstant();
- Goto(&loop_body);
-
- BIND(&loop_body);
- {
- Node* map = var_map.value();
- TNode<HeapObject> prototype = LoadMapPrototype(map);
- GotoIf(IsNull(prototype), definitely_no_elements);
- TNode<Map> prototype_map = LoadMap(prototype);
- TNode<Uint16T> prototype_instance_type = LoadMapInstanceType(prototype_map);
-
- // Pessimistically assume elements if a Proxy, Special API Object,
- // or JSPrimitiveWrapper wrapper is found on the prototype chain. After this
- // instance type check, it's not necessary to check for interceptors or
- // access checks.
- Label if_custom(this, Label::kDeferred), if_notcustom(this);
- Branch(IsCustomElementsReceiverInstanceType(prototype_instance_type),
- &if_custom, &if_notcustom);
-
- BIND(&if_custom);
- {
- // For string JSPrimitiveWrapper wrappers we still support the checks as
- // long as they wrap the empty string.
- GotoIfNot(
- InstanceTypeEqual(prototype_instance_type, JS_PRIMITIVE_WRAPPER_TYPE),
- possibly_elements);
- Node* prototype_value = LoadJSPrimitiveWrapperValue(prototype);
- Branch(IsEmptyString(prototype_value), &if_notcustom, possibly_elements);
- }
-
- BIND(&if_notcustom);
- {
- TNode<FixedArrayBase> prototype_elements = LoadElements(CAST(prototype));
- var_map.Bind(prototype_map);
- GotoIf(TaggedEqual(prototype_elements, empty_fixed_array), &loop_body);
- Branch(TaggedEqual(prototype_elements, empty_slow_element_dictionary),
- &loop_body, possibly_elements);
- }
- }
-}
-
void CodeStubAssembler::BranchIfJSReceiver(SloppyTNode<Object> object,
Label* if_true, Label* if_false) {
GotoIf(TaggedIsSmi(object), if_false);
@@ -1118,19 +1236,6 @@ void CodeStubAssembler::GotoIfForceSlowPath(Label* if_true) {
#endif
}
-void CodeStubAssembler::GotoIfDebugExecutionModeChecksSideEffects(
- Label* if_true) {
- STATIC_ASSERT(sizeof(DebugInfo::ExecutionMode) >= sizeof(int32_t));
-
- TNode<ExternalReference> execution_mode_address = ExternalConstant(
- ExternalReference::debug_execution_mode_address(isolate()));
- TNode<Int32T> execution_mode =
- UncheckedCast<Int32T>(Load(MachineType::Int32(), execution_mode_address));
-
- GotoIf(Word32Equal(execution_mode, Int32Constant(DebugInfo::kSideEffects)),
- if_true);
-}
-
TNode<HeapObject> CodeStubAssembler::AllocateRaw(TNode<IntPtrT> size_in_bytes,
AllocationFlags flags,
TNode<RawPtrT> top_address,
@@ -1557,7 +1662,7 @@ void CodeStubAssembler::GotoIfMapHasSlowProperties(TNode<Map> map,
}
TNode<HeapObject> CodeStubAssembler::LoadFastProperties(
- SloppyTNode<JSObject> object) {
+ SloppyTNode<JSReceiver> object) {
CSA_SLOW_ASSERT(this, Word32BinaryNot(IsDictionaryMap(LoadMap(object))));
TNode<Object> properties = LoadJSReceiverPropertiesOrHash(object);
return Select<HeapObject>(
@@ -1566,7 +1671,7 @@ TNode<HeapObject> CodeStubAssembler::LoadFastProperties(
}
TNode<HeapObject> CodeStubAssembler::LoadSlowProperties(
- SloppyTNode<JSObject> object) {
+ SloppyTNode<JSReceiver> object) {
CSA_SLOW_ASSERT(this, IsDictionaryMap(LoadMap(object)));
TNode<Object> properties = LoadJSReceiverPropertiesOrHash(object);
return Select<HeapObject>(
@@ -1862,18 +1967,8 @@ TNode<Uint32T> CodeStubAssembler::LoadStringLengthAsWord32(
return LoadObjectField<Uint32T>(string, String::kLengthOffset);
}
-Node* CodeStubAssembler::PointerToSeqStringData(Node* seq_string) {
- CSA_ASSERT(this, IsString(seq_string));
- CSA_ASSERT(this,
- IsSequentialStringInstanceType(LoadInstanceType(seq_string)));
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- return IntPtrAdd(
- BitcastTaggedToWord(seq_string),
- IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-}
-
-Node* CodeStubAssembler::LoadJSPrimitiveWrapperValue(Node* object) {
- CSA_ASSERT(this, IsJSPrimitiveWrapper(object));
+TNode<Object> CodeStubAssembler::LoadJSPrimitiveWrapperValue(
+ TNode<JSPrimitiveWrapper> object) {
return LoadObjectField(object, JSPrimitiveWrapper::kValueOffset);
}
@@ -1887,15 +1982,9 @@ void CodeStubAssembler::DispatchMaybeObject(TNode<MaybeObject> maybe_object,
GotoIf(IsCleared(maybe_object), if_cleared);
- GotoIf(Word32Equal(Word32And(TruncateIntPtrToInt32(
- BitcastMaybeObjectToWord(maybe_object)),
- Int32Constant(kHeapObjectTagMask)),
- Int32Constant(kHeapObjectTag)),
- &inner_if_strong);
+ GotoIf(IsStrong(maybe_object), &inner_if_strong);
- *extracted =
- BitcastWordToTagged(WordAnd(BitcastMaybeObjectToWord(maybe_object),
- IntPtrConstant(~kWeakHeapObjectMask)));
+ *extracted = GetHeapObjectAssumeWeak(maybe_object);
Goto(if_weak);
BIND(&inner_if_smi);
@@ -1908,10 +1997,10 @@ void CodeStubAssembler::DispatchMaybeObject(TNode<MaybeObject> maybe_object,
}
TNode<BoolT> CodeStubAssembler::IsStrong(TNode<MaybeObject> value) {
- return Word32Equal(
- Word32And(TruncateIntPtrToInt32(BitcastMaybeObjectToWord(value)),
- Int32Constant(kHeapObjectTagMask)),
- Int32Constant(kHeapObjectTag));
+ return Word32Equal(Word32And(TruncateIntPtrToInt32(
+ BitcastTaggedToWordForTagAndSmiBits(value)),
+ Int32Constant(kHeapObjectTagMask)),
+ Int32Constant(kHeapObjectTag));
}
TNode<HeapObject> CodeStubAssembler::GetHeapObjectIfStrong(
@@ -1921,10 +2010,10 @@ TNode<HeapObject> CodeStubAssembler::GetHeapObjectIfStrong(
}
TNode<BoolT> CodeStubAssembler::IsWeakOrCleared(TNode<MaybeObject> value) {
- return Word32Equal(
- Word32And(TruncateIntPtrToInt32(BitcastMaybeObjectToWord(value)),
- Int32Constant(kHeapObjectTagMask)),
- Int32Constant(kWeakHeapObjectTag));
+ return Word32Equal(Word32And(TruncateIntPtrToInt32(
+ BitcastTaggedToWordForTagAndSmiBits(value)),
+ Int32Constant(kHeapObjectTagMask)),
+ Int32Constant(kWeakHeapObjectTag));
}
TNode<BoolT> CodeStubAssembler::IsCleared(TNode<MaybeObject> value) {
@@ -1932,11 +2021,6 @@ TNode<BoolT> CodeStubAssembler::IsCleared(TNode<MaybeObject> value) {
Int32Constant(kClearedWeakHeapObjectLower32));
}
-TNode<BoolT> CodeStubAssembler::IsNotCleared(TNode<MaybeObject> value) {
- return Word32NotEqual(TruncateIntPtrToInt32(BitcastMaybeObjectToWord(value)),
- Int32Constant(kClearedWeakHeapObjectLower32));
-}
-
TNode<HeapObject> CodeStubAssembler::GetHeapObjectAssumeWeak(
TNode<MaybeObject> value) {
CSA_ASSERT(this, IsWeakOrCleared(value));
@@ -1951,43 +2035,41 @@ TNode<HeapObject> CodeStubAssembler::GetHeapObjectAssumeWeak(
return GetHeapObjectAssumeWeak(value);
}
-TNode<BoolT> CodeStubAssembler::IsWeakReferenceTo(TNode<MaybeObject> object,
- TNode<Object> value) {
-#if defined(V8_HOST_ARCH_32_BIT) || defined(V8_COMPRESS_POINTERS)
- STATIC_ASSERT(kTaggedSize == kInt32Size);
- return Word32Equal(
- Word32And(TruncateWordToInt32(BitcastMaybeObjectToWord(object)),
- Uint32Constant(
- static_cast<uint32_t>(~kWeakHeapObjectMask & kMaxUInt32))),
- TruncateWordToInt32(BitcastTaggedToWord(value)));
-#else
- return WordEqual(WordAnd(BitcastMaybeObjectToWord(object),
- IntPtrConstant(~kWeakHeapObjectMask)),
- BitcastTaggedToWord(value));
-
-#endif
-}
-
-TNode<BoolT> CodeStubAssembler::IsStrongReferenceTo(TNode<MaybeObject> object,
- TNode<Object> value) {
- return TaggedEqual(BitcastWordToTagged(BitcastMaybeObjectToWord(object)),
- value);
-}
-
-TNode<BoolT> CodeStubAssembler::IsNotWeakReferenceTo(TNode<MaybeObject> object,
- TNode<Object> value) {
-#if defined(V8_HOST_ARCH_32_BIT) || defined(V8_COMPRESS_POINTERS)
- return Word32NotEqual(
- Word32And(TruncateWordToInt32(BitcastMaybeObjectToWord(object)),
- Uint32Constant(
- static_cast<uint32_t>(~kWeakHeapObjectMask & kMaxUInt32))),
- TruncateWordToInt32(BitcastTaggedToWord(value)));
-#else
- return WordNotEqual(WordAnd(BitcastMaybeObjectToWord(object),
- IntPtrConstant(~kWeakHeapObjectMask)),
- BitcastTaggedToWord(value));
-
-#endif
+// This version generates
+// (maybe_object & ~mask) == value
+// It works for non-Smi |maybe_object| and for both Smi and HeapObject values
+// but requires a big constant for ~mask.
+TNode<BoolT> CodeStubAssembler::IsWeakReferenceToObject(
+ TNode<MaybeObject> maybe_object, TNode<Object> value) {
+ CSA_ASSERT(this, TaggedIsNotSmi(maybe_object));
+ if (COMPRESS_POINTERS_BOOL) {
+ return Word32Equal(
+ Word32And(TruncateWordToInt32(BitcastMaybeObjectToWord(maybe_object)),
+ Uint32Constant(~static_cast<uint32_t>(kWeakHeapObjectMask))),
+ TruncateWordToInt32(BitcastTaggedToWord(value)));
+ } else {
+ return WordEqual(WordAnd(BitcastMaybeObjectToWord(maybe_object),
+ IntPtrConstant(~kWeakHeapObjectMask)),
+ BitcastTaggedToWord(value));
+ }
+}
+
+// This version generates
+// maybe_object == (heap_object | mask)
+// It works for any |maybe_object| value and generates better code because it
+// uses a small constant for mask.
+TNode<BoolT> CodeStubAssembler::IsWeakReferenceTo(
+ TNode<MaybeObject> maybe_object, TNode<HeapObject> heap_object) {
+ if (COMPRESS_POINTERS_BOOL) {
+ return Word32Equal(
+ TruncateWordToInt32(BitcastMaybeObjectToWord(maybe_object)),
+ Word32Or(TruncateWordToInt32(BitcastTaggedToWord(heap_object)),
+ Int32Constant(kWeakHeapObjectMask)));
+ } else {
+ return WordEqual(BitcastMaybeObjectToWord(maybe_object),
+ WordOr(BitcastTaggedToWord(heap_object),
+ IntPtrConstant(kWeakHeapObjectMask)));
+ }
}
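
Stripped of the CSA plumbing, the two variants compare as follows; treating kWeakHeapObjectMask as a single tag bit is an assumption of this sketch:

  #include <cstdint>

  constexpr uintptr_t kMask = 2;  // stand-in for kWeakHeapObjectMask (assumed)

  // Variant 1: clear the weak bit on the reference, then compare. Needs the
  // wide ~kMask constant but also accepts Smi values on the right-hand side.
  inline bool IsWeakRefToObject(uintptr_t maybe_object, uintptr_t value) {
    return (maybe_object & ~kMask) == value;
  }

  // Variant 2: set the weak bit on the known HeapObject and compare directly.
  // The small kMask constant typically yields better code.
  inline bool IsWeakRefTo(uintptr_t maybe_object, uintptr_t heap_object) {
    return maybe_object == (heap_object | kMask);
  }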
TNode<MaybeObject> CodeStubAssembler::MakeWeak(TNode<HeapObject> value) {
@@ -2123,16 +2205,27 @@ TNode<IntPtrT> CodeStubAssembler::LoadPropertyArrayLength(
return Signed(DecodeWord<PropertyArray::LengthField>(value));
}
-TNode<RawPtrT> CodeStubAssembler::LoadJSTypedArrayBackingStore(
+TNode<RawPtrT> CodeStubAssembler::LoadJSTypedArrayDataPtr(
TNode<JSTypedArray> typed_array) {
- // Backing store = external_pointer + base_pointer.
- Node* external_pointer =
- LoadObjectField(typed_array, JSTypedArray::kExternalPointerOffset,
- MachineType::Pointer());
- TNode<Object> base_pointer =
- LoadObjectField(typed_array, JSTypedArray::kBasePointerOffset);
- return UncheckedCast<RawPtrT>(
- IntPtrAdd(external_pointer, BitcastTaggedToWord(base_pointer)));
+ // Data pointer = external_pointer + static_cast<Tagged_t>(base_pointer).
+ TNode<RawPtrT> external_pointer = LoadObjectField<RawPtrT>(
+ typed_array, JSTypedArray::kExternalPointerOffset);
+
+ TNode<IntPtrT> base_pointer;
+ if (COMPRESS_POINTERS_BOOL) {
+ TNode<Int32T> compressed_base =
+ LoadObjectField<Int32T>(typed_array, JSTypedArray::kBasePointerOffset);
+ // Zero-extend TaggedT to WordT according to current compression scheme
+ // so that the addition with |external_pointer| (which already contains
+ // compensated offset value) below will decompress the tagged value.
+ // See JSTypedArray::ExternalPointerCompensationForOnHeapArray() for
+ // details.
+ base_pointer = Signed(ChangeUint32ToWord(compressed_base));
+ } else {
+ base_pointer =
+ LoadObjectField<IntPtrT>(typed_array, JSTypedArray::kBasePointerOffset);
+ }
+ return RawPtrAdd(external_pointer, base_pointer);
}
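
Conceptually the computation above is an add of the external pointer and the zero-extended (possibly compressed) base pointer; a rough sketch of the pointer-compression case:

  #include <cstdint>

  // base is the 32-bit on-heap base pointer (or 0 for off-heap buffers);
  // zero-extending it and adding the compensated external pointer yields the
  // raw data pointer, as described in the comment above.
  inline uintptr_t TypedArrayDataPtr(uintptr_t external_pointer, uint32_t base) {
    return external_pointer + static_cast<uintptr_t>(base);
  }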
TNode<BigInt> CodeStubAssembler::LoadFixedBigInt64ArrayElementAsTagged(
@@ -2267,8 +2360,7 @@ TNode<BigInt> CodeStubAssembler::BigIntFromInt64(TNode<IntPtrT> value) {
return var_result.value();
}
-compiler::TNode<BigInt>
-CodeStubAssembler::LoadFixedBigUint64ArrayElementAsTagged(
+TNode<BigInt> CodeStubAssembler::LoadFixedBigUint64ArrayElementAsTagged(
SloppyTNode<RawPtrT> data_pointer, SloppyTNode<IntPtrT> offset) {
Label if_zero(this), done(this);
if (Is64()) {
@@ -2416,59 +2508,30 @@ TNode<Numeric> CodeStubAssembler::LoadFixedTypedArrayElementAsTagged(
return var_result.value();
}
-void CodeStubAssembler::StoreJSTypedArrayElementFromTagged(
- TNode<Context> context, TNode<JSTypedArray> typed_array,
- TNode<Smi> index_node, TNode<Object> value, ElementsKind elements_kind) {
- TNode<RawPtrT> data_pointer = LoadJSTypedArrayBackingStore(typed_array);
- switch (elements_kind) {
- case UINT8_ELEMENTS:
- case UINT8_CLAMPED_ELEMENTS:
- case INT8_ELEMENTS:
- case UINT16_ELEMENTS:
- case INT16_ELEMENTS:
- StoreElement(data_pointer, elements_kind, index_node,
- SmiToInt32(CAST(value)), SMI_PARAMETERS);
- break;
- case UINT32_ELEMENTS:
- case INT32_ELEMENTS:
- StoreElement(data_pointer, elements_kind, index_node,
- TruncateTaggedToWord32(context, value), SMI_PARAMETERS);
- break;
- case FLOAT32_ELEMENTS:
- StoreElement(data_pointer, elements_kind, index_node,
- TruncateFloat64ToFloat32(LoadHeapNumberValue(CAST(value))),
- SMI_PARAMETERS);
- break;
- case FLOAT64_ELEMENTS:
- StoreElement(data_pointer, elements_kind, index_node,
- LoadHeapNumberValue(CAST(value)), SMI_PARAMETERS);
- break;
- case BIGUINT64_ELEMENTS:
- case BIGINT64_ELEMENTS:
- StoreElement(data_pointer, elements_kind, index_node,
- UncheckedCast<BigInt>(value), SMI_PARAMETERS);
- break;
- default:
- UNREACHABLE();
- }
-}
-
+template <typename TIndex>
TNode<MaybeObject> CodeStubAssembler::LoadFeedbackVectorSlot(
- Node* object, Node* slot_index_node, int additional_offset,
- ParameterMode parameter_mode) {
- CSA_SLOW_ASSERT(this, IsFeedbackVector(object));
- CSA_SLOW_ASSERT(this, MatchesParameterMode(slot_index_node, parameter_mode));
+ TNode<FeedbackVector> feedback_vector, TNode<TIndex> slot,
+ int additional_offset) {
int32_t header_size =
FeedbackVector::kFeedbackSlotsOffset + additional_offset - kHeapObjectTag;
- TNode<IntPtrT> offset = ElementOffsetFromIndex(
- slot_index_node, HOLEY_ELEMENTS, parameter_mode, header_size);
+ TNode<IntPtrT> offset =
+ ElementOffsetFromIndex(slot, HOLEY_ELEMENTS, header_size);
CSA_SLOW_ASSERT(
- this, IsOffsetInBounds(offset, LoadFeedbackVectorLength(CAST(object)),
+ this, IsOffsetInBounds(offset, LoadFeedbackVectorLength(feedback_vector),
FeedbackVector::kHeaderSize));
- return UncheckedCast<MaybeObject>(
- Load(MachineType::AnyTagged(), object, offset));
+ return Load<MaybeObject>(feedback_vector, offset);
}
+template TNode<MaybeObject> CodeStubAssembler::LoadFeedbackVectorSlot(
+ TNode<FeedbackVector> feedback_vector, TNode<Smi> slot,
+ int additional_offset);
+template TNode<MaybeObject> CodeStubAssembler::LoadFeedbackVectorSlot(
+ TNode<FeedbackVector> feedback_vector, TNode<IntPtrT> slot,
+ int additional_offset);
+template TNode<MaybeObject> CodeStubAssembler::LoadFeedbackVectorSlot(
+ TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot,
+ int additional_offset);
+
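The offset computation in LoadFeedbackVectorSlot is just a scaled slot index plus the untagged header, roughly:

  #include <cstddef>

  // header_size already folds in kFeedbackSlotsOffset, additional_offset and
  // -kHeapObjectTag; HOLEY_ELEMENTS scales the slot by one tagged word.
  inline ptrdiff_t FeedbackSlotOffset(ptrdiff_t slot, int header_size,
                                      int tagged_size) {
    return header_size + slot * tagged_size;
  }
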
template <typename Array>
TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ArrayElement(
TNode<Array> object, int array_header_size, Node* index_node,
@@ -2617,6 +2680,13 @@ TNode<Float64T> CodeStubAssembler::LoadDoubleWithHoleCheck(
return UncheckedCast<Float64T>(Load(machine_type, base, offset));
}
+TNode<BoolT> CodeStubAssembler::LoadContextHasExtensionField(
+ SloppyTNode<Context> context) {
+ TNode<IntPtrT> value =
+ LoadAndUntagObjectField(context, Context::kLengthOffset);
+ return IsSetWord<Context::HasExtensionField>(value);
+}
+
TNode<Object> CodeStubAssembler::LoadContextElement(
SloppyTNode<Context> context, int slot_index) {
int offset = Context::SlotOffset(slot_index);
@@ -2626,15 +2696,15 @@ TNode<Object> CodeStubAssembler::LoadContextElement(
TNode<Object> CodeStubAssembler::LoadContextElement(
SloppyTNode<Context> context, SloppyTNode<IntPtrT> slot_index) {
- TNode<IntPtrT> offset = ElementOffsetFromIndex(
- slot_index, PACKED_ELEMENTS, INTPTR_PARAMETERS, Context::SlotOffset(0));
+ TNode<IntPtrT> offset = ElementOffsetFromIndex(slot_index, PACKED_ELEMENTS,
+ Context::SlotOffset(0));
return UncheckedCast<Object>(Load(MachineType::AnyTagged(), context, offset));
}
TNode<Object> CodeStubAssembler::LoadContextElement(TNode<Context> context,
TNode<Smi> slot_index) {
- TNode<IntPtrT> offset = ElementOffsetFromIndex(
- slot_index, PACKED_ELEMENTS, SMI_PARAMETERS, Context::SlotOffset(0));
+ TNode<IntPtrT> offset = ElementOffsetFromIndex(slot_index, PACKED_ELEMENTS,
+ Context::SlotOffset(0));
return UncheckedCast<Object>(Load(MachineType::AnyTagged(), context, offset));
}
@@ -2949,33 +3019,30 @@ void CodeStubAssembler::StoreFixedDoubleArrayElement(
StoreNoWriteBarrier(rep, object, offset, value_silenced);
}
-void CodeStubAssembler::StoreFeedbackVectorSlot(Node* object,
- Node* slot_index_node,
- Node* value,
- WriteBarrierMode barrier_mode,
- int additional_offset,
- ParameterMode parameter_mode) {
- CSA_SLOW_ASSERT(this, IsFeedbackVector(object));
- CSA_SLOW_ASSERT(this, MatchesParameterMode(slot_index_node, parameter_mode));
+void CodeStubAssembler::StoreFeedbackVectorSlot(
+ TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot,
+ TNode<AnyTaggedT> value, WriteBarrierMode barrier_mode,
+ int additional_offset) {
DCHECK(IsAligned(additional_offset, kTaggedSize));
DCHECK(barrier_mode == SKIP_WRITE_BARRIER ||
barrier_mode == UNSAFE_SKIP_WRITE_BARRIER ||
barrier_mode == UPDATE_WRITE_BARRIER);
int header_size =
FeedbackVector::kFeedbackSlotsOffset + additional_offset - kHeapObjectTag;
- TNode<IntPtrT> offset = ElementOffsetFromIndex(
- slot_index_node, HOLEY_ELEMENTS, parameter_mode, header_size);
- // Check that slot_index_node <= object.length.
+ TNode<IntPtrT> offset =
+ ElementOffsetFromIndex(Signed(slot), HOLEY_ELEMENTS, header_size);
+ // Check that slot <= feedback_vector.length.
CSA_ASSERT(this,
- IsOffsetInBounds(offset, LoadFeedbackVectorLength(CAST(object)),
+ IsOffsetInBounds(offset, LoadFeedbackVectorLength(feedback_vector),
FeedbackVector::kHeaderSize));
if (barrier_mode == SKIP_WRITE_BARRIER) {
- StoreNoWriteBarrier(MachineRepresentation::kTagged, object, offset, value);
+ StoreNoWriteBarrier(MachineRepresentation::kTagged, feedback_vector, offset,
+ value);
} else if (barrier_mode == UNSAFE_SKIP_WRITE_BARRIER) {
- UnsafeStoreNoWriteBarrier(MachineRepresentation::kTagged, object, offset,
- value);
+ UnsafeStoreNoWriteBarrier(MachineRepresentation::kTagged, feedback_vector,
+ offset, value);
} else {
- Store(object, offset, value);
+ Store(feedback_vector, offset, value);
}
}
@@ -3045,33 +3112,29 @@ TNode<Smi> CodeStubAssembler::BuildAppendJSArray(ElementsKind kind,
Label success(this);
TVARIABLE(Smi, var_tagged_length);
ParameterMode mode = OptimalParameterMode();
- VARIABLE(var_length, OptimalParameterRepresentation(),
- TaggedToParameter(LoadFastJSArrayLength(array), mode));
- VARIABLE(var_elements, MachineRepresentation::kTagged, LoadElements(array));
+ TVARIABLE(BInt, var_length, SmiToBInt(LoadFastJSArrayLength(array)));
+ TVARIABLE(FixedArrayBase, var_elements, LoadElements(array));
// Resize the capacity of the fixed array if it doesn't fit.
TNode<IntPtrT> first = arg_index->value();
- Node* growth = IntPtrToParameter(
- IntPtrSub(UncheckedCast<IntPtrT>(args->GetLength(INTPTR_PARAMETERS)),
- first),
- mode);
+ TNode<BInt> growth = IntPtrToBInt(IntPtrSub(args->GetLength(), first));
PossiblyGrowElementsCapacity(mode, kind, array, var_length.value(),
&var_elements, growth, &pre_bailout);
// Push each argument onto the end of the array now that there is enough
// capacity.
CodeStubAssembler::VariableList push_vars({&var_length}, zone());
- Node* elements = var_elements.value();
+ TNode<FixedArrayBase> elements = var_elements.value();
args->ForEach(
push_vars,
- [this, kind, mode, elements, &var_length, &pre_bailout](Node* arg) {
+ [&](TNode<Object> arg) {
TryStoreArrayElement(kind, mode, &pre_bailout, elements,
var_length.value(), arg);
- Increment(&var_length, 1, mode);
+ Increment(&var_length);
},
- first, nullptr);
+ first);
{
- TNode<Smi> length = ParameterToTagged(var_length.value(), mode);
+ TNode<Smi> length = BIntToSmi(var_length.value());
var_tagged_length = length;
StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length);
Goto(&success);
@@ -3111,8 +3174,7 @@ void CodeStubAssembler::BuildAppendJSArray(ElementsKind kind, Node* array,
CSA_SLOW_ASSERT(this, IsJSArray(array));
Comment("BuildAppendJSArray: ", ElementsKindToString(kind));
ParameterMode mode = OptimalParameterMode();
- VARIABLE(var_length, OptimalParameterRepresentation(),
- TaggedToParameter(LoadFastJSArrayLength(array), mode));
+ TVARIABLE(BInt, var_length, SmiToBInt(LoadFastJSArrayLength(array)));
VARIABLE(var_elements, MachineRepresentation::kTagged, LoadElements(array));
// Resize the capacity of the fixed array if it doesn't fit.
@@ -3124,9 +3186,9 @@ void CodeStubAssembler::BuildAppendJSArray(ElementsKind kind, Node* array,
// capacity.
TryStoreArrayElement(kind, mode, bailout, var_elements.value(),
var_length.value(), value);
- Increment(&var_length, 1, mode);
+ Increment(&var_length);
- TNode<Smi> length = ParameterToTagged(var_length.value(), mode);
+ TNode<Smi> length = BIntToSmi(var_length.value());
StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length);
}
@@ -3138,7 +3200,7 @@ Node* CodeStubAssembler::AllocateCellWithValue(Node* value,
return result;
}
-Node* CodeStubAssembler::LoadCellValue(Node* cell) {
+TNode<Object> CodeStubAssembler::LoadCellValue(Node* cell) {
CSA_SLOW_ASSERT(this, HasInstanceType(cell, CELL_TYPE));
return LoadObjectField(cell, Cell::kValueOffset);
}
@@ -3278,7 +3340,8 @@ TNode<ByteArray> CodeStubAssembler::AllocateByteArray(TNode<UintPtrT> length,
TNode<IntPtrT> raw_size =
GetArrayAllocationSize(Signed(length), UINT8_ELEMENTS, INTPTR_PARAMETERS,
ByteArray::kHeaderSize + kObjectAlignmentMask);
- TNode<WordT> size = WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask));
+ TNode<IntPtrT> size =
+ WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask));
Branch(IntPtrLessThanOrEqual(size, IntPtrConstant(kMaxRegularHeapObjectSize)),
&if_sizeissmall, &if_notsizeissmall);
@@ -3352,7 +3415,8 @@ TNode<String> CodeStubAssembler::AllocateSeqOneByteString(
TNode<IntPtrT> raw_size = GetArrayAllocationSize(
Signed(ChangeUint32ToWord(length)), UINT8_ELEMENTS, INTPTR_PARAMETERS,
SeqOneByteString::kHeaderSize + kObjectAlignmentMask);
- TNode<WordT> size = WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask));
+ TNode<IntPtrT> size =
+ WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask));
Branch(IntPtrLessThanOrEqual(size, IntPtrConstant(kMaxRegularHeapObjectSize)),
&if_sizeissmall, &if_notsizeissmall);
@@ -3423,7 +3487,8 @@ TNode<String> CodeStubAssembler::AllocateSeqTwoByteString(
TNode<IntPtrT> raw_size = GetArrayAllocationSize(
Signed(ChangeUint32ToWord(length)), UINT16_ELEMENTS, INTPTR_PARAMETERS,
SeqOneByteString::kHeaderSize + kObjectAlignmentMask);
- TNode<WordT> size = WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask));
+ TNode<IntPtrT> size =
+ WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask));
Branch(IntPtrLessThanOrEqual(size, IntPtrConstant(kMaxRegularHeapObjectSize)),
&if_sizeissmall, &if_notsizeissmall);
@@ -3496,35 +3561,6 @@ TNode<String> CodeStubAssembler::AllocateSlicedTwoByteString(
offset);
}
-TNode<String> CodeStubAssembler::AllocateConsString(TNode<Uint32T> length,
- TNode<String> left,
- TNode<String> right) {
- // Added string can be a cons string.
- Comment("Allocating ConsString");
- TNode<Int32T> left_instance_type = LoadInstanceType(left);
- TNode<Int32T> right_instance_type = LoadInstanceType(right);
-
- // Determine the resulting ConsString map to use depending on whether
- // any of {left} or {right} has two byte encoding.
- STATIC_ASSERT(kOneByteStringTag != 0);
- STATIC_ASSERT(kTwoByteStringTag == 0);
- TNode<Int32T> combined_instance_type =
- Word32And(left_instance_type, right_instance_type);
- TNode<Map> result_map = CAST(Select<Object>(
- IsSetWord32(combined_instance_type, kStringEncodingMask),
- [=] { return ConsOneByteStringMapConstant(); },
- [=] { return ConsStringMapConstant(); }));
- TNode<HeapObject> result = AllocateInNewSpace(ConsString::kSize);
- StoreMapNoWriteBarrier(result, result_map);
- StoreObjectFieldNoWriteBarrier(result, ConsString::kLengthOffset, length,
- MachineRepresentation::kWord32);
- StoreObjectFieldNoWriteBarrier(result, ConsString::kHashFieldOffset,
- Int32Constant(String::kEmptyHashField),
- MachineRepresentation::kWord32);
- StoreObjectFieldNoWriteBarrier(result, ConsString::kFirstOffset, left);
- StoreObjectFieldNoWriteBarrier(result, ConsString::kSecondOffset, right);
- return CAST(result);
-}
TNode<NameDictionary> CodeStubAssembler::AllocateNameDictionary(
int at_least_space_for) {
@@ -3762,106 +3798,26 @@ template V8_EXPORT_PRIVATE TNode<SmallOrderedHashSet>
CodeStubAssembler::AllocateSmallOrderedHashTable<SmallOrderedHashSet>(
TNode<IntPtrT> capacity);
-template <typename CollectionType>
-void CodeStubAssembler::FindOrderedHashTableEntry(
- Node* table, Node* hash,
- const std::function<void(TNode<Object>, Label*, Label*)>& key_compare,
- Variable* entry_start_position, Label* entry_found, Label* not_found) {
- // Get the index of the bucket.
- TNode<IntPtrT> const number_of_buckets =
- SmiUntag(CAST(UnsafeLoadFixedArrayElement(
- CAST(table), CollectionType::NumberOfBucketsIndex())));
- TNode<WordT> const bucket =
- WordAnd(hash, IntPtrSub(number_of_buckets, IntPtrConstant(1)));
- TNode<IntPtrT> const first_entry = SmiUntag(CAST(UnsafeLoadFixedArrayElement(
- CAST(table), bucket,
- CollectionType::HashTableStartIndex() * kTaggedSize)));
-
- // Walk the bucket chain.
- TNode<IntPtrT> entry_start;
- Label if_key_found(this);
- {
- TVARIABLE(IntPtrT, var_entry, first_entry);
- Label loop(this, {&var_entry, entry_start_position}),
- continue_next_entry(this);
- Goto(&loop);
- BIND(&loop);
-
- // If the entry index is the not-found sentinel, we are done.
- GotoIf(IntPtrEqual(var_entry.value(),
- IntPtrConstant(CollectionType::kNotFound)),
- not_found);
-
- // Make sure the entry index is within range.
- CSA_ASSERT(
- this,
- UintPtrLessThan(
- var_entry.value(),
- SmiUntag(SmiAdd(
- CAST(UnsafeLoadFixedArrayElement(
- CAST(table), CollectionType::NumberOfElementsIndex())),
- CAST(UnsafeLoadFixedArrayElement(
- CAST(table),
- CollectionType::NumberOfDeletedElementsIndex()))))));
-
- // Compute the index of the entry relative to kHashTableStartIndex.
- entry_start =
- IntPtrAdd(IntPtrMul(var_entry.value(),
- IntPtrConstant(CollectionType::kEntrySize)),
- number_of_buckets);
-
- // Load the key from the entry.
- TNode<Object> const candidate_key = UnsafeLoadFixedArrayElement(
- CAST(table), entry_start,
- CollectionType::HashTableStartIndex() * kTaggedSize);
-
- key_compare(candidate_key, &if_key_found, &continue_next_entry);
-
- BIND(&continue_next_entry);
- // Load the index of the next entry in the bucket chain.
- var_entry = SmiUntag(CAST(UnsafeLoadFixedArrayElement(
- CAST(table), entry_start,
- (CollectionType::HashTableStartIndex() + CollectionType::kChainOffset) *
- kTaggedSize)));
-
- Goto(&loop);
- }
-
- BIND(&if_key_found);
- entry_start_position->Bind(entry_start);
- Goto(entry_found);
-}
-
-template void CodeStubAssembler::FindOrderedHashTableEntry<OrderedHashMap>(
- Node* table, Node* hash,
- const std::function<void(TNode<Object>, Label*, Label*)>& key_compare,
- Variable* entry_start_position, Label* entry_found, Label* not_found);
-template void CodeStubAssembler::FindOrderedHashTableEntry<OrderedHashSet>(
- Node* table, Node* hash,
- const std::function<void(TNode<Object>, Label*, Label*)>& key_compare,
- Variable* entry_start_position, Label* entry_found, Label* not_found);
-
Node* CodeStubAssembler::AllocateStruct(Node* map, AllocationFlags flags) {
Comment("AllocateStruct");
CSA_ASSERT(this, IsMap(map));
TNode<IntPtrT> size = TimesTaggedSize(LoadMapInstanceSizeInWords(map));
TNode<HeapObject> object = Allocate(size, flags);
StoreMapNoWriteBarrier(object, map);
- InitializeStructBody(object, map, size, Struct::kHeaderSize);
+ InitializeStructBody(object, size, Struct::kHeaderSize);
return object;
}
-void CodeStubAssembler::InitializeStructBody(Node* object, Node* map,
- Node* size, int start_offset) {
- CSA_SLOW_ASSERT(this, IsMap(map));
+void CodeStubAssembler::InitializeStructBody(TNode<HeapObject> object,
+ TNode<IntPtrT> size,
+ int start_offset) {
Comment("InitializeStructBody");
TNode<Oddball> filler = UndefinedConstant();
// Calculate the untagged field addresses.
- object = BitcastTaggedToWord(object);
- TNode<WordT> start_address =
- IntPtrAdd(object, IntPtrConstant(start_offset - kHeapObjectTag));
- TNode<WordT> end_address =
- IntPtrSub(IntPtrAdd(object, size), IntPtrConstant(kHeapObjectTag));
+ TNode<IntPtrT> start_address =
+ IntPtrAdd(BitcastTaggedToWord(object),
+ IntPtrConstant(start_offset - kHeapObjectTag));
+ TNode<IntPtrT> end_address = IntPtrAdd(start_address, size);
StoreFieldsNoWriteBarrier(start_address, end_address, filler);
}
@@ -3883,8 +3839,9 @@ TNode<JSObject> CodeStubAssembler::AllocateJSObjectFromMap(
}
void CodeStubAssembler::InitializeJSObjectFromMap(
- Node* object, Node* map, Node* instance_size, Node* properties,
- Node* elements, SlackTrackingMode slack_tracking_mode) {
+ SloppyTNode<HeapObject> object, SloppyTNode<Map> map,
+ SloppyTNode<IntPtrT> instance_size, Node* properties, Node* elements,
+ SlackTrackingMode slack_tracking_mode) {
CSA_SLOW_ASSERT(this, IsMap(map));
// This helper assumes that the object is in new-space, as guarded by the
// check in AllocateJSObjectFromMap.
@@ -3915,7 +3872,8 @@ void CodeStubAssembler::InitializeJSObjectFromMap(
}
void CodeStubAssembler::InitializeJSObjectBodyNoSlackTracking(
- Node* object, Node* map, Node* instance_size, int start_offset) {
+ SloppyTNode<HeapObject> object, SloppyTNode<Map> map,
+ SloppyTNode<IntPtrT> instance_size, int start_offset) {
STATIC_ASSERT(Map::kNoSlackTracking == 0);
CSA_ASSERT(
this, IsClearWord32<Map::ConstructionCounterBits>(LoadMapBitField3(map)));
@@ -3924,8 +3882,8 @@ void CodeStubAssembler::InitializeJSObjectBodyNoSlackTracking(
}
void CodeStubAssembler::InitializeJSObjectBodyWithSlackTracking(
- Node* object, Node* map, Node* instance_size) {
- CSA_SLOW_ASSERT(this, IsMap(map));
+ SloppyTNode<HeapObject> object, SloppyTNode<Map> map,
+ SloppyTNode<IntPtrT> instance_size) {
Comment("InitializeJSObjectBodyNoSlackTracking");
// Perform in-object slack tracking if requested.
@@ -3953,9 +3911,9 @@ void CodeStubAssembler::InitializeJSObjectBodyWithSlackTracking(
// The object still has in-object slack, therefore the |used_or_unused|
// field contains the "used" value.
- TNode<UintPtrT> used_size = TimesTaggedSize(ChangeUint32ToWord(
+ TNode<IntPtrT> used_size = Signed(TimesTaggedSize(ChangeUint32ToWord(
LoadObjectField(map, Map::kUsedOrUnusedInstanceSizeInWordsOffset,
- MachineType::Uint8())));
+ MachineType::Uint8()))));
Comment("iInitialize filler fields");
InitializeFieldsWithRoot(object, used_size, instance_size,
@@ -3984,19 +3942,19 @@ void CodeStubAssembler::InitializeJSObjectBodyWithSlackTracking(
BIND(&end);
}
-void CodeStubAssembler::StoreFieldsNoWriteBarrier(Node* start_address,
- Node* end_address,
- Node* value) {
+void CodeStubAssembler::StoreFieldsNoWriteBarrier(TNode<IntPtrT> start_address,
+ TNode<IntPtrT> end_address,
+ TNode<Object> value) {
Comment("StoreFieldsNoWriteBarrier");
CSA_ASSERT(this, WordIsAligned(start_address, kTaggedSize));
CSA_ASSERT(this, WordIsAligned(end_address, kTaggedSize));
- BuildFastLoop(
+ BuildFastLoop<IntPtrT>(
start_address, end_address,
- [this, value](Node* current) {
+ [=](TNode<IntPtrT> current) {
UnsafeStoreNoWriteBarrier(MachineRepresentation::kTagged, current,
value);
},
- kTaggedSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+ kTaggedSize, IndexAdvanceMode::kPost);
}
TNode<BoolT> CodeStubAssembler::IsValidFastJSArrayCapacity(
@@ -4008,12 +3966,12 @@ TNode<BoolT> CodeStubAssembler::IsValidFastJSArrayCapacity(
TNode<JSArray> CodeStubAssembler::AllocateJSArray(
TNode<Map> array_map, TNode<FixedArrayBase> elements, TNode<Smi> length,
- Node* allocation_site, int array_header_size) {
+ TNode<AllocationSite> allocation_site, int array_header_size) {
Comment("begin allocation of JSArray passing in elements");
CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
int base_size = array_header_size;
- if (allocation_site != nullptr) {
+ if (!allocation_site.is_null()) {
base_size += AllocationMemento::kSize;
}
@@ -4027,8 +3985,9 @@ TNode<JSArray> CodeStubAssembler::AllocateJSArray(
std::pair<TNode<JSArray>, TNode<FixedArrayBase>>
CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
ElementsKind kind, TNode<Map> array_map, TNode<Smi> length,
- Node* allocation_site, Node* capacity, ParameterMode capacity_mode,
- AllocationFlags allocation_flags, int array_header_size) {
+ TNode<AllocationSite> allocation_site, Node* capacity,
+ ParameterMode capacity_mode, AllocationFlags allocation_flags,
+ int array_header_size) {
Comment("begin allocation of JSArray with elements");
CHECK_EQ(allocation_flags & ~kAllowLargeObjectAllocation, 0);
CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
@@ -4065,7 +4024,9 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
BIND(&nonempty);
{
int base_size = array_header_size;
- if (allocation_site != nullptr) base_size += AllocationMemento::kSize;
+ if (!allocation_site.is_null()) {
+ base_size += AllocationMemento::kSize;
+ }
const int elements_offset = base_size;
@@ -4138,8 +4099,8 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
}
TNode<JSArray> CodeStubAssembler::AllocateUninitializedJSArray(
- TNode<Map> array_map, TNode<Smi> length, Node* allocation_site,
- TNode<IntPtrT> size_in_bytes) {
+ TNode<Map> array_map, TNode<Smi> length,
+ TNode<AllocationSite> allocation_site, TNode<IntPtrT> size_in_bytes) {
CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
// Allocate space for the JSArray and the elements FixedArray in one go.
@@ -4150,7 +4111,7 @@ TNode<JSArray> CodeStubAssembler::AllocateUninitializedJSArray(
StoreObjectFieldRoot(array, JSArray::kPropertiesOrHashOffset,
RootIndex::kEmptyFixedArray);
- if (allocation_site != nullptr) {
+ if (!allocation_site.is_null()) {
InitializeAllocationMemento(array, IntPtrConstant(JSArray::kSize),
allocation_site);
}
@@ -4160,7 +4121,7 @@ TNode<JSArray> CodeStubAssembler::AllocateUninitializedJSArray(
TNode<JSArray> CodeStubAssembler::AllocateJSArray(
ElementsKind kind, TNode<Map> array_map, Node* capacity, TNode<Smi> length,
- Node* allocation_site, ParameterMode capacity_mode,
+ TNode<AllocationSite> allocation_site, ParameterMode capacity_mode,
AllocationFlags allocation_flags) {
CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
CSA_SLOW_ASSERT(this, MatchesParameterMode(capacity, capacity_mode));
@@ -4189,10 +4150,9 @@ TNode<JSArray> CodeStubAssembler::AllocateJSArray(
return array;
}
-Node* CodeStubAssembler::ExtractFastJSArray(Node* context, Node* array,
- Node* begin, Node* count,
- ParameterMode mode, Node* capacity,
- Node* allocation_site) {
+Node* CodeStubAssembler::ExtractFastJSArray(
+ TNode<Context> context, TNode<JSArray> array, Node* begin, Node* count,
+ ParameterMode mode, Node* capacity, TNode<AllocationSite> allocation_site) {
TNode<Map> original_array_map = LoadMap(array);
TNode<Int32T> elements_kind = LoadMapElementsKind(original_array_map);
@@ -4209,18 +4169,16 @@ Node* CodeStubAssembler::ExtractFastJSArray(Node* context, Node* array,
return result;
}
-Node* CodeStubAssembler::CloneFastJSArray(Node* context, Node* array,
- ParameterMode mode,
- Node* allocation_site,
- HoleConversionMode convert_holes) {
+TNode<JSArray> CodeStubAssembler::CloneFastJSArray(
+ TNode<Context> context, TNode<JSArray> array, ParameterMode mode,
+ TNode<AllocationSite> allocation_site, HoleConversionMode convert_holes) {
// TODO(dhai): we should be able to assert IsFastJSArray(array) here, but this
// function is also used to copy boilerplates even when the no-elements
// protector is invalid. This function should be renamed to reflect its uses.
- CSA_ASSERT(this, IsJSArray(array));
TNode<Number> length = LoadJSArrayLength(array);
- Node* new_elements = nullptr;
- VARIABLE(var_new_elements, MachineRepresentation::kTagged);
+ TNode<FixedArrayBase> new_elements;
+ TVARIABLE(FixedArrayBase, var_new_elements);
TVARIABLE(Int32T, var_elements_kind, LoadMapElementsKind(LoadMap(array)));
Label allocate_jsarray(this), holey_extract(this),
@@ -4240,7 +4198,7 @@ Node* CodeStubAssembler::CloneFastJSArray(Node* context, Node* array,
TaggedToParameter(CAST(length), mode), nullptr,
ExtractFixedArrayFlag::kAllFixedArraysDontCopyCOW, mode,
nullptr, var_elements_kind.value());
- var_new_elements.Bind(new_elements);
+ var_new_elements = new_elements;
Goto(&allocate_jsarray);
if (need_conversion) {
@@ -4257,7 +4215,7 @@ Node* CodeStubAssembler::CloneFastJSArray(Node* context, Node* array,
LoadElements(array), IntPtrOrSmiConstant(0, mode),
TaggedToParameter(CAST(length), mode), nullptr,
ExtractFixedArrayFlag::kAllFixedArrays, mode, &var_holes_converted);
- var_new_elements.Bind(new_elements);
+ var_new_elements = new_elements;
// If the array type didn't change, use the original elements kind.
GotoIfNot(var_holes_converted.value(), &allocate_jsarray);
// Otherwise use PACKED_ELEMENTS for the target's elements kind.
@@ -4283,8 +4241,8 @@ Node* CodeStubAssembler::CloneFastJSArray(Node* context, Node* array,
TNode<Map> array_map =
LoadJSArrayElementsMap(var_elements_kind.value(), native_context);
- TNode<JSArray> result = AllocateJSArray(
- array_map, CAST(var_new_elements.value()), CAST(length), allocation_site);
+ TNode<JSArray> result = AllocateJSArray(array_map, var_new_elements.value(),
+ CAST(length), allocation_site);
return result;
}
@@ -4555,14 +4513,14 @@ TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedDoubleArrayFillingHoles(
const int first_element_offset = FixedArray::kHeaderSize - kHeapObjectTag;
TNode<IntPtrT> first_from_element_offset =
ElementOffsetFromIndex(first, kind, mode, 0);
- TNode<WordT> limit_offset = IntPtrAdd(first_from_element_offset,
- IntPtrConstant(first_element_offset));
+ TNode<IntPtrT> limit_offset = IntPtrAdd(first_from_element_offset,
+ IntPtrConstant(first_element_offset));
TVARIABLE(IntPtrT, var_from_offset,
ElementOffsetFromIndex(IntPtrOrSmiAdd(first, count, mode), kind,
mode, first_element_offset));
Label decrement(this, {&var_from_offset}), done(this);
- TNode<WordT> to_array_adjusted =
+ TNode<IntPtrT> to_array_adjusted =
IntPtrSub(BitcastTaggedToWord(to_elements), first_from_element_offset);
Branch(WordEqual(var_from_offset.value(), limit_offset), &done, &decrement);
@@ -4908,12 +4866,10 @@ void CodeStubAssembler::MoveElements(ElementsKind kind,
TNode<IntPtrT> elements_intptr = BitcastTaggedToWord(elements);
TNode<IntPtrT> target_data_ptr =
IntPtrAdd(elements_intptr,
- ElementOffsetFromIndex(dst_index, kind, INTPTR_PARAMETERS,
- fa_base_data_offset));
+ ElementOffsetFromIndex(dst_index, kind, fa_base_data_offset));
TNode<IntPtrT> source_data_ptr =
IntPtrAdd(elements_intptr,
- ElementOffsetFromIndex(src_index, kind, INTPTR_PARAMETERS,
- fa_base_data_offset));
+ ElementOffsetFromIndex(src_index, kind, fa_base_data_offset));
TNode<ExternalReference> memmove =
ExternalConstant(ExternalReference::libc_memmove_function());
CallCFunction(memmove, MachineType::Pointer(),
@@ -4997,10 +4953,10 @@ void CodeStubAssembler::CopyElements(ElementsKind kind,
IntPtrMul(length, IntPtrConstant(ElementsKindToByteSize(kind)));
static const int32_t fa_base_data_offset =
FixedArrayBase::kHeaderSize - kHeapObjectTag;
- TNode<IntPtrT> src_offset_start = ElementOffsetFromIndex(
- src_index, kind, INTPTR_PARAMETERS, fa_base_data_offset);
- TNode<IntPtrT> dst_offset_start = ElementOffsetFromIndex(
- dst_index, kind, INTPTR_PARAMETERS, fa_base_data_offset);
+ TNode<IntPtrT> src_offset_start =
+ ElementOffsetFromIndex(src_index, kind, fa_base_data_offset);
+ TNode<IntPtrT> dst_offset_start =
+ ElementOffsetFromIndex(dst_index, kind, fa_base_data_offset);
TNode<IntPtrT> src_elements_intptr = BitcastTaggedToWord(src_elements);
TNode<IntPtrT> source_data_ptr =
IntPtrAdd(src_elements_intptr, src_offset_start);
@@ -5283,65 +5239,6 @@ void CodeStubAssembler::CopyPropertyArrayValues(Node* from_array,
Comment("] CopyPropertyArrayValues");
}
-void CodeStubAssembler::CopyStringCharacters(Node* from_string, Node* to_string,
- TNode<IntPtrT> from_index,
- TNode<IntPtrT> to_index,
- TNode<IntPtrT> character_count,
- String::Encoding from_encoding,
- String::Encoding to_encoding) {
- // Cannot assert IsString(from_string) and IsString(to_string) here because
- // CSA::SubString can pass in faked sequential strings when handling external
- // subject strings.
- bool from_one_byte = from_encoding == String::ONE_BYTE_ENCODING;
- bool to_one_byte = to_encoding == String::ONE_BYTE_ENCODING;
- DCHECK_IMPLIES(to_one_byte, from_one_byte);
- Comment("CopyStringCharacters ",
- from_one_byte ? "ONE_BYTE_ENCODING" : "TWO_BYTE_ENCODING", " -> ",
- to_one_byte ? "ONE_BYTE_ENCODING" : "TWO_BYTE_ENCODING");
-
- ElementsKind from_kind = from_one_byte ? UINT8_ELEMENTS : UINT16_ELEMENTS;
- ElementsKind to_kind = to_one_byte ? UINT8_ELEMENTS : UINT16_ELEMENTS;
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- int header_size = SeqOneByteString::kHeaderSize - kHeapObjectTag;
- TNode<IntPtrT> from_offset = ElementOffsetFromIndex(
- from_index, from_kind, INTPTR_PARAMETERS, header_size);
- TNode<IntPtrT> to_offset =
- ElementOffsetFromIndex(to_index, to_kind, INTPTR_PARAMETERS, header_size);
- TNode<IntPtrT> byte_count =
- ElementOffsetFromIndex(character_count, from_kind, INTPTR_PARAMETERS);
- TNode<WordT> limit_offset = IntPtrAdd(from_offset, byte_count);
-
- // Prepare the fast loop
- MachineType type =
- from_one_byte ? MachineType::Uint8() : MachineType::Uint16();
- MachineRepresentation rep = to_one_byte ? MachineRepresentation::kWord8
- : MachineRepresentation::kWord16;
- int from_increment = 1 << ElementsKindToShiftSize(from_kind);
- int to_increment = 1 << ElementsKindToShiftSize(to_kind);
-
- VARIABLE(current_to_offset, MachineType::PointerRepresentation(), to_offset);
- VariableList vars({&current_to_offset}, zone());
- int to_index_constant = 0, from_index_constant = 0;
- bool index_same = (from_encoding == to_encoding) &&
- (from_index == to_index ||
- (ToInt32Constant(from_index, &from_index_constant) &&
- ToInt32Constant(to_index, &to_index_constant) &&
- from_index_constant == to_index_constant));
- BuildFastLoop(
- vars, from_offset, limit_offset,
- [this, from_string, to_string, &current_to_offset, to_increment, type,
- rep, index_same](Node* offset) {
- Node* value = Load(type, from_string, offset);
- StoreNoWriteBarrier(rep, to_string,
- index_same ? offset : current_to_offset.value(),
- value);
- if (!index_same) {
- Increment(&current_to_offset, to_increment);
- }
- },
- from_increment, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
-}
-
Node* CodeStubAssembler::LoadElementAndPrepareForStore(Node* array,
Node* offset,
ElementsKind from_kind,
@@ -5381,9 +5278,9 @@ Node* CodeStubAssembler::CalculateNewElementsCapacity(Node* old_capacity,
return IntPtrOrSmiAdd(new_capacity, padding, mode);
}
-Node* CodeStubAssembler::TryGrowElementsCapacity(Node* object, Node* elements,
- ElementsKind kind, Node* key,
- Label* bailout) {
+TNode<FixedArrayBase> CodeStubAssembler::TryGrowElementsCapacity(
+ Node* object, Node* elements, ElementsKind kind, Node* key,
+ Label* bailout) {
CSA_SLOW_ASSERT(this, TaggedIsNotSmi(object));
CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(elements, kind));
CSA_SLOW_ASSERT(this, TaggedIsSmi(key));
@@ -5395,11 +5292,9 @@ Node* CodeStubAssembler::TryGrowElementsCapacity(Node* object, Node* elements,
TaggedToParameter(capacity, mode), mode, bailout);
}
-Node* CodeStubAssembler::TryGrowElementsCapacity(Node* object, Node* elements,
- ElementsKind kind, Node* key,
- Node* capacity,
- ParameterMode mode,
- Label* bailout) {
+TNode<FixedArrayBase> CodeStubAssembler::TryGrowElementsCapacity(
+ Node* object, Node* elements, ElementsKind kind, Node* key, Node* capacity,
+ ParameterMode mode, Label* bailout) {
Comment("TryGrowElementsCapacity");
CSA_SLOW_ASSERT(this, TaggedIsNotSmi(object));
CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(elements, kind));
@@ -5418,7 +5313,7 @@ Node* CodeStubAssembler::TryGrowElementsCapacity(Node* object, Node* elements,
new_capacity, mode, bailout);
}
-Node* CodeStubAssembler::GrowElementsCapacity(
+TNode<FixedArrayBase> CodeStubAssembler::GrowElementsCapacity(
Node* object, Node* elements, ElementsKind from_kind, ElementsKind to_kind,
Node* capacity, Node* new_capacity, ParameterMode mode, Label* bailout) {
Comment("[ GrowElementsCapacity");
@@ -5471,45 +5366,22 @@ void CodeStubAssembler::InitializeAllocationMemento(Node* base,
Comment("]");
}
-Node* CodeStubAssembler::TryTaggedToFloat64(Node* value,
- Label* if_valueisnotnumber) {
- Label out(this);
- VARIABLE(var_result, MachineRepresentation::kFloat64);
-
- // Check if the {value} is a Smi or a HeapObject.
- Label if_valueissmi(this), if_valueisnotsmi(this);
- Branch(TaggedIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
-
- BIND(&if_valueissmi);
- {
- // Convert the Smi {value}.
- var_result.Bind(SmiToFloat64(value));
- Goto(&out);
- }
-
- BIND(&if_valueisnotsmi);
- {
- // Check if {value} is a HeapNumber.
- Label if_valueisheapnumber(this);
- Branch(IsHeapNumber(value), &if_valueisheapnumber, if_valueisnotnumber);
-
- BIND(&if_valueisheapnumber);
- {
- // Load the floating point value.
- var_result.Bind(LoadHeapNumberValue(value));
- Goto(&out);
- }
- }
- BIND(&out);
- return var_result.value();
+TNode<Float64T> CodeStubAssembler::TryTaggedToFloat64(
+ TNode<Object> value, Label* if_valueisnotnumber) {
+ return Select<Float64T>(
+ TaggedIsSmi(value), [&]() { return SmiToFloat64(CAST(value)); },
+ [&]() {
+ GotoIfNot(IsHeapNumber(CAST(value)), if_valueisnotnumber);
+ return LoadHeapNumberValue(CAST(value));
+ });
}
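
A minimal standalone sketch of the control flow in the rewritten TryTaggedToFloat64, in plain C++ rather than CSA; the TaggedValue struct and its fields are illustrative stand-ins, not the real V8 object model:

#include <optional>

// Illustrative tagged-value model: a JS value is either a Smi, a HeapNumber,
// or something else entirely.
struct TaggedValue {
  bool is_smi = false;
  bool is_heap_number = false;
  int smi_value = 0;
  double heap_number_value = 0.0;
};

// Returns the float64 value when {value} is a Number; std::nullopt plays the
// role of the |if_valueisnotnumber| label in the CSA version.
std::optional<double> TryTaggedToFloat64(const TaggedValue& value) {
  if (value.is_smi) return static_cast<double>(value.smi_value);
  if (value.is_heap_number) return value.heap_number_value;
  return std::nullopt;
}
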
-Node* CodeStubAssembler::TruncateTaggedToFloat64(Node* context, Node* value) {
+TNode<Float64T> CodeStubAssembler::TruncateTaggedToFloat64(
+ SloppyTNode<Context> context, SloppyTNode<Object> value) {
// We might need to loop once due to ToNumber conversion.
- VARIABLE(var_value, MachineRepresentation::kTagged);
- VARIABLE(var_result, MachineRepresentation::kFloat64);
+ TVARIABLE(Object, var_value, value);
+ TVARIABLE(Float64T, var_result);
Label loop(this, &var_value), done_loop(this, &var_result);
- var_value.Bind(value);
Goto(&loop);
BIND(&loop);
{
@@ -5520,14 +5392,13 @@ Node* CodeStubAssembler::TruncateTaggedToFloat64(Node* context, Node* value) {
    // Convert {value} to Float64 if it is a number; otherwise convert it to a
    // number first.
- Node* const result = TryTaggedToFloat64(value, &if_valueisnotnumber);
- var_result.Bind(result);
+ var_result = TryTaggedToFloat64(value, &if_valueisnotnumber);
Goto(&done_loop);
BIND(&if_valueisnotnumber);
{
// Convert the {value} to a Number first.
- var_value.Bind(CallBuiltin(Builtins::kNonNumberToNumber, context, value));
+ var_value = CallBuiltin(Builtins::kNonNumberToNumber, context, value);
Goto(&loop);
}
}
@@ -5535,8 +5406,9 @@ Node* CodeStubAssembler::TruncateTaggedToFloat64(Node* context, Node* value) {
return var_result.value();
}
-Node* CodeStubAssembler::TruncateTaggedToWord32(Node* context, Node* value) {
- VARIABLE(var_result, MachineRepresentation::kWord32);
+TNode<Word32T> CodeStubAssembler::TruncateTaggedToWord32(
+ SloppyTNode<Context> context, SloppyTNode<Object> value) {
+ TVARIABLE(Word32T, var_result);
Label done(this);
TaggedToWord32OrBigIntImpl<Object::Conversion::kToNumber>(context, value,
&done, &var_result);
@@ -5546,38 +5418,33 @@ Node* CodeStubAssembler::TruncateTaggedToWord32(Node* context, Node* value) {
// Truncate {value} to word32 and jump to {if_number} if it is a Number,
// or find that it is a BigInt and jump to {if_bigint}.
-void CodeStubAssembler::TaggedToWord32OrBigInt(Node* context, Node* value,
- Label* if_number,
- Variable* var_word32,
- Label* if_bigint,
- Variable* var_bigint) {
+void CodeStubAssembler::TaggedToWord32OrBigInt(
+ TNode<Context> context, TNode<Object> value, Label* if_number,
+ TVariable<Word32T>* var_word32, Label* if_bigint,
+ TVariable<Object>* var_maybe_bigint) {
TaggedToWord32OrBigIntImpl<Object::Conversion::kToNumeric>(
- context, value, if_number, var_word32, if_bigint, var_bigint);
+ context, value, if_number, var_word32, if_bigint, var_maybe_bigint);
}
// Truncate {value} to word32 and jump to {if_number} if it is a Number,
// or find that it is a BigInt and jump to {if_bigint}. In either case,
// store the type feedback in {var_feedback}.
void CodeStubAssembler::TaggedToWord32OrBigIntWithFeedback(
- Node* context, Node* value, Label* if_number, Variable* var_word32,
- Label* if_bigint, Variable* var_bigint, Variable* var_feedback) {
+ TNode<Context> context, TNode<Object> value, Label* if_number,
+ TVariable<Word32T>* var_word32, Label* if_bigint,
+ TVariable<Object>* var_maybe_bigint, TVariable<Smi>* var_feedback) {
TaggedToWord32OrBigIntImpl<Object::Conversion::kToNumeric>(
- context, value, if_number, var_word32, if_bigint, var_bigint,
+ context, value, if_number, var_word32, if_bigint, var_maybe_bigint,
var_feedback);
}
template <Object::Conversion conversion>
void CodeStubAssembler::TaggedToWord32OrBigIntImpl(
- Node* context, Node* value, Label* if_number, Variable* var_word32,
- Label* if_bigint, Variable* var_bigint, Variable* var_feedback) {
- DCHECK(var_word32->rep() == MachineRepresentation::kWord32);
- DCHECK(var_bigint == nullptr ||
- var_bigint->rep() == MachineRepresentation::kTagged);
- DCHECK(var_feedback == nullptr ||
- var_feedback->rep() == MachineRepresentation::kTaggedSigned);
-
+ TNode<Context> context, TNode<Object> value, Label* if_number,
+ TVariable<Word32T>* var_word32, Label* if_bigint,
+ TVariable<Object>* var_maybe_bigint, TVariable<Smi>* var_feedback) {
// We might need to loop after conversion.
- VARIABLE(var_value, MachineRepresentation::kTagged, value);
+ TVARIABLE(Object, var_value, value);
OverwriteFeedback(var_feedback, BinaryOperationFeedback::kNone);
Variable* loop_vars[] = {&var_value, var_feedback};
int num_vars =
@@ -5592,12 +5459,13 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl(
GotoIf(TaggedIsNotSmi(value), &not_smi);
// {value} is a Smi.
- var_word32->Bind(SmiToInt32(value));
+ *var_word32 = SmiToInt32(CAST(value));
CombineFeedback(var_feedback, BinaryOperationFeedback::kSignedSmall);
Goto(if_number);
BIND(&not_smi);
- TNode<Map> map = LoadMap(value);
+ TNode<HeapObject> value_heap_object = CAST(value);
+ TNode<Map> map = LoadMap(value_heap_object);
GotoIf(IsHeapNumberMap(map), &is_heap_number);
TNode<Uint16T> instance_type = LoadMapInstanceType(map);
if (conversion == Object::Conversion::kToNumeric) {
@@ -5610,7 +5478,7 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl(
// We do not require an Or with earlier feedback here because once we
// convert the value to a Numeric, we cannot reach this path. We can
// only reach this path on the first pass when the feedback is kNone.
- CSA_ASSERT(this, SmiEqual(CAST(var_feedback->value()),
+ CSA_ASSERT(this, SmiEqual(var_feedback->value(),
SmiConstant(BinaryOperationFeedback::kNone)));
}
GotoIf(InstanceTypeEqual(instance_type, ODDBALL_TYPE), &is_oddball);
@@ -5618,25 +5486,25 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl(
auto builtin = conversion == Object::Conversion::kToNumeric
? Builtins::kNonNumberToNumeric
: Builtins::kNonNumberToNumber;
- var_value.Bind(CallBuiltin(builtin, context, value));
+ var_value = CallBuiltin(builtin, context, value);
OverwriteFeedback(var_feedback, BinaryOperationFeedback::kAny);
Goto(&loop);
BIND(&is_oddball);
- var_value.Bind(LoadObjectField(value, Oddball::kToNumberOffset));
+ var_value = LoadObjectField(value_heap_object, Oddball::kToNumberOffset);
OverwriteFeedback(var_feedback,
BinaryOperationFeedback::kNumberOrOddball);
Goto(&loop);
}
BIND(&is_heap_number);
- var_word32->Bind(TruncateHeapNumberValueToWord32(CAST(value)));
+ *var_word32 = TruncateHeapNumberValueToWord32(CAST(value));
CombineFeedback(var_feedback, BinaryOperationFeedback::kNumber);
Goto(if_number);
if (conversion == Object::Conversion::kToNumeric) {
BIND(&is_bigint);
- var_bigint->Bind(value);
+ *var_maybe_bigint = value;
CombineFeedback(var_feedback, BinaryOperationFeedback::kBigInt);
Goto(if_bigint);
}
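
In the loop above, Smis convert directly, HeapNumbers are truncated via TruncateHeapNumberValueToWord32, BigInts are routed to |if_bigint|, Oddballs substitute their cached ToNumber value, and anything else goes through NonNumberToNumeric / NonNumberToNumber before looping again. The HeapNumber truncation follows the usual JS ToInt32 rule; a standalone sketch of that rule, assuming two's-complement int32_t (plain C++, not the machine operator itself):

#include <cmath>
#include <cstdint>

// ToInt32-style truncation: take the value modulo 2^32 and reinterpret the
// result as a signed 32-bit integer; NaN and infinities map to 0.
int32_t TruncateFloat64ToWord32(double value) {
  if (!std::isfinite(value)) return 0;
  double truncated = std::trunc(value);
  double modulo = std::fmod(truncated, 4294967296.0);  // 2^32
  if (modulo < 0) modulo += 4294967296.0;
  uint32_t bits = static_cast<uint32_t>(modulo);       // now in [0, 2^32)
  return static_cast<int32_t>(bits);  // two's-complement reinterpretation
}
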
@@ -5650,14 +5518,14 @@ TNode<Int32T> CodeStubAssembler::TruncateHeapNumberValueToWord32(
}
void CodeStubAssembler::TryHeapNumberToSmi(TNode<HeapNumber> number,
- TVariable<Smi>& var_result_smi,
+ TVariable<Smi>* var_result_smi,
Label* if_smi) {
TNode<Float64T> value = LoadHeapNumberValue(number);
TryFloat64ToSmi(value, var_result_smi, if_smi);
}
void CodeStubAssembler::TryFloat64ToSmi(TNode<Float64T> value,
- TVariable<Smi>& var_result_smi,
+ TVariable<Smi>* var_result_smi,
Label* if_smi) {
TNode<Int32T> value32 = RoundFloat64ToInt32(value);
TNode<Float64T> value64 = ChangeInt32ToFloat64(value32);
@@ -5674,13 +5542,13 @@ void CodeStubAssembler::TryFloat64ToSmi(TNode<Float64T> value,
BIND(&if_int32);
{
if (SmiValuesAre32Bits()) {
- var_result_smi = SmiTag(ChangeInt32ToIntPtr(value32));
+ *var_result_smi = SmiTag(ChangeInt32ToIntPtr(value32));
} else {
DCHECK(SmiValuesAre31Bits());
TNode<PairT<Int32T, BoolT>> pair = Int32AddWithOverflow(value32, value32);
TNode<BoolT> overflow = Projection<1>(pair);
GotoIf(overflow, &if_heap_number);
- var_result_smi =
+ *var_result_smi =
BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(Projection<0>(pair)));
}
Goto(if_smi);
@@ -5693,7 +5561,7 @@ TNode<Number> CodeStubAssembler::ChangeFloat64ToTagged(
Label if_smi(this), done(this);
TVARIABLE(Smi, var_smi_result);
TVARIABLE(Number, var_result);
- TryFloat64ToSmi(value, var_smi_result, &if_smi);
+ TryFloat64ToSmi(value, &var_smi_result, &if_smi);
var_result = AllocateHeapNumberWithValue(value);
Goto(&done);
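
TryFloat64ToSmi (now taking the result variable by pointer) rounds the double to int32, widens it back, and only produces a Smi when the round trip is exact and the value fits the Smi range; on 31-bit Smis the range check is the Int32AddWithOverflow doubling shown above, since tagging is a left shift by one. A rough standalone sketch of that fast path, assuming 31-bit Smis; -0.0 is rejected here as well because a Smi cannot represent it:

#include <cmath>
#include <cstdint>
#include <optional>

// Returns the untagged Smi value, or std::nullopt when the caller should fall
// back to allocating a HeapNumber (the |if_heap_number| path).
std::optional<int32_t> TryFloat64ToSmi31(double value) {
  // Reject NaN, fractions and anything outside int32 range.
  if (!(value >= -2147483648.0 && value <= 2147483647.0)) return std::nullopt;
  int32_t value32 = static_cast<int32_t>(value);
  if (static_cast<double>(value32) != value) return std::nullopt;
  // -0.0 round-trips to +0.0, so it has to stay a HeapNumber.
  if (value32 == 0 && std::signbit(value)) return std::nullopt;
  // 31-bit Smi range check; the CSA code expresses this as
  // Int32AddWithOverflow(value32, value32) because SmiTag is value << 1.
  if (value32 < -(1 << 30) || value32 > (1 << 30) - 1) return std::nullopt;
  return value32;
}
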
@@ -6144,42 +6012,42 @@ TNode<BoolT> CodeStubAssembler::IsUndetectableMap(SloppyTNode<Map> map) {
}
TNode<BoolT> CodeStubAssembler::IsNoElementsProtectorCellInvalid() {
- TNode<Smi> invalid = SmiConstant(Isolate::kProtectorInvalid);
+ TNode<Smi> invalid = SmiConstant(Protectors::kProtectorInvalid);
TNode<PropertyCell> cell = NoElementsProtectorConstant();
TNode<Object> cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
return TaggedEqual(cell_value, invalid);
}
TNode<BoolT> CodeStubAssembler::IsArrayIteratorProtectorCellInvalid() {
- TNode<Smi> invalid = SmiConstant(Isolate::kProtectorInvalid);
+ TNode<Smi> invalid = SmiConstant(Protectors::kProtectorInvalid);
TNode<PropertyCell> cell = ArrayIteratorProtectorConstant();
TNode<Object> cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
return TaggedEqual(cell_value, invalid);
}
TNode<BoolT> CodeStubAssembler::IsPromiseResolveProtectorCellInvalid() {
- TNode<Smi> invalid = SmiConstant(Isolate::kProtectorInvalid);
- TNode<Cell> cell = PromiseResolveProtectorConstant();
- TNode<Object> cell_value = LoadObjectField(cell, Cell::kValueOffset);
+ TNode<Smi> invalid = SmiConstant(Protectors::kProtectorInvalid);
+ TNode<PropertyCell> cell = PromiseResolveProtectorConstant();
+ TNode<Object> cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
return TaggedEqual(cell_value, invalid);
}
TNode<BoolT> CodeStubAssembler::IsPromiseThenProtectorCellInvalid() {
- TNode<Smi> invalid = SmiConstant(Isolate::kProtectorInvalid);
+ TNode<Smi> invalid = SmiConstant(Protectors::kProtectorInvalid);
TNode<PropertyCell> cell = PromiseThenProtectorConstant();
TNode<Object> cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
return TaggedEqual(cell_value, invalid);
}
TNode<BoolT> CodeStubAssembler::IsArraySpeciesProtectorCellInvalid() {
- TNode<Smi> invalid = SmiConstant(Isolate::kProtectorInvalid);
+ TNode<Smi> invalid = SmiConstant(Protectors::kProtectorInvalid);
TNode<PropertyCell> cell = ArraySpeciesProtectorConstant();
TNode<Object> cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
return TaggedEqual(cell_value, invalid);
}
TNode<BoolT> CodeStubAssembler::IsTypedArraySpeciesProtectorCellInvalid() {
- TNode<Smi> invalid = SmiConstant(Isolate::kProtectorInvalid);
+ TNode<Smi> invalid = SmiConstant(Protectors::kProtectorInvalid);
TNode<PropertyCell> cell = TypedArraySpeciesProtectorConstant();
TNode<Object> cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
return TaggedEqual(cell_value, invalid);
@@ -6190,12 +6058,12 @@ TNode<BoolT> CodeStubAssembler::IsRegExpSpeciesProtectorCellInvalid(
TNode<PropertyCell> cell = CAST(LoadContextElement(
native_context, Context::REGEXP_SPECIES_PROTECTOR_INDEX));
TNode<Object> cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
- TNode<Smi> invalid = SmiConstant(Isolate::kProtectorInvalid);
+ TNode<Smi> invalid = SmiConstant(Protectors::kProtectorInvalid);
return TaggedEqual(cell_value, invalid);
}
TNode<BoolT> CodeStubAssembler::IsPromiseSpeciesProtectorCellInvalid() {
- TNode<Smi> invalid = SmiConstant(Isolate::kProtectorInvalid);
+ TNode<Smi> invalid = SmiConstant(Protectors::kProtectorInvalid);
TNode<PropertyCell> cell = PromiseSpeciesProtectorConstant();
TNode<Object> cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
return TaggedEqual(cell_value, invalid);
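
Each of these Is*ProtectorCellInvalid helpers reads the protector PropertyCell's value slot and compares it against the shared invalid sentinel; only the cell constant differs. A plain-C++ sketch of that common pattern (the cell struct and sentinel value are illustrative, not the real V8 layout):

#include <cstdint>

// Stand-in for a protector PropertyCell: a single tagged value slot.
struct ProtectorCell {
  intptr_t value;
};

// Illustrative sentinel; the real code compares against the Smi
// Protectors::kProtectorInvalid.
constexpr intptr_t kProtectorInvalidSentinel = 0;

bool IsProtectorCellInvalid(const ProtectorCell& cell) {
  return cell.value == kProtectorInvalidSentinel;
}
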
@@ -6394,6 +6262,10 @@ TNode<BoolT> CodeStubAssembler::IsJSGlobalProxy(
return IsJSGlobalProxyMap(LoadMap(object));
}
+TNode<BoolT> CodeStubAssembler::IsJSGeneratorMap(TNode<Map> map) {
+ return InstanceTypeEqual(LoadMapInstanceType(map), JS_GENERATOR_OBJECT_TYPE);
+}
+
TNode<BoolT> CodeStubAssembler::IsJSObjectInstanceType(
SloppyTNode<Int32T> instance_type) {
STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
@@ -6428,6 +6300,11 @@ TNode<BoolT> CodeStubAssembler::IsJSStringIterator(
return HasInstanceType(object, JS_STRING_ITERATOR_TYPE);
}
+TNode<BoolT> CodeStubAssembler::IsJSRegExpStringIterator(
+ SloppyTNode<HeapObject> object) {
+ return HasInstanceType(object, JS_REG_EXP_STRING_ITERATOR_TYPE);
+}
+
TNode<BoolT> CodeStubAssembler::IsMap(SloppyTNode<HeapObject> map) {
return IsMetaMap(LoadMap(map));
}
@@ -6656,7 +6533,7 @@ TNode<BoolT> CodeStubAssembler::IsBigInt(SloppyTNode<HeapObject> object) {
TNode<BoolT> CodeStubAssembler::IsPrimitiveInstanceType(
SloppyTNode<Int32T> instance_type) {
return Int32LessThanOrEqual(instance_type,
- Int32Constant(LAST_PRIMITIVE_TYPE));
+ Int32Constant(LAST_PRIMITIVE_HEAP_OBJECT_TYPE));
}
TNode<BoolT> CodeStubAssembler::IsPrivateSymbol(
@@ -6716,8 +6593,7 @@ TNode<BoolT> CodeStubAssembler::IsNumberDictionary(
return HasInstanceType(object, NUMBER_DICTIONARY_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsJSGeneratorObject(
- SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsJSGeneratorObject(TNode<HeapObject> object) {
return HasInstanceType(object, JS_GENERATOR_OBJECT_TYPE);
}
@@ -6762,7 +6638,7 @@ TNode<BoolT> CodeStubAssembler::IsJSDataView(TNode<HeapObject> object) {
}
TNode<BoolT> CodeStubAssembler::IsJSRegExp(SloppyTNode<HeapObject> object) {
- return HasInstanceType(object, JS_REGEXP_TYPE);
+ return HasInstanceType(object, JS_REG_EXP_TYPE);
}
TNode<BoolT> CodeStubAssembler::IsNumber(SloppyTNode<Object> object) {
@@ -7011,189 +6887,6 @@ TNode<String> CodeStubAssembler::StringFromSingleCharCode(TNode<Int32T> code) {
return CAST(var_result.value());
}
-// A wrapper around CopyStringCharacters which determines the correct string
-// encoding, allocates a corresponding sequential string, and then copies the
-// given character range using CopyStringCharacters.
-// |from_string| must be a sequential string.
-// 0 <= |from_index| <= |from_index| + |character_count| < from_string.length.
-TNode<String> CodeStubAssembler::AllocAndCopyStringCharacters(
- Node* from, Node* from_instance_type, TNode<IntPtrT> from_index,
- TNode<IntPtrT> character_count) {
- Label end(this), one_byte_sequential(this), two_byte_sequential(this);
- TVARIABLE(String, var_result);
-
- Branch(IsOneByteStringInstanceType(from_instance_type), &one_byte_sequential,
- &two_byte_sequential);
-
- // The subject string is a sequential one-byte string.
- BIND(&one_byte_sequential);
- {
- TNode<String> result = AllocateSeqOneByteString(
- Unsigned(TruncateIntPtrToInt32(character_count)));
- CopyStringCharacters(from, result, from_index, IntPtrConstant(0),
- character_count, String::ONE_BYTE_ENCODING,
- String::ONE_BYTE_ENCODING);
- var_result = result;
- Goto(&end);
- }
-
- // The subject string is a sequential two-byte string.
- BIND(&two_byte_sequential);
- {
- TNode<String> result = AllocateSeqTwoByteString(
- Unsigned(TruncateIntPtrToInt32(character_count)));
- CopyStringCharacters(from, result, from_index, IntPtrConstant(0),
- character_count, String::TWO_BYTE_ENCODING,
- String::TWO_BYTE_ENCODING);
- var_result = result;
- Goto(&end);
- }
-
- BIND(&end);
- return var_result.value();
-}
-
-TNode<String> CodeStubAssembler::SubString(TNode<String> string,
- TNode<IntPtrT> from,
- TNode<IntPtrT> to) {
- TVARIABLE(String, var_result);
- ToDirectStringAssembler to_direct(state(), string);
- Label end(this), runtime(this);
-
- TNode<IntPtrT> const substr_length = IntPtrSub(to, from);
- TNode<IntPtrT> const string_length = LoadStringLengthAsWord(string);
-
- // Begin dispatching based on substring length.
-
- Label original_string_or_invalid_length(this);
- GotoIf(UintPtrGreaterThanOrEqual(substr_length, string_length),
- &original_string_or_invalid_length);
-
- // A real substring (substr_length < string_length).
- Label empty(this);
- GotoIf(IntPtrEqual(substr_length, IntPtrConstant(0)), &empty);
-
- Label single_char(this);
- GotoIf(IntPtrEqual(substr_length, IntPtrConstant(1)), &single_char);
-
- // Deal with different string types: update the index if necessary
- // and extract the underlying string.
-
- TNode<String> direct_string = to_direct.TryToDirect(&runtime);
- TNode<IntPtrT> offset = IntPtrAdd(from, to_direct.offset());
- TNode<Int32T> const instance_type = to_direct.instance_type();
-
- // The subject string can only be external or sequential string of either
- // encoding at this point.
- Label external_string(this);
- {
- if (FLAG_string_slices) {
- Label next(this);
-
- // Short slice. Copy instead of slicing.
- GotoIf(IntPtrLessThan(substr_length,
- IntPtrConstant(SlicedString::kMinLength)),
- &next);
-
- // Allocate new sliced string.
-
- Counters* counters = isolate()->counters();
- IncrementCounter(counters->sub_string_native(), 1);
-
- Label one_byte_slice(this), two_byte_slice(this);
- Branch(IsOneByteStringInstanceType(to_direct.instance_type()),
- &one_byte_slice, &two_byte_slice);
-
- BIND(&one_byte_slice);
- {
- var_result = AllocateSlicedOneByteString(
- Unsigned(TruncateIntPtrToInt32(substr_length)), direct_string,
- SmiTag(offset));
- Goto(&end);
- }
-
- BIND(&two_byte_slice);
- {
- var_result = AllocateSlicedTwoByteString(
- Unsigned(TruncateIntPtrToInt32(substr_length)), direct_string,
- SmiTag(offset));
- Goto(&end);
- }
-
- BIND(&next);
- }
-
- // The subject string can only be external or sequential string of either
- // encoding at this point.
- GotoIf(to_direct.is_external(), &external_string);
-
- var_result = AllocAndCopyStringCharacters(direct_string, instance_type,
- offset, substr_length);
-
- Counters* counters = isolate()->counters();
- IncrementCounter(counters->sub_string_native(), 1);
-
- Goto(&end);
- }
-
- // Handle external string.
- BIND(&external_string);
- {
- TNode<RawPtrT> const fake_sequential_string =
- to_direct.PointerToString(&runtime);
-
- var_result = AllocAndCopyStringCharacters(
- fake_sequential_string, instance_type, offset, substr_length);
-
- Counters* counters = isolate()->counters();
- IncrementCounter(counters->sub_string_native(), 1);
-
- Goto(&end);
- }
-
- BIND(&empty);
- {
- var_result = EmptyStringConstant();
- Goto(&end);
- }
-
- // Substrings of length 1 are generated through CharCodeAt and FromCharCode.
- BIND(&single_char);
- {
- TNode<Int32T> char_code = StringCharCodeAt(string, from);
- var_result = StringFromSingleCharCode(char_code);
- Goto(&end);
- }
-
- BIND(&original_string_or_invalid_length);
- {
- CSA_ASSERT(this, IntPtrEqual(substr_length, string_length));
-
- // Equal length - check if {from, to} == {0, str.length}.
- GotoIf(UintPtrGreaterThan(from, IntPtrConstant(0)), &runtime);
-
- // Return the original string (substr_length == string_length).
-
- Counters* counters = isolate()->counters();
- IncrementCounter(counters->sub_string_native(), 1);
-
- var_result = string;
- Goto(&end);
- }
-
- // Fall back to a runtime call.
- BIND(&runtime);
- {
- var_result =
- CAST(CallRuntime(Runtime::kStringSubstring, NoContextConstant(), string,
- SmiTag(from), SmiTag(to)));
- Goto(&end);
- }
-
- BIND(&end);
- return var_result.value();
-}
-
ToDirectStringAssembler::ToDirectStringAssembler(
compiler::CodeAssemblerState* state, TNode<String> string, Flags flags)
: CodeStubAssembler(state),
@@ -7204,8 +6897,7 @@ ToDirectStringAssembler::ToDirectStringAssembler(
flags_(flags) {}
TNode<String> ToDirectStringAssembler::TryToDirect(Label* if_bailout) {
- VariableList vars({&var_string_, &var_offset_, &var_instance_type_}, zone());
- Label dispatch(this, vars);
+ Label dispatch(this, {&var_string_, &var_offset_, &var_instance_type_});
Label if_iscons(this);
Label if_isexternal(this);
Label if_issliced(this);
@@ -7333,232 +7025,6 @@ TNode<RawPtrT> ToDirectStringAssembler::TryToSequential(
return var_result.value();
}
-void CodeStubAssembler::BranchIfCanDerefIndirectString(
- TNode<String> string, TNode<Int32T> instance_type, Label* can_deref,
- Label* cannot_deref) {
- TNode<Int32T> representation =
- Word32And(instance_type, Int32Constant(kStringRepresentationMask));
- GotoIf(Word32Equal(representation, Int32Constant(kThinStringTag)), can_deref);
- GotoIf(Word32NotEqual(representation, Int32Constant(kConsStringTag)),
- cannot_deref);
- // Cons string.
- TNode<String> rhs =
- LoadObjectField<String>(string, ConsString::kSecondOffset);
- GotoIf(IsEmptyString(rhs), can_deref);
- Goto(cannot_deref);
-}
-
-TNode<String> CodeStubAssembler::DerefIndirectString(
- TNode<String> string, TNode<Int32T> instance_type, Label* cannot_deref) {
- Label deref(this);
- BranchIfCanDerefIndirectString(string, instance_type, &deref, cannot_deref);
- BIND(&deref);
- STATIC_ASSERT(static_cast<int>(ThinString::kActualOffset) ==
- static_cast<int>(ConsString::kFirstOffset));
- return LoadObjectField<String>(string, ThinString::kActualOffset);
-}
-
-void CodeStubAssembler::DerefIndirectString(TVariable<String>* var_string,
- TNode<Int32T> instance_type) {
-#ifdef DEBUG
- Label can_deref(this), cannot_deref(this);
- BranchIfCanDerefIndirectString(var_string->value(), instance_type, &can_deref,
- &cannot_deref);
- BIND(&cannot_deref);
- DebugBreak(); // Should be able to dereference string.
- Goto(&can_deref);
- BIND(&can_deref);
-#endif // DEBUG
-
- STATIC_ASSERT(static_cast<int>(ThinString::kActualOffset) ==
- static_cast<int>(ConsString::kFirstOffset));
- *var_string =
- LoadObjectField<String>(var_string->value(), ThinString::kActualOffset);
-}
-
-void CodeStubAssembler::MaybeDerefIndirectString(TVariable<String>* var_string,
- TNode<Int32T> instance_type,
- Label* did_deref,
- Label* cannot_deref) {
- Label deref(this);
- BranchIfCanDerefIndirectString(var_string->value(), instance_type, &deref,
- cannot_deref);
-
- BIND(&deref);
- {
- DerefIndirectString(var_string, instance_type);
- Goto(did_deref);
- }
-}
-
-void CodeStubAssembler::MaybeDerefIndirectStrings(
- TVariable<String>* var_left, TNode<Int32T> left_instance_type,
- TVariable<String>* var_right, TNode<Int32T> right_instance_type,
- Label* did_something) {
- Label did_nothing_left(this), did_something_left(this),
- didnt_do_anything(this);
- MaybeDerefIndirectString(var_left, left_instance_type, &did_something_left,
- &did_nothing_left);
-
- BIND(&did_something_left);
- {
- MaybeDerefIndirectString(var_right, right_instance_type, did_something,
- did_something);
- }
-
- BIND(&did_nothing_left);
- {
- MaybeDerefIndirectString(var_right, right_instance_type, did_something,
- &didnt_do_anything);
- }
-
- BIND(&didnt_do_anything);
- // Fall through if neither string was an indirect string.
-}
-
-TNode<String> CodeStubAssembler::StringAdd(Node* context, TNode<String> left,
- TNode<String> right) {
- TVARIABLE(String, result);
- Label check_right(this), runtime(this, Label::kDeferred), cons(this),
- done(this, &result), done_native(this, &result);
- Counters* counters = isolate()->counters();
-
- TNode<Uint32T> left_length = LoadStringLengthAsWord32(left);
- GotoIfNot(Word32Equal(left_length, Uint32Constant(0)), &check_right);
- result = right;
- Goto(&done_native);
-
- BIND(&check_right);
- TNode<Uint32T> right_length = LoadStringLengthAsWord32(right);
- GotoIfNot(Word32Equal(right_length, Uint32Constant(0)), &cons);
- result = left;
- Goto(&done_native);
-
- BIND(&cons);
- {
- TNode<Uint32T> new_length = Uint32Add(left_length, right_length);
-
- // If new length is greater than String::kMaxLength, goto runtime to
- // throw. Note: we also need to invalidate the string length protector, so
- // can't just throw here directly.
- GotoIf(Uint32GreaterThan(new_length, Uint32Constant(String::kMaxLength)),
- &runtime);
-
- TVARIABLE(String, var_left, left);
- TVARIABLE(String, var_right, right);
- Variable* input_vars[2] = {&var_left, &var_right};
- Label non_cons(this, 2, input_vars);
- Label slow(this, Label::kDeferred);
- GotoIf(Uint32LessThan(new_length, Uint32Constant(ConsString::kMinLength)),
- &non_cons);
-
- result =
- AllocateConsString(new_length, var_left.value(), var_right.value());
- Goto(&done_native);
-
- BIND(&non_cons);
-
- Comment("Full string concatenate");
- TNode<Int32T> left_instance_type = LoadInstanceType(var_left.value());
- TNode<Int32T> right_instance_type = LoadInstanceType(var_right.value());
- // Compute intersection and difference of instance types.
-
- TNode<Int32T> ored_instance_types =
- Word32Or(left_instance_type, right_instance_type);
- TNode<Word32T> xored_instance_types =
- Word32Xor(left_instance_type, right_instance_type);
-
- // Check if both strings have the same encoding and both are sequential.
- GotoIf(IsSetWord32(xored_instance_types, kStringEncodingMask), &runtime);
- GotoIf(IsSetWord32(ored_instance_types, kStringRepresentationMask), &slow);
-
- TNode<IntPtrT> word_left_length = Signed(ChangeUint32ToWord(left_length));
- TNode<IntPtrT> word_right_length = Signed(ChangeUint32ToWord(right_length));
-
- Label two_byte(this);
- GotoIf(Word32Equal(Word32And(ored_instance_types,
- Int32Constant(kStringEncodingMask)),
- Int32Constant(kTwoByteStringTag)),
- &two_byte);
- // One-byte sequential string case
- result = AllocateSeqOneByteString(new_length);
- CopyStringCharacters(var_left.value(), result.value(), IntPtrConstant(0),
- IntPtrConstant(0), word_left_length,
- String::ONE_BYTE_ENCODING, String::ONE_BYTE_ENCODING);
- CopyStringCharacters(var_right.value(), result.value(), IntPtrConstant(0),
- word_left_length, word_right_length,
- String::ONE_BYTE_ENCODING, String::ONE_BYTE_ENCODING);
- Goto(&done_native);
-
- BIND(&two_byte);
- {
- // Two-byte sequential string case
- result = AllocateSeqTwoByteString(new_length);
- CopyStringCharacters(var_left.value(), result.value(), IntPtrConstant(0),
- IntPtrConstant(0), word_left_length,
- String::TWO_BYTE_ENCODING,
- String::TWO_BYTE_ENCODING);
- CopyStringCharacters(var_right.value(), result.value(), IntPtrConstant(0),
- word_left_length, word_right_length,
- String::TWO_BYTE_ENCODING,
- String::TWO_BYTE_ENCODING);
- Goto(&done_native);
- }
-
- BIND(&slow);
- {
- // Try to unwrap indirect strings, restart the above attempt on success.
- MaybeDerefIndirectStrings(&var_left, left_instance_type, &var_right,
- right_instance_type, &non_cons);
- Goto(&runtime);
- }
- }
- BIND(&runtime);
- {
- result = CAST(CallRuntime(Runtime::kStringAdd, context, left, right));
- Goto(&done);
- }
-
- BIND(&done_native);
- {
- IncrementCounter(counters->string_add_native(), 1);
- Goto(&done);
- }
-
- BIND(&done);
- return result.value();
-}
-
-TNode<String> CodeStubAssembler::StringFromSingleUTF16EncodedCodePoint(
- TNode<Int32T> codepoint) {
- VARIABLE(var_result, MachineRepresentation::kTagged, EmptyStringConstant());
-
- Label if_isword16(this), if_isword32(this), return_result(this);
-
- Branch(Uint32LessThan(codepoint, Int32Constant(0x10000)), &if_isword16,
- &if_isword32);
-
- BIND(&if_isword16);
- {
- var_result.Bind(StringFromSingleCharCode(codepoint));
- Goto(&return_result);
- }
-
- BIND(&if_isword32);
- {
- TNode<String> value = AllocateSeqTwoByteString(2);
- StoreNoWriteBarrier(
- MachineRepresentation::kWord32, value,
- IntPtrConstant(SeqTwoByteString::kHeaderSize - kHeapObjectTag),
- codepoint);
- var_result.Bind(value);
- Goto(&return_result);
- }
-
- BIND(&return_result);
- return CAST(var_result.value());
-}
-
TNode<Number> CodeStubAssembler::StringToNumber(TNode<String> input) {
Label runtime(this, Label::kDeferred);
Label end(this);
@@ -7585,22 +7051,22 @@ TNode<Number> CodeStubAssembler::StringToNumber(TNode<String> input) {
return var_result.value();
}
-TNode<String> CodeStubAssembler::NumberToString(TNode<Number> input) {
+TNode<String> CodeStubAssembler::NumberToString(TNode<Number> input,
+ Label* bailout) {
TVARIABLE(String, result);
TVARIABLE(Smi, smi_input);
- Label runtime(this, Label::kDeferred), if_smi(this), if_heap_number(this),
- done(this, &result);
+ Label if_smi(this), if_heap_number(this), done(this, &result);
// Load the number string cache.
TNode<FixedArray> number_string_cache = NumberStringCacheConstant();
// Make the hash mask from the length of the number string cache. It
// contains two elements (number and string) for each cache entry.
- // TODO(ishell): cleanup mask handling.
- TNode<IntPtrT> mask =
- BitcastTaggedSignedToWord(LoadFixedArrayBaseLength(number_string_cache));
- TNode<IntPtrT> one = IntPtrConstant(1);
- mask = IntPtrSub(mask, one);
+ TNode<IntPtrT> number_string_cache_length =
+ LoadAndUntagFixedArrayBaseLength(number_string_cache);
+ TNode<Int32T> one = Int32Constant(1);
+ TNode<Word32T> mask = Int32Sub(
+ Word32Shr(TruncateWordToInt32(number_string_cache_length), one), one);
GotoIfNot(TaggedIsSmi(input), &if_heap_number);
smi_input = CAST(input);
@@ -7611,36 +7077,35 @@ TNode<String> CodeStubAssembler::NumberToString(TNode<Number> input) {
Comment("NumberToString - HeapNumber");
TNode<HeapNumber> heap_number_input = CAST(input);
// Try normalizing the HeapNumber.
- TryHeapNumberToSmi(heap_number_input, smi_input, &if_smi);
+ TryHeapNumberToSmi(heap_number_input, &smi_input, &if_smi);
// Make a hash from the two 32-bit values of the double.
TNode<Int32T> low =
LoadObjectField<Int32T>(heap_number_input, HeapNumber::kValueOffset);
TNode<Int32T> high = LoadObjectField<Int32T>(
heap_number_input, HeapNumber::kValueOffset + kIntSize);
- TNode<Word32T> hash = Word32Xor(low, high);
- TNode<IntPtrT> word_hash = WordShl(ChangeInt32ToIntPtr(hash), one);
- TNode<WordT> index =
- WordAnd(word_hash, WordSar(mask, SmiShiftBitsConstant()));
+ TNode<Word32T> hash = Word32And(Word32Xor(low, high), mask);
+ TNode<IntPtrT> entry_index =
+ Signed(ChangeUint32ToWord(Int32Add(hash, hash)));
// Cache entry's key must be a heap number
TNode<Object> number_key =
- UnsafeLoadFixedArrayElement(number_string_cache, index);
- GotoIf(TaggedIsSmi(number_key), &runtime);
+ UnsafeLoadFixedArrayElement(number_string_cache, entry_index);
+ GotoIf(TaggedIsSmi(number_key), bailout);
TNode<HeapObject> number_key_heap_object = CAST(number_key);
- GotoIfNot(IsHeapNumber(number_key_heap_object), &runtime);
+ GotoIfNot(IsHeapNumber(number_key_heap_object), bailout);
// Cache entry's key must match the heap number value we're looking for.
TNode<Int32T> low_compare = LoadObjectField<Int32T>(
number_key_heap_object, HeapNumber::kValueOffset);
TNode<Int32T> high_compare = LoadObjectField<Int32T>(
number_key_heap_object, HeapNumber::kValueOffset + kIntSize);
- GotoIfNot(Word32Equal(low, low_compare), &runtime);
- GotoIfNot(Word32Equal(high, high_compare), &runtime);
+ GotoIfNot(Word32Equal(low, low_compare), bailout);
+ GotoIfNot(Word32Equal(high, high_compare), bailout);
// Heap number match, return value from cache entry.
- result = CAST(
- UnsafeLoadFixedArrayElement(number_string_cache, index, kTaggedSize));
+ result = CAST(UnsafeLoadFixedArrayElement(number_string_cache, entry_index,
+ kTaggedSize));
Goto(&done);
}
@@ -7648,17 +7113,28 @@ TNode<String> CodeStubAssembler::NumberToString(TNode<Number> input) {
{
Comment("NumberToString - Smi");
// Load the smi key, make sure it matches the smi we're looking for.
- TNode<Object> smi_index = BitcastWordToTagged(WordAnd(
- WordShl(BitcastTaggedSignedToWord(smi_input.value()), one), mask));
+ TNode<Word32T> hash = Word32And(SmiToInt32(smi_input.value()), mask);
+ TNode<IntPtrT> entry_index =
+ Signed(ChangeUint32ToWord(Int32Add(hash, hash)));
TNode<Object> smi_key = UnsafeLoadFixedArrayElement(
- number_string_cache, smi_index, 0, SMI_PARAMETERS);
- GotoIf(TaggedNotEqual(smi_key, smi_input.value()), &runtime);
+ number_string_cache, entry_index, 0, INTPTR_PARAMETERS);
+ GotoIf(TaggedNotEqual(smi_key, smi_input.value()), bailout);
// Smi match, return value from cache entry.
- result = CAST(UnsafeLoadFixedArrayElement(number_string_cache, smi_index,
- kTaggedSize, SMI_PARAMETERS));
+ result = CAST(UnsafeLoadFixedArrayElement(number_string_cache, entry_index,
+ kTaggedSize, INTPTR_PARAMETERS));
Goto(&done);
}
+ BIND(&done);
+ return result.value();
+}
+
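
The rewritten cache probe derives everything from the untagged cache length: the cache stores (number, string) pairs, so the mask is length / 2 - 1, and the key's slot index is 2 * (hash & mask), with the cached string in the next slot. A standalone sketch of the index computation for the HeapNumber case, modeling the cache as a flat slot array (plain C++, not the FixedArray API):

#include <cstddef>
#include <cstdint>
#include <cstring>

// Returns the slot index of the cache key for a double; the cached string
// lives in the following slot. The cache length (in slots) is a power of two.
size_t NumberStringCacheEntryIndex(double key, size_t cache_length_in_slots) {
  uint32_t mask = static_cast<uint32_t>(cache_length_in_slots / 2) - 1;
  uint64_t bits;
  std::memcpy(&bits, &key, sizeof bits);
  uint32_t low = static_cast<uint32_t>(bits);
  uint32_t high = static_cast<uint32_t>(bits >> 32);
  uint32_t hash = (low ^ high) & mask;   // Smi keys instead hash their int value
  return 2 * static_cast<size_t>(hash);  // two slots (number, string) per entry
}
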
+TNode<String> CodeStubAssembler::NumberToString(TNode<Number> input) {
+ TVARIABLE(String, result);
+ Label runtime(this, Label::kDeferred), done(this, &result);
+
+ result = NumberToString(input, &runtime);
+ Goto(&done);
BIND(&runtime);
{
@@ -8290,102 +7766,129 @@ void CodeStubAssembler::DecrementCounter(StatsCounter* counter, int delta) {
}
}
-void CodeStubAssembler::Increment(Variable* variable, int value,
- ParameterMode mode) {
- DCHECK_IMPLIES(mode == INTPTR_PARAMETERS,
- variable->rep() == MachineType::PointerRepresentation());
- DCHECK_IMPLIES(mode == SMI_PARAMETERS, CanBeTaggedSigned(variable->rep()));
- variable->Bind(IntPtrOrSmiAdd(variable->value(),
- IntPtrOrSmiConstant(value, mode), mode));
+template <typename TIndex>
+void CodeStubAssembler::Increment(TVariable<TIndex>* variable, int value) {
+ *variable =
+ IntPtrOrSmiAdd(variable->value(), IntPtrOrSmiConstant<TIndex>(value));
}
+// Instantiate Increment for Smi, IntPtrT and RawPtrT.
+// TODO(v8:9708): Consider renaming to [Smi|IntPtrT|RawPtrT]Increment.
+template void CodeStubAssembler::Increment<Smi>(TVariable<Smi>* variable,
+ int value);
+template void CodeStubAssembler::Increment<IntPtrT>(
+ TVariable<IntPtrT>* variable, int value);
+template void CodeStubAssembler::Increment<RawPtrT>(
+ TVariable<RawPtrT>* variable, int value);
+
void CodeStubAssembler::Use(Label* label) {
GotoIf(Word32Equal(Int32Constant(0), Int32Constant(1)), label);
}
-void CodeStubAssembler::TryToName(Node* key, Label* if_keyisindex,
- Variable* var_index, Label* if_keyisunique,
- Variable* var_unique, Label* if_bailout,
+void CodeStubAssembler::TryToName(SloppyTNode<Object> key, Label* if_keyisindex,
+ TVariable<IntPtrT>* var_index,
+ Label* if_keyisunique,
+ TVariable<Name>* var_unique,
+ Label* if_bailout,
Label* if_notinternalized) {
- DCHECK_EQ(MachineType::PointerRepresentation(), var_index->rep());
- DCHECK_EQ(MachineRepresentation::kTagged, var_unique->rep());
Comment("TryToName");
- Label if_hascachedindex(this), if_keyisnotindex(this), if_thinstring(this),
- if_keyisother(this, Label::kDeferred);
+ Label if_keyisnotindex(this);
// Handle Smi and HeapNumber keys.
- var_index->Bind(TryToIntptr(key, &if_keyisnotindex));
+ *var_index = TryToIntptr(key, &if_keyisnotindex);
Goto(if_keyisindex);
BIND(&if_keyisnotindex);
- TNode<Map> key_map = LoadMap(key);
- var_unique->Bind(key);
- // Symbols are unique.
- GotoIf(IsSymbolMap(key_map), if_keyisunique);
- TNode<Uint16T> key_instance_type = LoadMapInstanceType(key_map);
- // Miss if |key| is not a String.
- STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
- GotoIfNot(IsStringInstanceType(key_instance_type), &if_keyisother);
-
- // |key| is a String. Check if it has a cached array index.
- TNode<Uint32T> hash = LoadNameHashField(key);
- GotoIf(IsClearWord32(hash, Name::kDoesNotContainCachedArrayIndexMask),
- &if_hascachedindex);
- // No cached array index. If the string knows that it contains an index,
- // then it must be an uncacheable index. Handle this case in the runtime.
- GotoIf(IsClearWord32(hash, Name::kIsNotArrayIndexMask), if_bailout);
- // Check if we have a ThinString.
- GotoIf(InstanceTypeEqual(key_instance_type, THIN_STRING_TYPE),
- &if_thinstring);
- GotoIf(InstanceTypeEqual(key_instance_type, THIN_ONE_BYTE_STRING_TYPE),
- &if_thinstring);
- // Finally, check if |key| is internalized.
- STATIC_ASSERT(kNotInternalizedTag != 0);
- GotoIf(IsSetWord32(key_instance_type, kIsNotInternalizedMask),
- if_notinternalized != nullptr ? if_notinternalized : if_bailout);
- Goto(if_keyisunique);
+ {
+ Label if_symbol(this), if_string(this),
+ if_keyisother(this, Label::kDeferred);
+ TNode<HeapObject> key_heap_object = CAST(key);
+ TNode<Map> key_map = LoadMap(key_heap_object);
- BIND(&if_thinstring);
- var_unique->Bind(
- LoadObjectField<String>(CAST(key), ThinString::kActualOffset));
- Goto(if_keyisunique);
+ GotoIf(IsSymbolMap(key_map), &if_symbol);
- BIND(&if_hascachedindex);
- var_index->Bind(DecodeWordFromWord32<Name::ArrayIndexValueBits>(hash));
- Goto(if_keyisindex);
+ // Miss if |key| is not a String.
+ STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
+ TNode<Uint16T> key_instance_type = LoadMapInstanceType(key_map);
+ Branch(IsStringInstanceType(key_instance_type), &if_string, &if_keyisother);
+
+ // Symbols are unique.
+ BIND(&if_symbol);
+ {
+ *var_unique = CAST(key);
+ Goto(if_keyisunique);
+ }
- BIND(&if_keyisother);
- GotoIfNot(InstanceTypeEqual(key_instance_type, ODDBALL_TYPE), if_bailout);
- var_unique->Bind(LoadObjectField(key, Oddball::kToStringOffset));
- Goto(if_keyisunique);
+ BIND(&if_string);
+ {
+ Label if_hascachedindex(this), if_thinstring(this);
+
+ // |key| is a String. Check if it has a cached array index.
+ TNode<String> key_string = CAST(key);
+ TNode<Uint32T> hash = LoadNameHashField(key_string);
+ GotoIf(IsClearWord32(hash, Name::kDoesNotContainCachedArrayIndexMask),
+ &if_hascachedindex);
+ // No cached array index. If the string knows that it contains an index,
+ // then it must be an uncacheable index. Handle this case in the runtime.
+ GotoIf(IsClearWord32(hash, Name::kIsNotArrayIndexMask), if_bailout);
+ // Check if we have a ThinString.
+ GotoIf(InstanceTypeEqual(key_instance_type, THIN_STRING_TYPE),
+ &if_thinstring);
+ GotoIf(InstanceTypeEqual(key_instance_type, THIN_ONE_BYTE_STRING_TYPE),
+ &if_thinstring);
+ // Finally, check if |key| is internalized.
+ STATIC_ASSERT(kNotInternalizedTag != 0);
+ GotoIf(IsSetWord32(key_instance_type, kIsNotInternalizedMask),
+ if_notinternalized != nullptr ? if_notinternalized : if_bailout);
+
+ *var_unique = key_string;
+ Goto(if_keyisunique);
+
+ BIND(&if_thinstring);
+ *var_unique =
+ LoadObjectField<String>(key_string, ThinString::kActualOffset);
+ Goto(if_keyisunique);
+
+ BIND(&if_hascachedindex);
+ *var_index =
+ Signed(DecodeWordFromWord32<Name::ArrayIndexValueBits>(hash));
+ Goto(if_keyisindex);
+ }
+
+ BIND(&if_keyisother);
+ {
+ GotoIfNot(InstanceTypeEqual(key_instance_type, ODDBALL_TYPE), if_bailout);
+ *var_unique =
+ LoadObjectField<String>(key_heap_object, Oddball::kToStringOffset);
+ Goto(if_keyisunique);
+ }
+ }
}
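
The restructured TryToName keeps the same set of outcomes: the key resolves to an integer index, to a unique name, to the not-internalized path, or it bails out to the runtime. A compact plain-C++ sketch of that dispatch; the Key fields below are hypothetical stand-ins for the map/instance-type/hash checks performed by the real code:

#include <cstdint>
#include <optional>
#include <string>
#include <variant>

// Illustrative key model, not the V8 object layout.
struct Key {
  std::optional<intptr_t> intptr_index;        // TryToIntptr succeeded
  bool is_symbol = false;
  bool is_string = false;
  std::optional<intptr_t> cached_array_index;  // from the name hash field
  bool is_uncached_array_index = false;        // string knows it is an index
  bool is_thin_string = false;
  bool is_internalized = false;
  bool is_oddball = false;
  std::string name;  // the string itself / thin-string target / oddball ToString
};

struct Index { intptr_t value; };
struct Unique { std::string name; };
struct NotInternalized {};
struct Bailout {};
using TryToNameResult = std::variant<Index, Unique, NotInternalized, Bailout>;

TryToNameResult TryToName(const Key& key) {
  if (key.intptr_index) return Index{*key.intptr_index};
  if (key.is_symbol) return Unique{key.name};  // symbols are unique
  if (key.is_string) {
    if (key.cached_array_index) return Index{*key.cached_array_index};
    if (key.is_uncached_array_index) return Bailout{};  // handled in the runtime
    if (key.is_thin_string) return Unique{key.name};    // dereferenced target
    if (!key.is_internalized) return NotInternalized{};
    return Unique{key.name};
  }
  if (key.is_oddball) return Unique{key.name};  // oddball's cached ToString
  return Bailout{};
}
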
void CodeStubAssembler::TryInternalizeString(
- Node* string, Label* if_index, Variable* var_index, Label* if_internalized,
- Variable* var_internalized, Label* if_not_internalized, Label* if_bailout) {
- DCHECK(var_index->rep() == MachineType::PointerRepresentation());
- DCHECK_EQ(var_internalized->rep(), MachineRepresentation::kTagged);
- CSA_SLOW_ASSERT(this, IsString(string));
+ SloppyTNode<String> string, Label* if_index, TVariable<IntPtrT>* var_index,
+ Label* if_internalized, TVariable<Name>* var_internalized,
+ Label* if_not_internalized, Label* if_bailout) {
TNode<ExternalReference> function =
ExternalConstant(ExternalReference::try_internalize_string_function());
TNode<ExternalReference> const isolate_ptr =
ExternalConstant(ExternalReference::isolate_address(isolate()));
- Node* result =
- CallCFunction(function, MachineType::AnyTagged(),
- std::make_pair(MachineType::Pointer(), isolate_ptr),
- std::make_pair(MachineType::AnyTagged(), string));
+ TNode<Object> result =
+ CAST(CallCFunction(function, MachineType::AnyTagged(),
+ std::make_pair(MachineType::Pointer(), isolate_ptr),
+ std::make_pair(MachineType::AnyTagged(), string)));
Label internalized(this);
GotoIf(TaggedIsNotSmi(result), &internalized);
- TNode<IntPtrT> word_result = SmiUntag(result);
+ TNode<IntPtrT> word_result = SmiUntag(CAST(result));
GotoIf(IntPtrEqual(word_result, IntPtrConstant(ResultSentinel::kNotFound)),
if_not_internalized);
GotoIf(IntPtrEqual(word_result, IntPtrConstant(ResultSentinel::kUnsupported)),
if_bailout);
- var_index->Bind(word_result);
+ *var_index = word_result;
Goto(if_index);
BIND(&internalized);
- var_internalized->Bind(result);
+ *var_internalized = CAST(result);
Goto(if_internalized);
}
@@ -8712,31 +8215,6 @@ TNode<Object> CodeStubAssembler::BasicLoadNumberDictionaryElement(
return LoadValueByKeyIndex<NumberDictionary>(dictionary, index);
}
-void CodeStubAssembler::BasicStoreNumberDictionaryElement(
- TNode<NumberDictionary> dictionary, TNode<IntPtrT> intptr_index,
- TNode<Object> value, Label* not_data, Label* if_hole, Label* read_only) {
- TVARIABLE(IntPtrT, var_entry);
- Label if_found(this);
- NumberDictionaryLookup(dictionary, intptr_index, &if_found, &var_entry,
- if_hole);
- BIND(&if_found);
-
- // Check that the value is a data property.
- TNode<IntPtrT> index = EntryToIndex<NumberDictionary>(var_entry.value());
- TNode<Uint32T> details =
- LoadDetailsByKeyIndex<NumberDictionary>(dictionary, index);
- TNode<Uint32T> kind = DecodeWord32<PropertyDetails::KindField>(details);
- // TODO(jkummerow): Support accessors without missing?
- GotoIfNot(Word32Equal(kind, Int32Constant(kData)), not_data);
-
- // Check that the property is writeable.
- GotoIf(IsSetWord32(details, PropertyDetails::kAttributesReadOnlyMask),
- read_only);
-
- // Finally, store the value.
- StoreValueByKeyIndex<NumberDictionary>(dictionary, index, value);
-}
-
template <class Dictionary>
void CodeStubAssembler::FindInsertionEntry(TNode<Dictionary> dictionary,
TNode<Name> key,
@@ -8858,16 +8336,16 @@ void CodeStubAssembler::LookupLinear(TNode<Name> unique_name,
first_inclusive,
IntPtrMul(ChangeInt32ToIntPtr(number_of_valid_entries), factor));
- BuildFastLoop(
+ BuildFastLoop<IntPtrT>(
last_exclusive, first_inclusive,
- [=](SloppyTNode<IntPtrT> name_index) {
+ [=](TNode<IntPtrT> name_index) {
TNode<MaybeObject> element =
LoadArrayElement(array, Array::kHeaderSize, name_index);
TNode<Name> candidate_name = CAST(element);
*var_name_index = name_index;
GotoIf(TaggedEqual(candidate_name, unique_name), if_found);
},
- -Array::kEntrySize, INTPTR_PARAMETERS, IndexAdvanceMode::kPre);
+ -Array::kEntrySize, IndexAdvanceMode::kPre);
Goto(if_not_found);
}
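
LookupLinear now drives the typed BuildFastLoop<IntPtrT> backwards, starting at the slot just past the last valid entry and pre-decrementing by the entry size until the first entry has been checked. A plain-C++ sketch of that descending scan over a flat slot array (kEntrySize and the std::string keys are illustrative):

#include <cstddef>
#include <optional>
#include <string>
#include <vector>

constexpr size_t kEntrySize = 3;  // slots per entry; the key sits in slot 0

// Scans from the last valid entry down to the first, returning the slot index
// of the matching key, or std::nullopt for the |if_not_found| path.
std::optional<size_t> LookupLinear(const std::vector<std::string>& slots,
                                   size_t first_slot, size_t valid_entries,
                                   const std::string& unique_name) {
  size_t index = first_slot + valid_entries * kEntrySize;  // last_exclusive
  while (index != first_slot) {
    index -= kEntrySize;  // IndexAdvanceMode::kPre with a negative increment
    if (slots[index] == unique_name) return index;
  }
  return std::nullopt;
}
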
@@ -9029,7 +8507,7 @@ void CodeStubAssembler::ForEachEnumerableOwnProperty(
TNode<Uint16T> type = LoadMapInstanceType(map);
TNode<Uint32T> bit_field3 = EnsureOnlyHasSimpleProperties(map, type, bailout);
- TNode<DescriptorArray> descriptors = LoadMapDescriptors(map);
+ TVARIABLE(DescriptorArray, var_descriptors, LoadMapDescriptors(map));
TNode<Uint32T> nof_descriptors =
DecodeWord32<Map::NumberOfOwnDescriptorsBits>(bit_field3);
@@ -9044,25 +8522,23 @@ void CodeStubAssembler::ForEachEnumerableOwnProperty(
// Note: var_end_key_index is exclusive for the loop
TVARIABLE(IntPtrT, var_end_key_index,
ToKeyIndex<DescriptorArray>(nof_descriptors));
- VariableList list(
- {&var_stable, &var_has_symbol, &var_is_symbol_processing_loop,
- &var_start_key_index, &var_end_key_index},
- zone());
+ VariableList list({&var_descriptors, &var_stable, &var_has_symbol,
+ &var_is_symbol_processing_loop, &var_start_key_index,
+ &var_end_key_index},
+ zone());
Label descriptor_array_loop(
- this, {&var_stable, &var_has_symbol, &var_is_symbol_processing_loop,
- &var_start_key_index, &var_end_key_index});
+ this, {&var_descriptors, &var_stable, &var_has_symbol,
+ &var_is_symbol_processing_loop, &var_start_key_index,
+ &var_end_key_index});
Goto(&descriptor_array_loop);
BIND(&descriptor_array_loop);
- BuildFastLoop(
+ BuildFastLoop<IntPtrT>(
list, var_start_key_index.value(), var_end_key_index.value(),
- [=, &var_stable, &var_has_symbol, &var_is_symbol_processing_loop,
- &var_start_key_index, &var_end_key_index](Node* index) {
- TNode<IntPtrT> descriptor_key_index =
- TNode<IntPtrT>::UncheckedCast(index);
+ [&](TNode<IntPtrT> descriptor_key_index) {
TNode<Name> next_key =
- LoadKeyByKeyIndex(descriptors, descriptor_key_index);
+ LoadKeyByKeyIndex(var_descriptors.value(), descriptor_key_index);
TVARIABLE(Object, var_value, SmiConstant(0));
Label callback(this), next_iteration(this);
@@ -9117,7 +8593,7 @@ void CodeStubAssembler::ForEachEnumerableOwnProperty(
// Directly decode from the descriptor array if |object| did not
// change shape.
var_map = map;
- var_meta_storage = descriptors;
+ var_meta_storage = var_descriptors.value();
var_entry = Signed(descriptor_key_index);
Goto(&if_found_fast);
}
@@ -9183,19 +8659,21 @@ void CodeStubAssembler::ForEachEnumerableOwnProperty(
BIND(&callback);
body(next_key, var_value.value());
- // Check if |object| is still stable, i.e. we can proceed using
- // property details from preloaded |descriptors|.
- var_stable = Select<BoolT>(
- var_stable.value(),
- [=] { return TaggedEqual(LoadMap(object), map); },
- [=] { return Int32FalseConstant(); });
+ // Check if |object| is still stable, i.e. the descriptors in the
+ // preloaded |descriptors| are still the same modulo in-place
+ // representation changes.
+ GotoIfNot(var_stable.value(), &next_iteration);
+ var_stable = TaggedEqual(LoadMap(object), map);
+          // Reload the descriptors just in case the actual array changed, or
+ // any of the field representations changed in-place.
+ var_descriptors = LoadMapDescriptors(map);
Goto(&next_iteration);
}
}
BIND(&next_iteration);
},
- DescriptorArray::kEntrySize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+ DescriptorArray::kEntrySize, IndexAdvanceMode::kPost);
if (mode == kEnumerationOrder) {
Label done(this);
@@ -9205,14 +8683,73 @@ void CodeStubAssembler::ForEachEnumerableOwnProperty(
var_is_symbol_processing_loop = Int32TrueConstant();
// Add DescriptorArray::kEntrySize to make the var_end_key_index exclusive
// as BuildFastLoop() expects.
- Increment(&var_end_key_index, DescriptorArray::kEntrySize,
- INTPTR_PARAMETERS);
+ Increment(&var_end_key_index, DescriptorArray::kEntrySize);
Goto(&descriptor_array_loop);
BIND(&done);
}
}
+TNode<Object> CodeStubAssembler::GetConstructor(TNode<Map> map) {
+ TVARIABLE(HeapObject, var_maybe_constructor);
+ var_maybe_constructor = map;
+ Label loop(this, &var_maybe_constructor), done(this);
+ GotoIfNot(IsMap(var_maybe_constructor.value()), &done);
+ Goto(&loop);
+
+ BIND(&loop);
+ {
+ var_maybe_constructor = CAST(LoadObjectField(
+ var_maybe_constructor.value(), Map::kConstructorOrBackPointerOffset));
+ GotoIf(IsMap(var_maybe_constructor.value()), &loop);
+ Goto(&done);
+ }
+
+ BIND(&done);
+ return var_maybe_constructor.value();
+}
+
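
GetConstructor relies on the constructor-or-back-pointer slot holding either a back pointer (another Map) or the constructor itself, so it simply follows Maps until a non-Map value appears. A minimal plain-C++ sketch of that walk; the Object/Map structs are illustrative stand-ins, and the chain is assumed to end at a non-Map value as it does for real maps:

// Illustrative object model: only enough to show the back-pointer walk.
struct Object {
  bool is_map = false;
};

struct Map : Object {
  Object* constructor_or_back_pointer = nullptr;
  Map() { is_map = true; }
};

// Follows back pointers until the field no longer holds a Map; that value is
// the constructor (or whatever else the root map stores there).
Object* GetConstructor(Map* map) {
  Object* maybe_constructor = map;
  while (maybe_constructor->is_map) {
    maybe_constructor =
        static_cast<Map*>(maybe_constructor)->constructor_or_back_pointer;
  }
  return maybe_constructor;
}
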
+TNode<NativeContext> CodeStubAssembler::GetCreationContext(
+ TNode<JSReceiver> receiver, Label* if_bailout) {
+ TNode<Map> receiver_map = LoadMap(receiver);
+ TNode<Object> constructor = GetConstructor(receiver_map);
+
+ TVARIABLE(JSFunction, var_function);
+
+ Label done(this), if_jsfunction(this), if_jsgenerator(this);
+ GotoIf(TaggedIsSmi(constructor), if_bailout);
+
+ TNode<Map> function_map = LoadMap(CAST(constructor));
+ GotoIf(IsJSFunctionMap(function_map), &if_jsfunction);
+ GotoIf(IsJSGeneratorMap(function_map), &if_jsgenerator);
+ // Remote objects don't have a creation context.
+ GotoIf(IsFunctionTemplateInfoMap(function_map), if_bailout);
+
+ CSA_ASSERT(this, IsJSFunctionMap(receiver_map));
+ var_function = CAST(receiver);
+ Goto(&done);
+
+ BIND(&if_jsfunction);
+ {
+ var_function = CAST(constructor);
+ Goto(&done);
+ }
+
+ BIND(&if_jsgenerator);
+ {
+ var_function = LoadJSGeneratorObjectFunction(CAST(receiver));
+ Goto(&done);
+ }
+
+ BIND(&done);
+ TNode<Context> context = LoadJSFunctionContext(var_function.value());
+
+ GotoIfNot(IsContext(context), if_bailout);
+
+ TNode<NativeContext> native_context = LoadNativeContext(context);
+ return native_context;
+}
+
void CodeStubAssembler::DescriptorLookup(
SloppyTNode<Name> unique_name, SloppyTNode<DescriptorArray> descriptors,
SloppyTNode<Uint32T> bitfield3, Label* if_found,
@@ -9302,7 +8839,7 @@ void CodeStubAssembler::TryLookupPropertyInSimpleObject(
}
void CodeStubAssembler::TryLookupProperty(
- SloppyTNode<JSObject> object, SloppyTNode<Map> map,
+ SloppyTNode<JSReceiver> object, SloppyTNode<Map> map,
SloppyTNode<Int32T> instance_type, SloppyTNode<Name> unique_name,
Label* if_found_fast, Label* if_found_dict, Label* if_found_global,
TVariable<HeapObject>* var_meta_storage, TVariable<IntPtrT>* var_name_index,
@@ -9310,7 +8847,7 @@ void CodeStubAssembler::TryLookupProperty(
Label if_objectisspecial(this);
GotoIf(IsSpecialReceiverInstanceType(instance_type), &if_objectisspecial);
- TryLookupPropertyInSimpleObject(object, map, unique_name, if_found_fast,
+ TryLookupPropertyInSimpleObject(CAST(object), map, unique_name, if_found_fast,
if_found_dict, var_meta_storage,
var_name_index, if_not_found);
@@ -9547,25 +9084,44 @@ TNode<Object> CodeStubAssembler::CallGetterIfAccessor(
// AccessorPair case.
{
if (mode == kCallJSGetter) {
+ Label if_callable(this), if_function_template_info(this);
Node* accessor_pair = value;
TNode<HeapObject> getter =
CAST(LoadObjectField(accessor_pair, AccessorPair::kGetterOffset));
TNode<Map> getter_map = LoadMap(getter);
- TNode<Uint16T> instance_type = LoadMapInstanceType(getter_map);
- // FunctionTemplateInfo getters are not supported yet.
- GotoIf(InstanceTypeEqual(instance_type, FUNCTION_TEMPLATE_INFO_TYPE),
- if_bailout);
+
+ GotoIf(IsCallableMap(getter_map), &if_callable);
+ GotoIf(IsFunctionTemplateInfoMap(getter_map), &if_function_template_info);
// Return undefined if the {getter} is not callable.
var_value.Bind(UndefinedConstant());
- GotoIfNot(IsCallableMap(getter_map), &done);
+ Goto(&done);
+
+ BIND(&if_callable);
+ {
+ // Call the accessor.
+ Callable callable = CodeFactory::Call(isolate());
+ Node* result = CallJS(callable, context, getter, receiver);
+ var_value.Bind(result);
+ Goto(&done);
+ }
- // Call the accessor.
- Callable callable = CodeFactory::Call(isolate());
- Node* result = CallJS(callable, context, getter, receiver);
- var_value.Bind(result);
+ BIND(&if_function_template_info);
+ {
+ TNode<HeapObject> cached_property_name = LoadObjectField<HeapObject>(
+ getter, FunctionTemplateInfo::kCachedPropertyNameOffset);
+ GotoIfNot(IsTheHole(cached_property_name), if_bailout);
+
+ TNode<NativeContext> creation_context =
+ GetCreationContext(CAST(receiver), if_bailout);
+ var_value.Bind(CallBuiltin(
+ Builtins::kCallFunctionTemplate_CheckAccessAndCompatibleReceiver,
+ creation_context, getter, IntPtrConstant(0), receiver));
+ Goto(&done);
+ }
+ } else {
+ Goto(&done);
}
- Goto(&done);
}
// AccessorInfo case.
@@ -9617,10 +9173,11 @@ TNode<Object> CodeStubAssembler::CallGetterIfAccessor(
GotoIfNot(IsLengthString(
LoadObjectField(accessor_info, AccessorInfo::kNameOffset)),
if_bailout);
- Node* receiver_value = LoadJSPrimitiveWrapperValue(receiver);
+ TNode<Object> receiver_value =
+ LoadJSPrimitiveWrapperValue(CAST(receiver));
GotoIfNot(TaggedIsNotSmi(receiver_value), if_bailout);
- GotoIfNot(IsString(receiver_value), if_bailout);
- var_value.Bind(LoadStringLengthAsSmi(receiver_value));
+ GotoIfNot(IsString(CAST(receiver_value)), if_bailout);
+ var_value.Bind(LoadStringLengthAsSmi(CAST(receiver_value)));
Goto(&done);
}
}
@@ -9808,18 +9365,14 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
}
BIND(&if_isfaststringwrapper);
{
- CSA_ASSERT(this, HasInstanceType(object, JS_PRIMITIVE_WRAPPER_TYPE));
- Node* string = LoadJSPrimitiveWrapperValue(object);
- CSA_ASSERT(this, IsString(string));
+ TNode<String> string = CAST(LoadJSPrimitiveWrapperValue(CAST(object)));
TNode<IntPtrT> length = LoadStringLengthAsWord(string);
GotoIf(UintPtrLessThan(intptr_index, length), if_found);
Goto(&if_isobjectorsmi);
}
BIND(&if_isslowstringwrapper);
{
- CSA_ASSERT(this, HasInstanceType(object, JS_PRIMITIVE_WRAPPER_TYPE));
- Node* string = LoadJSPrimitiveWrapperValue(object);
- CSA_ASSERT(this, IsString(string));
+ TNode<String> string = CAST(LoadJSPrimitiveWrapperValue(CAST(object)));
TNode<IntPtrT> length = LoadStringLengthAsWord(string);
GotoIf(UintPtrLessThan(intptr_index, length), if_found);
Goto(&if_isdictionary);
@@ -9892,8 +9445,8 @@ void CodeStubAssembler::TryPrototypeChainLookup(
GotoIf(InstanceTypeEqual(instance_type, JS_PROXY_TYPE), if_proxy);
}
- VARIABLE(var_index, MachineType::PointerRepresentation());
- VARIABLE(var_unique, MachineRepresentation::kTagged);
+ TVARIABLE(IntPtrT, var_index);
+ TVARIABLE(Name, var_unique);
Label if_keyisindex(this), if_iskeyunique(this);
TryToName(key, &if_keyisindex, &var_index, &if_iskeyunique, &var_unique,
@@ -9905,9 +9458,7 @@ void CodeStubAssembler::TryPrototypeChainLookup(
TVARIABLE(Map, var_holder_map, map);
TVARIABLE(Int32T, var_holder_instance_type, instance_type);
- VariableList merged_variables(
- {&var_holder, &var_holder_map, &var_holder_instance_type}, zone());
- Label loop(this, merged_variables);
+ Label loop(this, {&var_holder, &var_holder_map, &var_holder_instance_type});
Goto(&loop);
BIND(&loop);
{
@@ -9950,9 +9501,7 @@ void CodeStubAssembler::TryPrototypeChainLookup(
TVARIABLE(Map, var_holder_map, map);
TVARIABLE(Int32T, var_holder_instance_type, instance_type);
- VariableList merged_variables(
- {&var_holder, &var_holder_map, &var_holder_instance_type}, zone());
- Label loop(this, merged_variables);
+ Label loop(this, {&var_holder, &var_holder_map, &var_holder_instance_type});
Goto(&loop);
BIND(&loop);
{
@@ -9978,22 +9527,22 @@ void CodeStubAssembler::TryPrototypeChainLookup(
}
}
-Node* CodeStubAssembler::HasInPrototypeChain(Node* context, Node* object,
- SloppyTNode<Object> prototype) {
- CSA_ASSERT(this, TaggedIsNotSmi(object));
- VARIABLE(var_result, MachineRepresentation::kTagged);
+TNode<Oddball> CodeStubAssembler::HasInPrototypeChain(TNode<Context> context,
+ TNode<HeapObject> object,
+ TNode<Object> prototype) {
+ TVARIABLE(Oddball, var_result);
Label return_false(this), return_true(this),
return_runtime(this, Label::kDeferred), return_result(this);
// Loop through the prototype chain looking for the {prototype}.
- VARIABLE(var_object_map, MachineRepresentation::kTagged, LoadMap(object));
+ TVARIABLE(Map, var_object_map, LoadMap(object));
Label loop(this, &var_object_map);
Goto(&loop);
BIND(&loop);
{
// Check if we can determine the prototype directly from the {object_map}.
Label if_objectisdirect(this), if_objectisspecial(this, Label::kDeferred);
- Node* object_map = var_object_map.value();
+ TNode<Map> object_map = var_object_map.value();
TNode<Uint16T> object_instance_type = LoadMapInstanceType(object_map);
Branch(IsSpecialReceiverInstanceType(object_instance_type),
&if_objectisspecial, &if_objectisdirect);
@@ -10018,22 +9567,22 @@ Node* CodeStubAssembler::HasInPrototypeChain(Node* context, Node* object,
// Continue with the prototype.
CSA_ASSERT(this, TaggedIsNotSmi(object_prototype));
- var_object_map.Bind(LoadMap(object_prototype));
+ var_object_map = LoadMap(object_prototype);
Goto(&loop);
}
BIND(&return_true);
- var_result.Bind(TrueConstant());
+ var_result = TrueConstant();
Goto(&return_result);
BIND(&return_false);
- var_result.Bind(FalseConstant());
+ var_result = FalseConstant();
Goto(&return_result);
BIND(&return_runtime);
{
// Fallback to the runtime implementation.
- var_result.Bind(
+ var_result = CAST(
CallRuntime(Runtime::kHasInPrototypeChain, context, object, prototype));
}
Goto(&return_result);
@@ -10042,63 +9591,67 @@ Node* CodeStubAssembler::HasInPrototypeChain(Node* context, Node* object,
return var_result.value();
}
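
The typed HasInPrototypeChain keeps the original shape: walk the receiver's prototype chain, answer true on a match, false when the chain ends at null, and defer special receivers (proxies, access-checked objects) to the runtime. A minimal sketch of the direct-object case only, in plain C++ with an illustrative object struct:

// Illustrative object model: each object just points at its prototype.
struct JSObjectModel {
  const JSObjectModel* prototype = nullptr;  // nullptr terminates the chain
};

// True if {prototype} occurs anywhere in {object}'s prototype chain (the walk
// starts at object->prototype, not at object itself).
bool HasInPrototypeChain(const JSObjectModel* object,
                         const JSObjectModel* prototype) {
  for (const JSObjectModel* current = object->prototype; current != nullptr;
       current = current->prototype) {
    if (current == prototype) return true;
  }
  return false;
}
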
-Node* CodeStubAssembler::OrdinaryHasInstance(Node* context, Node* callable,
- Node* object) {
- VARIABLE(var_result, MachineRepresentation::kTagged);
+TNode<Oddball> CodeStubAssembler::OrdinaryHasInstance(
+ TNode<Context> context, TNode<Object> callable_maybe_smi,
+ TNode<Object> object_maybe_smi) {
+ TVARIABLE(Oddball, var_result);
Label return_runtime(this, Label::kDeferred), return_result(this);
GotoIfForceSlowPath(&return_runtime);
// Goto runtime if {object} is a Smi.
- GotoIf(TaggedIsSmi(object), &return_runtime);
+ GotoIf(TaggedIsSmi(object_maybe_smi), &return_runtime);
// Goto runtime if {callable} is a Smi.
- GotoIf(TaggedIsSmi(callable), &return_runtime);
-
- // Load map of {callable}.
- TNode<Map> callable_map = LoadMap(callable);
-
- // Goto runtime if {callable} is not a JSFunction.
- TNode<Uint16T> callable_instance_type = LoadMapInstanceType(callable_map);
- GotoIfNot(InstanceTypeEqual(callable_instance_type, JS_FUNCTION_TYPE),
- &return_runtime);
+ GotoIf(TaggedIsSmi(callable_maybe_smi), &return_runtime);
- GotoIfPrototypeRequiresRuntimeLookup(CAST(callable), callable_map,
- &return_runtime);
-
- // Get the "prototype" (or initial map) of the {callable}.
- TNode<HeapObject> callable_prototype = LoadObjectField<HeapObject>(
- CAST(callable), JSFunction::kPrototypeOrInitialMapOffset);
{
- Label no_initial_map(this), walk_prototype_chain(this);
- TVARIABLE(HeapObject, var_callable_prototype, callable_prototype);
+ // Load map of {callable}.
+ TNode<HeapObject> object = CAST(object_maybe_smi);
+ TNode<HeapObject> callable = CAST(callable_maybe_smi);
+ TNode<Map> callable_map = LoadMap(callable);
- // Resolve the "prototype" if the {callable} has an initial map.
- GotoIfNot(IsMap(callable_prototype), &no_initial_map);
- var_callable_prototype =
- LoadObjectField<HeapObject>(callable_prototype, Map::kPrototypeOffset);
- Goto(&walk_prototype_chain);
+ // Goto runtime if {callable} is not a JSFunction.
+ TNode<Uint16T> callable_instance_type = LoadMapInstanceType(callable_map);
+ GotoIfNot(InstanceTypeEqual(callable_instance_type, JS_FUNCTION_TYPE),
+ &return_runtime);
- BIND(&no_initial_map);
- // {callable_prototype} is the hole if the "prototype" property hasn't been
- // requested so far.
- Branch(TaggedEqual(callable_prototype, TheHoleConstant()), &return_runtime,
- &walk_prototype_chain);
+ GotoIfPrototypeRequiresRuntimeLookup(CAST(callable), callable_map,
+ &return_runtime);
- BIND(&walk_prototype_chain);
- callable_prototype = var_callable_prototype.value();
- }
+ // Get the "prototype" (or initial map) of the {callable}.
+ TNode<HeapObject> callable_prototype = LoadObjectField<HeapObject>(
+ callable, JSFunction::kPrototypeOrInitialMapOffset);
+ {
+ Label no_initial_map(this), walk_prototype_chain(this);
+ TVARIABLE(HeapObject, var_callable_prototype, callable_prototype);
+
+ // Resolve the "prototype" if the {callable} has an initial map.
+ GotoIfNot(IsMap(callable_prototype), &no_initial_map);
+ var_callable_prototype = LoadObjectField<HeapObject>(
+ callable_prototype, Map::kPrototypeOffset);
+ Goto(&walk_prototype_chain);
+
+ BIND(&no_initial_map);
+ // {callable_prototype} is the hole if the "prototype" property hasn't
+ // been requested so far.
+ Branch(TaggedEqual(callable_prototype, TheHoleConstant()),
+ &return_runtime, &walk_prototype_chain);
+
+ BIND(&walk_prototype_chain);
+ callable_prototype = var_callable_prototype.value();
+ }
- // Loop through the prototype chain looking for the {callable} prototype.
- CSA_ASSERT(this, IsJSReceiver(callable_prototype));
- var_result.Bind(HasInPrototypeChain(context, object, callable_prototype));
- Goto(&return_result);
+ // Loop through the prototype chain looking for the {callable} prototype.
+ var_result = HasInPrototypeChain(context, object, callable_prototype);
+ Goto(&return_result);
+ }
BIND(&return_runtime);
{
// Fall back to the runtime implementation.
- var_result.Bind(
- CallRuntime(Runtime::kOrdinaryHasInstance, context, callable, object));
+ var_result = CAST(CallRuntime(Runtime::kOrdinaryHasInstance, context,
+ callable_maybe_smi, object_maybe_smi));
}
Goto(&return_result);
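The whole hunk above is the same mechanical migration: an untyped VARIABLE plus Bind() becomes a typed TVARIABLE with plain assignment, and untyped nodes such as CallRuntime results are CAST to the declared type. A minimal before/after sketch of that shape, using the Oddball-valued result from the code above (illustrative only, not taken from the patch):

  // Before: representation-typed variable, bound imperatively.
  VARIABLE(var_result, MachineRepresentation::kTagged);
  var_result.Bind(TrueConstant());

  // After: the variable carries a static type, assignment replaces Bind(),
  // and untyped results are CAST to that type.
  TVARIABLE(Oddball, var_result);
  var_result = TrueConstant();
  var_result = CAST(CallRuntime(Runtime::kOrdinaryHasInstance, context,
                                callable_maybe_smi, object_maybe_smi));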
@@ -10111,34 +9664,72 @@ TNode<IntPtrT> CodeStubAssembler::ElementOffsetFromIndex(Node* index_node,
ParameterMode mode,
int base_size) {
CSA_SLOW_ASSERT(this, MatchesParameterMode(index_node, mode));
+ if (mode == SMI_PARAMETERS) {
+ return ElementOffsetFromIndex(ReinterpretCast<Smi>(index_node), kind,
+ base_size);
+ } else {
+ DCHECK(mode == INTPTR_PARAMETERS);
+ return ElementOffsetFromIndex(ReinterpretCast<IntPtrT>(index_node), kind,
+ base_size);
+ }
+}
+
+template <typename TIndex>
+TNode<IntPtrT> CodeStubAssembler::ElementOffsetFromIndex(
+ TNode<TIndex> index_node, ElementsKind kind, int base_size) {
+ // TODO(v8:9708): Remove IntPtrT variant in favor of UintPtrT.
+ static_assert(std::is_same<TIndex, Smi>::value ||
+ std::is_same<TIndex, IntPtrT>::value ||
+ std::is_same<TIndex, UintPtrT>::value,
+ "Only Smi, UintPtrT or IntPtrT index nodes are allowed");
int element_size_shift = ElementsKindToShiftSize(kind);
int element_size = 1 << element_size_shift;
int const kSmiShiftBits = kSmiShiftSize + kSmiTagSize;
intptr_t index = 0;
+ TNode<IntPtrT> intptr_index_node;
bool constant_index = false;
- if (mode == SMI_PARAMETERS) {
+ if (std::is_same<TIndex, Smi>::value) {
+ TNode<Smi> smi_index_node = ReinterpretCast<Smi>(index_node);
element_size_shift -= kSmiShiftBits;
Smi smi_index;
- constant_index = ToSmiConstant(index_node, &smi_index);
- if (constant_index) index = smi_index.value();
- index_node = BitcastTaggedSignedToWord(index_node);
+ constant_index = ToSmiConstant(smi_index_node, &smi_index);
+ if (constant_index) {
+ index = smi_index.value();
+ } else {
+ if (COMPRESS_POINTERS_BOOL) {
+ smi_index_node = NormalizeSmiIndex(smi_index_node);
+ }
+ }
+ intptr_index_node = BitcastTaggedToWordForTagAndSmiBits(smi_index_node);
} else {
- DCHECK(mode == INTPTR_PARAMETERS);
- constant_index = ToIntPtrConstant(index_node, &index);
+ intptr_index_node = ReinterpretCast<IntPtrT>(index_node);
+ constant_index = ToIntPtrConstant(intptr_index_node, &index);
}
if (constant_index) {
return IntPtrConstant(base_size + element_size * index);
}
- TNode<WordT> shifted_index =
+ TNode<IntPtrT> shifted_index =
(element_size_shift == 0)
- ? UncheckedCast<WordT>(index_node)
+ ? intptr_index_node
: ((element_size_shift > 0)
- ? WordShl(index_node, IntPtrConstant(element_size_shift))
- : WordSar(index_node, IntPtrConstant(-element_size_shift)));
+ ? WordShl(intptr_index_node,
+ IntPtrConstant(element_size_shift))
+ : WordSar(intptr_index_node,
+ IntPtrConstant(-element_size_shift)));
return IntPtrAdd(IntPtrConstant(base_size), Signed(shifted_index));
}
+// Instantiate ElementOffsetFromIndex for Smi and IntPtrT.
+template V8_EXPORT_PRIVATE TNode<IntPtrT>
+CodeStubAssembler::ElementOffsetFromIndex<Smi>(TNode<Smi> index_node,
+ ElementsKind kind,
+ int base_size);
+template V8_EXPORT_PRIVATE TNode<IntPtrT>
+CodeStubAssembler::ElementOffsetFromIndex<IntPtrT>(TNode<IntPtrT> index_node,
+ ElementsKind kind,
+ int base_size);
+
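With the templated overload, callers pass a typed index and drop the ParameterMode argument: constant indices still fold to IntPtrConstant(base_size + element_size * index), and Smi indices fold the Smi tag shift into the element shift instead of untagging first. A hedged caller sketch (the index variables are hypothetical; the call shape matches the uses later in this patch):

  // IntPtrT index: offset = base_size + (index << ElementsKindToShiftSize(kind)).
  TNode<IntPtrT> offset = ElementOffsetFromIndex(
      intptr_index, PACKED_ELEMENTS, FixedArray::kHeaderSize - kHeapObjectTag);

  // Smi index: same result, computed from the tagged value directly by
  // adjusting the shift by kSmiShiftSize + kSmiTagSize.
  TNode<IntPtrT> offset_from_smi = ElementOffsetFromIndex(
      smi_index, PACKED_ELEMENTS, FixedArray::kHeaderSize - kHeapObjectTag);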
TNode<BoolT> CodeStubAssembler::IsOffsetInBounds(SloppyTNode<IntPtrT> offset,
SloppyTNode<IntPtrT> length,
int header_size,
@@ -10146,8 +9737,7 @@ TNode<BoolT> CodeStubAssembler::IsOffsetInBounds(SloppyTNode<IntPtrT> offset,
// Make sure we point to the last field.
int element_size = 1 << ElementsKindToShiftSize(kind);
int correction = header_size - kHeapObjectTag - element_size;
- TNode<IntPtrT> last_offset =
- ElementOffsetFromIndex(length, kind, INTPTR_PARAMETERS, correction);
+ TNode<IntPtrT> last_offset = ElementOffsetFromIndex(length, kind, correction);
return IntPtrLessThanOrEqual(offset, last_offset);
}
@@ -10203,8 +9793,9 @@ TNode<FeedbackVector> CodeStubAssembler::LoadFeedbackVectorForStub() {
return CAST(LoadFeedbackVector(function));
}
-void CodeStubAssembler::UpdateFeedback(Node* feedback, Node* maybe_vector,
- Node* slot_id) {
+void CodeStubAssembler::UpdateFeedback(TNode<Smi> feedback,
+ TNode<HeapObject> maybe_vector,
+ TNode<UintPtrT> slot_id) {
Label end(this);
// If feedback_vector is not valid, then nothing to do.
GotoIf(IsUndefined(maybe_vector), &end);
@@ -10216,7 +9807,7 @@ void CodeStubAssembler::UpdateFeedback(Node* feedback, Node* maybe_vector,
TNode<MaybeObject> feedback_element =
LoadFeedbackVectorSlot(feedback_vector, slot_id);
TNode<Smi> previous_feedback = CAST(feedback_element);
- TNode<Smi> combined_feedback = SmiOr(previous_feedback, CAST(feedback));
+ TNode<Smi> combined_feedback = SmiOr(previous_feedback, feedback);
GotoIf(SmiEqual(previous_feedback, combined_feedback), &end);
{
@@ -10230,7 +9821,7 @@ void CodeStubAssembler::UpdateFeedback(Node* feedback, Node* maybe_vector,
}
void CodeStubAssembler::ReportFeedbackUpdate(
- SloppyTNode<FeedbackVector> feedback_vector, SloppyTNode<IntPtrT> slot_id,
+ TNode<FeedbackVector> feedback_vector, SloppyTNode<UintPtrT> slot_id,
const char* reason) {
// Reset profiler ticks.
StoreObjectFieldNoWriteBarrier(
@@ -10241,7 +9832,7 @@ void CodeStubAssembler::ReportFeedbackUpdate(
// Trace the update.
CallRuntime(Runtime::kInterpreterTraceUpdateFeedback, NoContextConstant(),
LoadFromParentFrame(JavaScriptFrameConstants::kFunctionOffset),
- SmiTag(slot_id), StringConstant(reason));
+ SmiTag(Signed(slot_id)), StringConstant(reason));
#endif // V8_TRACE_FEEDBACK_UPDATES
}
@@ -10285,14 +9876,16 @@ TNode<Map> CodeStubAssembler::LoadReceiverMap(SloppyTNode<Object> receiver) {
[=] { return LoadMap(UncheckedCast<HeapObject>(receiver)); });
}
-TNode<IntPtrT> CodeStubAssembler::TryToIntptr(Node* key, Label* miss) {
+TNode<IntPtrT> CodeStubAssembler::TryToIntptr(SloppyTNode<Object> key,
+ Label* miss) {
TVARIABLE(IntPtrT, var_intptr_key);
Label done(this, &var_intptr_key), key_is_smi(this);
GotoIf(TaggedIsSmi(key), &key_is_smi);
+
// Try to convert a heap number to a Smi.
- GotoIfNot(IsHeapNumber(key), miss);
+ GotoIfNot(IsHeapNumber(CAST(key)), miss);
{
- TNode<Float64T> value = LoadHeapNumberValue(key);
+ TNode<Float64T> value = LoadHeapNumberValue(CAST(key));
TNode<Int32T> int_value = RoundFloat64ToInt32(value);
GotoIfNot(Float64Equal(value, ChangeInt32ToFloat64(int_value)), miss);
var_intptr_key = ChangeInt32ToIntPtr(int_value);
@@ -10301,7 +9894,7 @@ TNode<IntPtrT> CodeStubAssembler::TryToIntptr(Node* key, Label* miss) {
BIND(&key_is_smi);
{
- var_intptr_key = SmiUntag(key);
+ var_intptr_key = SmiUntag(CAST(key));
Goto(&done);
}
@@ -10354,7 +9947,7 @@ Node* CodeStubAssembler::EmitKeyedSloppyArguments(
}
Label if_mapped(this), if_unmapped(this), end(this, &var_result);
TNode<IntPtrT> intptr_two = IntPtrConstant(2);
- TNode<WordT> adjusted_length = IntPtrSub(elements_length, intptr_two);
+ TNode<IntPtrT> adjusted_length = IntPtrSub(elements_length, intptr_two);
GotoIf(UintPtrGreaterThanOrEqual(key, adjusted_length), &if_unmapped);
@@ -10510,33 +10103,35 @@ void CodeStubAssembler::StoreElement(Node* elements, ElementsKind kind,
}
}
-Node* CodeStubAssembler::Int32ToUint8Clamped(Node* int32_value) {
+TNode<Uint8T> CodeStubAssembler::Int32ToUint8Clamped(
+ TNode<Int32T> int32_value) {
Label done(this);
TNode<Int32T> int32_zero = Int32Constant(0);
TNode<Int32T> int32_255 = Int32Constant(255);
- VARIABLE(var_value, MachineRepresentation::kWord32, int32_value);
+ TVARIABLE(Word32T, var_value, int32_value);
GotoIf(Uint32LessThanOrEqual(int32_value, int32_255), &done);
- var_value.Bind(int32_zero);
+ var_value = int32_zero;
GotoIf(Int32LessThan(int32_value, int32_zero), &done);
- var_value.Bind(int32_255);
+ var_value = int32_255;
Goto(&done);
BIND(&done);
- return var_value.value();
+ return UncheckedCast<Uint8T>(var_value.value());
}
-Node* CodeStubAssembler::Float64ToUint8Clamped(Node* float64_value) {
+TNode<Uint8T> CodeStubAssembler::Float64ToUint8Clamped(
+ TNode<Float64T> float64_value) {
Label done(this);
- VARIABLE(var_value, MachineRepresentation::kWord32, Int32Constant(0));
+ TVARIABLE(Word32T, var_value, Int32Constant(0));
GotoIf(Float64LessThanOrEqual(float64_value, Float64Constant(0.0)), &done);
- var_value.Bind(Int32Constant(255));
+ var_value = Int32Constant(255);
GotoIf(Float64LessThanOrEqual(Float64Constant(255.0), float64_value), &done);
{
TNode<Float64T> rounded_value = Float64RoundToEven(float64_value);
- var_value.Bind(TruncateFloat64ToWord32(rounded_value));
+ var_value = TruncateFloat64ToWord32(rounded_value);
Goto(&done);
}
BIND(&done);
- return var_value.value();
+ return UncheckedCast<Uint8T>(var_value.value());
}
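Both clamping helpers now return TNode<Uint8T> instead of an untyped Node*. For reference, the mapping they implement, given here as hand-worked values rather than text from the patch:

  // Int32ToUint8Clamped:    -5 -> 0,    42 -> 42,     300 -> 255
  // Float64ToUint8Clamped:  -0.5 -> 0,  254.5 -> 254 (ties round to even),
  //                         1e9 -> 255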
Node* CodeStubAssembler::PrepareValueForWriteToTypedArray(
@@ -10716,8 +10311,8 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
GotoIfNot(UintPtrLessThan(intptr_key, length), &update_value_and_bailout);
}
- TNode<RawPtrT> backing_store = LoadJSTypedArrayBackingStore(CAST(object));
- StoreElement(backing_store, elements_kind, intptr_key, converted_value,
+ TNode<RawPtrT> data_ptr = LoadJSTypedArrayDataPtr(CAST(object));
+ StoreElement(data_ptr, elements_kind, intptr_key, converted_value,
parameter_mode);
Goto(&done);
@@ -10807,7 +10402,7 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
if (IsSmiElementsKind(elements_kind)) {
GotoIfNot(TaggedIsSmi(value), bailout);
} else if (IsDoubleElementsKind(elements_kind)) {
- value = TryTaggedToFloat64(value, bailout);
+ value = TryTaggedToFloat64(CAST(value), bailout);
}
if (IsGrowStoreMode(store_mode) &&
@@ -11047,7 +10642,7 @@ TNode<IntPtrT> CodeStubAssembler::PageFromAddress(TNode<IntPtrT> address) {
}
TNode<AllocationSite> CodeStubAssembler::CreateAllocationSiteInFeedbackVector(
- SloppyTNode<FeedbackVector> feedback_vector, TNode<Smi> slot) {
+ TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot) {
TNode<IntPtrT> size = IntPtrConstant(AllocationSite::kSizeWithWeakNext);
TNode<HeapObject> site = Allocate(size, CodeStubAssembler::kPretenured);
StoreMapNoWriteBarrier(site, RootIndex::kAllocationSiteWithWeakNextMap);
@@ -11090,19 +10685,16 @@ TNode<AllocationSite> CodeStubAssembler::CreateAllocationSiteInFeedbackVector(
StoreObjectField(site, AllocationSite::kWeakNextOffset, next_site);
StoreFullTaggedNoWriteBarrier(site_list, site);
- StoreFeedbackVectorSlot(feedback_vector, slot, site, UPDATE_WRITE_BARRIER, 0,
- SMI_PARAMETERS);
+ StoreFeedbackVectorSlot(feedback_vector, slot, site);
return CAST(site);
}
TNode<MaybeObject> CodeStubAssembler::StoreWeakReferenceInFeedbackVector(
- SloppyTNode<FeedbackVector> feedback_vector, Node* slot,
- SloppyTNode<HeapObject> value, int additional_offset,
- ParameterMode parameter_mode) {
+ TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot,
+ TNode<HeapObject> value, int additional_offset) {
TNode<MaybeObject> weak_value = MakeWeak(value);
StoreFeedbackVectorSlot(feedback_vector, slot, weak_value,
- UPDATE_WRITE_BARRIER, additional_offset,
- parameter_mode);
+ UPDATE_WRITE_BARRIER, additional_offset);
return weak_value;
}
@@ -11135,14 +10727,14 @@ TNode<Int32T> CodeStubAssembler::LoadElementsKind(
return elements_kind;
}
-Node* CodeStubAssembler::BuildFastLoop(
- const CodeStubAssembler::VariableList& vars, Node* start_index,
- Node* end_index, const FastLoopBody& body, int increment,
- ParameterMode parameter_mode, IndexAdvanceMode advance_mode) {
- CSA_SLOW_ASSERT(this, MatchesParameterMode(start_index, parameter_mode));
- CSA_SLOW_ASSERT(this, MatchesParameterMode(end_index, parameter_mode));
- MachineRepresentation index_rep = ParameterRepresentation(parameter_mode);
- VARIABLE(var, index_rep, start_index);
+template <typename TIndex>
+TNode<TIndex> CodeStubAssembler::BuildFastLoop(const VariableList& vars,
+ TNode<TIndex> start_index,
+ TNode<TIndex> end_index,
+ const FastLoopBody<TIndex>& body,
+ int increment,
+ IndexAdvanceMode advance_mode) {
+ TVARIABLE(TIndex, var, start_index);
VariableList vars_copy(vars.begin(), vars.end(), zone());
vars_copy.push_back(&var);
Label loop(this, vars_copy);
@@ -11154,8 +10746,7 @@ Node* CodeStubAssembler::BuildFastLoop(
// to force the loop header check at the end of the loop and branch forward to
// it from the pre-header). The extra branch is slower in the case that the
// loop actually iterates.
- TNode<BoolT> first_check =
- IntPtrOrSmiEqual(var.value(), end_index, parameter_mode);
+ TNode<BoolT> first_check = IntPtrOrSmiEqual(var.value(), end_index);
int32_t first_check_val;
if (ToInt32Constant(first_check, &first_check_val)) {
if (first_check_val) return var.value();
@@ -11167,19 +10758,28 @@ Node* CodeStubAssembler::BuildFastLoop(
BIND(&loop);
{
if (advance_mode == IndexAdvanceMode::kPre) {
- Increment(&var, increment, parameter_mode);
+ Increment(&var, increment);
}
body(var.value());
if (advance_mode == IndexAdvanceMode::kPost) {
- Increment(&var, increment, parameter_mode);
+ Increment(&var, increment);
}
- Branch(IntPtrOrSmiNotEqual(var.value(), end_index, parameter_mode), &loop,
- &after_loop);
+ Branch(IntPtrOrSmiNotEqual(var.value(), end_index), &loop, &after_loop);
}
BIND(&after_loop);
return var.value();
}
+// Instantiate BuildFastLoop for Smi and IntPtrT.
+template TNode<Smi> CodeStubAssembler::BuildFastLoop<Smi>(
+ const VariableList& vars, TNode<Smi> start_index, TNode<Smi> end_index,
+ const FastLoopBody<Smi>& body, int increment,
+ IndexAdvanceMode advance_mode);
+template TNode<IntPtrT> CodeStubAssembler::BuildFastLoop<IntPtrT>(
+ const VariableList& vars, TNode<IntPtrT> start_index,
+ TNode<IntPtrT> end_index, const FastLoopBody<IntPtrT>& body, int increment,
+ IndexAdvanceMode advance_mode);
+
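BuildFastLoop is now templated on the index type and takes a typed lambda, so the ParameterMode argument disappears. A usage sketch modeled on the InitializeFieldsWithRoot call below, with a hypothetical IntPtrT bound named length:

  BuildFastLoop<IntPtrT>(
      IntPtrConstant(0), length,
      [=](TNode<IntPtrT> index) {
        // Per-iteration work; index is already an IntPtrT, so no untagging
        // or ParameterMode plumbing is required.
      },
      1, IndexAdvanceMode::kPost);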
void CodeStubAssembler::BuildFastFixedArrayForEach(
const CodeStubAssembler::VariableList& vars, Node* fixed_array,
ElementsKind kind, Node* first_element_inclusive,
@@ -11201,17 +10801,15 @@ void CodeStubAssembler::BuildFastFixedArrayForEach(
if (direction == ForEachDirection::kForward) {
for (int i = first_val; i < last_val; ++i) {
TNode<IntPtrT> index = IntPtrConstant(i);
- TNode<IntPtrT> offset =
- ElementOffsetFromIndex(index, kind, INTPTR_PARAMETERS,
- FixedArray::kHeaderSize - kHeapObjectTag);
+ TNode<IntPtrT> offset = ElementOffsetFromIndex(
+ index, kind, FixedArray::kHeaderSize - kHeapObjectTag);
body(fixed_array, offset);
}
} else {
for (int i = last_val - 1; i >= first_val; --i) {
TNode<IntPtrT> index = IntPtrConstant(i);
- TNode<IntPtrT> offset =
- ElementOffsetFromIndex(index, kind, INTPTR_PARAMETERS,
- FixedArray::kHeaderSize - kHeapObjectTag);
+ TNode<IntPtrT> offset = ElementOffsetFromIndex(
+ index, kind, FixedArray::kHeaderSize - kHeapObjectTag);
body(fixed_array, offset);
}
}
@@ -11228,11 +10826,10 @@ void CodeStubAssembler::BuildFastFixedArrayForEach(
if (direction == ForEachDirection::kReverse) std::swap(start, limit);
int increment = IsDoubleElementsKind(kind) ? kDoubleSize : kTaggedSize;
- BuildFastLoop(
+ BuildFastLoop<IntPtrT>(
vars, start, limit,
- [fixed_array, &body](Node* offset) { body(fixed_array, offset); },
+ [&](TNode<IntPtrT> offset) { body(fixed_array, offset); },
direction == ForEachDirection::kReverse ? -increment : increment,
- INTPTR_PARAMETERS,
direction == ForEachDirection::kReverse ? IndexAdvanceMode::kPre
: IndexAdvanceMode::kPost);
}
@@ -11243,22 +10840,21 @@ void CodeStubAssembler::GotoIfFixedArraySizeDoesntFitInNewSpace(
doesnt_fit);
}
-void CodeStubAssembler::InitializeFieldsWithRoot(Node* object,
- Node* start_offset,
- Node* end_offset,
+void CodeStubAssembler::InitializeFieldsWithRoot(TNode<HeapObject> object,
+ TNode<IntPtrT> start_offset,
+ TNode<IntPtrT> end_offset,
RootIndex root_index) {
CSA_SLOW_ASSERT(this, TaggedIsNotSmi(object));
start_offset = IntPtrAdd(start_offset, IntPtrConstant(-kHeapObjectTag));
end_offset = IntPtrAdd(end_offset, IntPtrConstant(-kHeapObjectTag));
TNode<Object> root_value = LoadRoot(root_index);
- BuildFastLoop(
+ BuildFastLoop<IntPtrT>(
end_offset, start_offset,
- [this, object, root_value](Node* current) {
+ [=](TNode<IntPtrT> current) {
StoreNoWriteBarrier(MachineRepresentation::kTagged, object, current,
root_value);
},
- -kTaggedSize, INTPTR_PARAMETERS,
- CodeStubAssembler::IndexAdvanceMode::kPre);
+ -kTaggedSize, CodeStubAssembler::IndexAdvanceMode::kPre);
}
void CodeStubAssembler::BranchIfNumberRelationalComparison(
@@ -11384,11 +10980,9 @@ Operation Reverse(Operation op) {
}
} // anonymous namespace
-Node* CodeStubAssembler::RelationalComparison(Operation op,
- SloppyTNode<Object> left,
- SloppyTNode<Object> right,
- SloppyTNode<Context> context,
- Variable* var_type_feedback) {
+TNode<Oddball> CodeStubAssembler::RelationalComparison(
+ Operation op, TNode<Object> left, TNode<Object> right,
+ TNode<Context> context, TVariable<Smi>* var_type_feedback) {
Label return_true(this), return_false(this), do_float_comparison(this),
end(this);
TVARIABLE(Oddball, var_result); // Actually only "true" or "false".
@@ -11403,7 +10997,7 @@ Node* CodeStubAssembler::RelationalComparison(Operation op,
if (var_type_feedback != nullptr) {
// Initialize the type feedback to None. The current feedback is combined
// with the previous feedback.
- var_type_feedback->Bind(SmiConstant(CompareOperationFeedback::kNone));
+ *var_type_feedback = SmiConstant(CompareOperationFeedback::kNone);
loop_variable_list.push_back(var_type_feedback);
}
Label loop(this, loop_variable_list);
@@ -11914,17 +11508,17 @@ void CodeStubAssembler::GenerateEqual_Same(SloppyTNode<Object> value,
}
// ES6 section 7.2.12 Abstract Equality Comparison
-Node* CodeStubAssembler::Equal(SloppyTNode<Object> left,
- SloppyTNode<Object> right,
- SloppyTNode<Context> context,
- Variable* var_type_feedback) {
+TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
+ SloppyTNode<Object> right,
+ SloppyTNode<Context> context,
+ TVariable<Smi>* var_type_feedback) {
// This is a slightly optimized version of Object::Equals. Whenever you
// change something functionality-wise in here, remember to update the
// Object::Equals method as well.
Label if_equal(this), if_notequal(this), do_float_comparison(this),
do_right_stringtonumber(this, Label::kDeferred), end(this);
- VARIABLE(result, MachineRepresentation::kTagged);
+ TVARIABLE(Oddball, result);
TVARIABLE(Float64T, var_left_float);
TVARIABLE(Float64T, var_right_float);
@@ -11984,7 +11578,7 @@ Node* CodeStubAssembler::Equal(SloppyTNode<Object> left,
GotoIf(IsHeapNumberMap(right_map), &if_right_heapnumber);
// {left} is Smi and {right} is not HeapNumber or Smi.
if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(SmiConstant(CompareOperationFeedback::kAny));
+ *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny);
}
GotoIf(IsBooleanMap(right_map), &if_right_boolean);
TNode<Uint16T> right_type = LoadMapInstanceType(right_map);
@@ -12009,8 +11603,8 @@ Node* CodeStubAssembler::Equal(SloppyTNode<Object> left,
BIND(&if_right_bigint);
{
- result.Bind(CallRuntime(Runtime::kBigIntEqualToNumber,
- NoContextConstant(), right, left));
+ result = CAST(CallRuntime(Runtime::kBigIntEqualToNumber,
+ NoContextConstant(), right, left));
Goto(&end);
}
@@ -12046,7 +11640,8 @@ Node* CodeStubAssembler::Equal(SloppyTNode<Object> left,
BIND(&if_left_string);
{
GotoIfNot(IsStringInstanceType(right_type), &use_symmetry);
- result.Bind(CallBuiltin(Builtins::kStringEqual, context, left, right));
+ result =
+ CAST(CallBuiltin(Builtins::kStringEqual, context, left, right));
CombineFeedback(var_type_feedback,
SmiOr(CollectFeedbackForString(left_type),
CollectFeedbackForString(right_type)));
@@ -12067,8 +11662,7 @@ Node* CodeStubAssembler::Equal(SloppyTNode<Object> left,
{
Label if_right_boolean(this);
if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kAny));
+ *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny);
}
GotoIf(IsStringInstanceType(right_type), &do_right_stringtonumber);
GotoIf(IsBooleanMap(right_map), &if_right_boolean);
@@ -12098,38 +11692,35 @@ Node* CodeStubAssembler::Equal(SloppyTNode<Object> left,
BIND(&if_right_heapnumber);
{
if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kAny));
+ *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny);
}
- result.Bind(CallRuntime(Runtime::kBigIntEqualToNumber,
- NoContextConstant(), left, right));
+ result = CAST(CallRuntime(Runtime::kBigIntEqualToNumber,
+ NoContextConstant(), left, right));
Goto(&end);
}
BIND(&if_right_bigint);
{
CombineFeedback(var_type_feedback, CompareOperationFeedback::kBigInt);
- result.Bind(CallRuntime(Runtime::kBigIntEqualToBigInt,
- NoContextConstant(), left, right));
+ result = CAST(CallRuntime(Runtime::kBigIntEqualToBigInt,
+ NoContextConstant(), left, right));
Goto(&end);
}
BIND(&if_right_string);
{
if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kAny));
+ *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny);
}
- result.Bind(CallRuntime(Runtime::kBigIntEqualToString,
- NoContextConstant(), left, right));
+ result = CAST(CallRuntime(Runtime::kBigIntEqualToString,
+ NoContextConstant(), left, right));
Goto(&end);
}
BIND(&if_right_boolean);
{
if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kAny));
+ *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny);
}
var_right = LoadObjectField(CAST(right), Oddball::kToNumberOffset);
Goto(&loop);
@@ -12154,8 +11745,8 @@ Node* CodeStubAssembler::Equal(SloppyTNode<Object> left,
if (var_type_feedback != nullptr) {
// If {right} is undetectable, it must also be either
// Null or Undefined, or a Receiver (aka document.all).
- var_type_feedback->Bind(SmiConstant(
- CompareOperationFeedback::kReceiverOrNullOrUndefined));
+ *var_type_feedback = SmiConstant(
+ CompareOperationFeedback::kReceiverOrNullOrUndefined);
}
Goto(&if_equal);
}
@@ -12164,12 +11755,11 @@ Node* CodeStubAssembler::Equal(SloppyTNode<Object> left,
{
if (var_type_feedback != nullptr) {
// Track whether {right} is Null, Undefined or Receiver.
- var_type_feedback->Bind(SmiConstant(
- CompareOperationFeedback::kReceiverOrNullOrUndefined));
+ *var_type_feedback = SmiConstant(
+ CompareOperationFeedback::kReceiverOrNullOrUndefined);
GotoIf(IsJSReceiverInstanceType(right_type), &if_notequal);
GotoIfNot(IsBooleanMap(right_map), &if_notequal);
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kAny));
+ *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny);
}
Goto(&if_notequal);
}
@@ -12178,8 +11768,7 @@ Node* CodeStubAssembler::Equal(SloppyTNode<Object> left,
BIND(&if_left_boolean);
{
if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kAny));
+ *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny);
}
// If {right} is a Boolean too, it must be a different Boolean.
@@ -12200,7 +11789,7 @@ Node* CodeStubAssembler::Equal(SloppyTNode<Object> left,
if (var_type_feedback != nullptr) {
Label if_right_symbol(this);
GotoIf(IsSymbolInstanceType(right_type), &if_right_symbol);
- var_type_feedback->Bind(SmiConstant(CompareOperationFeedback::kAny));
+ *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny);
Goto(&if_notequal);
BIND(&if_right_symbol);
@@ -12218,8 +11807,7 @@ Node* CodeStubAssembler::Equal(SloppyTNode<Object> left,
// {left} is a Primitive and {right} is a JSReceiver, so swapping
// the order is not observable.
if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kAny));
+ *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny);
}
Goto(&use_symmetry);
}
@@ -12254,8 +11842,8 @@ Node* CodeStubAssembler::Equal(SloppyTNode<Object> left,
// When we get here, {right} must be either Null or Undefined.
CSA_ASSERT(this, IsNullOrUndefined(right));
if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(SmiConstant(
- CompareOperationFeedback::kReceiverOrNullOrUndefined));
+ *var_type_feedback = SmiConstant(
+ CompareOperationFeedback::kReceiverOrNullOrUndefined);
}
Branch(IsUndetectableMap(left_map), &if_equal, &if_notequal);
}
@@ -12265,8 +11853,7 @@ Node* CodeStubAssembler::Equal(SloppyTNode<Object> left,
// {right} is a Primitive, and neither Null or Undefined;
// convert {left} to Primitive too.
if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kAny));
+ *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny);
}
Callable callable = CodeFactory::NonPrimitiveToPrimitive(isolate());
var_left = CallStub(callable, context, left);
@@ -12298,13 +11885,13 @@ Node* CodeStubAssembler::Equal(SloppyTNode<Object> left,
BIND(&if_equal);
{
- result.Bind(TrueConstant());
+ result = TrueConstant();
Goto(&end);
}
BIND(&if_notequal);
{
- result.Bind(FalseConstant());
+ result = FalseConstant();
Goto(&end);
}
@@ -12312,9 +11899,9 @@ Node* CodeStubAssembler::Equal(SloppyTNode<Object> left,
return result.value();
}
-TNode<Oddball> CodeStubAssembler::StrictEqual(SloppyTNode<Object> lhs,
- SloppyTNode<Object> rhs,
- Variable* var_type_feedback) {
+TNode<Oddball> CodeStubAssembler::StrictEqual(
+ SloppyTNode<Object> lhs, SloppyTNode<Object> rhs,
+ TVariable<Smi>* var_type_feedback) {
// Pseudo-code for the algorithm below:
//
// if (lhs == rhs) {
@@ -12482,7 +12069,7 @@ TNode<Oddball> CodeStubAssembler::StrictEqual(SloppyTNode<Object> lhs,
CollectFeedbackForString(lhs_instance_type);
TNode<Smi> rhs_feedback =
CollectFeedbackForString(rhs_instance_type);
- var_type_feedback->Bind(SmiOr(lhs_feedback, rhs_feedback));
+ *var_type_feedback = SmiOr(lhs_feedback, rhs_feedback);
}
result = CAST(CallBuiltin(Builtins::kStringEqual,
NoContextConstant(), lhs, rhs));
@@ -12556,7 +12143,7 @@ TNode<Oddball> CodeStubAssembler::StrictEqual(SloppyTNode<Object> lhs,
BIND(&if_lhsisoddball);
{
- STATIC_ASSERT(LAST_PRIMITIVE_TYPE == ODDBALL_TYPE);
+ STATIC_ASSERT(LAST_PRIMITIVE_HEAP_OBJECT_TYPE == ODDBALL_TYPE);
GotoIf(IsBooleanMap(rhs_map), &if_not_equivalent_types);
GotoIf(Int32LessThan(rhs_instance_type,
Int32Constant(ODDBALL_TYPE)),
@@ -12855,8 +12442,8 @@ TNode<Oddball> CodeStubAssembler::HasProperty(SloppyTNode<Context> context,
return result.value();
}
-Node* CodeStubAssembler::Typeof(Node* value) {
- VARIABLE(result_var, MachineRepresentation::kTagged);
+TNode<String> CodeStubAssembler::Typeof(SloppyTNode<Object> value) {
+ TVARIABLE(String, result_var);
Label return_number(this, Label::kDeferred), if_oddball(this),
return_function(this), return_undefined(this), return_object(this),
@@ -12864,7 +12451,8 @@ Node* CodeStubAssembler::Typeof(Node* value) {
GotoIf(TaggedIsSmi(value), &return_number);
- TNode<Map> map = LoadMap(value);
+ TNode<HeapObject> value_heap_object = CAST(value);
+ TNode<Map> map = LoadMap(value_heap_object);
GotoIf(IsHeapNumberMap(map), &return_number);
@@ -12890,49 +12478,50 @@ Node* CodeStubAssembler::Typeof(Node* value) {
GotoIf(IsBigIntInstanceType(instance_type), &return_bigint);
CSA_ASSERT(this, InstanceTypeEqual(instance_type, SYMBOL_TYPE));
- result_var.Bind(HeapConstant(isolate()->factory()->symbol_string()));
+ result_var = HeapConstant(isolate()->factory()->symbol_string());
Goto(&return_result);
BIND(&return_number);
{
- result_var.Bind(HeapConstant(isolate()->factory()->number_string()));
+ result_var = HeapConstant(isolate()->factory()->number_string());
Goto(&return_result);
}
BIND(&if_oddball);
{
- TNode<Object> type = LoadObjectField(value, Oddball::kTypeOfOffset);
- result_var.Bind(type);
+ TNode<String> type =
+ CAST(LoadObjectField(value_heap_object, Oddball::kTypeOfOffset));
+ result_var = type;
Goto(&return_result);
}
BIND(&return_function);
{
- result_var.Bind(HeapConstant(isolate()->factory()->function_string()));
+ result_var = HeapConstant(isolate()->factory()->function_string());
Goto(&return_result);
}
BIND(&return_undefined);
{
- result_var.Bind(HeapConstant(isolate()->factory()->undefined_string()));
+ result_var = HeapConstant(isolate()->factory()->undefined_string());
Goto(&return_result);
}
BIND(&return_object);
{
- result_var.Bind(HeapConstant(isolate()->factory()->object_string()));
+ result_var = HeapConstant(isolate()->factory()->object_string());
Goto(&return_result);
}
BIND(&return_string);
{
- result_var.Bind(HeapConstant(isolate()->factory()->string_string()));
+ result_var = HeapConstant(isolate()->factory()->string_string());
Goto(&return_result);
}
BIND(&return_bigint);
{
- result_var.Bind(HeapConstant(isolate()->factory()->bigint_string()));
+ result_var = HeapConstant(isolate()->factory()->bigint_string());
Goto(&return_result);
}
@@ -12941,7 +12530,7 @@ Node* CodeStubAssembler::Typeof(Node* value) {
}
TNode<Object> CodeStubAssembler::GetSuperConstructor(
- SloppyTNode<Context> context, SloppyTNode<JSFunction> active_function) {
+ TNode<Context> context, TNode<JSFunction> active_function) {
Label is_not_constructor(this, Label::kDeferred), out(this);
TVARIABLE(Object, result);
@@ -13004,9 +12593,10 @@ TNode<JSReceiver> CodeStubAssembler::SpeciesConstructor(
return var_result.value();
}
-Node* CodeStubAssembler::InstanceOf(Node* object, Node* callable,
- Node* context) {
- VARIABLE(var_result, MachineRepresentation::kTagged);
+TNode<Oddball> CodeStubAssembler::InstanceOf(TNode<Object> object,
+ TNode<Object> callable,
+ TNode<Context> context) {
+ TVARIABLE(Oddball, var_result);
Label if_notcallable(this, Label::kDeferred),
if_notreceiver(this, Label::kDeferred), if_otherhandler(this),
if_nohandler(this, Label::kDeferred), return_true(this),
@@ -13014,7 +12604,7 @@ Node* CodeStubAssembler::InstanceOf(Node* object, Node* callable,
// Ensure that the {callable} is actually a JSReceiver.
GotoIf(TaggedIsSmi(callable), &if_notreceiver);
- GotoIfNot(IsJSReceiver(callable), &if_notreceiver);
+ GotoIfNot(IsJSReceiver(CAST(callable)), &if_notreceiver);
// Load the @@hasInstance property from {callable}.
TNode<Object> inst_of_handler =
@@ -13032,8 +12622,8 @@ Node* CodeStubAssembler::InstanceOf(Node* object, Node* callable,
// Call to Function.prototype[@@hasInstance] directly.
Callable builtin(BUILTIN_CODE(isolate(), FunctionPrototypeHasInstance),
CallTrampolineDescriptor{});
- Node* result = CallJS(builtin, context, inst_of_handler, callable, object);
- var_result.Bind(result);
+ var_result =
+ CAST(CallJS(builtin, context, inst_of_handler, callable, object));
Goto(&return_result);
}
@@ -13055,12 +12645,11 @@ Node* CodeStubAssembler::InstanceOf(Node* object, Node* callable,
BIND(&if_nohandler);
{
// Ensure that the {callable} is actually Callable.
- GotoIfNot(IsCallable(callable), &if_notcallable);
+ GotoIfNot(IsCallable(CAST(callable)), &if_notcallable);
// Use the OrdinaryHasInstance algorithm.
- TNode<Object> result =
- CallBuiltin(Builtins::kOrdinaryHasInstance, context, callable, object);
- var_result.Bind(result);
+ var_result = CAST(
+ CallBuiltin(Builtins::kOrdinaryHasInstance, context, callable, object));
Goto(&return_result);
}
@@ -13071,11 +12660,11 @@ Node* CodeStubAssembler::InstanceOf(Node* object, Node* callable,
{ ThrowTypeError(context, MessageTemplate::kNonObjectInInstanceOfCheck); }
BIND(&return_true);
- var_result.Bind(TrueConstant());
+ var_result = TrueConstant();
Goto(&return_result);
BIND(&return_false);
- var_result.Bind(FalseConstant());
+ var_result = FalseConstant();
Goto(&return_result);
BIND(&return_result);
@@ -13294,9 +12883,8 @@ TNode<JSObject> CodeStubAssembler::AllocateJSIteratorResult(
return CAST(result);
}
-Node* CodeStubAssembler::AllocateJSIteratorResultForEntry(Node* context,
- Node* key,
- Node* value) {
+TNode<JSObject> CodeStubAssembler::AllocateJSIteratorResultForEntry(
+ TNode<Context> context, TNode<Object> key, SloppyTNode<Object> value) {
TNode<NativeContext> native_context = LoadNativeContext(context);
TNode<Smi> length = SmiConstant(2);
int const elements_size = FixedArray::SizeFor(2);
@@ -13326,7 +12914,7 @@ Node* CodeStubAssembler::AllocateJSIteratorResultForEntry(Node* context,
StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kValueOffset, array);
StoreObjectFieldRoot(result, JSIteratorResult::kDoneOffset,
RootIndex::kFalseValue);
- return result;
+ return CAST(result);
}
TNode<JSReceiver> CodeStubAssembler::ArraySpeciesCreate(TNode<Context> context,
@@ -13393,21 +12981,19 @@ TNode<UintPtrT> CodeStubAssembler::LoadJSTypedArrayLength(
return LoadObjectField<UintPtrT>(typed_array, JSTypedArray::kLengthOffset);
}
-CodeStubArguments::CodeStubArguments(
- CodeStubAssembler* assembler, Node* argc, Node* fp,
- CodeStubAssembler::ParameterMode param_mode, ReceiverMode receiver_mode)
+CodeStubArguments::CodeStubArguments(CodeStubAssembler* assembler,
+ TNode<IntPtrT> argc, TNode<RawPtrT> fp,
+ ReceiverMode receiver_mode)
: assembler_(assembler),
- argc_mode_(param_mode),
receiver_mode_(receiver_mode),
argc_(argc),
base_(),
fp_(fp != nullptr ? fp : assembler_->LoadFramePointer()) {
TNode<IntPtrT> offset = assembler_->ElementOffsetFromIndex(
- argc_, SYSTEM_POINTER_ELEMENTS, param_mode,
+ argc_, SYSTEM_POINTER_ELEMENTS,
(StandardFrameConstants::kFixedSlotCountAboveFp - 1) *
kSystemPointerSize);
- base_ =
- assembler_->UncheckedCast<RawPtrT>(assembler_->IntPtrAdd(fp_, offset));
+ base_ = assembler_->RawPtrAdd(fp_, offset);
}
TNode<Object> CodeStubArguments::GetReceiver() const {
@@ -13422,24 +13008,18 @@ void CodeStubArguments::SetReceiver(TNode<Object> object) const {
base_, assembler_->IntPtrConstant(kSystemPointerSize), object);
}
-TNode<WordT> CodeStubArguments::AtIndexPtr(
- Node* index, CodeStubAssembler::ParameterMode mode) const {
- using Node = compiler::Node;
- Node* negated_index = assembler_->IntPtrOrSmiSub(
- assembler_->IntPtrOrSmiConstant(0, mode), index, mode);
+TNode<RawPtrT> CodeStubArguments::AtIndexPtr(TNode<IntPtrT> index) const {
+ TNode<IntPtrT> negated_index =
+ assembler_->IntPtrOrSmiSub(assembler_->IntPtrConstant(0), index);
TNode<IntPtrT> offset = assembler_->ElementOffsetFromIndex(
- negated_index, SYSTEM_POINTER_ELEMENTS, mode, 0);
- return assembler_->IntPtrAdd(assembler_->UncheckedCast<IntPtrT>(base_),
- offset);
+ negated_index, SYSTEM_POINTER_ELEMENTS, 0);
+ return assembler_->RawPtrAdd(base_, offset);
}
-TNode<Object> CodeStubArguments::AtIndex(
- Node* index, CodeStubAssembler::ParameterMode mode) const {
- DCHECK_EQ(argc_mode_, mode);
- CSA_ASSERT(assembler_,
- assembler_->UintPtrOrSmiLessThan(index, GetLength(mode), mode));
+TNode<Object> CodeStubArguments::AtIndex(TNode<IntPtrT> index) const {
+ CSA_ASSERT(assembler_, assembler_->UintPtrOrSmiLessThan(index, GetLength()));
return assembler_->UncheckedCast<Object>(
- assembler_->LoadFullTagged(AtIndexPtr(index, mode)));
+ assembler_->LoadFullTagged(AtIndexPtr(index)));
}
TNode<Object> CodeStubArguments::AtIndex(int index) const {
@@ -13452,9 +13032,8 @@ TNode<Object> CodeStubArguments::GetOptionalArgumentValue(
CodeStubAssembler::Label argument_missing(assembler_),
argument_done(assembler_, &result);
- assembler_->GotoIf(assembler_->UintPtrOrSmiGreaterThanOrEqual(
- assembler_->IntPtrOrSmiConstant(index, argc_mode_),
- argc_, argc_mode_),
+ assembler_->GotoIf(assembler_->UintPtrGreaterThanOrEqual(
+ assembler_->IntPtrConstant(index), argc_),
&argument_missing);
result = AtIndex(index);
assembler_->Goto(&argument_done);
@@ -13473,10 +13052,8 @@ TNode<Object> CodeStubArguments::GetOptionalArgumentValue(
CodeStubAssembler::Label argument_missing(assembler_),
argument_done(assembler_, &result);
- assembler_->GotoIf(
- assembler_->UintPtrOrSmiGreaterThanOrEqual(
- assembler_->IntPtrToParameter(index, argc_mode_), argc_, argc_mode_),
- &argument_missing);
+ assembler_->GotoIf(assembler_->UintPtrGreaterThanOrEqual(index, argc_),
+ &argument_missing);
result = AtIndex(index);
assembler_->Goto(&argument_done);
@@ -13490,43 +13067,38 @@ TNode<Object> CodeStubArguments::GetOptionalArgumentValue(
void CodeStubArguments::ForEach(
const CodeStubAssembler::VariableList& vars,
- const CodeStubArguments::ForEachBodyFunction& body, Node* first, Node* last,
- CodeStubAssembler::ParameterMode mode) {
+ const CodeStubArguments::ForEachBodyFunction& body, TNode<IntPtrT> first,
+ TNode<IntPtrT> last) const {
assembler_->Comment("CodeStubArguments::ForEach");
if (first == nullptr) {
- first = assembler_->IntPtrOrSmiConstant(0, mode);
+ first = assembler_->IntPtrConstant(0);
}
if (last == nullptr) {
- DCHECK_EQ(mode, argc_mode_);
last = argc_;
}
- TNode<IntPtrT> start = assembler_->IntPtrSub(
- assembler_->UncheckedCast<IntPtrT>(base_),
- assembler_->ElementOffsetFromIndex(first, SYSTEM_POINTER_ELEMENTS, mode));
- TNode<IntPtrT> end = assembler_->IntPtrSub(
- assembler_->UncheckedCast<IntPtrT>(base_),
- assembler_->ElementOffsetFromIndex(last, SYSTEM_POINTER_ELEMENTS, mode));
- assembler_->BuildFastLoop(
+ TNode<RawPtrT> start = assembler_->RawPtrSub(
+ base_,
+ assembler_->ElementOffsetFromIndex(first, SYSTEM_POINTER_ELEMENTS));
+ TNode<RawPtrT> end = assembler_->RawPtrSub(
+ base_, assembler_->ElementOffsetFromIndex(last, SYSTEM_POINTER_ELEMENTS));
+ assembler_->BuildFastLoop<RawPtrT>(
vars, start, end,
- [this, &body](Node* current) {
- Node* arg = assembler_->Load(MachineType::AnyTagged(), current);
+ [&](TNode<RawPtrT> current) {
+ TNode<Object> arg = assembler_->Load<Object>(current);
body(arg);
},
- -kSystemPointerSize, CodeStubAssembler::INTPTR_PARAMETERS,
- CodeStubAssembler::IndexAdvanceMode::kPost);
+ -kSystemPointerSize, CodeStubAssembler::IndexAdvanceMode::kPost);
}
void CodeStubArguments::PopAndReturn(Node* value) {
- Node* pop_count;
+ TNode<IntPtrT> pop_count;
if (receiver_mode_ == ReceiverMode::kHasReceiver) {
- pop_count = assembler_->IntPtrOrSmiAdd(
- argc_, assembler_->IntPtrOrSmiConstant(1, argc_mode_), argc_mode_);
+ pop_count = assembler_->IntPtrAdd(argc_, assembler_->IntPtrConstant(1));
} else {
pop_count = argc_;
}
- assembler_->PopAndReturn(assembler_->ParameterToIntPtr(pop_count, argc_mode_),
- value);
+ assembler_->PopAndReturn(pop_count, value);
}
TNode<BoolT> CodeStubAssembler::IsFastElementsKind(
@@ -13642,21 +13214,15 @@ Node* CodeStubAssembler::
}
TNode<Code> CodeStubAssembler::LoadBuiltin(TNode<Smi> builtin_id) {
- CSA_ASSERT(this, SmiGreaterThanOrEqual(builtin_id, SmiConstant(0)));
- CSA_ASSERT(this,
- SmiLessThan(builtin_id, SmiConstant(Builtins::builtin_count)));
+ CSA_ASSERT(this, SmiBelow(builtin_id, SmiConstant(Builtins::builtin_count)));
- int const kSmiShiftBits = kSmiShiftSize + kSmiTagSize;
- int index_shift = kSystemPointerSizeLog2 - kSmiShiftBits;
- TNode<WordT> table_index =
- index_shift >= 0
- ? WordShl(BitcastTaggedSignedToWord(builtin_id), index_shift)
- : WordSar(BitcastTaggedSignedToWord(builtin_id), -index_shift);
-
- return CAST(
- Load(MachineType::TaggedPointer(),
+ TNode<IntPtrT> offset =
+ ElementOffsetFromIndex(SmiToBInt(builtin_id), SYSTEM_POINTER_ELEMENTS);
+
+ return CAST(BitcastWordToTagged(
+ Load(MachineType::Pointer(),
ExternalConstant(ExternalReference::builtins_address(isolate())),
- table_index));
+ offset)));
}
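A hedged reading of the rewritten LoadBuiltin, not text from the patch: SYSTEM_POINTER_ELEMENTS has a shift of kSystemPointerSizeLog2 and no base offset, so the offset still works out to builtin_id * kSystemPointerSize into the builtins table, and the now pointer-typed load is bitcast back to a tagged Code object instead of being loaded as TaggedPointer directly.

  // offset = builtin_id * kSystemPointerSize   (same table indexing as the
  //          removed hand-rolled Smi shift)
  // result = BitcastWordToTagged(load from builtins_address(isolate()) + offset)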
TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
@@ -13765,11 +13331,9 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
return sfi_code.value();
}
-Node* CodeStubAssembler::AllocateFunctionWithMapAndContext(Node* map,
- Node* shared_info,
- Node* context) {
- CSA_SLOW_ASSERT(this, IsMap(map));
-
+TNode<JSFunction> CodeStubAssembler::AllocateFunctionWithMapAndContext(
+ TNode<Map> map, TNode<SharedFunctionInfo> shared_info,
+ TNode<Context> context) {
TNode<Code> const code = GetSharedFunctionInfoCode(shared_info);
// TODO(ishell): All the callers of this function pass map loaded from
@@ -13790,7 +13354,7 @@ Node* CodeStubAssembler::AllocateFunctionWithMapAndContext(Node* map,
shared_info);
StoreObjectFieldNoWriteBarrier(fun, JSFunction::kContextOffset, context);
StoreObjectFieldNoWriteBarrier(fun, JSFunction::kCodeOffset, code);
- return fun;
+ return CAST(fun);
}
void CodeStubAssembler::CheckPrototypeEnumCache(Node* receiver,
@@ -13839,8 +13403,9 @@ void CodeStubAssembler::CheckPrototypeEnumCache(Node* receiver,
}
}
-Node* CodeStubAssembler::CheckEnumCache(Node* receiver, Label* if_empty,
- Label* if_runtime) {
+TNode<Map> CodeStubAssembler::CheckEnumCache(TNode<HeapObject> receiver,
+ Label* if_empty,
+ Label* if_runtime) {
Label if_fast(this), if_cache(this), if_no_cache(this, Label::kDeferred);
TNode<Map> receiver_map = LoadMap(receiver);
@@ -13855,7 +13420,7 @@ Node* CodeStubAssembler::CheckEnumCache(Node* receiver, Label* if_empty,
{
// Avoid runtime-call for empty dictionary receivers.
GotoIfNot(IsDictionaryMap(receiver_map), if_runtime);
- TNode<NameDictionary> properties = CAST(LoadSlowProperties(receiver));
+ TNode<NameDictionary> properties = CAST(LoadSlowProperties(CAST(receiver)));
TNode<Smi> length = GetNumberOfElements(properties);
GotoIfNot(TaggedEqual(length, SmiConstant(0)), if_runtime);
// Check that there are no elements on the {receiver} and its prototype
@@ -13881,8 +13446,7 @@ TNode<Object> CodeStubAssembler::GetArgumentValue(TorqueStructArguments args,
TorqueStructArguments CodeStubAssembler::GetFrameArguments(
TNode<RawPtrT> frame, TNode<IntPtrT> argc) {
- return CodeStubArguments(this, argc, frame, INTPTR_PARAMETERS)
- .GetTorqueArguments();
+ return CodeStubArguments(this, argc, frame).GetTorqueArguments();
}
void CodeStubAssembler::Print(const char* s) {
@@ -13976,9 +13540,8 @@ TNode<JSArray> CodeStubAssembler::ArrayCreate(TNode<Context> context,
// TODO(delphick): Consider using
// AllocateUninitializedJSArrayWithElements to avoid initializing an
// array and then writing over it.
- array =
- AllocateJSArray(PACKED_SMI_ELEMENTS, array_map, length, SmiConstant(0),
- nullptr, ParameterMode::SMI_PARAMETERS);
+ array = AllocateJSArray(PACKED_SMI_ELEMENTS, array_map, length,
+ SmiConstant(0), {}, ParameterMode::SMI_PARAMETERS);
Goto(&done);
BIND(&done);
diff --git a/deps/v8/src/codegen/code-stub-assembler.h b/deps/v8/src/codegen/code-stub-assembler.h
index 9884d04e66..eee3e7a376 100644
--- a/deps/v8/src/codegen/code-stub-assembler.h
+++ b/deps/v8/src/codegen/code-stub-assembler.h
@@ -97,6 +97,7 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
V(iterator_symbol, iterator_symbol, IteratorSymbol) \
V(length_string, length_string, LengthString) \
V(ManyClosuresCellMap, many_closures_cell_map, ManyClosuresCellMap) \
+ V(match_symbol, match_symbol, MatchSymbol) \
V(megamorphic_symbol, megamorphic_symbol, MegamorphicSymbol) \
V(MetaMap, meta_map, MetaMap) \
V(MinusZeroValue, minus_zero_value, MinusZero) \
@@ -114,7 +115,6 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
V(object_to_string, object_to_string, ObjectToString) \
V(OneClosureCellMap, one_closure_cell_map, OneClosureCellMap) \
V(OnePointerFillerMap, one_pointer_filler_map, OnePointerFillerMap) \
- V(premonomorphic_symbol, premonomorphic_symbol, PremonomorphicSymbol) \
V(PreparseDataMap, preparse_data_map, PreparseDataMap) \
V(PromiseCapabilityMap, promise_capability_map, PromiseCapabilityMap) \
V(PromiseFulfillReactionJobTaskMap, promise_fulfill_reaction_job_task_map, \
@@ -157,11 +157,11 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(V)
#ifdef DEBUG
-#define CSA_CHECK(csa, x) \
- (csa)->Check( \
- [&]() -> compiler::Node* { \
- return implicit_cast<compiler::SloppyTNode<Word32T>>(x); \
- }, \
+#define CSA_CHECK(csa, x) \
+ (csa)->Check( \
+ [&]() -> compiler::Node* { \
+ return implicit_cast<SloppyTNode<Word32T>>(x); \
+ }, \
#x, __FILE__, __LINE__)
#else
#define CSA_CHECK(csa, x) (csa)->FastCheck(x)
@@ -255,10 +255,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
public TorqueGeneratedExportedMacrosAssembler {
public:
using Node = compiler::Node;
- template <class T>
- using TNode = compiler::TNode<T>;
- template <class T>
- using SloppyTNode = compiler::SloppyTNode<T>;
template <typename T>
using LazyNode = std::function<TNode<T>()>;
@@ -303,11 +299,17 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return ParameterRepresentation(OptimalParameterMode());
}
+ TNode<IntPtrT> ParameterToIntPtr(TNode<Smi> value) { return SmiUntag(value); }
+ TNode<IntPtrT> ParameterToIntPtr(TNode<IntPtrT> value) { return value; }
+ // TODO(v8:9708): remove once all uses are ported.
TNode<IntPtrT> ParameterToIntPtr(Node* value, ParameterMode mode) {
if (mode == SMI_PARAMETERS) value = SmiUntag(value);
return UncheckedCast<IntPtrT>(value);
}
+ template <typename TIndex>
+ TNode<TIndex> IntPtrToParameter(TNode<IntPtrT> value);
+
Node* IntPtrToParameter(SloppyTNode<IntPtrT> value, ParameterMode mode) {
if (mode == SMI_PARAMETERS) return SmiTag(value);
return value;
@@ -364,6 +366,16 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
#error Unknown architecture.
#endif
+ // Pointer compression specific. Returns true if the upper 32 bits of a Smi
+ // contain the sign of the lower 32 bits (i.e. not corrupted) so that the Smi
+ // can be directly used as an index in element offset computation.
+ TNode<BoolT> IsValidSmiIndex(TNode<Smi> smi);
+
+ // Pointer compression specific. Ensures that the upper 32 bits of a Smi
+ // contain the sign of the lower 32 bits so that the Smi can be directly used
+ // as an index in element offset computation.
+ TNode<Smi> NormalizeSmiIndex(TNode<Smi> smi_index);
+
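These two declarations back the pointer-compression fix in ElementOffsetFromIndex earlier in this patch: with compressed pointers the upper half of a Smi word may be corrupted, so a non-constant Smi index is normalized before being bitcast to a word. The call-site shape, repeated from the .cc hunk above for the header reader:

  if (COMPRESS_POINTERS_BOOL) {
    smi_index_node = NormalizeSmiIndex(smi_index_node);
  }
  intptr_index_node = BitcastTaggedToWordForTagAndSmiBits(smi_index_node);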
TNode<Smi> TaggedToSmi(TNode<Object> value, Label* fail) {
GotoIf(TaggedIsNotSmi(value), fail);
return UncheckedCast<Smi>(value);
@@ -443,18 +455,52 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
Node* MatchesParameterMode(Node* value, ParameterMode mode);
-#define PARAMETER_BINOP(OpName, IntPtrOpName, SmiOpName) \
- Node* OpName(Node* a, Node* b, ParameterMode mode) { \
- if (mode == SMI_PARAMETERS) { \
- return SmiOpName(CAST(a), CAST(b)); \
- } else { \
- DCHECK_EQ(INTPTR_PARAMETERS, mode); \
- return IntPtrOpName(a, b); \
- } \
- }
+#define PARAMETER_BINOP(OpName, IntPtrOpName, SmiOpName) \
+ /* TODO(v8:9708): remove once all uses are ported. */ \
+ Node* OpName(Node* a, Node* b, ParameterMode mode) { \
+ if (mode == SMI_PARAMETERS) { \
+ return SmiOpName(CAST(a), CAST(b)); \
+ } else { \
+ DCHECK_EQ(INTPTR_PARAMETERS, mode); \
+ return IntPtrOpName(UncheckedCast<IntPtrT>(a), \
+ UncheckedCast<IntPtrT>(b)); \
+ } \
+ } \
+ TNode<Smi> OpName(TNode<Smi> a, TNode<Smi> b) { return SmiOpName(a, b); } \
+ TNode<IntPtrT> OpName(TNode<IntPtrT> a, TNode<IntPtrT> b) { \
+ return IntPtrOpName(a, b); \
+ } \
+ TNode<RawPtrT> OpName(TNode<RawPtrT> a, TNode<RawPtrT> b) { \
+ return ReinterpretCast<RawPtrT>(IntPtrOpName( \
+ ReinterpretCast<IntPtrT>(a), ReinterpretCast<IntPtrT>(b))); \
+ }
+ // TODO(v8:9708): Define BInt operations once all uses are ported.
PARAMETER_BINOP(IntPtrOrSmiMin, IntPtrMin, SmiMin)
PARAMETER_BINOP(IntPtrOrSmiAdd, IntPtrAdd, SmiAdd)
PARAMETER_BINOP(IntPtrOrSmiSub, IntPtrSub, SmiSub)
+#undef PARAMETER_BINOP
+
+#define PARAMETER_BINOP(OpName, IntPtrOpName, SmiOpName) \
+ /* TODO(v8:9708): remove once all uses are ported. */ \
+ TNode<BoolT> OpName(Node* a, Node* b, ParameterMode mode) { \
+ if (mode == SMI_PARAMETERS) { \
+ return SmiOpName(CAST(a), CAST(b)); \
+ } else { \
+ DCHECK_EQ(INTPTR_PARAMETERS, mode); \
+ return IntPtrOpName(UncheckedCast<IntPtrT>(a), \
+ UncheckedCast<IntPtrT>(b)); \
+ } \
+ } \
+ TNode<BoolT> OpName(TNode<Smi> a, TNode<Smi> b) { return SmiOpName(a, b); } \
+ TNode<BoolT> OpName(TNode<IntPtrT> a, TNode<IntPtrT> b) { \
+ return IntPtrOpName(a, b); \
+ } \
+ TNode<BoolT> OpName(TNode<RawPtrT> a, TNode<RawPtrT> b) { \
+ return IntPtrOpName(a, b); \
+ }
+ // TODO(v8:9708): Define BInt operations once all uses are ported.
+ PARAMETER_BINOP(IntPtrOrSmiEqual, WordEqual, SmiEqual)
+ PARAMETER_BINOP(IntPtrOrSmiNotEqual, WordNotEqual, SmiNotEqual)
PARAMETER_BINOP(IntPtrOrSmiLessThan, IntPtrLessThan, SmiLessThan)
PARAMETER_BINOP(IntPtrOrSmiLessThanOrEqual, IntPtrLessThanOrEqual,
SmiLessThanOrEqual)
@@ -473,31 +519,30 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
intptr_t ConstexprWordNot(intptr_t a) { return ~a; }
uintptr_t ConstexprWordNot(uintptr_t a) { return ~a; }
- TNode<BoolT> TaggedEqual(TNode<UnionT<Object, MaybeObject>> a,
- TNode<UnionT<Object, MaybeObject>> b) {
- // In pointer-compressed architectures, the instruction selector will narrow
- // this comparison to a 32-bit one.
+ TNode<BoolT> TaggedEqual(TNode<AnyTaggedT> a, TNode<AnyTaggedT> b) {
+#ifdef V8_COMPRESS_POINTERS
+ return Word32Equal(ChangeTaggedToCompressed(a),
+ ChangeTaggedToCompressed(b));
+#else
return WordEqual(ReinterpretCast<WordT>(a), ReinterpretCast<WordT>(b));
+#endif
}
- TNode<BoolT> TaggedNotEqual(TNode<UnionT<Object, MaybeObject>> a,
- TNode<UnionT<Object, MaybeObject>> b) {
- // In pointer-compressed architectures, the instruction selector will narrow
- // this comparison to a 32-bit one.
- return WordNotEqual(ReinterpretCast<WordT>(a), ReinterpretCast<WordT>(b));
+ TNode<BoolT> TaggedNotEqual(TNode<AnyTaggedT> a, TNode<AnyTaggedT> b) {
+ return Word32BinaryNot(TaggedEqual(a, b));
}
TNode<Object> NoContextConstant();
#define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \
- compiler::TNode<std::remove_pointer<std::remove_reference<decltype( \
+ TNode<std::remove_pointer<std::remove_reference<decltype( \
std::declval<ReadOnlyRoots>().rootAccessorName())>::type>::type> \
name##Constant();
HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR)
#undef HEAP_CONSTANT_ACCESSOR
#define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \
- compiler::TNode<std::remove_pointer<std::remove_reference<decltype( \
+ TNode<std::remove_pointer<std::remove_reference<decltype( \
std::declval<Heap>().rootAccessorName())>::type>::type> \
name##Constant();
HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR)
@@ -511,11 +556,16 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BInt> BIntConstant(int value);
+ template <typename TIndex>
+ TNode<TIndex> IntPtrOrSmiConstant(int value);
+ // TODO(v8:9708): remove once all uses are ported.
Node* IntPtrOrSmiConstant(int value, ParameterMode mode);
- TNode<BoolT> IntPtrOrSmiEqual(Node* left, Node* right, ParameterMode mode);
- TNode<BoolT> IntPtrOrSmiNotEqual(Node* left, Node* right, ParameterMode mode);
+ bool IsIntPtrOrSmiConstantZero(TNode<Smi> test);
+ bool IsIntPtrOrSmiConstantZero(TNode<IntPtrT> test);
+ // TODO(v8:9708): remove once all uses are ported.
bool IsIntPtrOrSmiConstantZero(Node* test, ParameterMode mode);
+
bool TryGetIntPtrOrSmiConstantValue(Node* maybe_constant, int* value,
ParameterMode mode);
@@ -557,25 +607,27 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Float64T> SmiToFloat64(SloppyTNode<Smi> value);
TNode<Smi> SmiFromIntPtr(SloppyTNode<IntPtrT> value) { return SmiTag(value); }
TNode<Smi> SmiFromInt32(SloppyTNode<Int32T> value);
+ TNode<Smi> SmiFromUint32(TNode<Uint32T> value);
TNode<IntPtrT> SmiToIntPtr(SloppyTNode<Smi> value) { return SmiUntag(value); }
TNode<Int32T> SmiToInt32(SloppyTNode<Smi> value);
// Smi operations.
-#define SMI_ARITHMETIC_BINOP(SmiOpName, IntPtrOpName, Int32OpName) \
- TNode<Smi> SmiOpName(TNode<Smi> a, TNode<Smi> b) { \
- if (SmiValuesAre32Bits()) { \
- return BitcastWordToTaggedSigned(IntPtrOpName( \
- BitcastTaggedSignedToWord(a), BitcastTaggedSignedToWord(b))); \
- } else { \
- DCHECK(SmiValuesAre31Bits()); \
- if (kSystemPointerSize == kInt64Size) { \
- CSA_ASSERT(this, IsValidSmi(a)); \
- CSA_ASSERT(this, IsValidSmi(b)); \
- } \
- return BitcastWordToTaggedSigned(ChangeInt32ToIntPtr( \
- Int32OpName(TruncateIntPtrToInt32(BitcastTaggedSignedToWord(a)), \
- TruncateIntPtrToInt32(BitcastTaggedSignedToWord(b))))); \
- } \
+#define SMI_ARITHMETIC_BINOP(SmiOpName, IntPtrOpName, Int32OpName) \
+ TNode<Smi> SmiOpName(TNode<Smi> a, TNode<Smi> b) { \
+ if (SmiValuesAre32Bits()) { \
+ return BitcastWordToTaggedSigned( \
+ IntPtrOpName(BitcastTaggedToWordForTagAndSmiBits(a), \
+ BitcastTaggedToWordForTagAndSmiBits(b))); \
+ } else { \
+ DCHECK(SmiValuesAre31Bits()); \
+ if (kSystemPointerSize == kInt64Size) { \
+ CSA_ASSERT(this, IsValidSmi(a)); \
+ CSA_ASSERT(this, IsValidSmi(b)); \
+ } \
+ return BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(Int32OpName( \
+ TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(a)), \
+ TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(b))))); \
+ } \
}
SMI_ARITHMETIC_BINOP(SmiAdd, IntPtrAdd, Int32Add)
SMI_ARITHMETIC_BINOP(SmiSub, IntPtrSub, Int32Sub)
@@ -595,38 +647,40 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Smi> SmiShl(TNode<Smi> a, int shift) {
return BitcastWordToTaggedSigned(
- WordShl(BitcastTaggedSignedToWord(a), shift));
+ WordShl(BitcastTaggedToWordForTagAndSmiBits(a), shift));
}
TNode<Smi> SmiShr(TNode<Smi> a, int shift) {
if (kTaggedSize == kInt64Size) {
return BitcastWordToTaggedSigned(
- WordAnd(WordShr(BitcastTaggedSignedToWord(a), shift),
- BitcastTaggedSignedToWord(SmiConstant(-1))));
+ WordAnd(WordShr(BitcastTaggedToWordForTagAndSmiBits(a), shift),
+ BitcastTaggedToWordForTagAndSmiBits(SmiConstant(-1))));
} else {
// For pointer compressed Smis, we want to make sure that we truncate to
// int32 before shifting, to prevent the values of the top 32 bits from
// leaking into the sign bit of the smi.
return BitcastWordToTaggedSigned(WordAnd(
ChangeInt32ToIntPtr(Word32Shr(
- TruncateWordToInt32(BitcastTaggedSignedToWord(a)), shift)),
- BitcastTaggedSignedToWord(SmiConstant(-1))));
+ TruncateWordToInt32(BitcastTaggedToWordForTagAndSmiBits(a)),
+ shift)),
+ BitcastTaggedToWordForTagAndSmiBits(SmiConstant(-1))));
}
}
TNode<Smi> SmiSar(TNode<Smi> a, int shift) {
if (kTaggedSize == kInt64Size) {
return BitcastWordToTaggedSigned(
- WordAnd(WordSar(BitcastTaggedSignedToWord(a), shift),
- BitcastTaggedSignedToWord(SmiConstant(-1))));
+ WordAnd(WordSar(BitcastTaggedToWordForTagAndSmiBits(a), shift),
+ BitcastTaggedToWordForTagAndSmiBits(SmiConstant(-1))));
} else {
// For pointer compressed Smis, we want to make sure that we truncate to
// int32 before shifting, to prevent the values of the top 32 bits from
// changing the sign bit of the smi.
return BitcastWordToTaggedSigned(WordAnd(
ChangeInt32ToIntPtr(Word32Sar(
- TruncateWordToInt32(BitcastTaggedSignedToWord(a)), shift)),
- BitcastTaggedSignedToWord(SmiConstant(-1))));
+ TruncateWordToInt32(BitcastTaggedToWordForTagAndSmiBits(a)),
+ shift)),
+ BitcastTaggedToWordForTagAndSmiBits(SmiConstant(-1))));
}
}
@@ -648,21 +702,22 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
}
}
-#define SMI_COMPARISON_OP(SmiOpName, IntPtrOpName, Int32OpName) \
- TNode<BoolT> SmiOpName(TNode<Smi> a, TNode<Smi> b) { \
- if (kTaggedSize == kInt64Size) { \
- return IntPtrOpName(BitcastTaggedSignedToWord(a), \
- BitcastTaggedSignedToWord(b)); \
- } else { \
- DCHECK_EQ(kTaggedSize, kInt32Size); \
- DCHECK(SmiValuesAre31Bits()); \
- if (kSystemPointerSize == kInt64Size) { \
- CSA_ASSERT(this, IsValidSmi(a)); \
- CSA_ASSERT(this, IsValidSmi(b)); \
- } \
- return Int32OpName(TruncateIntPtrToInt32(BitcastTaggedSignedToWord(a)), \
- TruncateIntPtrToInt32(BitcastTaggedSignedToWord(b))); \
- } \
+#define SMI_COMPARISON_OP(SmiOpName, IntPtrOpName, Int32OpName) \
+ TNode<BoolT> SmiOpName(TNode<Smi> a, TNode<Smi> b) { \
+ if (kTaggedSize == kInt64Size) { \
+ return IntPtrOpName(BitcastTaggedToWordForTagAndSmiBits(a), \
+ BitcastTaggedToWordForTagAndSmiBits(b)); \
+ } else { \
+ DCHECK_EQ(kTaggedSize, kInt32Size); \
+ DCHECK(SmiValuesAre31Bits()); \
+ if (kSystemPointerSize == kInt64Size) { \
+ CSA_ASSERT(this, IsValidSmi(a)); \
+ CSA_ASSERT(this, IsValidSmi(b)); \
+ } \
+ return Int32OpName( \
+ TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(a)), \
+ TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(b))); \
+ } \
}
SMI_COMPARISON_OP(SmiEqual, WordEqual, Word32Equal)
SMI_COMPARISON_OP(SmiNotEqual, WordNotEqual, Word32NotEqual)
@@ -856,9 +911,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Int32T> TruncateIntPtrToInt32(SloppyTNode<IntPtrT> value);
// Check a value for smi-ness
- TNode<BoolT> TaggedIsSmi(SloppyTNode<Object> a);
TNode<BoolT> TaggedIsSmi(TNode<MaybeObject> a);
- TNode<BoolT> TaggedIsNotSmi(SloppyTNode<Object> a);
+ TNode<BoolT> TaggedIsSmi(SloppyTNode<Object> a) {
+ return TaggedIsSmi(UncheckedCast<MaybeObject>(a));
+ }
+ TNode<BoolT> TaggedIsNotSmi(TNode<MaybeObject> a);
+ TNode<BoolT> TaggedIsNotSmi(SloppyTNode<Object> a) {
+ return TaggedIsNotSmi(UncheckedCast<MaybeObject>(a));
+ }
// Check that the value is a non-negative smi.
TNode<BoolT> TaggedIsPositiveSmi(SloppyTNode<Object> a);
// Check that a word has a word-aligned address.
@@ -918,9 +978,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Works only with V8_ENABLE_FORCE_SLOW_PATH compile time flag. Nop otherwise.
void GotoIfForceSlowPath(Label* if_true);
- // Branches to {if_true} when Debug::ExecutionMode is DebugInfo::kSideEffect.
- void GotoIfDebugExecutionModeChecksSideEffects(Label* if_true);
-
// Load value from current parent frame by given offset in bytes.
Node* LoadFromParentFrame(int offset,
MachineType type = MachineType::AnyTagged());
@@ -1060,9 +1117,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Word32T> IsStringWrapperElementsKind(TNode<Map> map);
void GotoIfMapHasSlowProperties(TNode<Map> map, Label* if_slow);
- // Load the properties backing store of a JSObject.
- TNode<HeapObject> LoadSlowProperties(SloppyTNode<JSObject> object);
- TNode<HeapObject> LoadFastProperties(SloppyTNode<JSObject> object);
+ // Load the properties backing store of a JSReceiver.
+ TNode<HeapObject> LoadSlowProperties(SloppyTNode<JSReceiver> object);
+ TNode<HeapObject> LoadFastProperties(SloppyTNode<JSReceiver> object);
// Load the elements backing store of a JSObject.
TNode<FixedArrayBase> LoadElements(SloppyTNode<JSObject> object) {
return LoadJSObjectElements(object);
@@ -1148,10 +1205,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<IntPtrT> LoadStringLengthAsWord(SloppyTNode<String> string);
// Load length field of a String object as uint32_t value.
TNode<Uint32T> LoadStringLengthAsWord32(SloppyTNode<String> string);
- // Loads a pointer to the sequential String char array.
- Node* PointerToSeqStringData(Node* seq_string);
// Load value field of a JSPrimitiveWrapper object.
- Node* LoadJSPrimitiveWrapperValue(Node* object);
+ TNode<Object> LoadJSPrimitiveWrapperValue(TNode<JSPrimitiveWrapper> object);
// Figures out whether the value of maybe_object is:
// - a SMI (jump to "if_smi", "extracted" will be the SMI value)
@@ -1175,7 +1230,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsWeakOrCleared(TNode<MaybeObject> value);
TNode<BoolT> IsCleared(TNode<MaybeObject> value);
- TNode<BoolT> IsNotCleared(TNode<MaybeObject> value);
+ TNode<BoolT> IsNotCleared(TNode<MaybeObject> value) {
+ return Word32BinaryNot(IsCleared(value));
+ }
// Removes the weak bit + asserts it was set.
TNode<HeapObject> GetHeapObjectAssumeWeak(TNode<MaybeObject> value);
@@ -1183,12 +1240,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<HeapObject> GetHeapObjectAssumeWeak(TNode<MaybeObject> value,
Label* if_cleared);
- TNode<BoolT> IsWeakReferenceTo(TNode<MaybeObject> object,
- TNode<Object> value);
- TNode<BoolT> IsNotWeakReferenceTo(TNode<MaybeObject> object,
- TNode<Object> value);
- TNode<BoolT> IsStrongReferenceTo(TNode<MaybeObject> object,
- TNode<Object> value);
+  // Checks if |maybe_object| is a weak reference to the given |heap_object|.
+  // Works for any tagged |maybe_object| value.
+ TNode<BoolT> IsWeakReferenceTo(TNode<MaybeObject> maybe_object,
+ TNode<HeapObject> heap_object);
+ // Returns true if the |object| is a HeapObject and |maybe_object| is a weak
+ // reference to |object|.
+ // The |maybe_object| must not be a Smi.
+ TNode<BoolT> IsWeakReferenceToObject(TNode<MaybeObject> maybe_object,
+ TNode<Object> object);
TNode<MaybeObject> MakeWeak(TNode<HeapObject> value);
@@ -1341,9 +1401,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Int32T> elements_kind, Label* if_accessor, Label* if_hole);
// Load a feedback slot from a FeedbackVector.
+ template <typename TIndex>
TNode<MaybeObject> LoadFeedbackVectorSlot(
- Node* object, Node* index, int additional_offset = 0,
- ParameterMode parameter_mode = INTPTR_PARAMETERS);
+ TNode<FeedbackVector> feedback_vector, TNode<TIndex> slot,
+ int additional_offset = 0);
TNode<IntPtrT> LoadFeedbackVectorLength(TNode<FeedbackVector>);
TNode<Float64T> LoadDoubleWithHoleCheck(TNode<FixedDoubleArray> array,
@@ -1383,13 +1444,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BigInt> BigIntFromInt32Pair(TNode<IntPtrT> low, TNode<IntPtrT> high);
TNode<BigInt> BigIntFromUint32Pair(TNode<UintPtrT> low, TNode<UintPtrT> high);
- void StoreJSTypedArrayElementFromTagged(TNode<Context> context,
- TNode<JSTypedArray> typed_array,
- TNode<Smi> index_node,
- TNode<Object> value,
- ElementsKind elements_kind);
-
// Context manipulation
+ TNode<BoolT> LoadContextHasExtensionField(SloppyTNode<Context> context);
TNode<Object> LoadContextElement(SloppyTNode<Context> context,
int slot_index);
TNode<Object> LoadContextElement(SloppyTNode<Context> context,
@@ -1608,10 +1664,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
}
void StoreFeedbackVectorSlot(
- Node* object, Node* index, Node* value,
+ TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot,
+ TNode<AnyTaggedT> value,
WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
- int additional_offset = 0,
- ParameterMode parameter_mode = INTPTR_PARAMETERS);
+ int additional_offset = 0);
void EnsureArrayLengthWritable(TNode<Map> map, Label* bailout);
@@ -1633,8 +1689,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
void BuildAppendJSArray(ElementsKind kind, Node* array, Node* value,
Label* bailout);
- void StoreFieldsNoWriteBarrier(Node* start_address, Node* end_address,
- Node* value);
+ void StoreFieldsNoWriteBarrier(TNode<IntPtrT> start_address,
+ TNode<IntPtrT> end_address,
+ TNode<Object> value);
Node* AllocateCellWithValue(Node* value,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
@@ -1642,7 +1699,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return AllocateCellWithValue(SmiConstant(value), SKIP_WRITE_BARRIER);
}
- Node* LoadCellValue(Node* cell);
+ TNode<Object> LoadCellValue(Node* cell);
void StoreCellValue(Node* cell, Node* value,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
@@ -1698,11 +1755,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<String> parent,
TNode<Smi> offset);
- // Allocate an appropriate one- or two-byte ConsString with the first and
- // second parts specified by |left| and |right|.
- TNode<String> AllocateConsString(TNode<Uint32T> length, TNode<String> left,
- TNode<String> right);
-
TNode<NameDictionary> AllocateNameDictionary(int at_least_space_for);
TNode<NameDictionary> AllocateNameDictionary(
TNode<IntPtrT> at_least_space_for, AllocationFlags = kNone);
@@ -1714,26 +1766,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
template <typename CollectionType>
Node* AllocateOrderedHashTable();
- // Builds code that finds OrderedHashTable entry for a key with hash code
- // {hash} with using the comparison code generated by {key_compare}. The code
- // jumps to {entry_found} if the key is found, or to {not_found} if the key
- // was not found. In the {entry_found} branch, the variable
- // entry_start_position will be bound to the index of the entry (relative to
- // OrderedHashTable::kHashTableStartIndex).
- //
- // The {CollectionType} template parameter stands for the particular instance
- // of OrderedHashTable, it should be OrderedHashMap or OrderedHashSet.
- template <typename CollectionType>
- void FindOrderedHashTableEntry(
- Node* table, Node* hash,
- const std::function<void(TNode<Object>, Label*, Label*)>& key_compare,
- Variable* entry_start_position, Label* entry_found, Label* not_found);
-
template <typename CollectionType>
TNode<CollectionType> AllocateSmallOrderedHashTable(TNode<IntPtrT> capacity);
Node* AllocateStruct(Node* map, AllocationFlags flags = kNone);
- void InitializeStructBody(Node* object, Node* map, Node* size,
+ void InitializeStructBody(TNode<HeapObject> object, TNode<IntPtrT> size,
int start_offset = Struct::kHeaderSize);
TNode<JSObject> AllocateJSObjectFromMap(
@@ -1742,14 +1779,17 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
SlackTrackingMode slack_tracking_mode = kNoSlackTracking);
void InitializeJSObjectFromMap(
- Node* object, Node* map, Node* instance_size, Node* properties = nullptr,
+ SloppyTNode<HeapObject> object, SloppyTNode<Map> map,
+ SloppyTNode<IntPtrT> instance_size, Node* properties = nullptr,
Node* elements = nullptr,
SlackTrackingMode slack_tracking_mode = kNoSlackTracking);
- void InitializeJSObjectBodyWithSlackTracking(Node* object, Node* map,
- Node* instance_size);
+ void InitializeJSObjectBodyWithSlackTracking(
+ SloppyTNode<HeapObject> object, SloppyTNode<Map> map,
+ SloppyTNode<IntPtrT> instance_size);
void InitializeJSObjectBodyNoSlackTracking(
- Node* object, Node* map, Node* instance_size,
+ SloppyTNode<HeapObject> object, SloppyTNode<Map> map,
+ SloppyTNode<IntPtrT> instance_size,
int start_offset = JSObject::kHeaderSize);
TNode<BoolT> IsValidFastJSArrayCapacity(Node* capacity,
@@ -1762,7 +1802,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
std::pair<TNode<JSArray>, TNode<FixedArrayBase>>
AllocateUninitializedJSArrayWithElements(
ElementsKind kind, TNode<Map> array_map, TNode<Smi> length,
- Node* allocation_site, Node* capacity,
+ TNode<AllocationSite> allocation_site, Node* capacity,
ParameterMode capacity_mode = INTPTR_PARAMETERS,
AllocationFlags allocation_flags = kNone,
int array_header_size = JSArray::kSize);
@@ -1771,20 +1811,20 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// The ParameterMode argument is only used for the capacity parameter.
TNode<JSArray> AllocateJSArray(
ElementsKind kind, TNode<Map> array_map, Node* capacity,
- TNode<Smi> length, Node* allocation_site = nullptr,
+ TNode<Smi> length, TNode<AllocationSite> allocation_site = {},
ParameterMode capacity_mode = INTPTR_PARAMETERS,
AllocationFlags allocation_flags = kNone);
TNode<JSArray> AllocateJSArray(ElementsKind kind, TNode<Map> array_map,
TNode<Smi> capacity, TNode<Smi> length) {
- return AllocateJSArray(kind, array_map, capacity, length, nullptr,
+ return AllocateJSArray(kind, array_map, capacity, length, {},
SMI_PARAMETERS);
}
TNode<JSArray> AllocateJSArray(ElementsKind kind, TNode<Map> array_map,
TNode<IntPtrT> capacity, TNode<Smi> length,
AllocationFlags allocation_flags = kNone) {
- return AllocateJSArray(kind, array_map, capacity, length, nullptr,
+ return AllocateJSArray(kind, array_map, capacity, length, {},
INTPTR_PARAMETERS, allocation_flags);
}
@@ -1792,7 +1832,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<JSArray> AllocateJSArray(TNode<Map> array_map,
TNode<FixedArrayBase> elements,
TNode<Smi> length,
- Node* allocation_site = nullptr,
+ TNode<AllocationSite> allocation_site = {},
int array_header_size = JSArray::kSize);
enum class HoleConversionMode { kDontConvert, kConvertToUndefined };
@@ -1806,15 +1846,17 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
  // If |convert_holes| is set to kDontConvert, holes are also copied to the
  // resulting array, which will have the same elements kind as |array|. The
  // function generates significantly less code in this case.
- Node* CloneFastJSArray(
- Node* context, Node* array, ParameterMode mode = INTPTR_PARAMETERS,
- Node* allocation_site = nullptr,
+ TNode<JSArray> CloneFastJSArray(
+ TNode<Context> context, TNode<JSArray> array,
+ ParameterMode mode = INTPTR_PARAMETERS,
+ TNode<AllocationSite> allocation_site = {},
HoleConversionMode convert_holes = HoleConversionMode::kDontConvert);
- Node* ExtractFastJSArray(Node* context, Node* array, Node* begin, Node* count,
+ Node* ExtractFastJSArray(TNode<Context> context, TNode<JSArray> array,
+ Node* begin, Node* count,
ParameterMode mode = INTPTR_PARAMETERS,
Node* capacity = nullptr,
- Node* allocation_site = nullptr);
+ TNode<AllocationSite> allocation_site = {});
TNode<FixedArrayBase> AllocateFixedArray(
ElementsKind kind, Node* capacity, ParameterMode mode = INTPTR_PARAMETERS,
@@ -1828,6 +1870,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
fixed_array_map);
}
+ TNode<NativeContext> GetCreationContext(TNode<JSReceiver> receiver,
+ Label* if_bailout);
+ TNode<Object> GetConstructor(TNode<Map> map);
+
TNode<Map> GetStructMap(InstanceType instance_type);
TNode<FixedArray> AllocateUninitializedFixedArray(intptr_t capacity) {
@@ -1879,10 +1925,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Object> object,
IterationKind mode);
+ // TODO(v8:9722): Return type should be JSIteratorResult
TNode<JSObject> AllocateJSIteratorResult(SloppyTNode<Context> context,
SloppyTNode<Object> value,
SloppyTNode<Oddball> done);
- Node* AllocateJSIteratorResultForEntry(Node* context, Node* key, Node* value);
+
+ // TODO(v8:9722): Return type should be JSIteratorResult
+ TNode<JSObject> AllocateJSIteratorResultForEntry(TNode<Context> context,
+ TNode<Object> key,
+ SloppyTNode<Object> value);
TNode<JSReceiver> ArraySpeciesCreate(TNode<Context> context,
TNode<Object> originalArray,
@@ -1904,6 +1955,25 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
enum class DestroySource { kNo, kYes };
+ // Collect the callable |maybe_target| feedback for either a CALL_IC or
+ // an INSTANCEOF_IC in the |feedback_vector| at |slot_id|.
+ void CollectCallableFeedback(TNode<Object> maybe_target,
+ TNode<Context> context,
+ TNode<FeedbackVector> feedback_vector,
+ TNode<UintPtrT> slot_id);
+
+  // Collect CALL_IC feedback for the |maybe_target| function in the
+ // |feedback_vector| at |slot_id|, and the call counts in
+ // the |feedback_vector| at |slot_id+1|.
+ void CollectCallFeedback(TNode<Object> maybe_target, TNode<Context> context,
+ TNode<HeapObject> maybe_feedback_vector,
+ TNode<UintPtrT> slot_id);
+
+ // Increment the call count for a CALL_IC or construct call.
+ // The call count is located at feedback_vector[slot_id + 1].
+ void IncrementCallCount(TNode<FeedbackVector> feedback_vector,
+ TNode<UintPtrT> slot_id);
+
// Specify DestroySource::kYes if {from_array} is being supplanted by
// {to_array}. This offers a slight performance benefit by simply copying the
// array word by word. The source may be destroyed at the end of this macro.
@@ -2152,27 +2222,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// kAllFixedArrays, the generated code is more compact and efficient if the
// caller can specify whether only FixedArrays or FixedDoubleArrays will be
// passed as the |source| parameter.
- Node* CloneFixedArray(Node* source,
- ExtractFixedArrayFlags flags =
- ExtractFixedArrayFlag::kAllFixedArraysDontCopyCOW) {
+ TNode<FixedArrayBase> CloneFixedArray(
+ TNode<FixedArrayBase> source,
+ ExtractFixedArrayFlags flags =
+ ExtractFixedArrayFlag::kAllFixedArraysDontCopyCOW) {
ParameterMode mode = OptimalParameterMode();
return ExtractFixedArray(source, IntPtrOrSmiConstant(0, mode), nullptr,
nullptr, flags, mode);
}
- // Copies |character_count| elements from |from_string| to |to_string|
- // starting at the |from_index|'th character. |from_string| and |to_string|
- // can either be one-byte strings or two-byte strings, although if
- // |from_string| is two-byte, then |to_string| must be two-byte.
- // |from_index|, |to_index| and |character_count| must be intptr_ts s.t. 0 <=
- // |from_index| <= |from_index| + |character_count| <= from_string.length and
- // 0 <= |to_index| <= |to_index| + |character_count| <= to_string.length.
- void CopyStringCharacters(Node* from_string, Node* to_string,
- TNode<IntPtrT> from_index, TNode<IntPtrT> to_index,
- TNode<IntPtrT> character_count,
- String::Encoding from_encoding,
- String::Encoding to_encoding);
-
// Loads an element from |array| of |from_kind| elements by given |offset|
// (NOTE: not index!), does a hole check if |if_hole| is provided and
// converts the value so that it becomes ready for storing to array of
@@ -2194,21 +2252,26 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Tries to grow the |elements| array of given |object| to store the |key|
// or bails out if the growing gap is too big. Returns new elements.
- Node* TryGrowElementsCapacity(Node* object, Node* elements, ElementsKind kind,
- Node* key, Label* bailout);
+ TNode<FixedArrayBase> TryGrowElementsCapacity(Node* object, Node* elements,
+ ElementsKind kind, Node* key,
+ Label* bailout);
// Tries to grow the |capacity|-length |elements| array of given |object|
// to store the |key| or bails out if the growing gap is too big. Returns
// new elements.
- Node* TryGrowElementsCapacity(Node* object, Node* elements, ElementsKind kind,
- Node* key, Node* capacity, ParameterMode mode,
- Label* bailout);
+ TNode<FixedArrayBase> TryGrowElementsCapacity(Node* object, Node* elements,
+ ElementsKind kind, Node* key,
+ Node* capacity,
+ ParameterMode mode,
+ Label* bailout);
// Grows elements capacity of given object. Returns new elements.
- Node* GrowElementsCapacity(Node* object, Node* elements,
- ElementsKind from_kind, ElementsKind to_kind,
- Node* capacity, Node* new_capacity,
- ParameterMode mode, Label* bailout);
+ TNode<FixedArrayBase> GrowElementsCapacity(Node* object, Node* elements,
+ ElementsKind from_kind,
+ ElementsKind to_kind,
+ Node* capacity, Node* new_capacity,
+ ParameterMode mode,
+ Label* bailout);
// Given a need to grow by |growth|, allocate an appropriate new capacity
// if necessary, and return a new elements FixedArray object. Label |bailout|
@@ -2223,25 +2286,30 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
Node* base_allocation_size,
Node* allocation_site);
- Node* TryTaggedToFloat64(Node* value, Label* if_valueisnotnumber);
- Node* TruncateTaggedToFloat64(Node* context, Node* value);
- Node* TruncateTaggedToWord32(Node* context, Node* value);
- void TaggedToWord32OrBigInt(Node* context, Node* value, Label* if_number,
- Variable* var_word32, Label* if_bigint,
- Variable* var_bigint);
- void TaggedToWord32OrBigIntWithFeedback(
- Node* context, Node* value, Label* if_number, Variable* var_word32,
- Label* if_bigint, Variable* var_bigint, Variable* var_feedback);
+ TNode<Float64T> TryTaggedToFloat64(TNode<Object> value,
+ Label* if_valueisnotnumber);
+ TNode<Float64T> TruncateTaggedToFloat64(SloppyTNode<Context> context,
+ SloppyTNode<Object> value);
+ TNode<Word32T> TruncateTaggedToWord32(SloppyTNode<Context> context,
+ SloppyTNode<Object> value);
+ void TaggedToWord32OrBigInt(TNode<Context> context, TNode<Object> value,
+ Label* if_number, TVariable<Word32T>* var_word32,
+ Label* if_bigint,
+ TVariable<Object>* var_maybe_bigint);
+ void TaggedToWord32OrBigIntWithFeedback(TNode<Context> context,
+ TNode<Object> value, Label* if_number,
+ TVariable<Word32T>* var_word32,
+ Label* if_bigint,
+ TVariable<Object>* var_maybe_bigint,
+ TVariable<Smi>* var_feedback);
// Truncate the floating point value of a HeapNumber to an Int32.
TNode<Int32T> TruncateHeapNumberValueToWord32(TNode<HeapNumber> object);
// Conversions.
- void TryHeapNumberToSmi(TNode<HeapNumber> number,
- TVariable<Smi>& output, // NOLINT(runtime/references)
+ void TryHeapNumberToSmi(TNode<HeapNumber> number, TVariable<Smi>* output,
Label* if_smi);
- void TryFloat64ToSmi(TNode<Float64T> number,
- TVariable<Smi>& output, // NOLINT(runtime/references)
+ void TryFloat64ToSmi(TNode<Float64T> number, TVariable<Smi>* output,
Label* if_smi);
TNode<Number> ChangeFloat64ToTagged(SloppyTNode<Float64T> value);
TNode<Number> ChangeInt32ToTagged(SloppyTNode<Int32T> value);
@@ -2377,7 +2445,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsAllocationSiteInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsJSFunctionMap(SloppyTNode<Map> map);
TNode<BoolT> IsJSFunction(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsJSGeneratorObject(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsJSGeneratorObject(TNode<HeapObject> object);
TNode<BoolT> IsJSGlobalProxyInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsJSGlobalProxyMap(SloppyTNode<Map> map);
TNode<BoolT> IsJSGlobalProxy(SloppyTNode<HeapObject> object);
@@ -2388,6 +2456,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsJSPromise(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSProxy(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSStringIterator(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsJSRegExpStringIterator(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSReceiverInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsJSReceiverMap(SloppyTNode<Map> map);
TNode<BoolT> IsJSReceiver(SloppyTNode<HeapObject> object);
@@ -2395,6 +2464,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsJSTypedArrayInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsJSTypedArrayMap(SloppyTNode<Map> map);
TNode<BoolT> IsJSTypedArray(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsJSGeneratorMap(TNode<Map> map);
TNode<BoolT> IsJSPrimitiveWrapperInstanceType(
SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsJSPrimitiveWrapperMap(SloppyTNode<Map> map);
@@ -2537,47 +2607,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Return the single character string with only {code}.
TNode<String> StringFromSingleCharCode(TNode<Int32T> code);
- // Return a new string object which holds a substring containing the range
- // [from,to[ of string.
- TNode<String> SubString(TNode<String> string, TNode<IntPtrT> from,
- TNode<IntPtrT> to);
-
- // Return a new string object produced by concatenating |first| with |second|.
- TNode<String> StringAdd(Node* context, TNode<String> first,
- TNode<String> second);
-
- // Check if |string| is an indirect (thin or flat cons) string type that can
- // be dereferenced by DerefIndirectString.
- void BranchIfCanDerefIndirectString(TNode<String> string,
- TNode<Int32T> instance_type,
- Label* can_deref, Label* cannot_deref);
- // Unpack an indirect (thin or flat cons) string type.
- void DerefIndirectString(TVariable<String>* var_string,
- TNode<Int32T> instance_type);
- // Check if |var_string| has an indirect (thin or flat cons) string type,
- // and unpack it if so.
- void MaybeDerefIndirectString(TVariable<String>* var_string,
- TNode<Int32T> instance_type, Label* did_deref,
- Label* cannot_deref);
- // Check if |var_left| or |var_right| has an indirect (thin or flat cons)
- // string type, and unpack it/them if so. Fall through if nothing was done.
- void MaybeDerefIndirectStrings(TVariable<String>* var_left,
- TNode<Int32T> left_instance_type,
- TVariable<String>* var_right,
- TNode<Int32T> right_instance_type,
- Label* did_something);
- TNode<String> DerefIndirectString(TNode<String> string,
- TNode<Int32T> instance_type,
- Label* cannot_deref);
-
- TNode<String> StringFromSingleUTF16EncodedCodePoint(TNode<Int32T> codepoint);
-
// Type conversion helpers.
enum class BigIntHandling { kConvertToNumber, kThrow };
// Convert a String to a Number.
TNode<Number> StringToNumber(TNode<String> input);
// Convert a Number to a String.
TNode<String> NumberToString(TNode<Number> input);
+ TNode<String> NumberToString(TNode<Number> input, Label* bailout);
+
// Convert a Non-Number object to a Number.
TNode<Number> NonNumberToNumber(
SloppyTNode<Context> context, SloppyTNode<HeapObject> input,
@@ -2715,6 +2752,16 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return Word32Equal(Word32And(word32, const_mask), const_mask);
}
+  // Returns true if the bit field |BitField| in |word32| is equal to a given
+  // constant |value|. Avoids a shift compared to using DecodeWord32.
+ template <typename BitField>
+ TNode<BoolT> IsEqualInWord32(TNode<Word32T> word32,
+ typename BitField::FieldType value) {
+ TNode<Word32T> masked_word32 =
+ Word32And(word32, Int32Constant(BitField::kMask));
+ return Word32Equal(masked_word32, Int32Constant(BitField::encode(value)));
+ }
+
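For reference, the shift-free form used by IsEqualInWord32 is equivalent to decoding the field and comparing. A standalone C++ sketch with a hypothetical 3-bit field (shaped like the usual BitField helpers; names made up) demonstrates the equivalence:

// bitfield_sketch.cc -- illustration only.
#include <cassert>
#include <cstdint>

// Hypothetical 3-bit field at bit offset 4.
struct KindBits {
  static constexpr uint32_t kShift = 4;
  static constexpr uint32_t kMask = 0x7u << kShift;
  static constexpr uint32_t encode(uint32_t v) { return v << kShift; }
  static constexpr uint32_t decode(uint32_t w) { return (w & kMask) >> kShift; }
};

int main() {
  // Word with the field set to 3 and unrelated bits set around it.
  uint32_t word = 0x80u | KindBits::encode(3) | 0x1u;

  // What the new helper does: mask, then compare against the encoded value.
  bool masked_equal = (word & KindBits::kMask) == KindBits::encode(3);

  // What a DecodeWord32-based check does: extract the field, then compare.
  bool decoded_equal = KindBits::decode(word) == 3;

  assert(masked_equal && masked_equal == decoded_equal);
  return 0;
}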
// Returns true if any of the |T|'s bits in given |word| are set.
template <typename T>
TNode<BoolT> IsSetWord(SloppyTNode<WordT> word) {
@@ -2730,9 +2777,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Smi-encoding of the mask is performed implicitly!
TNode<BoolT> IsSetSmi(SloppyTNode<Smi> smi, int untagged_mask) {
intptr_t mask_word = bit_cast<intptr_t>(Smi::FromInt(untagged_mask));
- return WordNotEqual(
- WordAnd(BitcastTaggedSignedToWord(smi), IntPtrConstant(mask_word)),
- IntPtrConstant(0));
+ return WordNotEqual(WordAnd(BitcastTaggedToWordForTagAndSmiBits(smi),
+ IntPtrConstant(mask_word)),
+ IntPtrConstant(0));
}
// Returns true if all of the |T|'s bits in given |word32| are clear.
@@ -2762,11 +2809,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
void IncrementCounter(StatsCounter* counter, int delta);
void DecrementCounter(StatsCounter* counter, int delta);
- void Increment(Variable* variable, int value = 1,
- ParameterMode mode = INTPTR_PARAMETERS);
- void Decrement(Variable* variable, int value = 1,
- ParameterMode mode = INTPTR_PARAMETERS) {
- Increment(variable, -value, mode);
+ template <typename TIndex>
+ void Increment(TVariable<TIndex>* variable, int value = 1);
+
+ template <typename TIndex>
+ void Decrement(TVariable<TIndex>* variable, int value = 1) {
+ Increment(variable, -value);
}
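A call site of the templated Increment/Decrement would look roughly like the sketch below (assumes the usual CSA TVARIABLE and IntPtrConstant helpers; not taken from this patch):

  // Inside a CodeStubAssembler method (sketch):
  TVARIABLE(IntPtrT, var_index, IntPtrConstant(0));
  Increment(&var_index);       // var_index += 1
  Decrement(&var_index, 2);    // forwarded as Increment(&var_index, -2)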
// Generates "if (false) goto label" code. Useful for marking a label as
@@ -2780,8 +2828,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Note: If |key| does not yet have a hash, |if_notinternalized| will be taken
// even if |key| is an array index. |if_keyisunique| will never
// be taken for array indices.
- void TryToName(Node* key, Label* if_keyisindex, Variable* var_index,
- Label* if_keyisunique, Variable* var_unique, Label* if_bailout,
+ void TryToName(SloppyTNode<Object> key, Label* if_keyisindex,
+ TVariable<IntPtrT>* var_index, Label* if_keyisunique,
+ TVariable<Name>* var_unique, Label* if_bailout,
Label* if_notinternalized = nullptr);
// Performs a hash computation and string table lookup for the given string,
@@ -2793,8 +2842,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// - |if_not_internalized| if the string is not in the string table (but
// does not add it).
// - |if_bailout| for unsupported cases (e.g. uncachable array index).
- void TryInternalizeString(Node* string, Label* if_index, Variable* var_index,
- Label* if_internalized, Variable* var_internalized,
+ void TryInternalizeString(SloppyTNode<String> string, Label* if_index,
+ TVariable<IntPtrT>* var_index,
+ Label* if_internalized,
+ TVariable<Name>* var_internalized,
Label* if_not_internalized, Label* if_bailout);
// Calculates array index for given dictionary entry and entry field.
@@ -2938,10 +2989,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Object> BasicLoadNumberDictionaryElement(
TNode<NumberDictionary> dictionary, TNode<IntPtrT> intptr_index,
Label* not_data, Label* if_hole);
- void BasicStoreNumberDictionaryElement(TNode<NumberDictionary> dictionary,
- TNode<IntPtrT> intptr_index,
- TNode<Object> value, Label* not_data,
- Label* if_hole, Label* read_only);
template <class Dictionary>
void FindInsertionEntry(TNode<Dictionary> dictionary, TNode<Name> key,
@@ -3053,7 +3100,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
//
  // Note: this code does not check if the global dictionary points to a
  // deleted entry! This has to be done by the caller.
- void TryLookupProperty(SloppyTNode<JSObject> object, SloppyTNode<Map> map,
+ void TryLookupProperty(SloppyTNode<JSReceiver> object, SloppyTNode<Map> map,
SloppyTNode<Int32T> instance_type,
SloppyTNode<Name> unique_name, Label* if_found_fast,
Label* if_found_dict, Label* if_found_global,
@@ -3113,10 +3160,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
  // Returns true if {object} has {prototype} somewhere in its prototype
// chain, otherwise false is returned. Might cause arbitrary side effects
// due to [[GetPrototypeOf]] invocations.
- Node* HasInPrototypeChain(Node* context, Node* object,
- SloppyTNode<Object> prototype);
+ TNode<Oddball> HasInPrototypeChain(TNode<Context> context,
+ TNode<HeapObject> object,
+ TNode<Object> prototype);
// ES6 section 7.3.19 OrdinaryHasInstance (C, O)
- Node* OrdinaryHasInstance(Node* context, Node* callable, Node* object);
+ TNode<Oddball> OrdinaryHasInstance(TNode<Context> context,
+ TNode<Object> callable,
+ TNode<Object> object);
// Load type feedback vector from the stub caller's frame.
TNode<FeedbackVector> LoadFeedbackVectorForStub();
@@ -3137,12 +3187,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
SloppyTNode<JSFunction> closure);
// Update the type feedback vector.
- void UpdateFeedback(Node* feedback, Node* feedback_vector, Node* slot_id);
+ void UpdateFeedback(TNode<Smi> feedback,
+ TNode<HeapObject> maybe_feedback_vector,
+ TNode<UintPtrT> slot_id);
// Report that there was a feedback update, performing any tasks that should
// be done after a feedback update.
- void ReportFeedbackUpdate(SloppyTNode<FeedbackVector> feedback_vector,
- SloppyTNode<IntPtrT> slot_id, const char* reason);
+ void ReportFeedbackUpdate(TNode<FeedbackVector> feedback_vector,
+ SloppyTNode<UintPtrT> slot_id, const char* reason);
// Combine the new feedback with the existing_feedback. Do nothing if
// existing_feedback is nullptr.
@@ -3185,8 +3237,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Context> LoadScriptContext(TNode<Context> context,
TNode<IntPtrT> context_index);
- Node* Int32ToUint8Clamped(Node* int32_value);
- Node* Float64ToUint8Clamped(Node* float64_value);
+ TNode<Uint8T> Int32ToUint8Clamped(TNode<Int32T> int32_value);
+ TNode<Uint8T> Float64ToUint8Clamped(TNode<Float64T> float64_value);
Node* PrepareValueForWriteToTypedArray(TNode<Object> input,
ElementsKind elements_kind,
@@ -3229,13 +3281,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Store a weak in-place reference into the FeedbackVector.
TNode<MaybeObject> StoreWeakReferenceInFeedbackVector(
- SloppyTNode<FeedbackVector> feedback_vector, Node* slot,
- SloppyTNode<HeapObject> value, int additional_offset = 0,
- ParameterMode parameter_mode = INTPTR_PARAMETERS);
+ TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot,
+ TNode<HeapObject> value, int additional_offset = 0);
// Create a new AllocationSite and install it into a feedback vector.
TNode<AllocationSite> CreateAllocationSiteInFeedbackVector(
- SloppyTNode<FeedbackVector> feedback_vector, TNode<Smi> slot);
+ TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot);
// TODO(ishell, cbruni): Change to HasBoilerplate.
TNode<BoolT> NotHasBoilerplate(TNode<Object> maybe_literal_site);
@@ -3245,19 +3296,22 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
enum class IndexAdvanceMode { kPre, kPost };
- using FastLoopBody = std::function<void(Node* index)>;
+ template <typename TIndex>
+ using FastLoopBody = std::function<void(TNode<TIndex> index)>;
- Node* BuildFastLoop(const VariableList& var_list, Node* start_index,
- Node* end_index, const FastLoopBody& body, int increment,
- ParameterMode parameter_mode,
- IndexAdvanceMode advance_mode = IndexAdvanceMode::kPre);
+ template <typename TIndex>
+ TNode<TIndex> BuildFastLoop(
+ const VariableList& var_list, TNode<TIndex> start_index,
+ TNode<TIndex> end_index, const FastLoopBody<TIndex>& body, int increment,
+ IndexAdvanceMode advance_mode = IndexAdvanceMode::kPre);
- Node* BuildFastLoop(Node* start_index, Node* end_index,
- const FastLoopBody& body, int increment,
- ParameterMode parameter_mode,
- IndexAdvanceMode advance_mode = IndexAdvanceMode::kPre) {
+ template <typename TIndex>
+ TNode<TIndex> BuildFastLoop(
+ TNode<TIndex> start_index, TNode<TIndex> end_index,
+ const FastLoopBody<TIndex>& body, int increment,
+ IndexAdvanceMode advance_mode = IndexAdvanceMode::kPre) {
return BuildFastLoop(VariableList(0, zone()), start_index, end_index, body,
- increment, parameter_mode, advance_mode);
+ increment, advance_mode);
}
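A sketch of a BuildFastLoop call site after the templating (typed index, no ParameterMode argument; |length| and the loop body are placeholders):

  // Inside a CodeStubAssembler method (sketch):
  TNode<IntPtrT> length = IntPtrConstant(8);  // placeholder bound
  BuildFastLoop<IntPtrT>(
      IntPtrConstant(0), length,
      [&](TNode<IntPtrT> index) {
        // ... per-element work using the typed |index| ...
      },
      1, IndexAdvanceMode::kPost);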
enum class ForEachDirection { kForward, kReverse };
@@ -3304,13 +3358,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
Label* doesnt_fit, int base_size,
ParameterMode mode);
- void InitializeFieldsWithRoot(Node* object, Node* start_offset,
- Node* end_offset, RootIndex root);
+ void InitializeFieldsWithRoot(TNode<HeapObject> object,
+ TNode<IntPtrT> start_offset,
+ TNode<IntPtrT> end_offset, RootIndex root);
- Node* RelationalComparison(Operation op, SloppyTNode<Object> left,
- SloppyTNode<Object> right,
- SloppyTNode<Context> context,
- Variable* var_type_feedback = nullptr);
+ TNode<Oddball> RelationalComparison(
+ Operation op, TNode<Object> left, TNode<Object> right,
+ TNode<Context> context, TVariable<Smi>* var_type_feedback = nullptr);
void BranchIfNumberRelationalComparison(Operation op,
SloppyTNode<Number> left,
@@ -3360,12 +3414,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
void GotoIfNumberGreaterThanOrEqual(Node* left, Node* right, Label* if_false);
- Node* Equal(SloppyTNode<Object> lhs, SloppyTNode<Object> rhs,
- SloppyTNode<Context> context,
- Variable* var_type_feedback = nullptr);
+ TNode<Oddball> Equal(SloppyTNode<Object> lhs, SloppyTNode<Object> rhs,
+ SloppyTNode<Context> context,
+ TVariable<Smi>* var_type_feedback = nullptr);
TNode<Oddball> StrictEqual(SloppyTNode<Object> lhs, SloppyTNode<Object> rhs,
- Variable* var_type_feedback = nullptr);
+ TVariable<Smi>* var_type_feedback = nullptr);
// ECMA#sec-samevalue
// Similar to StrictEqual except that NaNs are treated as equal and minus zero
@@ -3395,16 +3449,17 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
HasPropertyLookupMode::kHasProperty);
}
- Node* Typeof(Node* value);
+ TNode<String> Typeof(SloppyTNode<Object> value);
- TNode<Object> GetSuperConstructor(SloppyTNode<Context> context,
- SloppyTNode<JSFunction> active_function);
+ TNode<Object> GetSuperConstructor(TNode<Context> context,
+ TNode<JSFunction> active_function);
TNode<JSReceiver> SpeciesConstructor(
SloppyTNode<Context> context, SloppyTNode<Object> object,
SloppyTNode<JSReceiver> default_constructor);
- Node* InstanceOf(Node* object, Node* callable, Node* context);
+ TNode<Oddball> InstanceOf(TNode<Object> object, TNode<Object> callable,
+ TNode<Context> context);
// Debug helpers
Node* IsDebugActive();
@@ -3431,8 +3486,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// JSTypedArray helpers
TNode<UintPtrT> LoadJSTypedArrayLength(TNode<JSTypedArray> typed_array);
- TNode<RawPtrT> LoadJSTypedArrayBackingStore(TNode<JSTypedArray> typed_array);
+ TNode<RawPtrT> LoadJSTypedArrayDataPtr(TNode<JSTypedArray> typed_array);
+ template <typename TIndex>
+ TNode<IntPtrT> ElementOffsetFromIndex(TNode<TIndex> index, ElementsKind kind,
+ int base_size = 0);
+ // TODO(v8:9708): remove once all uses are ported.
TNode<IntPtrT> ElementOffsetFromIndex(Node* index, ElementsKind kind,
ParameterMode mode, int base_size = 0);
@@ -3451,8 +3510,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
SloppyTNode<SharedFunctionInfo> shared_info,
Label* if_compile_lazy = nullptr);
- Node* AllocateFunctionWithMapAndContext(Node* map, Node* shared_info,
- Node* context);
+ TNode<JSFunction> AllocateFunctionWithMapAndContext(
+ TNode<Map> map, TNode<SharedFunctionInfo> shared_info,
+ TNode<Context> context);
// Promise helpers
Node* IsPromiseHookEnabled();
@@ -3463,7 +3523,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// for..in helpers
void CheckPrototypeEnumCache(Node* receiver, Node* receiver_map,
Label* if_fast, Label* if_slow);
- Node* CheckEnumCache(Node* receiver, Label* if_empty, Label* if_runtime);
+ TNode<Map> CheckEnumCache(TNode<HeapObject> receiver, Label* if_empty,
+ Label* if_runtime);
TNode<Object> GetArgumentValue(TorqueStructArguments args,
TNode<IntPtrT> index);
@@ -3620,11 +3681,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
Node* receiver, Label* if_bailout,
GetOwnPropertyMode mode = kCallJSGetter);
- TNode<IntPtrT> TryToIntptr(Node* key, Label* miss);
-
- void BranchIfPrototypesHaveNoElements(Node* receiver_map,
- Label* definitely_no_elements,
- Label* possibly_elements);
+ TNode<IntPtrT> TryToIntptr(SloppyTNode<Object> key, Label* miss);
void InitializeFunctionContext(Node* native_context, Node* context,
int slots);
@@ -3655,13 +3712,18 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Allocate and return a JSArray of given total size in bytes with header
// fields initialized.
- TNode<JSArray> AllocateUninitializedJSArray(TNode<Map> array_map,
- TNode<Smi> length,
- Node* allocation_site,
- TNode<IntPtrT> size_in_bytes);
+ TNode<JSArray> AllocateUninitializedJSArray(
+ TNode<Map> array_map, TNode<Smi> length,
+ TNode<AllocationSite> allocation_site, TNode<IntPtrT> size_in_bytes);
TNode<BoolT> IsValidSmi(TNode<Smi> smi);
- Node* SmiShiftBitsConstant();
+
+ TNode<IntPtrT> SmiShiftBitsConstant() {
+ return IntPtrConstant(kSmiShiftSize + kSmiTagSize);
+ }
+ TNode<Int32T> SmiShiftBitsConstant32() {
+ return Int32Constant(kSmiShiftSize + kSmiTagSize);
+ }
// Emits keyed sloppy arguments load if the |value| is nullptr or store
// otherwise. Returns either the loaded value or |value|.
@@ -3689,10 +3751,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
void GenerateEqual_Same(SloppyTNode<Object> value, Label* if_equal,
Label* if_notequal,
Variable* var_type_feedback = nullptr);
- TNode<String> AllocAndCopyStringCharacters(Node* from,
- Node* from_instance_type,
- TNode<IntPtrT> from_index,
- TNode<IntPtrT> character_count);
static const int kElementLoopUnrollThreshold = 8;
@@ -3705,11 +3763,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
Variable* var_numeric, Variable* var_feedback);
template <Object::Conversion conversion>
- void TaggedToWord32OrBigIntImpl(Node* context, Node* value, Label* if_number,
- Variable* var_word32,
+ void TaggedToWord32OrBigIntImpl(TNode<Context> context, TNode<Object> value,
+ Label* if_number,
+ TVariable<Word32T>* var_word32,
Label* if_bigint = nullptr,
- Variable* var_bigint = nullptr,
- Variable* var_feedback = nullptr);
+ TVariable<Object>* var_maybe_bigint = nullptr,
+ TVariable<Smi>* var_feedback = nullptr);
private:
// Low-level accessors for Descriptor arrays.
@@ -3727,36 +3786,48 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
}
};
+// template <typename TIndex>
class V8_EXPORT_PRIVATE CodeStubArguments {
public:
using Node = compiler::Node;
- template <class T>
- using TNode = compiler::TNode<T>;
- template <class T>
- using SloppyTNode = compiler::SloppyTNode<T>;
enum ReceiverMode { kHasReceiver, kNoReceiver };
- // |argc| is an intptr value which specifies the number of arguments passed
- // to the builtin excluding the receiver. The arguments will include a
- // receiver iff |receiver_mode| is kHasReceiver.
- CodeStubArguments(CodeStubAssembler* assembler, Node* argc,
+ // |argc| specifies the number of arguments passed to the builtin excluding
+ // the receiver. The arguments will include a receiver iff |receiver_mode|
+ // is kHasReceiver.
+ CodeStubArguments(CodeStubAssembler* assembler, TNode<IntPtrT> argc,
ReceiverMode receiver_mode = ReceiverMode::kHasReceiver)
- : CodeStubArguments(assembler, argc, nullptr,
- CodeStubAssembler::INTPTR_PARAMETERS, receiver_mode) {
- }
+ : CodeStubArguments(assembler, argc, TNode<RawPtrT>(), receiver_mode) {}
+
+ CodeStubArguments(CodeStubAssembler* assembler, TNode<Int32T> argc,
+ ReceiverMode receiver_mode = ReceiverMode::kHasReceiver)
+ : CodeStubArguments(assembler, assembler->ChangeInt32ToIntPtr(argc),
+ TNode<RawPtrT>(), receiver_mode) {}
- // |argc| is either a smi or intptr depending on |param_mode|. The arguments
- // include a receiver iff |receiver_mode| is kHasReceiver.
- CodeStubArguments(CodeStubAssembler* assembler, Node* argc, Node* fp,
- CodeStubAssembler::ParameterMode param_mode,
+ // TODO(v8:9708): Consider removing this variant
+ CodeStubArguments(CodeStubAssembler* assembler, TNode<Smi> argc,
+ ReceiverMode receiver_mode = ReceiverMode::kHasReceiver)
+ : CodeStubArguments(assembler, assembler->ParameterToIntPtr(argc),
+ TNode<RawPtrT>(), receiver_mode) {}
+
+ // |argc| specifies the number of arguments passed to the builtin excluding
+ // the receiver. The arguments will include a receiver iff |receiver_mode|
+ // is kHasReceiver.
+ CodeStubArguments(CodeStubAssembler* assembler, TNode<IntPtrT> argc,
+ TNode<RawPtrT> fp,
ReceiverMode receiver_mode = ReceiverMode::kHasReceiver);
+ CodeStubArguments(CodeStubAssembler* assembler, TNode<Smi> argc,
+ TNode<RawPtrT> fp,
+ ReceiverMode receiver_mode = ReceiverMode::kHasReceiver)
+ : CodeStubArguments(assembler, assembler->ParameterToIntPtr(argc), fp,
+ receiver_mode) {}
+
// Used by Torque to construct arguments based on a Torque-defined
// struct of values.
CodeStubArguments(CodeStubAssembler* assembler,
TorqueStructArguments torque_arguments)
: assembler_(assembler),
- argc_mode_(CodeStubAssembler::INTPTR_PARAMETERS),
receiver_mode_(ReceiverMode::kHasReceiver),
argc_(torque_arguments.length),
base_(torque_arguments.base),
@@ -3769,14 +3840,17 @@ class V8_EXPORT_PRIVATE CodeStubArguments {
void SetReceiver(TNode<Object> object) const;
// Computes address of the index'th argument.
- TNode<WordT> AtIndexPtr(Node* index,
- CodeStubAssembler::ParameterMode mode =
- CodeStubAssembler::INTPTR_PARAMETERS) const;
+ TNode<RawPtrT> AtIndexPtr(TNode<IntPtrT> index) const;
+ TNode<RawPtrT> AtIndexPtr(TNode<Smi> index) const {
+ return AtIndexPtr(assembler_->ParameterToIntPtr(index));
+ }
// |index| is zero-based and does not include the receiver
- TNode<Object> AtIndex(Node* index,
- CodeStubAssembler::ParameterMode mode =
- CodeStubAssembler::INTPTR_PARAMETERS) const;
+ TNode<Object> AtIndex(TNode<IntPtrT> index) const;
+ // TODO(v8:9708): Consider removing this variant
+ TNode<Object> AtIndex(TNode<Smi> index) const {
+ return AtIndex(assembler_->ParameterToIntPtr(index));
+ }
TNode<Object> AtIndex(int index) const;
@@ -3786,15 +3860,10 @@ class V8_EXPORT_PRIVATE CodeStubArguments {
TNode<Object> GetOptionalArgumentValue(int index,
TNode<Object> default_value);
- Node* GetLength(CodeStubAssembler::ParameterMode mode) const {
- DCHECK_EQ(mode, argc_mode_);
- return argc_;
- }
+ TNode<IntPtrT> GetLength() const { return argc_; }
TorqueStructArguments GetTorqueArguments() const {
- DCHECK_EQ(argc_mode_, CodeStubAssembler::INTPTR_PARAMETERS);
- return TorqueStructArguments{assembler_->UncheckedCast<RawPtrT>(fp_), base_,
- assembler_->UncheckedCast<IntPtrT>(argc_)};
+ return TorqueStructArguments{fp_, base_, argc_};
}
TNode<Object> GetOptionalArgumentValue(TNode<IntPtrT> index) {
@@ -3802,28 +3871,32 @@ class V8_EXPORT_PRIVATE CodeStubArguments {
}
TNode<Object> GetOptionalArgumentValue(TNode<IntPtrT> index,
TNode<Object> default_value);
- TNode<IntPtrT> GetLength() const {
- DCHECK_EQ(argc_mode_, CodeStubAssembler::INTPTR_PARAMETERS);
- return assembler_->UncheckedCast<IntPtrT>(argc_);
- }
- using ForEachBodyFunction = std::function<void(Node* arg)>;
+ using ForEachBodyFunction = std::function<void(TNode<Object> arg)>;
// Iteration doesn't include the receiver. |first| and |last| are zero-based.
- void ForEach(const ForEachBodyFunction& body, Node* first = nullptr,
- Node* last = nullptr,
- CodeStubAssembler::ParameterMode mode =
- CodeStubAssembler::INTPTR_PARAMETERS) {
+ template <typename TIndex>
+ void ForEach(const ForEachBodyFunction& body, TNode<TIndex> first = {},
+ TNode<TIndex> last = {}) const {
CodeStubAssembler::VariableList list(0, assembler_->zone());
ForEach(list, body, first, last);
}
// Iteration doesn't include the receiver. |first| and |last| are zero-based.
void ForEach(const CodeStubAssembler::VariableList& vars,
- const ForEachBodyFunction& body, Node* first = nullptr,
- Node* last = nullptr,
- CodeStubAssembler::ParameterMode mode =
- CodeStubAssembler::INTPTR_PARAMETERS);
+ const ForEachBodyFunction& body, TNode<IntPtrT> first = {},
+ TNode<IntPtrT> last = {}) const;
+
+ void ForEach(const CodeStubAssembler::VariableList& vars,
+ const ForEachBodyFunction& body, TNode<Smi> first,
+ TNode<Smi> last = {}) const {
+ TNode<IntPtrT> first_intptr = assembler_->ParameterToIntPtr(first);
+ TNode<IntPtrT> last_intptr;
+ if (last != nullptr) {
+ last_intptr = assembler_->ParameterToIntPtr(last);
+ }
+ return ForEach(vars, body, first_intptr, last_intptr);
+ }
void PopAndReturn(Node* value);
@@ -3831,11 +3904,10 @@ class V8_EXPORT_PRIVATE CodeStubArguments {
Node* GetArguments();
CodeStubAssembler* assembler_;
- CodeStubAssembler::ParameterMode argc_mode_;
ReceiverMode receiver_mode_;
- Node* argc_;
+ TNode<IntPtrT> argc_;
TNode<RawPtrT> base_;
- Node* fp_;
+ TNode<RawPtrT> fp_;
};
class ToDirectStringAssembler : public CodeStubAssembler {
diff --git a/deps/v8/src/codegen/compilation-cache.cc b/deps/v8/src/codegen/compilation-cache.cc
index 6e9613005e..ef3d83a06e 100644
--- a/deps/v8/src/codegen/compilation-cache.cc
+++ b/deps/v8/src/codegen/compilation-cache.cc
@@ -28,7 +28,7 @@ CompilationCache::CompilationCache(Isolate* isolate)
eval_global_(isolate),
eval_contextual_(isolate),
reg_exp_(isolate, kRegExpGenerations),
- enabled_(true) {
+ enabled_script_and_eval_(true) {
CompilationSubCache* subcaches[kSubCacheCount] = {
&script_, &eval_global_, &eval_contextual_, &reg_exp_};
for (int i = 0; i < kSubCacheCount; ++i) {
@@ -254,7 +254,7 @@ void CompilationCacheRegExp::Put(Handle<String> source, JSRegExp::Flags flags,
}
void CompilationCache::Remove(Handle<SharedFunctionInfo> function_info) {
- if (!IsEnabled()) return;
+ if (!IsEnabledScriptAndEval()) return;
eval_global_.Remove(function_info);
eval_contextual_.Remove(function_info);
@@ -265,7 +265,7 @@ MaybeHandle<SharedFunctionInfo> CompilationCache::LookupScript(
Handle<String> source, MaybeHandle<Object> name, int line_offset,
int column_offset, ScriptOriginOptions resource_options,
Handle<Context> native_context, LanguageMode language_mode) {
- if (!IsEnabled()) return MaybeHandle<SharedFunctionInfo>();
+ if (!IsEnabledScriptAndEval()) return MaybeHandle<SharedFunctionInfo>();
return script_.Lookup(source, name, line_offset, column_offset,
resource_options, native_context, language_mode);
@@ -277,7 +277,7 @@ InfoCellPair CompilationCache::LookupEval(Handle<String> source,
LanguageMode language_mode,
int position) {
InfoCellPair result;
- if (!IsEnabled()) return result;
+ if (!IsEnabledScriptAndEval()) return result;
const char* cache_type;
@@ -303,8 +303,6 @@ InfoCellPair CompilationCache::LookupEval(Handle<String> source,
MaybeHandle<FixedArray> CompilationCache::LookupRegExp(Handle<String> source,
JSRegExp::Flags flags) {
- if (!IsEnabled()) return MaybeHandle<FixedArray>();
-
return reg_exp_.Lookup(source, flags);
}
@@ -312,7 +310,7 @@ void CompilationCache::PutScript(Handle<String> source,
Handle<Context> native_context,
LanguageMode language_mode,
Handle<SharedFunctionInfo> function_info) {
- if (!IsEnabled()) return;
+ if (!IsEnabledScriptAndEval()) return;
LOG(isolate(), CompilationCacheEvent("put", "script", *function_info));
script_.Put(source, native_context, language_mode, function_info);
@@ -324,7 +322,7 @@ void CompilationCache::PutEval(Handle<String> source,
Handle<SharedFunctionInfo> function_info,
Handle<FeedbackCell> feedback_cell,
int position) {
- if (!IsEnabled()) return;
+ if (!IsEnabledScriptAndEval()) return;
const char* cache_type;
HandleScope scope(isolate());
@@ -344,8 +342,6 @@ void CompilationCache::PutEval(Handle<String> source,
void CompilationCache::PutRegExp(Handle<String> source, JSRegExp::Flags flags,
Handle<FixedArray> data) {
- if (!IsEnabled()) return;
-
reg_exp_.Put(source, flags, data);
}
@@ -367,10 +363,12 @@ void CompilationCache::MarkCompactPrologue() {
}
}
-void CompilationCache::Enable() { enabled_ = true; }
+void CompilationCache::EnableScriptAndEval() {
+ enabled_script_and_eval_ = true;
+}
-void CompilationCache::Disable() {
- enabled_ = false;
+void CompilationCache::DisableScriptAndEval() {
+ enabled_script_and_eval_ = false;
Clear();
}
diff --git a/deps/v8/src/codegen/compilation-cache.h b/deps/v8/src/codegen/compilation-cache.h
index 35595b1985..04bea44a82 100644
--- a/deps/v8/src/codegen/compilation-cache.h
+++ b/deps/v8/src/codegen/compilation-cache.h
@@ -202,9 +202,14 @@ class V8_EXPORT_PRIVATE CompilationCache {
void MarkCompactPrologue();
// Enable/disable compilation cache. Used by debugger to disable compilation
- // cache during debugging to make sure new scripts are always compiled.
- void Enable();
- void Disable();
+ // cache during debugging so that eval and new scripts are always compiled.
+ // TODO(bmeurer, chromium:992277): The RegExp cache cannot be enabled and/or
+ // disabled, since it doesn't affect debugging. However ideally the other
+ // caches should also be always on, even in the presence of the debugger,
+ // but at this point there are too many unclear invariants, and so I decided
+ // to just fix the pressing performance problem for RegExp individually first.
+ void EnableScriptAndEval();
+ void DisableScriptAndEval();
private:
explicit CompilationCache(Isolate* isolate);
@@ -215,7 +220,9 @@ class V8_EXPORT_PRIVATE CompilationCache {
// The number of sub caches covering the different types to cache.
static const int kSubCacheCount = 4;
- bool IsEnabled() const { return FLAG_compilation_cache && enabled_; }
+ bool IsEnabledScriptAndEval() const {
+ return FLAG_compilation_cache && enabled_script_and_eval_;
+ }
Isolate* isolate() const { return isolate_; }
@@ -227,8 +234,8 @@ class V8_EXPORT_PRIVATE CompilationCache {
CompilationCacheRegExp reg_exp_;
CompilationSubCache* subcaches_[kSubCacheCount];
- // Current enable state of the compilation cache.
- bool enabled_;
+ // Current enable state of the compilation cache for scripts and eval.
+ bool enabled_script_and_eval_;
friend class Isolate;
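The practical effect of the rename is that only the script and eval subcaches are gated on the flag; the RegExp subcache stays on even while the debugger has the cache disabled. A hedged sketch of the debugger-side usage (accessor spelling assumed):

  // Sketch: after this change, disabling the cache from the debugger ...
  isolate->compilation_cache()->DisableScriptAndEval();
  // ... makes LookupScript/LookupEval/PutScript/PutEval early-return, while
  // LookupRegExp/PutRegExp keep using the RegExp subcache unconditionally.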
diff --git a/deps/v8/src/codegen/compiler.cc b/deps/v8/src/codegen/compiler.cc
index fbd181f5c8..d73be13a30 100644
--- a/deps/v8/src/codegen/compiler.cc
+++ b/deps/v8/src/codegen/compiler.cc
@@ -666,21 +666,25 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeCache(
function->GetIsolate(),
RuntimeCallCounterId::kCompileGetFromOptimizedCodeMap);
Handle<SharedFunctionInfo> shared(function->shared(), function->GetIsolate());
+ Isolate* isolate = function->GetIsolate();
DisallowHeapAllocation no_gc;
- if (osr_offset.IsNone()) {
- if (function->has_feedback_vector()) {
- FeedbackVector feedback_vector = function->feedback_vector();
- feedback_vector.EvictOptimizedCodeMarkedForDeoptimization(
- function->shared(), "GetCodeFromOptimizedCodeCache");
- Code code = feedback_vector.optimized_code();
-
- if (!code.is_null()) {
- // Caching of optimized code enabled and optimized code found.
- DCHECK(!code.marked_for_deoptimization());
- DCHECK(function->shared().is_compiled());
- return Handle<Code>(code, feedback_vector.GetIsolate());
- }
- }
+ Code code;
+ if (osr_offset.IsNone() && function->has_feedback_vector()) {
+ FeedbackVector feedback_vector = function->feedback_vector();
+ feedback_vector.EvictOptimizedCodeMarkedForDeoptimization(
+ function->shared(), "GetCodeFromOptimizedCodeCache");
+ code = feedback_vector.optimized_code();
+ } else if (!osr_offset.IsNone()) {
+ code = function->context()
+ .native_context()
+ .GetOSROptimizedCodeCache()
+ .GetOptimizedCode(shared, osr_offset, isolate);
+ }
+ if (!code.is_null()) {
+ // Caching of optimized code enabled and optimized code found.
+ DCHECK(!code.marked_for_deoptimization());
+ DCHECK(function->shared().is_compiled());
+ return Handle<Code>(code, isolate);
}
return MaybeHandle<Code>();
}
@@ -711,12 +715,15 @@ void InsertCodeIntoOptimizedCodeCache(
// Cache optimized context-specific code.
Handle<JSFunction> function = compilation_info->closure();
Handle<SharedFunctionInfo> shared(function->shared(), function->GetIsolate());
- Handle<Context> native_context(function->context().native_context(),
- function->GetIsolate());
+ Handle<NativeContext> native_context(function->context().native_context(),
+ function->GetIsolate());
if (compilation_info->osr_offset().IsNone()) {
Handle<FeedbackVector> vector =
handle(function->feedback_vector(), function->GetIsolate());
FeedbackVector::SetOptimizedCode(vector, code);
+ } else {
+ OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code,
+ compilation_info->osr_offset());
}
}
@@ -1904,6 +1911,12 @@ struct ScriptCompileTimerScope {
case CacheBehaviour::kConsumeCodeCache:
return isolate_->counters()->compile_script_with_consume_cache();
+      // Note that this only counts the finalization part of streaming; the
+      // actual streaming compile is counted by BackgroundCompileTask into
+ // "compile_script_on_background".
+ case CacheBehaviour::kNoCacheBecauseStreamingSource:
+ return isolate_->counters()->compile_script_streaming_finalization();
+
case CacheBehaviour::kNoCacheBecauseInlineScript:
return isolate_->counters()
->compile_script_no_cache_because_inline_script();
@@ -1923,9 +1936,6 @@ struct ScriptCompileTimerScope {
// TODO(leszeks): Consider counting separately once modules are more
// common.
case CacheBehaviour::kNoCacheBecauseModule:
- // TODO(leszeks): Count separately or remove entirely once we have
- // background compilation.
- case CacheBehaviour::kNoCacheBecauseStreamingSource:
case CacheBehaviour::kNoCacheBecauseV8Extension:
case CacheBehaviour::kNoCacheBecauseExtensionModule:
case CacheBehaviour::kNoCacheBecausePacScript:
diff --git a/deps/v8/src/codegen/constant-pool.cc b/deps/v8/src/codegen/constant-pool.cc
index 6816c5b7ad..42b2fa6e9a 100644
--- a/deps/v8/src/codegen/constant-pool.cc
+++ b/deps/v8/src/codegen/constant-pool.cc
@@ -49,22 +49,22 @@ ConstantPoolEntry::Access ConstantPoolBuilder::NextAccess(
}
ConstantPoolEntry::Access ConstantPoolBuilder::AddEntry(
- ConstantPoolEntry& entry, ConstantPoolEntry::Type type) {
+ ConstantPoolEntry* entry, ConstantPoolEntry::Type type) {
DCHECK(!emitted_label_.is_bound());
PerTypeEntryInfo& info = info_[type];
const int entry_size = ConstantPoolEntry::size(type);
bool merged = false;
- if (entry.sharing_ok()) {
+ if (entry->sharing_ok()) {
// Try to merge entries
std::vector<ConstantPoolEntry>::iterator it = info.shared_entries.begin();
int end = static_cast<int>(info.shared_entries.size());
for (int i = 0; i < end; i++, it++) {
if ((entry_size == kSystemPointerSize)
- ? entry.value() == it->value()
- : entry.value64() == it->value64()) {
+ ? entry->value() == it->value()
+ : entry->value64() == it->value64()) {
// Merge with found entry.
- entry.set_merged_index(i);
+ entry->set_merged_index(i);
merged = true;
break;
}
@@ -72,16 +72,16 @@ ConstantPoolEntry::Access ConstantPoolBuilder::AddEntry(
}
// By definition, merged entries have regular access.
- DCHECK(!merged || entry.merged_index() < info.regular_count);
+ DCHECK(!merged || entry->merged_index() < info.regular_count);
ConstantPoolEntry::Access access =
(merged ? ConstantPoolEntry::REGULAR : NextAccess(type));
// Enforce an upper bound on search time by limiting the search to
// unique sharable entries which fit in the regular section.
- if (entry.sharing_ok() && !merged && access == ConstantPoolEntry::REGULAR) {
- info.shared_entries.push_back(entry);
+ if (entry->sharing_ok() && !merged && access == ConstantPoolEntry::REGULAR) {
+ info.shared_entries.push_back(*entry);
} else {
- info.entries.push_back(entry);
+ info.entries.push_back(*entry);
}
// We're done if we found a match or have already triggered the
diff --git a/deps/v8/src/codegen/constant-pool.h b/deps/v8/src/codegen/constant-pool.h
index d07452336b..d2ab5641ae 100644
--- a/deps/v8/src/codegen/constant-pool.h
+++ b/deps/v8/src/codegen/constant-pool.h
@@ -102,13 +102,13 @@ class ConstantPoolBuilder {
ConstantPoolEntry::Access AddEntry(int position, intptr_t value,
bool sharing_ok) {
ConstantPoolEntry entry(position, value, sharing_ok);
- return AddEntry(entry, ConstantPoolEntry::INTPTR);
+ return AddEntry(&entry, ConstantPoolEntry::INTPTR);
}
// Add double constant to the embedded constant pool
ConstantPoolEntry::Access AddEntry(int position, Double value) {
ConstantPoolEntry entry(position, value);
- return AddEntry(entry, ConstantPoolEntry::DOUBLE);
+ return AddEntry(&entry, ConstantPoolEntry::DOUBLE);
}
// Add double constant to the embedded constant pool
@@ -138,9 +138,8 @@ class ConstantPoolBuilder {
inline Label* EmittedPosition() { return &emitted_label_; }
private:
- ConstantPoolEntry::Access AddEntry(
- ConstantPoolEntry& entry, // NOLINT(runtime/references)
- ConstantPoolEntry::Type type);
+ ConstantPoolEntry::Access AddEntry(ConstantPoolEntry* entry,
+ ConstantPoolEntry::Type type);
void EmitSharedEntries(Assembler* assm, ConstantPoolEntry::Type type);
void EmitGroup(Assembler* assm, ConstantPoolEntry::Access access,
ConstantPoolEntry::Type type);
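Note on the constant-pool hunks above: the in/out ConstantPoolEntry& parameter becomes a pointer, which matches the Google C++ style rule for mutated parameters and lets the NOLINT(runtime/references) suppression go away. A minimal stand-alone sketch of the convention; the Entry type and Add() function are hypothetical stand-ins, not V8 code.

#include <cassert>
#include <vector>

struct Entry {
  int value;
  int merged_index;
};

// In-out parameters are taken by pointer, so the call site spells out &entry
// and the mutation is obvious to readers.
void Add(Entry* entry, std::vector<Entry>* pool) {
  for (size_t i = 0; i < pool->size(); ++i) {
    if ((*pool)[i].value == entry->value) {
      entry->merged_index = static_cast<int>(i);  // merge with existing entry
      return;
    }
  }
  pool->push_back(*entry);  // the pool stores a copy, as AddEntry() does above
}

int main() {
  std::vector<Entry> pool;
  Entry a{42, -1};
  Add(&a, &pool);
  Entry b{42, -1};
  Add(&b, &pool);
  assert(b.merged_index == 0);  // duplicate value merged instead of re-added
}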
diff --git a/deps/v8/src/codegen/cpu-features.h b/deps/v8/src/codegen/cpu-features.h
index dae9992c57..6b3d3934d0 100644
--- a/deps/v8/src/codegen/cpu-features.h
+++ b/deps/v8/src/codegen/cpu-features.h
@@ -13,7 +13,7 @@ namespace internal {
// CPU feature flags.
enum CpuFeature {
- // x86
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
SSE4_2,
SSE4_1,
SSSE3,
@@ -26,39 +26,46 @@ enum CpuFeature {
LZCNT,
POPCNT,
ATOM,
- // ARM
+
+#elif V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64
// - Standard configurations. The baseline is ARMv6+VFPv2.
ARMv7, // ARMv7-A + VFPv3-D32 + NEON
ARMv7_SUDIV, // ARMv7-A + VFPv4-D32 + NEON + SUDIV
ARMv8, // ARMv8-A (+ all of the above)
- // MIPS, MIPS64
+
+ // ARM feature aliases (based on the standard configurations above).
+ VFPv3 = ARMv7,
+ NEON = ARMv7,
+ VFP32DREGS = ARMv7,
+ SUDIV = ARMv7_SUDIV,
+
+#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
FPU,
FP64FPU,
MIPSr1,
MIPSr2,
MIPSr6,
MIPS_SIMD, // MSA instructions
- // PPC
+
+#elif V8_TARGET_ARCH_PPC
+ FPU,
FPR_GPR_MOV,
LWSYNC,
ISELECT,
VSX,
MODULO,
- // S390
+
+#elif V8_TARGET_ARCH_S390X
+ FPU,
DISTINCT_OPS,
GENERAL_INSTR_EXT,
FLOATING_POINT_EXT,
VECTOR_FACILITY,
VECTOR_ENHANCE_FACILITY_1,
MISC_INSTR_EXT2,
+#endif
- NUMBER_OF_CPU_FEATURES,
-
- // ARM feature aliases (based on the standard configurations above).
- VFPv3 = ARMv7,
- NEON = ARMv7,
- VFP32DREGS = ARMv7,
- SUDIV = ARMv7_SUDIV
+ NUMBER_OF_CPU_FEATURES
};
// CpuFeatures keeps track of which features are supported by the target CPU.
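The CpuFeature enum is now split per target architecture with preprocessor guards, so only the configured architecture's enumerators exist, the ARM aliases move inside the ARM branch, and NUMBER_OF_CPU_FEATURES stays small. A stand-alone sketch of the pattern; the macro and enumerator names below are illustrative, not V8's.

#include <cstdio>

#define SKETCH_TARGET_IS_X86 1  // pretend we configured an x86 build

enum CpuFeatureSketch {
#if SKETCH_TARGET_IS_X86
  SSE4_2,
  SSE4_1,
  AVX,
#elif SKETCH_TARGET_IS_ARM
  ARMv7,
  ARMv7_SUDIV,
  ARMv8,
  // Aliases map onto the standard configurations above, so they add no new
  // enumerators and never clash with other architectures' feature names.
  NEON = ARMv7,
  SUDIV = ARMv7_SUDIV,
#endif
  NUMBER_OF_CPU_FEATURES
};

int main() {
  // Only the configured architecture's features exist, so feature bitsets
  // indexed by NUMBER_OF_CPU_FEATURES stay compact.
  std::printf("feature count: %d\n", static_cast<int>(NUMBER_OF_CPU_FEATURES));
}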
diff --git a/deps/v8/src/codegen/external-reference.cc b/deps/v8/src/codegen/external-reference.cc
index 44503e532d..e1f873cb38 100644
--- a/deps/v8/src/codegen/external-reference.cc
+++ b/deps/v8/src/codegen/external-reference.cc
@@ -217,10 +217,8 @@ struct IsValidExternalReferenceType<Result (Class::*)(Args...)> {
FUNCTION_REFERENCE(incremental_marking_record_write_function,
IncrementalMarking::RecordWriteFromCode)
-ExternalReference ExternalReference::store_buffer_overflow_function() {
- return ExternalReference(
- Redirect(Heap::store_buffer_overflow_function_address()));
-}
+FUNCTION_REFERENCE(insert_remembered_set_function,
+ Heap::InsertIntoRememberedSetFromCode)
FUNCTION_REFERENCE(delete_handle_scope_extensions,
HandleScope::DeleteExtensions)
@@ -342,10 +340,6 @@ ExternalReference ExternalReference::address_of_real_jslimit(Isolate* isolate) {
return ExternalReference(address);
}
-ExternalReference ExternalReference::store_buffer_top(Isolate* isolate) {
- return ExternalReference(isolate->heap()->store_buffer_top_address());
-}
-
ExternalReference ExternalReference::heap_is_marking_flag_address(
Isolate* isolate) {
return ExternalReference(isolate->heap()->IsMarkingFlagAddress());
@@ -529,19 +523,19 @@ ExternalReference ExternalReference::address_of_regexp_stack_memory_top_address(
FUNCTION_REFERENCE_WITH_TYPE(ieee754_acos_function, base::ieee754::acos,
BUILTIN_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(ieee754_acosh_function, base::ieee754::acosh,
- BUILTIN_FP_FP_CALL)
+ BUILTIN_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(ieee754_asin_function, base::ieee754::asin,
BUILTIN_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(ieee754_asinh_function, base::ieee754::asinh,
- BUILTIN_FP_FP_CALL)
+ BUILTIN_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(ieee754_atan_function, base::ieee754::atan,
BUILTIN_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(ieee754_atanh_function, base::ieee754::atanh,
- BUILTIN_FP_FP_CALL)
+ BUILTIN_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(ieee754_atan2_function, base::ieee754::atan2,
BUILTIN_FP_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(ieee754_cbrt_function, base::ieee754::cbrt,
- BUILTIN_FP_FP_CALL)
+ BUILTIN_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(ieee754_cos_function, base::ieee754::cos,
BUILTIN_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(ieee754_cosh_function, base::ieee754::cosh,
@@ -549,7 +543,7 @@ FUNCTION_REFERENCE_WITH_TYPE(ieee754_cosh_function, base::ieee754::cosh,
FUNCTION_REFERENCE_WITH_TYPE(ieee754_exp_function, base::ieee754::exp,
BUILTIN_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(ieee754_expm1_function, base::ieee754::expm1,
- BUILTIN_FP_FP_CALL)
+ BUILTIN_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(ieee754_log_function, base::ieee754::log,
BUILTIN_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(ieee754_log1p_function, base::ieee754::log1p,
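The ieee754 lines in this hunk also correct the declared call type for several unary functions: acosh, asinh, atanh, cbrt and expm1 take a single double, so they belong under BUILTIN_FP_CALL rather than the two-argument BUILTIN_FP_FP_CALL that atan2 keeps. A small sketch of that signature distinction, using local typedef names rather than V8's ExternalReference types.

#include <cmath>
#include <cstdio>

// Unary shape: double f(double) -- what BUILTIN_FP_CALL describes.
static double Cbrt(double x) { return std::cbrt(x); }
// Binary shape: double f(double, double) -- what BUILTIN_FP_FP_CALL describes.
static double Atan2(double y, double x) { return std::atan2(y, x); }

using UnaryFpCall = double (*)(double);
using BinaryFpCall = double (*)(double, double);

int main() {
  UnaryFpCall unary = &Cbrt;
  BinaryFpCall binary = &Atan2;
  std::printf("%f %f\n", unary(27.0), binary(1.0, 1.0));
}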
diff --git a/deps/v8/src/codegen/external-reference.h b/deps/v8/src/codegen/external-reference.h
index 45c26bdfb0..7cc0241fc4 100644
--- a/deps/v8/src/codegen/external-reference.h
+++ b/deps/v8/src/codegen/external-reference.h
@@ -38,7 +38,6 @@ class StatsCounter;
V(allocation_sites_list_address, "Heap::allocation_sites_list_address()") \
V(address_of_jslimit, "StackGuard::address_of_jslimit()") \
V(address_of_real_jslimit, "StackGuard::address_of_real_jslimit()") \
- V(store_buffer_top, "store_buffer_top") \
V(heap_is_marking_flag_address, "heap_is_marking_flag_address") \
V(new_space_allocation_top_address, "Heap::NewSpaceAllocationTopAddress()") \
V(new_space_allocation_limit_address, \
@@ -143,6 +142,7 @@ class StatsCounter;
V(ieee754_tanh_function, "base::ieee754::tanh") \
V(incremental_marking_record_write_function, \
"IncrementalMarking::RecordWrite") \
+ V(insert_remembered_set_function, "Heap::InsertIntoRememberedSetFromCode") \
V(invalidate_prototype_chains_function, \
"JSObject::InvalidatePrototypeChains()") \
V(invoke_accessor_getter_callback, "InvokeAccessorGetterCallback") \
@@ -170,7 +170,6 @@ class StatsCounter;
V(search_string_raw_two_one, "search_string_raw_two_one") \
V(search_string_raw_two_two, "search_string_raw_two_two") \
V(smi_lexicographic_compare_function, "smi_lexicographic_compare_function") \
- V(store_buffer_overflow_function, "StoreBuffer::StoreBufferOverflow") \
V(try_internalize_string_function, "try_internalize_string_function") \
V(wasm_call_trap_callback_for_testing, \
"wasm::call_trap_callback_for_testing") \
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32-inl.h b/deps/v8/src/codegen/ia32/assembler-ia32-inl.h
index e274b41fa3..174a483868 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/codegen/ia32/assembler-ia32-inl.h
@@ -39,6 +39,7 @@
#include "src/codegen/ia32/assembler-ia32.h"
+#include "src/base/memory.h"
#include "src/codegen/assembler.h"
#include "src/debug/debug.h"
#include "src/objects/objects-inl.h"
@@ -58,12 +59,12 @@ void RelocInfo::apply(intptr_t delta) {
RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY)));
if (IsRuntimeEntry(rmode_) || IsCodeTarget(rmode_) ||
IsOffHeapTarget(rmode_)) {
- int32_t* p = reinterpret_cast<int32_t*>(pc_);
- *p -= delta; // Relocate entry.
+ base::WriteUnalignedValue(pc_,
+ base::ReadUnalignedValue<int32_t>(pc_) - delta);
} else if (IsInternalReference(rmode_)) {
- // absolute code pointer inside code object moves with the code object.
- int32_t* p = reinterpret_cast<int32_t*>(pc_);
- *p += delta; // Relocate entry.
+ // Absolute code pointer inside code object moves with the code object.
+ base::WriteUnalignedValue(pc_,
+ base::ReadUnalignedValue<int32_t>(pc_) + delta);
}
}
@@ -103,7 +104,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target,
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
FlushInstructionCache(pc_, sizeof(Address));
}
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) {
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
+ !FLAG_disable_write_barriers) {
WriteBarrierForCode(host(), this, target);
}
}
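RelocInfo::apply() now goes through base::ReadUnalignedValue / base::WriteUnalignedValue instead of dereferencing a possibly misaligned int32_t*. A stand-alone sketch of the same read-modify-write pattern using memcpy, which is the usual way such helpers avoid unaligned-access undefined behaviour; the helper names here are local stand-ins.

#include <cstdint>
#include <cstdio>
#include <cstring>

template <typename T>
T ReadUnaligned(const void* p) {
  T v;
  std::memcpy(&v, p, sizeof(T));  // well-defined even if p is misaligned
  return v;
}

template <typename T>
void WriteUnaligned(void* p, T v) {
  std::memcpy(p, &v, sizeof(T));
}

int main() {
  unsigned char code[8] = {0};
  void* pc = code + 1;  // deliberately misaligned "instruction stream" slot
  WriteUnaligned<int32_t>(pc, 1000);
  // Relocate by delta, like the code-target branch of RelocInfo::apply().
  int32_t delta = 16;
  WriteUnaligned<int32_t>(pc, ReadUnaligned<int32_t>(pc) - delta);
  std::printf("%d\n", static_cast<int>(ReadUnaligned<int32_t>(pc)));  // 984
}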
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.cc b/deps/v8/src/codegen/ia32/assembler-ia32.cc
index aefcab7299..405e4b7c55 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/assembler-ia32.cc
@@ -272,8 +272,8 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
Handle<HeapObject> object;
switch (request.kind()) {
case HeapObjectRequest::kHeapNumber:
- object = isolate->factory()->NewHeapNumber(request.heap_number(),
- AllocationType::kOld);
+ object = isolate->factory()->NewHeapNumber<AllocationType::kOld>(
+ request.heap_number());
break;
case HeapObjectRequest::kStringConstant: {
const StringConstantBase* str = request.string();
@@ -2163,70 +2163,6 @@ void Assembler::divsd(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-void Assembler::xorpd(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x57);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::andps(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x54);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::andnps(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x55);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::orps(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x56);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::xorps(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x57);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::addps(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x58);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::subps(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x5C);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::mulps(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x59);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::divps(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x5E);
- emit_sse_operand(dst, src);
-}
-
void Assembler::rcpps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
@@ -2234,29 +2170,31 @@ void Assembler::rcpps(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-void Assembler::rsqrtps(XMMRegister dst, Operand src) {
+void Assembler::sqrtps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
- EMIT(0x52);
+ EMIT(0x51);
emit_sse_operand(dst, src);
}
-void Assembler::minps(XMMRegister dst, Operand src) {
+void Assembler::rsqrtps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
- EMIT(0x5D);
+ EMIT(0x52);
emit_sse_operand(dst, src);
}
-void Assembler::maxps(XMMRegister dst, Operand src) {
+void Assembler::cmpps(XMMRegister dst, Operand src, uint8_t cmp) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
- EMIT(0x5F);
+ EMIT(0xC2);
emit_sse_operand(dst, src);
+ EMIT(cmp);
}
-void Assembler::cmpps(XMMRegister dst, Operand src, uint8_t cmp) {
+void Assembler::cmppd(XMMRegister dst, Operand src, uint8_t cmp) {
EnsureSpace ensure_space(this);
+ EMIT(0x66);
EMIT(0x0F);
EMIT(0xC2);
emit_sse_operand(dst, src);
@@ -2280,22 +2218,6 @@ void Assembler::haddps(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-void Assembler::andpd(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x54);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::orpd(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x56);
- emit_sse_operand(dst, src);
-}
-
void Assembler::ucomisd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
@@ -2398,6 +2320,16 @@ void Assembler::shufps(XMMRegister dst, XMMRegister src, byte imm8) {
EMIT(imm8);
}
+void Assembler::shufpd(XMMRegister dst, XMMRegister src, byte imm8) {
+ DCHECK(is_uint8(imm8));
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0xC6);
+ emit_sse_operand(dst, src);
+ EMIT(imm8);
+}
+
void Assembler::movdqa(Operand dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
@@ -2776,6 +2708,23 @@ void Assembler::minss(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
+// Packed single-precision floating-point SSE instructions.
+void Assembler::ps(byte opcode, XMMRegister dst, Operand src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(opcode);
+ emit_sse_operand(dst, src);
+}
+
+// Packed double-precision floating-point SSE instructions.
+void Assembler::pd(byte opcode, XMMRegister dst, Operand src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(opcode);
+ emit_sse_operand(dst, src);
+}
+
// AVX instructions
void Assembler::vfmasd(byte op, XMMRegister dst, XMMRegister src1,
Operand src2) {
@@ -2811,12 +2760,25 @@ void Assembler::vpd(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
vinstr(op, dst, src1, src2, k66, k0F, kWIG);
}
+void Assembler::vshufpd(XMMRegister dst, XMMRegister src1, Operand src2,
+ byte imm8) {
+ DCHECK(is_uint8(imm8));
+ vpd(0xC6, dst, src1, src2);
+ EMIT(imm8);
+}
+
void Assembler::vcmpps(XMMRegister dst, XMMRegister src1, Operand src2,
uint8_t cmp) {
vps(0xC2, dst, src1, src2);
EMIT(cmp);
}
+void Assembler::vcmppd(XMMRegister dst, XMMRegister src1, Operand src2,
+ uint8_t cmp) {
+ vpd(0xC2, dst, src1, src2);
+ EMIT(cmp);
+}
+
void Assembler::vshufps(XMMRegister dst, XMMRegister src1, Operand src2,
byte imm8) {
DCHECK(is_uint8(imm8));
@@ -2848,6 +2810,12 @@ void Assembler::vpsrld(XMMRegister dst, XMMRegister src, uint8_t imm8) {
EMIT(imm8);
}
+void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, uint8_t imm8) {
+ XMMRegister iop = XMMRegister::from_code(2);
+ vinstr(0x73, iop, dst, Operand(src), k66, k0F, kWIG);
+ EMIT(imm8);
+}
+
void Assembler::vpsraw(XMMRegister dst, XMMRegister src, uint8_t imm8) {
XMMRegister iop = XMMRegister::from_code(4);
vinstr(0x71, iop, dst, Operand(src), k66, k0F, kWIG);
@@ -3158,11 +3126,10 @@ void Assembler::emit_operand(int code, Operand adr) {
DCHECK_GT(length, 0);
// Emit updated ModRM byte containing the given register.
- pc_[0] = (adr.buf_[0] & ~0x38) | (code << 3);
+ EMIT((adr.buf_[0] & ~0x38) | (code << 3));
// Emit the rest of the encoded operand.
- for (unsigned i = 1; i < length; i++) pc_[i] = adr.buf_[i];
- pc_ += length;
+ for (unsigned i = 1; i < length; i++) EMIT(adr.buf_[i]);
// Emit relocation information if necessary.
if (length >= sizeof(int32_t) && !RelocInfo::IsNone(adr.rmode_)) {
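The deleted per-instruction bodies (andps, orps, addps, ...) are replaced by the generic ps() and pd() emitters added above: pd() differs from ps() only by the 0x66 operand-size prefix in front of the 0x0F escape byte. A simplified byte-level sketch of what they emit; the real emitters also append the ModR/M-encoded operand via emit_sse_operand(), which this sketch leaves out.

#include <cstdint>
#include <cstdio>
#include <vector>

std::vector<uint8_t> code;
void Emit(uint8_t b) { code.push_back(b); }

void ps(uint8_t opcode) {  // packed single: 0F <op>
  Emit(0x0F);
  Emit(opcode);
}

void pd(uint8_t opcode) {  // packed double: 66 0F <op>
  Emit(0x66);
  Emit(0x0F);
  Emit(opcode);
}

int main() {
  ps(0x58);  // addps
  pd(0x58);  // addpd: same opcode, the extra 0x66 prefix selects the pd form
  for (uint8_t b : code) std::printf("%02X ", b);
  std::printf("\n");  // 0F 58 66 0F 58
}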
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.h b/deps/v8/src/codegen/ia32/assembler-ia32.h
index 5225621276..8161ff8322 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32.h
+++ b/deps/v8/src/codegen/ia32/assembler-ia32.h
@@ -38,6 +38,7 @@
#define V8_CODEGEN_IA32_ASSEMBLER_IA32_H_
#include <deque>
+#include <memory>
#include "src/codegen/assembler.h"
#include "src/codegen/ia32/constants-ia32.h"
@@ -292,7 +293,7 @@ class V8_EXPORT_PRIVATE Operand {
// Only valid if len_ > 4.
RelocInfo::Mode rmode_ = RelocInfo::NONE;
- // TODO(clemensh): Get rid of this friendship, or make Operand immutable.
+ // TODO(clemensb): Get rid of this friendship, or make Operand immutable.
friend class Assembler;
};
ASSERT_TRIVIALLY_COPYABLE(Operand);
@@ -371,7 +372,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// own buffer. Otherwise it takes ownership of the provided buffer.
explicit Assembler(const AssemblerOptions&,
std::unique_ptr<AssemblerBuffer> = {});
- virtual ~Assembler() {}
// GetCode emits any pending (non-emitted) code and fills the descriptor desc.
static constexpr int kNoHandlerTable = 0;
@@ -512,6 +512,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void movzx_w(Register dst, Operand src);
void movq(XMMRegister dst, Operand src);
+
// Conditional moves
void cmov(Condition cc, Register dst, Register src) {
cmov(cc, dst, Operand(src));
@@ -849,56 +850,54 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void movups(XMMRegister dst, Operand src);
void movups(Operand dst, XMMRegister src);
void shufps(XMMRegister dst, XMMRegister src, byte imm8);
+ void shufpd(XMMRegister dst, XMMRegister src, byte imm8);
void maxss(XMMRegister dst, XMMRegister src) { maxss(dst, Operand(src)); }
void maxss(XMMRegister dst, Operand src);
void minss(XMMRegister dst, XMMRegister src) { minss(dst, Operand(src)); }
void minss(XMMRegister dst, Operand src);
- void andps(XMMRegister dst, Operand src);
- void andps(XMMRegister dst, XMMRegister src) { andps(dst, Operand(src)); }
- void andnps(XMMRegister dst, Operand src);
- void andnps(XMMRegister dst, XMMRegister src) { andnps(dst, Operand(src)); }
- void xorps(XMMRegister dst, Operand src);
- void xorps(XMMRegister dst, XMMRegister src) { xorps(dst, Operand(src)); }
- void orps(XMMRegister dst, Operand src);
- void orps(XMMRegister dst, XMMRegister src) { orps(dst, Operand(src)); }
-
- void addps(XMMRegister dst, Operand src);
- void addps(XMMRegister dst, XMMRegister src) { addps(dst, Operand(src)); }
- void subps(XMMRegister dst, Operand src);
- void subps(XMMRegister dst, XMMRegister src) { subps(dst, Operand(src)); }
- void mulps(XMMRegister dst, Operand src);
- void mulps(XMMRegister dst, XMMRegister src) { mulps(dst, Operand(src)); }
- void divps(XMMRegister dst, Operand src);
- void divps(XMMRegister dst, XMMRegister src) { divps(dst, Operand(src)); }
void rcpps(XMMRegister dst, Operand src);
void rcpps(XMMRegister dst, XMMRegister src) { rcpps(dst, Operand(src)); }
+ void sqrtps(XMMRegister dst, Operand src);
+ void sqrtps(XMMRegister dst, XMMRegister src) { sqrtps(dst, Operand(src)); }
void rsqrtps(XMMRegister dst, Operand src);
void rsqrtps(XMMRegister dst, XMMRegister src) { rsqrtps(dst, Operand(src)); }
void haddps(XMMRegister dst, Operand src);
void haddps(XMMRegister dst, XMMRegister src) { haddps(dst, Operand(src)); }
-
- void minps(XMMRegister dst, Operand src);
- void minps(XMMRegister dst, XMMRegister src) { minps(dst, Operand(src)); }
- void maxps(XMMRegister dst, Operand src);
- void maxps(XMMRegister dst, XMMRegister src) { maxps(dst, Operand(src)); }
+ void sqrtpd(XMMRegister dst, Operand src) {
+ sse2_instr(dst, src, 0x66, 0x0F, 0x51);
+ }
+ void sqrtpd(XMMRegister dst, XMMRegister src) { sqrtpd(dst, Operand(src)); }
void cmpps(XMMRegister dst, Operand src, uint8_t cmp);
void cmpps(XMMRegister dst, XMMRegister src, uint8_t cmp) {
cmpps(dst, Operand(src), cmp);
}
-#define SSE_CMP_P(instr, imm8) \
- void instr##ps(XMMRegister dst, XMMRegister src) { \
- cmpps(dst, Operand(src), imm8); \
- } \
- void instr##ps(XMMRegister dst, Operand src) { cmpps(dst, src, imm8); }
+ void cmppd(XMMRegister dst, Operand src, uint8_t cmp);
+ void cmppd(XMMRegister dst, XMMRegister src, uint8_t cmp) {
+ cmppd(dst, Operand(src), cmp);
+ }
+
+// Packed floating-point comparison operations.
+#define PACKED_CMP_LIST(V) \
+ V(cmpeq, 0x0) \
+ V(cmplt, 0x1) \
+ V(cmple, 0x2) \
+ V(cmpunord, 0x3) \
+ V(cmpneq, 0x4)
- SSE_CMP_P(cmpeq, 0x0)
- SSE_CMP_P(cmplt, 0x1)
- SSE_CMP_P(cmple, 0x2)
- SSE_CMP_P(cmpneq, 0x4)
+#define SSE_CMP_P(instr, imm8) \
+ void instr##ps(XMMRegister dst, XMMRegister src) { \
+ cmpps(dst, Operand(src), imm8); \
+ } \
+ void instr##ps(XMMRegister dst, Operand src) { cmpps(dst, src, imm8); } \
+ void instr##pd(XMMRegister dst, XMMRegister src) { \
+ cmppd(dst, Operand(src), imm8); \
+ } \
+ void instr##pd(XMMRegister dst, Operand src) { cmppd(dst, src, imm8); }
+ PACKED_CMP_LIST(SSE_CMP_P)
#undef SSE_CMP_P
// SSE2 instructions
@@ -941,22 +940,20 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void mulsd(XMMRegister dst, Operand src);
void divsd(XMMRegister dst, XMMRegister src) { divsd(dst, Operand(src)); }
void divsd(XMMRegister dst, Operand src);
- void xorpd(XMMRegister dst, XMMRegister src) { xorpd(dst, Operand(src)); }
- void xorpd(XMMRegister dst, Operand src);
void sqrtsd(XMMRegister dst, XMMRegister src) { sqrtsd(dst, Operand(src)); }
void sqrtsd(XMMRegister dst, Operand src);
- void andpd(XMMRegister dst, XMMRegister src) { andpd(dst, Operand(src)); }
- void andpd(XMMRegister dst, Operand src);
- void orpd(XMMRegister dst, XMMRegister src) { orpd(dst, Operand(src)); }
- void orpd(XMMRegister dst, Operand src);
-
void ucomisd(XMMRegister dst, XMMRegister src) { ucomisd(dst, Operand(src)); }
void ucomisd(XMMRegister dst, Operand src);
void roundss(XMMRegister dst, XMMRegister src, RoundingMode mode);
void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
+ void movapd(XMMRegister dst, XMMRegister src) { movapd(dst, Operand(src)); }
+ void movapd(XMMRegister dst, Operand src) {
+ sse2_instr(dst, src, 0x66, 0x0F, 0x28);
+ }
+
void movmskpd(Register dst, XMMRegister src);
void movmskps(Register dst, XMMRegister src);
@@ -1298,6 +1295,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vrcpps(XMMRegister dst, Operand src) {
vinstr(0x53, dst, xmm0, src, kNone, k0F, kWIG);
}
+ void vsqrtps(XMMRegister dst, XMMRegister src) { vsqrtps(dst, Operand(src)); }
+ void vsqrtps(XMMRegister dst, Operand src) {
+ vinstr(0x51, dst, xmm0, src, kNone, k0F, kWIG);
+ }
void vrsqrtps(XMMRegister dst, XMMRegister src) {
vrsqrtps(dst, Operand(src));
}
@@ -1310,14 +1311,24 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vhaddps(XMMRegister dst, XMMRegister src1, Operand src2) {
vinstr(0x7C, dst, src1, src2, kF2, k0F, kWIG);
}
+ void vsqrtpd(XMMRegister dst, XMMRegister src) { vsqrtpd(dst, Operand(src)); }
+ void vsqrtpd(XMMRegister dst, Operand src) {
+ vinstr(0x51, dst, xmm0, src, k66, k0F, kWIG);
+ }
void vmovaps(XMMRegister dst, XMMRegister src) { vmovaps(dst, Operand(src)); }
void vmovaps(XMMRegister dst, Operand src) { vps(0x28, dst, xmm0, src); }
+ void vmovapd(XMMRegister dst, XMMRegister src) { vmovapd(dst, Operand(src)); }
+ void vmovapd(XMMRegister dst, Operand src) { vpd(0x28, dst, xmm0, src); }
void vmovups(XMMRegister dst, XMMRegister src) { vmovups(dst, Operand(src)); }
void vmovups(XMMRegister dst, Operand src) { vps(0x10, dst, xmm0, src); }
void vshufps(XMMRegister dst, XMMRegister src1, XMMRegister src2, byte imm8) {
vshufps(dst, src1, Operand(src2), imm8);
}
void vshufps(XMMRegister dst, XMMRegister src1, Operand src2, byte imm8);
+ void vshufpd(XMMRegister dst, XMMRegister src1, XMMRegister src2, byte imm8) {
+ vshufpd(dst, src1, Operand(src2), imm8);
+ }
+ void vshufpd(XMMRegister dst, XMMRegister src1, Operand src2, byte imm8);
void vpsllw(XMMRegister dst, XMMRegister src, uint8_t imm8);
void vpslld(XMMRegister dst, XMMRegister src, uint8_t imm8);
@@ -1325,6 +1336,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vpsrld(XMMRegister dst, XMMRegister src, uint8_t imm8);
void vpsraw(XMMRegister dst, XMMRegister src, uint8_t imm8);
void vpsrad(XMMRegister dst, XMMRegister src, uint8_t imm8);
+ void vpsrlq(XMMRegister dst, XMMRegister src, uint8_t imm8);
void vpshufhw(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
vpshufhw(dst, Operand(src), shuffle);
@@ -1489,6 +1501,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
void rorx(Register dst, Operand src, byte imm8);
+ // Implementation of packed single-precision floating-point SSE instructions.
+ void ps(byte op, XMMRegister dst, Operand src);
+ // Implementation of packed double-precision floating-point SSE instructions.
+ void pd(byte op, XMMRegister dst, Operand src);
+
#define PACKED_OP_LIST(V) \
V(and, 0x54) \
V(andn, 0x55) \
@@ -1501,6 +1518,19 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
V(div, 0x5e) \
V(max, 0x5f)
+#define SSE_PACKED_OP_DECLARE(name, opcode) \
+ void name##ps(XMMRegister dst, XMMRegister src) { \
+ ps(opcode, dst, Operand(src)); \
+ } \
+ void name##ps(XMMRegister dst, Operand src) { ps(opcode, dst, src); } \
+ void name##pd(XMMRegister dst, XMMRegister src) { \
+ pd(opcode, dst, Operand(src)); \
+ } \
+ void name##pd(XMMRegister dst, Operand src) { pd(opcode, dst, src); }
+
+ PACKED_OP_LIST(SSE_PACKED_OP_DECLARE)
+#undef SSE_PACKED_OP_DECLARE
+
#define AVX_PACKED_OP_DECLARE(name, opcode) \
void v##name##ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
vps(opcode, dst, src1, Operand(src2)); \
@@ -1516,24 +1546,32 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
PACKED_OP_LIST(AVX_PACKED_OP_DECLARE)
+#undef AVX_PACKED_OP_DECLARE
+#undef PACKED_OP_LIST
+
void vps(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
void vpd(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
void vcmpps(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t cmp);
-#define AVX_CMP_P(instr, imm8) \
- void instr##ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
- vcmpps(dst, src1, Operand(src2), imm8); \
- } \
- void instr##ps(XMMRegister dst, XMMRegister src1, Operand src2) { \
- vcmpps(dst, src1, src2, imm8); \
- }
-
- AVX_CMP_P(vcmpeq, 0x0)
- AVX_CMP_P(vcmplt, 0x1)
- AVX_CMP_P(vcmple, 0x2)
- AVX_CMP_P(vcmpneq, 0x4)
-
+ void vcmppd(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t cmp);
+
+#define AVX_CMP_P(instr, imm8) \
+ void v##instr##ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
+ vcmpps(dst, src1, Operand(src2), imm8); \
+ } \
+ void v##instr##ps(XMMRegister dst, XMMRegister src1, Operand src2) { \
+ vcmpps(dst, src1, src2, imm8); \
+ } \
+ void v##instr##pd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
+ vcmppd(dst, src1, Operand(src2), imm8); \
+ } \
+ void v##instr##pd(XMMRegister dst, XMMRegister src1, Operand src2) { \
+ vcmppd(dst, src1, src2, imm8); \
+ }
+
+ PACKED_CMP_LIST(AVX_CMP_P)
#undef AVX_CMP_P
+#undef PACKED_CMP_LIST
// Other SSE and AVX instructions
#define DECLARE_SSE2_INSTRUCTION(instruction, prefix, escape, opcode) \
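The PACKED_CMP_LIST / SSE_CMP_P macros above generate the cmp*ps and cmp*pd convenience overloads from a single table. Written out by hand for one row, the expansion looks roughly like the following stand-alone sketch (register types reduced to ints so it compiles on its own).

#include <cstdint>
#include <cstdio>

struct AssemblerSketch {
  void cmpps(int dst, int src, uint8_t cmp) { std::printf("cmpps %u\n", cmp); }
  void cmppd(int dst, int src, uint8_t cmp) { std::printf("cmppd %u\n", cmp); }

  // V(cmpeq, 0x0) becomes thin wrappers for both the ps and pd comparison,
  // each hard-coding the comparison predicate 0x0.
  void cmpeqps(int dst, int src) { cmpps(dst, src, 0x0); }
  void cmpeqpd(int dst, int src) { cmppd(dst, src, 0x0); }
};

int main() {
  AssemblerSketch masm;
  masm.cmpeqps(0, 1);
  masm.cmpeqpd(0, 1);
}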
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
index 070f315977..dd11bc496e 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
@@ -1168,57 +1168,44 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
}
-void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual) {
- Label skip_hook;
-
- ExternalReference debug_hook_active =
- ExternalReference::debug_hook_on_function_call_address(isolate());
- push(eax);
- cmpb(ExternalReferenceAsOperand(debug_hook_active, eax), Immediate(0));
- pop(eax);
- j(equal, &skip_hook);
-
- {
- FrameScope frame(this,
- has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
- if (expected.is_reg()) {
- SmiTag(expected.reg());
- Push(expected.reg());
- }
- if (actual.is_reg()) {
- SmiTag(actual.reg());
- Push(actual.reg());
- SmiUntag(actual.reg());
- }
- if (new_target.is_valid()) {
- Push(new_target);
- }
- Push(fun);
- Push(fun);
- Operand receiver_op =
- actual.is_reg()
- ? Operand(ebp, actual.reg(), times_system_pointer_size,
- kSystemPointerSize * 2)
- : Operand(ebp, actual.immediate() * times_system_pointer_size +
- kSystemPointerSize * 2);
- Push(receiver_op);
- CallRuntime(Runtime::kDebugOnFunctionCall);
- Pop(fun);
- if (new_target.is_valid()) {
- Pop(new_target);
- }
- if (actual.is_reg()) {
- Pop(actual.reg());
- SmiUntag(actual.reg());
- }
- if (expected.is_reg()) {
- Pop(expected.reg());
- SmiUntag(expected.reg());
- }
+void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual) {
+ FrameScope frame(this, has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ if (expected.is_reg()) {
+ SmiTag(expected.reg());
+ Push(expected.reg());
+ }
+ if (actual.is_reg()) {
+ SmiTag(actual.reg());
+ Push(actual.reg());
+ SmiUntag(actual.reg());
+ }
+ if (new_target.is_valid()) {
+ Push(new_target);
+ }
+ Push(fun);
+ Push(fun);
+ Operand receiver_op =
+ actual.is_reg()
+ ? Operand(ebp, actual.reg(), times_system_pointer_size,
+ kSystemPointerSize * 2)
+ : Operand(ebp, actual.immediate() * times_system_pointer_size +
+ kSystemPointerSize * 2);
+ Push(receiver_op);
+ CallRuntime(Runtime::kDebugOnFunctionCall);
+ Pop(fun);
+ if (new_target.is_valid()) {
+ Pop(new_target);
+ }
+ if (actual.is_reg()) {
+ Pop(actual.reg());
+ SmiUntag(actual.reg());
+ }
+ if (expected.is_reg()) {
+ Pop(expected.reg());
+ SmiUntag(expected.reg());
}
- bind(&skip_hook);
}
void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
@@ -1233,7 +1220,16 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
DCHECK_IMPLIES(actual.is_reg(), actual.reg() == eax);
// On function call, call into the debugger if necessary.
- CheckDebugHook(function, new_target, expected, actual);
+ Label debug_hook, continue_after_hook;
+ {
+ ExternalReference debug_hook_active =
+ ExternalReference::debug_hook_on_function_call_address(isolate());
+ push(eax);
+ cmpb(ExternalReferenceAsOperand(debug_hook_active, eax), Immediate(0));
+ pop(eax);
+ j(not_equal, &debug_hook, Label::kNear);
+ }
+ bind(&continue_after_hook);
// Clear the new.target register if not given.
if (!new_target.is_valid()) {
@@ -1256,8 +1252,15 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
DCHECK(flag == JUMP_FUNCTION);
JumpCodeObject(ecx);
}
- bind(&done);
}
+ jmp(&done, Label::kNear);
+
+ // Deferred debug hook.
+ bind(&debug_hook);
+ CallDebugOnFunctionCall(function, new_target, expected, actual);
+ jmp(&continue_after_hook, Label::kNear);
+
+ bind(&done);
}
void MacroAssembler::InvokeFunction(Register fun, Register new_target,
@@ -1479,6 +1482,15 @@ void TurboAssembler::Psrlw(XMMRegister dst, uint8_t shift) {
}
}
+void TurboAssembler::Psrlq(XMMRegister dst, uint8_t shift) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpsrlq(dst, dst, shift);
+ } else {
+ psrlq(dst, shift);
+ }
+}
+
void TurboAssembler::Psignb(XMMRegister dst, Operand src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
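InvokeFunctionCode() now keeps only the flag compare and a normally-untaken branch on the fast path and moves the CallDebugOnFunctionCall() body out of line, jumping back afterwards. A control-flow sketch of that shape in plain C++, with goto labels standing in for assembler Labels; the flag variable is a stand-in for the external reference.

#include <cstdio>

bool debug_hook_active = false;  // stand-in for debug_hook_on_function_call

void InvokeSketch() {
  if (debug_hook_active) goto debug_hook;  // cmpb + j(not_equal, &debug_hook)
continue_after_hook:
  std::printf("invoke function (fast path)\n");
  goto done;  // jmp(&done)

debug_hook:  // deferred, out-of-line slow path
  std::printf("CallDebugOnFunctionCall\n");
  goto continue_after_hook;  // jmp(&continue_after_hook)

done:
  return;
}

int main() {
  InvokeSketch();
  debug_hook_active = true;
  InvokeSketch();
}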
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
index c65871cfad..9e7774c55d 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
@@ -237,6 +237,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Pshufd(XMMRegister dst, Operand src, uint8_t shuffle);
void Psraw(XMMRegister dst, uint8_t shift);
void Psrlw(XMMRegister dst, uint8_t shift);
+ void Psrlq(XMMRegister dst, uint8_t shift);
// SSE/SSE2 instructions with AVX version.
#define AVX_OP2_WITH_TYPE(macro_name, name, dst_type, src_type) \
@@ -258,6 +259,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP2_WITH_TYPE(Movd, movd, Register, XMMRegister)
AVX_OP2_WITH_TYPE(Movd, movd, Operand, XMMRegister)
AVX_OP2_WITH_TYPE(Cvtdq2ps, cvtdq2ps, XMMRegister, Operand)
+ AVX_OP2_WITH_TYPE(Sqrtpd, sqrtpd, XMMRegister, const Operand&)
+ AVX_OP2_WITH_TYPE(Movapd, movapd, XMMRegister, XMMRegister)
+ AVX_OP2_WITH_TYPE(Movapd, movapd, XMMRegister, const Operand&)
#undef AVX_OP2_WITH_TYPE
@@ -278,6 +282,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP3_XO(Packsswb, packsswb)
AVX_OP3_XO(Packuswb, packuswb)
+ AVX_OP3_XO(Paddusb, paddusb)
AVX_OP3_XO(Pcmpeqb, pcmpeqb)
AVX_OP3_XO(Pcmpeqw, pcmpeqw)
AVX_OP3_XO(Pcmpeqd, pcmpeqd)
@@ -294,10 +299,41 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP3_XO(Xorpd, xorpd)
AVX_OP3_XO(Sqrtss, sqrtss)
AVX_OP3_XO(Sqrtsd, sqrtsd)
+ AVX_OP3_XO(Orpd, orpd)
+ AVX_OP3_XO(Andnpd, andnpd)
#undef AVX_OP3_XO
#undef AVX_OP3_WITH_TYPE
+// Only use this macro when dst and src1 are the same in the SSE case.
+#define AVX_PACKED_OP3_WITH_TYPE(macro_name, name, dst_type, src_type) \
+ void macro_name(dst_type dst, dst_type src1, src_type src2) { \
+ if (CpuFeatures::IsSupported(AVX)) { \
+ CpuFeatureScope scope(this, AVX); \
+ v##name(dst, src1, src2); \
+ } else { \
+ DCHECK_EQ(dst, src1); \
+ name(dst, src2); \
+ } \
+ }
+#define AVX_PACKED_OP3(macro_name, name) \
+ AVX_PACKED_OP3_WITH_TYPE(macro_name, name, XMMRegister, XMMRegister) \
+ AVX_PACKED_OP3_WITH_TYPE(macro_name, name, XMMRegister, Operand)
+
+ AVX_PACKED_OP3(Addpd, addpd)
+ AVX_PACKED_OP3(Subpd, subpd)
+ AVX_PACKED_OP3(Mulpd, mulpd)
+ AVX_PACKED_OP3(Divpd, divpd)
+ AVX_PACKED_OP3(Cmpeqpd, cmpeqpd)
+ AVX_PACKED_OP3(Cmpneqpd, cmpneqpd)
+ AVX_PACKED_OP3(Cmpltpd, cmpltpd)
+ AVX_PACKED_OP3(Cmplepd, cmplepd)
+ AVX_PACKED_OP3(Minpd, minpd)
+ AVX_PACKED_OP3(Maxpd, maxpd)
+ AVX_PACKED_OP3(Cmpunordpd, cmpunordpd)
+#undef AVX_PACKED_OP3
+#undef AVX_PACKED_OP3_WITH_TYPE
+
// Non-SSE2 instructions.
#define AVX_OP2_WITH_TYPE_SCOPE(macro_name, name, dst_type, src_type, \
sse_scope) \
@@ -529,11 +565,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag);
- // On function call, call into the debugger if necessary.
+ // On function call, call into the debugger.
// This may clobber ecx.
- void CheckDebugHook(Register fun, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual);
+ void CallDebugOnFunctionCall(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
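AVX_PACKED_OP3_WITH_TYPE above dispatches between the three-operand AVX encoding and the destructive two-operand SSE encoding, which is why the SSE branch asserts dst == src1. Hand-expanded for Addpd as a stand-alone sketch, with registers reduced to ints and the emitters to printfs.

#include <cassert>
#include <cstdio>

bool avx_supported = true;  // stand-in for CpuFeatures::IsSupported(AVX)

void vaddpd(int dst, int src1, int src2) { std::printf("vaddpd\n"); }
void addpd(int dst, int src2) { std::printf("addpd\n"); }

void Addpd(int dst, int src1, int src2) {
  if (avx_supported) {
    vaddpd(dst, src1, src2);  // non-destructive three-operand AVX form
  } else {
    assert(dst == src1);  // the SSE form overwrites dst, so dst must alias src1
    addpd(dst, src2);
  }
}

int main() {
  Addpd(0, 0, 1);
  avx_supported = false;
  Addpd(0, 0, 1);
}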
diff --git a/deps/v8/src/codegen/interface-descriptors.cc b/deps/v8/src/codegen/interface-descriptors.cc
index f537ebc899..1525f814cd 100644
--- a/deps/v8/src/codegen/interface-descriptors.cc
+++ b/deps/v8/src/codegen/interface-descriptors.cc
@@ -278,6 +278,11 @@ void AsyncFunctionStackParameterDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(0, nullptr);
}
+void GetIteratorStackParameterDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ data->InitializePlatformSpecific(0, nullptr);
+}
+
void LoadWithVectorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ReceiverRegister(), NameRegister(), SlotRegister(),
diff --git a/deps/v8/src/codegen/interface-descriptors.h b/deps/v8/src/codegen/interface-descriptors.h
index 544d62fd9f..e305d666a3 100644
--- a/deps/v8/src/codegen/interface-descriptors.h
+++ b/deps/v8/src/codegen/interface-descriptors.h
@@ -9,12 +9,17 @@
#include "src/codegen/machine-type.h"
#include "src/codegen/register-arch.h"
+#include "src/codegen/tnode.h"
#include "src/common/globals.h"
#include "src/execution/isolate.h"
namespace v8 {
namespace internal {
+#define TORQUE_BUILTIN_LIST_TFC(V) \
+ BUILTIN_LIST_FROM_TORQUE(IGNORE_BUILTIN, IGNORE_BUILTIN, V, IGNORE_BUILTIN, \
+ IGNORE_BUILTIN, IGNORE_BUILTIN)
+
#define INTERFACE_DESCRIPTOR_LIST(V) \
V(Abort) \
V(Allocate) \
@@ -52,6 +57,7 @@ namespace internal {
V(FastNewFunctionContext) \
V(FastNewObject) \
V(FrameDropperTrampoline) \
+ V(GetIteratorStackParameter) \
V(GetProperty) \
V(GrowArrayElements) \
V(InterpreterCEntry1) \
@@ -89,7 +95,8 @@ namespace internal {
V(WasmTableGet) \
V(WasmTableSet) \
V(WasmThrow) \
- BUILTIN_LIST_TFS(V)
+ BUILTIN_LIST_TFS(V) \
+ TORQUE_BUILTIN_LIST_TFC(V)
class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
public:
@@ -486,6 +493,46 @@ class V8_EXPORT_PRIVATE VoidDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(VoidDescriptor, CallInterfaceDescriptor)
};
+// This class is subclassed by Torque-generated call interface descriptors.
+template <int parameter_count>
+class TorqueInterfaceDescriptor : public CallInterfaceDescriptor {
+ public:
+ static constexpr int kDescriptorFlags = CallInterfaceDescriptorData::kNoFlags;
+ static constexpr int kParameterCount = parameter_count;
+ enum ParameterIndices { kContext = kParameterCount };
+ template <int i>
+ static ParameterIndices ParameterIndex() {
+ STATIC_ASSERT(0 <= i && i < kParameterCount);
+ return static_cast<ParameterIndices>(i);
+ }
+ static constexpr int kReturnCount = 1;
+
+ using CallInterfaceDescriptor::CallInterfaceDescriptor;
+
+ protected:
+ static const int kRegisterParams =
+ kParameterCount > kMaxTFSBuiltinRegisterParams
+ ? kMaxTFSBuiltinRegisterParams
+ : kParameterCount;
+ static const int kStackParams = kParameterCount - kRegisterParams;
+ virtual MachineType ReturnType() = 0;
+ virtual std::array<MachineType, kParameterCount> ParameterTypes() = 0;
+ void InitializePlatformSpecific(CallInterfaceDescriptorData* data) override {
+ DefaultInitializePlatformSpecific(data, kRegisterParams);
+ }
+ void InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) override {
+ std::vector<MachineType> machine_types = {ReturnType()};
+ auto parameter_types = ParameterTypes();
+ machine_types.insert(machine_types.end(), parameter_types.begin(),
+ parameter_types.end());
+ DCHECK_EQ(kReturnCount + kParameterCount, machine_types.size());
+ data->InitializePlatformIndependent(Flags(kDescriptorFlags), kReturnCount,
+ kParameterCount, machine_types.data(),
+ static_cast<int>(machine_types.size()));
+ }
+};
+
// Dummy descriptor used to mark builtins that don't yet have their proper
// descriptor associated.
using DummyDescriptor = VoidDescriptor;
@@ -706,7 +753,7 @@ class FastNewFunctionContextDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kScopeInfo, kSlots)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kScopeInfo
- MachineType::Int32()) // kSlots
+ MachineType::Uint32()) // kSlots
DECLARE_DESCRIPTOR(FastNewFunctionContextDescriptor, CallInterfaceDescriptor)
static const Register ScopeInfoRegister();
@@ -771,6 +818,16 @@ class AsyncFunctionStackParameterDescriptor final
CallInterfaceDescriptor)
};
+class GetIteratorStackParameterDescriptor final
+ : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kReceiver, kCallSlot, kFeedback, kResult)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), MachineType::AnyTagged(),
+ MachineType::AnyTagged(), MachineType::AnyTagged())
+ DECLARE_DESCRIPTOR(GetIteratorStackParameterDescriptor,
+ CallInterfaceDescriptor)
+};
+
class GetPropertyDescriptor final : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kObject, kKey)
@@ -1298,6 +1355,11 @@ class CloneObjectWithVectorDescriptor final : public CallInterfaceDescriptor {
BUILTIN_LIST_TFS(DEFINE_TFS_BUILTIN_DESCRIPTOR)
#undef DEFINE_TFS_BUILTIN_DESCRIPTOR
+// This file contains interface descriptor class definitions for builtins
+// defined in Torque. It is included here because the class definitions need to
+// precede the definition of name##Descriptor::key() below.
+#include "torque-generated/interface-descriptors-tq.inc"
+
#undef DECLARE_DEFAULT_DESCRIPTOR
#undef DECLARE_DESCRIPTOR_WITH_BASE
#undef DECLARE_DESCRIPTOR
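TorqueInterfaceDescriptor is subclassed by the generated interface-descriptors-tq.inc, which is not part of this diff; a generated descriptor presumably supplies ReturnType() and ParameterTypes() roughly as in the hypothetical sketch below, where MachineType is reduced to a local enum so the sketch stands alone.

#include <array>
#include <cstdio>

enum class MachineType { AnyTagged, Uint32 };

template <int parameter_count>
class TorqueInterfaceDescriptorSketch {
 public:
  static constexpr int kParameterCount = parameter_count;
  virtual ~TorqueInterfaceDescriptorSketch() = default;

 protected:
  virtual MachineType ReturnType() = 0;
  virtual std::array<MachineType, parameter_count> ParameterTypes() = 0;
};

// A two-parameter builtin descriptor as Torque might emit it (hypothetical).
class HypotheticalBuiltinDescriptor final
    : public TorqueInterfaceDescriptorSketch<2> {
 protected:
  MachineType ReturnType() override { return MachineType::AnyTagged; }
  std::array<MachineType, 2> ParameterTypes() override {
    return {MachineType::AnyTagged, MachineType::Uint32};
  }
};

int main() {
  HypotheticalBuiltinDescriptor d;
  (void)d;
  std::printf("parameters: %d\n",
              HypotheticalBuiltinDescriptor::kParameterCount);
}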
diff --git a/deps/v8/src/codegen/machine-type.h b/deps/v8/src/codegen/machine-type.h
index 15e3df65c5..a0bef4e07d 100644
--- a/deps/v8/src/codegen/machine-type.h
+++ b/deps/v8/src/codegen/machine-type.h
@@ -9,6 +9,7 @@
#include "src/base/bits.h"
#include "src/common/globals.h"
+#include "src/flags/flags.h"
namespace v8 {
namespace internal {
@@ -114,6 +115,10 @@ class MachineType {
constexpr bool IsCompressedPointer() const {
return representation() == MachineRepresentation::kCompressedPointer;
}
+ constexpr static MachineRepresentation TaggedRepresentation() {
+ return (kTaggedSize == 4) ? MachineRepresentation::kWord32
+ : MachineRepresentation::kWord64;
+ }
constexpr static MachineRepresentation PointerRepresentation() {
return (kSystemPointerSize == 4) ? MachineRepresentation::kWord32
: MachineRepresentation::kWord64;
@@ -239,71 +244,79 @@ class MachineType {
// pointer flag is enabled. Otherwise, they returned the corresponding tagged
// one.
constexpr static MachineRepresentation RepCompressedTagged() {
-#ifdef V8_COMPRESS_POINTERS
- return MachineRepresentation::kCompressed;
-#else
- return MachineRepresentation::kTagged;
-#endif
+ if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) {
+ return MachineRepresentation::kCompressed;
+ } else {
+ return MachineRepresentation::kTagged;
+ }
}
constexpr static MachineRepresentation RepCompressedTaggedSigned() {
-#ifdef V8_COMPRESS_POINTERS
- return MachineRepresentation::kCompressedSigned;
-#else
- return MachineRepresentation::kTaggedSigned;
-#endif
+ if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) {
+ return MachineRepresentation::kCompressedSigned;
+ } else {
+ return MachineRepresentation::kTaggedSigned;
+ }
}
constexpr static MachineRepresentation RepCompressedTaggedPointer() {
-#ifdef V8_COMPRESS_POINTERS
- return MachineRepresentation::kCompressedPointer;
-#else
- return MachineRepresentation::kTaggedPointer;
-#endif
+ if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) {
+ return MachineRepresentation::kCompressedPointer;
+ } else {
+ return MachineRepresentation::kTaggedPointer;
+ }
+ }
+
+ constexpr static MachineType TypeRawTagged() {
+ if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) {
+ return MachineType::Int32();
+ } else {
+ return MachineType::Pointer();
+ }
}
constexpr static MachineType TypeCompressedTagged() {
-#ifdef V8_COMPRESS_POINTERS
- return MachineType::AnyCompressed();
-#else
- return MachineType::AnyTagged();
-#endif
+ if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) {
+ return MachineType::AnyCompressed();
+ } else {
+ return MachineType::AnyTagged();
+ }
}
constexpr static MachineType TypeCompressedTaggedSigned() {
-#ifdef V8_COMPRESS_POINTERS
- return MachineType::CompressedSigned();
-#else
- return MachineType::TaggedSigned();
-#endif
+ if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) {
+ return MachineType::CompressedSigned();
+ } else {
+ return MachineType::TaggedSigned();
+ }
}
constexpr static MachineType TypeCompressedTaggedPointer() {
-#ifdef V8_COMPRESS_POINTERS
- return MachineType::CompressedPointer();
-#else
- return MachineType::TaggedPointer();
-#endif
+ if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) {
+ return MachineType::CompressedPointer();
+ } else {
+ return MachineType::TaggedPointer();
+ }
}
constexpr bool IsCompressedTagged() const {
-#ifdef V8_COMPRESS_POINTERS
- return IsCompressed();
-#else
- return IsTagged();
-#endif
+ if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) {
+ return IsCompressed();
+ } else {
+ return IsTagged();
+ }
}
constexpr bool IsCompressedTaggedSigned() const {
-#ifdef V8_COMPRESS_POINTERS
- return IsCompressedSigned();
-#else
- return IsTaggedSigned();
-#endif
+ if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) {
+ return IsCompressedSigned();
+ } else {
+ return IsTaggedSigned();
+ }
}
constexpr bool IsCompressedTaggedPointer() const {
-#ifdef V8_COMPRESS_POINTERS
- return IsCompressedPointer();
-#else
- return IsTaggedPointer();
-#endif
+ if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) {
+ return IsCompressedPointer();
+ } else {
+ return IsTaggedPointer();
+ }
}
static MachineType TypeForRepresentation(const MachineRepresentation& rep,
@@ -405,11 +418,11 @@ inline bool IsAnyCompressed(MachineRepresentation rep) {
}
inline bool IsAnyCompressedTagged(MachineRepresentation rep) {
-#ifdef V8_COMPRESS_POINTERS
- return IsAnyCompressed(rep);
-#else
- return IsAnyTagged(rep);
-#endif
+ if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) {
+ return IsAnyCompressed(rep);
+ } else {
+ return IsAnyTagged(rep);
+ }
}
// Gets the log2 of the element size in bytes of the machine type.
@@ -431,7 +444,6 @@ V8_EXPORT_PRIVATE inline int ElementSizeLog2Of(MachineRepresentation rep) {
case MachineRepresentation::kTaggedSigned:
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTagged:
- return kSystemPointerSizeLog2;
case MachineRepresentation::kCompressedSigned:
case MachineRepresentation::kCompressedPointer:
case MachineRepresentation::kCompressed:
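The machine-type helpers switch from a V8_COMPRESS_POINTERS preprocessor branch to a condition that also consults the runtime flag FLAG_turbo_decompression_elimination, so the representation choice can change without a rebuild when pointer compression is compiled in. A minimal stand-alone version of the selection pattern; the constant and flag names mirror V8's loosely and the defaults are made up for the example.

#include <cstdio>

constexpr bool kCompressPointersBool = true;        // build-time configuration
bool flag_turbo_decompression_elimination = true;   // runtime --flag

enum class Rep { kTagged, kCompressed };

Rep RepCompressedTagged() {
  if (kCompressPointersBool && flag_turbo_decompression_elimination) {
    return Rep::kCompressed;
  } else {
    return Rep::kTagged;
  }
}

int main() {
  std::printf("%d\n", static_cast<int>(RepCompressedTagged()));  // compressed
  flag_turbo_decompression_elimination = false;
  std::printf("%d\n", static_cast<int>(RepCompressedTagged()));  // tagged
}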
diff --git a/deps/v8/src/codegen/mips/assembler-mips-inl.h b/deps/v8/src/codegen/mips/assembler-mips-inl.h
index d8181ad8f5..53e6f93411 100644
--- a/deps/v8/src/codegen/mips/assembler-mips-inl.h
+++ b/deps/v8/src/codegen/mips/assembler-mips-inl.h
@@ -133,7 +133,7 @@ void Assembler::set_target_internal_reference_encoded_at(Address pc,
if (Assembler::IsJicOrJialc(instr2)) {
// Encoded internal references are lui/jic load of 32-bit absolute address.
uint32_t lui_offset_u, jic_offset_u;
- Assembler::UnpackTargetAddressUnsigned(imm, lui_offset_u, jic_offset_u);
+ Assembler::UnpackTargetAddressUnsigned(imm, &lui_offset_u, &jic_offset_u);
Assembler::instr_at_put(pc + 0 * kInstrSize, instr1 | lui_offset_u);
Assembler::instr_at_put(pc + 1 * kInstrSize, instr2 | jic_offset_u);
@@ -183,7 +183,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target,
DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
icache_flush_mode);
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) {
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
+ !FLAG_disable_write_barriers) {
WriteBarrierForCode(host(), this, target);
}
}
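Like the ia32 hunk earlier, the mips RelocInfo::set_target_object() now also skips the write barrier when FLAG_disable_write_barriers is set. The combined condition, written as a stand-alone predicate for illustration; the flag and parameter types are stand-ins.

#include <cstdio>

enum WriteBarrierMode { SKIP_WRITE_BARRIER, UPDATE_WRITE_BARRIER };
bool flag_disable_write_barriers = false;  // stand-in for the V8 flag

bool ShouldEmitWriteBarrier(WriteBarrierMode mode, bool host_is_null) {
  // Skip when the caller asked to skip, when there is no host code object,
  // or when write barriers are disabled via the flag.
  return mode == UPDATE_WRITE_BARRIER && !host_is_null &&
         !flag_disable_write_barriers;
}

int main() {
  std::printf("%d\n", ShouldEmitWriteBarrier(UPDATE_WRITE_BARRIER, false));  // 1
  flag_disable_write_barriers = true;
  std::printf("%d\n", ShouldEmitWriteBarrier(UPDATE_WRITE_BARRIER, false));  // 0
}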
diff --git a/deps/v8/src/codegen/mips/assembler-mips.cc b/deps/v8/src/codegen/mips/assembler-mips.cc
index 423da2fb65..768b16b86c 100644
--- a/deps/v8/src/codegen/mips/assembler-mips.cc
+++ b/deps/v8/src/codegen/mips/assembler-mips.cc
@@ -231,8 +231,8 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
Handle<HeapObject> object;
switch (request.kind()) {
case HeapObjectRequest::kHeapNumber:
- object = isolate->factory()->NewHeapNumber(request.heap_number(),
- AllocationType::kOld);
+ object = isolate->factory()->NewHeapNumber<AllocationType::kOld>(
+ request.heap_number());
break;
case HeapObjectRequest::kStringConstant:
const StringConstantBase* str = request.string();
@@ -742,27 +742,27 @@ uint32_t Assembler::CreateTargetAddress(Instr instr_lui, Instr instr_jic) {
// before that addition, difference between upper part of the target address and
// upper part of the sign-extended offset (0xFFFF or 0x0000), will be inserted
// in jic register with lui instruction.
-void Assembler::UnpackTargetAddress(uint32_t address, int16_t& lui_offset,
- int16_t& jic_offset) {
- lui_offset = (address & kHiMask) >> kLuiShift;
- jic_offset = address & kLoMask;
+void Assembler::UnpackTargetAddress(uint32_t address, int16_t* lui_offset,
+ int16_t* jic_offset) {
+ *lui_offset = (address & kHiMask) >> kLuiShift;
+ *jic_offset = address & kLoMask;
- if (jic_offset < 0) {
- lui_offset -= kImm16Mask;
+ if (*jic_offset < 0) {
+ *lui_offset -= kImm16Mask;
}
}
void Assembler::UnpackTargetAddressUnsigned(uint32_t address,
- uint32_t& lui_offset,
- uint32_t& jic_offset) {
+ uint32_t* lui_offset,
+ uint32_t* jic_offset) {
int16_t lui_offset16 = (address & kHiMask) >> kLuiShift;
int16_t jic_offset16 = address & kLoMask;
if (jic_offset16 < 0) {
lui_offset16 -= kImm16Mask;
}
- lui_offset = static_cast<uint32_t>(lui_offset16) & kImm16Mask;
- jic_offset = static_cast<uint32_t>(jic_offset16) & kImm16Mask;
+ *lui_offset = static_cast<uint32_t>(lui_offset16) & kImm16Mask;
+ *jic_offset = static_cast<uint32_t>(jic_offset16) & kImm16Mask;
}
void Assembler::PatchLuiOriImmediate(int pc, int32_t imm, Instr instr_lui,
@@ -977,7 +977,7 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos,
if (IsJicOrJialc(instr2)) {
uint32_t lui_offset_u, jic_offset_u;
- UnpackTargetAddressUnsigned(imm, lui_offset_u, jic_offset_u);
+ UnpackTargetAddressUnsigned(imm, &lui_offset_u, &jic_offset_u);
instr_at_put(pos + 0 * kInstrSize, instr1 | lui_offset_u);
instr_at_put(pos + 1 * kInstrSize, instr2 | jic_offset_u);
} else {
@@ -1928,7 +1928,7 @@ void Assembler::lsa(Register rd, Register rt, Register rs, uint8_t sa) {
// ------------Memory-instructions-------------
-void Assembler::AdjustBaseAndOffset(MemOperand& src,
+void Assembler::AdjustBaseAndOffset(MemOperand* src,
OffsetAccessType access_type,
int second_access_add_to_offset) {
// This method is used to adjust the base register and offset pair
@@ -1941,26 +1941,26 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
// pointer register).
// We preserve the "alignment" of 'offset' by adjusting it by a multiple of 8.
- bool doubleword_aligned = (src.offset() & (kDoubleSize - 1)) == 0;
+ bool doubleword_aligned = (src->offset() & (kDoubleSize - 1)) == 0;
bool two_accesses = static_cast<bool>(access_type) || !doubleword_aligned;
DCHECK_LE(second_access_add_to_offset, 7); // Must be <= 7.
// is_int16 must be passed a signed value, hence the static cast below.
- if (is_int16(src.offset()) &&
+ if (is_int16(src->offset()) &&
(!two_accesses || is_int16(static_cast<int32_t>(
- src.offset() + second_access_add_to_offset)))) {
+ src->offset() + second_access_add_to_offset)))) {
// Nothing to do: 'offset' (and, if needed, 'offset + 4', or other specified
// value) fits into int16_t.
return;
}
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(src.rm() != scratch); // Must not overwrite the register 'base'
- // while loading 'offset'.
+ DCHECK(src->rm() != scratch); // Must not overwrite the register 'base'
+ // while loading 'offset'.
#ifdef DEBUG
// Remember the "(mis)alignment" of 'offset', it will be checked at the end.
- uint32_t misalignment = src.offset() & (kDoubleSize - 1);
+ uint32_t misalignment = src->offset() & (kDoubleSize - 1);
#endif
// Do not load the whole 32-bit 'offset' if it can be represented as
@@ -1972,13 +1972,13 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
0x7FF8; // Max int16_t that's a multiple of 8.
constexpr int32_t kMaxOffsetForSimpleAdjustment =
2 * kMinOffsetForSimpleAdjustment;
- if (0 <= src.offset() && src.offset() <= kMaxOffsetForSimpleAdjustment) {
- addiu(at, src.rm(), kMinOffsetForSimpleAdjustment);
- src.offset_ -= kMinOffsetForSimpleAdjustment;
- } else if (-kMaxOffsetForSimpleAdjustment <= src.offset() &&
- src.offset() < 0) {
- addiu(at, src.rm(), -kMinOffsetForSimpleAdjustment);
- src.offset_ += kMinOffsetForSimpleAdjustment;
+ if (0 <= src->offset() && src->offset() <= kMaxOffsetForSimpleAdjustment) {
+ addiu(at, src->rm(), kMinOffsetForSimpleAdjustment);
+ src->offset_ -= kMinOffsetForSimpleAdjustment;
+ } else if (-kMaxOffsetForSimpleAdjustment <= src->offset() &&
+ src->offset() < 0) {
+ addiu(at, src->rm(), -kMinOffsetForSimpleAdjustment);
+ src->offset_ += kMinOffsetForSimpleAdjustment;
} else if (IsMipsArchVariant(kMips32r6)) {
// On r6 take advantage of the aui instruction, e.g.:
// aui at, base, offset_high
@@ -1989,12 +1989,12 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
// addiu at, at, 8
// lw reg_lo, (offset_low-8)(at)
// lw reg_hi, (offset_low-4)(at)
- int16_t offset_high = static_cast<uint16_t>(src.offset() >> 16);
- int16_t offset_low = static_cast<uint16_t>(src.offset());
+ int16_t offset_high = static_cast<uint16_t>(src->offset() >> 16);
+ int16_t offset_low = static_cast<uint16_t>(src->offset());
offset_high += (offset_low < 0)
? 1
: 0; // Account for offset sign extension in load/store.
- aui(scratch, src.rm(), static_cast<uint16_t>(offset_high));
+ aui(scratch, src->rm(), static_cast<uint16_t>(offset_high));
if (two_accesses && !is_int16(static_cast<int32_t>(
offset_low + second_access_add_to_offset))) {
// Avoid overflow in the 16-bit offset of the load/store instruction when
@@ -2002,7 +2002,7 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
addiu(scratch, scratch, kDoubleSize);
offset_low -= kDoubleSize;
}
- src.offset_ = offset_low;
+ src->offset_ = offset_low;
} else {
// Do not load the whole 32-bit 'offset' if it can be represented as
// a sum of three 16-bit signed offsets. This can save an instruction.
@@ -2013,62 +2013,62 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
2 * kMinOffsetForSimpleAdjustment;
constexpr int32_t kMaxOffsetForMediumAdjustment =
3 * kMinOffsetForSimpleAdjustment;
- if (0 <= src.offset() && src.offset() <= kMaxOffsetForMediumAdjustment) {
- addiu(scratch, src.rm(), kMinOffsetForMediumAdjustment / 2);
+ if (0 <= src->offset() && src->offset() <= kMaxOffsetForMediumAdjustment) {
+ addiu(scratch, src->rm(), kMinOffsetForMediumAdjustment / 2);
addiu(scratch, scratch, kMinOffsetForMediumAdjustment / 2);
- src.offset_ -= kMinOffsetForMediumAdjustment;
- } else if (-kMaxOffsetForMediumAdjustment <= src.offset() &&
- src.offset() < 0) {
- addiu(scratch, src.rm(), -kMinOffsetForMediumAdjustment / 2);
+ src->offset_ -= kMinOffsetForMediumAdjustment;
+ } else if (-kMaxOffsetForMediumAdjustment <= src->offset() &&
+ src->offset() < 0) {
+ addiu(scratch, src->rm(), -kMinOffsetForMediumAdjustment / 2);
addiu(scratch, scratch, -kMinOffsetForMediumAdjustment / 2);
- src.offset_ += kMinOffsetForMediumAdjustment;
+ src->offset_ += kMinOffsetForMediumAdjustment;
} else {
// Now that all shorter options have been exhausted, load the full 32-bit
// offset.
- int32_t loaded_offset = RoundDown(src.offset(), kDoubleSize);
+ int32_t loaded_offset = RoundDown(src->offset(), kDoubleSize);
lui(scratch, (loaded_offset >> kLuiShift) & kImm16Mask);
ori(scratch, scratch, loaded_offset & kImm16Mask); // Load 32-bit offset.
- addu(scratch, scratch, src.rm());
- src.offset_ -= loaded_offset;
+ addu(scratch, scratch, src->rm());
+ src->offset_ -= loaded_offset;
}
}
- src.rm_ = scratch;
+ src->rm_ = scratch;
- DCHECK(is_int16(src.offset()));
+ DCHECK(is_int16(src->offset()));
if (two_accesses) {
DCHECK(is_int16(
- static_cast<int32_t>(src.offset() + second_access_add_to_offset)));
+ static_cast<int32_t>(src->offset() + second_access_add_to_offset)));
}
- DCHECK(misalignment == (src.offset() & (kDoubleSize - 1)));
+ DCHECK(misalignment == (src->offset() & (kDoubleSize - 1)));
}
void Assembler::lb(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
GenInstrImmediate(LB, source.rm(), rd, source.offset());
}
void Assembler::lbu(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
GenInstrImmediate(LBU, source.rm(), rd, source.offset());
}
void Assembler::lh(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
GenInstrImmediate(LH, source.rm(), rd, source.offset());
}
void Assembler::lhu(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
GenInstrImmediate(LHU, source.rm(), rd, source.offset());
}
void Assembler::lw(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
GenInstrImmediate(LW, source.rm(), rd, source.offset());
}
@@ -2088,19 +2088,19 @@ void Assembler::lwr(Register rd, const MemOperand& rs) {
void Assembler::sb(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
GenInstrImmediate(SB, source.rm(), rd, source.offset());
}
void Assembler::sh(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
GenInstrImmediate(SH, source.rm(), rd, source.offset());
}
void Assembler::sw(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
GenInstrImmediate(SW, source.rm(), rd, source.offset());
}
@@ -2385,13 +2385,13 @@ void Assembler::seb(Register rd, Register rt) {
// Load, store, move.
void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
MemOperand tmp = src;
- AdjustBaseAndOffset(tmp);
+ AdjustBaseAndOffset(&tmp);
GenInstrImmediate(LWC1, tmp.rm(), fd, tmp.offset());
}
void Assembler::swc1(FPURegister fd, const MemOperand& src) {
MemOperand tmp = src;
- AdjustBaseAndOffset(tmp);
+ AdjustBaseAndOffset(&tmp);
GenInstrImmediate(SWC1, tmp.rm(), fd, tmp.offset());
}
@@ -2969,7 +2969,7 @@ MSA_BRANCH_LIST(MSA_BRANCH)
#define MSA_LD_ST(name, opcode) \
void Assembler::name(MSARegister wd, const MemOperand& rs) { \
MemOperand source = rs; \
- AdjustBaseAndOffset(source); \
+ AdjustBaseAndOffset(&source); \
if (is_int10(source.offset())) { \
GenInstrMsaMI10(opcode, source.offset(), source.rm(), wd); \
} else { \
@@ -3473,7 +3473,8 @@ int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
if (IsJicOrJialc(instr2)) {
uint32_t lui_offset_u, jic_offset_u;
- Assembler::UnpackTargetAddressUnsigned(imm, lui_offset_u, jic_offset_u);
+ Assembler::UnpackTargetAddressUnsigned(imm,
+ &lui_offset_u, &jic_offset_u);
instr_at_put(pc + 0 * kInstrSize, instr1 | lui_offset_u);
instr_at_put(pc + 1 * kInstrSize, instr2 | jic_offset_u);
} else {
@@ -3717,7 +3718,7 @@ void Assembler::set_target_value_at(Address pc, uint32_t target,
if (IsJicOrJialc(instr2)) {
// Must use 2 instructions to ensure patchable code => use lui and jic
uint32_t lui_offset, jic_offset;
- Assembler::UnpackTargetAddressUnsigned(target, lui_offset, jic_offset);
+ Assembler::UnpackTargetAddressUnsigned(target, &lui_offset, &jic_offset);
instr1 &= ~kImm16Mask;
instr2 &= ~kImm16Mask;
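UnpackTargetAddressUnsigned now also returns its two halves through pointers. The helper splits a 32-bit target into a high half for lui/aui and a low half for jic/jialc; since jic sign-extends its 16-bit immediate, the high half must be bumped whenever bit 15 of the low half is set. A standalone sketch of that split (a reading of the pattern, not the V8 implementation):

#include <cassert>
#include <cstdint>

void UnpackTargetAddressSketch(uint32_t address, uint32_t* lui_offset,
                               uint32_t* jic_offset) {
  uint32_t low = address & 0xFFFF;
  uint32_t high = address >> 16;
  if (low & 0x8000) high += 1;  // compensate for jic's sign extension
  *lui_offset = high & 0xFFFF;
  *jic_offset = low;
}

int main() {
  uint32_t lui = 0, jic = 0;
  UnpackTargetAddressSketch(0x1234ABCDu, &lui, &jic);
  assert(lui == 0x1235 && jic == 0xABCD);
  // Reassemble: lui places the high half, jic adds a sign-extended low half.
  int32_t jic_signed = (jic & 0x8000) ? static_cast<int32_t>(jic) - 0x10000
                                      : static_cast<int32_t>(jic);
  assert((lui << 16) + jic_signed == 0x1234ABCDu);
  return 0;
}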
diff --git a/deps/v8/src/codegen/mips/assembler-mips.h b/deps/v8/src/codegen/mips/assembler-mips.h
index 0359be2c94..d8cb8ec3f2 100644
--- a/deps/v8/src/codegen/mips/assembler-mips.h
+++ b/deps/v8/src/codegen/mips/assembler-mips.h
@@ -36,6 +36,7 @@
#define V8_CODEGEN_MIPS_ASSEMBLER_MIPS_H_
#include <stdio.h>
+#include <memory>
#include <set>
@@ -1478,13 +1479,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static bool IsAddImmediate(Instr instr);
static Instr SetAddImmediateOffset(Instr instr, int16_t offset);
static uint32_t CreateTargetAddress(Instr instr_lui, Instr instr_jic);
- static void UnpackTargetAddress(
- uint32_t address, int16_t& lui_offset, // NOLINT(runtime/references)
- int16_t& jic_offset); // NOLINT(runtime/references)
- static void UnpackTargetAddressUnsigned(
- uint32_t address,
- uint32_t& lui_offset, // NOLINT(runtime/references)
- uint32_t& jic_offset); // NOLINT(runtime/references)
+ static void UnpackTargetAddress(uint32_t address, int16_t* lui_offset,
+ int16_t* jic_offset);
+ static void UnpackTargetAddressUnsigned(uint32_t address,
+ uint32_t* lui_offset,
+ uint32_t* jic_offset);
static bool IsAndImmediate(Instr instr);
static bool IsEmittedConstant(Instr instr);
@@ -1515,7 +1514,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Helper function for memory load/store using base register and offset.
void AdjustBaseAndOffset(
- MemOperand& src, // NOLINT(runtime/references)
+ MemOperand* src,
OffsetAccessType access_type = OffsetAccessType::SINGLE_ACCESS,
int second_access_add_to_offset = 4);
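The defaulted access-type and add-to-offset parameters are unchanged; only the operand becomes a pointer, so single-access callers keep the short form and two-access callers spell out the extras. A self-contained toy with the same signature shape (sketch types and a placeholder body, not the V8 header):

#include <cassert>

enum class AccessSketch { SINGLE_ACCESS, TWO_ACCESSES };

// Pointer out-parameter first, then two defaulted knobs, as in the
// declaration above.
void AdjustSketch(int* offset,
                  AccessSketch access = AccessSketch::SINGLE_ACCESS,
                  int second_access_add_to_offset = 4) {
  int reach =
      (access == AccessSketch::TWO_ACCESSES) ? second_access_add_to_offset : 0;
  if (*offset + reach > 32767) *offset = 0;  // placeholder "rebase"
}

int main() {
  int offset = 32766;
  AdjustSketch(&offset);                                 // defaults apply
  assert(offset == 32766);
  AdjustSketch(&offset, AccessSketch::TWO_ACCESSES, 3);  // 32766 + 3 overflows
  assert(offset == 0);
  return 0;
}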
diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.cc b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
index 2e4698a9e7..760d33d7c9 100644
--- a/deps/v8/src/codegen/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
@@ -1063,7 +1063,7 @@ void TurboAssembler::Ulw(Register rd, const MemOperand& rs) {
DCHECK(kMipsLwrOffset <= 3 && kMipsLwlOffset <= 3);
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 3 fits into int16_t.
- AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 3);
+ AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 3);
if (rd != source.rm()) {
lwr(rd, MemOperand(source.rm(), source.offset() + kMipsLwrOffset));
lwl(rd, MemOperand(source.rm(), source.offset() + kMipsLwlOffset));
@@ -1089,7 +1089,7 @@ void TurboAssembler::Usw(Register rd, const MemOperand& rs) {
DCHECK(kMipsSwrOffset <= 3 && kMipsSwlOffset <= 3);
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 3 fits into int16_t.
- AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 3);
+ AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 3);
swr(rd, MemOperand(source.rm(), source.offset() + kMipsSwrOffset));
swl(rd, MemOperand(source.rm(), source.offset() + kMipsSwlOffset));
}
@@ -1105,7 +1105,7 @@ void TurboAssembler::Ulh(Register rd, const MemOperand& rs) {
IsMipsArchVariant(kLoongson));
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 1 fits into int16_t.
- AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1);
+ AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 1);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
if (source.rm() == scratch) {
@@ -1140,7 +1140,7 @@ void TurboAssembler::Ulhu(Register rd, const MemOperand& rs) {
IsMipsArchVariant(kLoongson));
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 1 fits into int16_t.
- AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1);
+ AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 1);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
if (source.rm() == scratch) {
@@ -1177,7 +1177,7 @@ void TurboAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
IsMipsArchVariant(kLoongson));
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 1 fits into int16_t.
- AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1);
+ AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 1);
if (scratch != rd) {
mov(scratch, rd);
@@ -1256,7 +1256,7 @@ void TurboAssembler::Ldc1(FPURegister fd, const MemOperand& src) {
BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(Register::kMantissaOffset <= 4 && Register::kExponentOffset <= 4);
MemOperand tmp = src;
- AdjustBaseAndOffset(tmp, OffsetAccessType::TWO_ACCESSES);
+ AdjustBaseAndOffset(&tmp, OffsetAccessType::TWO_ACCESSES);
lwc1(fd, MemOperand(tmp.rm(), tmp.offset() + Register::kMantissaOffset));
if (IsFp32Mode()) { // fp32 mode.
FPURegister nextfpreg = FPURegister::from_code(fd.code() + 1);
@@ -1284,7 +1284,7 @@ void TurboAssembler::Sdc1(FPURegister fd, const MemOperand& src) {
BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(Register::kMantissaOffset <= 4 && Register::kExponentOffset <= 4);
MemOperand tmp = src;
- AdjustBaseAndOffset(tmp, OffsetAccessType::TWO_ACCESSES);
+ AdjustBaseAndOffset(&tmp, OffsetAccessType::TWO_ACCESSES);
swc1(fd, MemOperand(tmp.rm(), tmp.offset() + Register::kMantissaOffset));
if (IsFp32Mode()) { // fp32 mode.
FPURegister nextfpreg = FPURegister::from_code(fd.code() + 1);
@@ -1305,13 +1305,13 @@ void TurboAssembler::Sdc1(FPURegister fd, const MemOperand& src) {
void TurboAssembler::Lw(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
lw(rd, source);
}
void TurboAssembler::Sw(Register rd, const MemOperand& rs) {
MemOperand dest = rs;
- AdjustBaseAndOffset(dest);
+ AdjustBaseAndOffset(&dest);
sw(rd, dest);
}
@@ -2926,18 +2926,18 @@ Register TurboAssembler::GetRtAsRegisterHelper(const Operand& rt,
return r2;
}
-bool TurboAssembler::CalculateOffset(Label* L, int32_t& offset,
+bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset,
OffsetSize bits) {
if (!is_near(L, bits)) return false;
- offset = GetOffset(offset, L, bits);
+ *offset = GetOffset(*offset, L, bits);
return true;
}
-bool TurboAssembler::CalculateOffset(Label* L, int32_t& offset, OffsetSize bits,
- Register& scratch, const Operand& rt) {
+bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
+ Register* scratch, const Operand& rt) {
if (!is_near(L, bits)) return false;
- scratch = GetRtAsRegisterHelper(rt, scratch);
- offset = GetOffset(offset, L, bits);
+ *scratch = GetRtAsRegisterHelper(rt, *scratch);
+ *offset = GetOffset(*offset, L, bits);
return true;
}
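CalculateOffset follows the same out-parameter cleanup: it returns false and leaves the outputs alone when the label is not near enough for the requested branch width, and writes through the pointers otherwise. A minimal standalone model of that contract (it ignores V8's instruction-granular offsets and scratch handling):

#include <cstdint>
#include <iostream>

// Returns false and leaves *offset unchanged when the target is too far for a
// branch with a (bits)-wide signed offset field.
bool CalculateOffsetSketch(int64_t target_pc, int64_t current_pc, int bits,
                           int32_t* offset) {
  int64_t delta = target_pc - current_pc;
  int64_t limit = int64_t{1} << (bits - 1);
  if (delta < -limit || delta >= limit) return false;  // not near enough
  *offset = static_cast<int32_t>(delta);               // write through pointer
  return true;
}

int main() {
  int32_t offset = 0;
  if (CalculateOffsetSketch(0x1000, 0x0400, 16, &offset)) {
    std::cout << "short branch, offset " << offset << "\n";
  } else {
    std::cout << "fall back to a long branch sequence\n";
  }
  return 0;
}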
@@ -2955,23 +2955,23 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
BlockTrampolinePoolScope block_trampoline_pool(this);
switch (cond) {
case cc_always:
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
bc(offset);
break;
case eq:
if (rt.is_reg() && rs.code() == rt.rm().code()) {
// Pre R6 beq is used here to make the code patchable. Otherwise bc
// should be used which has no condition field so is not patchable.
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
beq(rs, scratch, offset);
nop();
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
beqzc(rs, offset);
} else {
// We don't want any other register but scratch clobbered.
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
beqc(rs, scratch, offset);
}
@@ -2980,16 +2980,16 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
if (rt.is_reg() && rs.code() == rt.rm().code()) {
// Pre R6 bne is used here to make the code patchable. Otherwise we
// should not generate any instruction.
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
bne(rs, scratch, offset);
nop();
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
bnezc(rs, offset);
} else {
// We don't want any other register but scratch clobbered.
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
bnec(rs, scratch, offset);
}
@@ -3001,14 +3001,14 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
if (rt.is_reg() && rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
bltzc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
bgtzc(rs, offset);
} else {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
DCHECK(rs != scratch);
bltc(scratch, rs, offset);
@@ -3017,17 +3017,17 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
case greater_equal:
// rs >= rt
if (rt.is_reg() && rs.code() == rt.rm().code()) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
bc(offset);
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
blezc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
bgezc(rs, offset);
} else {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
DCHECK(rs != scratch);
bgec(rs, scratch, offset);
@@ -3038,14 +3038,14 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
if (rt.is_reg() && rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
bgtzc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
bltzc(rs, offset);
} else {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
DCHECK(rs != scratch);
bltc(rs, scratch, offset);
@@ -3054,17 +3054,17 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
case less_equal:
// rs <= rt
if (rt.is_reg() && rs.code() == rt.rm().code()) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
bc(offset);
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
bgezc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
blezc(rs, offset);
} else {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
DCHECK(rs != scratch);
bgec(scratch, rs, offset);
@@ -3077,14 +3077,14 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
if (rt.is_reg() && rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset21, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21, &scratch, rt))
return false;
bnezc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
bnezc(rs, offset);
} else {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
DCHECK(rs != scratch);
bltuc(scratch, rs, offset);
@@ -3093,17 +3093,17 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
case Ugreater_equal:
// rs >= rt
if (rt.is_reg() && rs.code() == rt.rm().code()) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
bc(offset);
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset21, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21, &scratch, rt))
return false;
beqzc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
bc(offset);
} else {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
DCHECK(rs != scratch);
bgeuc(rs, scratch, offset);
@@ -3114,13 +3114,13 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
if (rt.is_reg() && rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset21, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21, &scratch, rt))
return false;
bnezc(scratch, offset);
} else if (IsZero(rt)) {
break; // No code needs to be emitted.
} else {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
DCHECK(rs != scratch);
bltuc(rs, scratch, offset);
@@ -3129,17 +3129,17 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
case Uless_equal:
// rs <= rt
if (rt.is_reg() && rs.code() == rt.rm().code()) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
bc(offset);
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26, &scratch, rt))
return false;
bc(offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
beqzc(rs, offset);
} else {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
DCHECK(rs != scratch);
bgeuc(scratch, rs, offset);
@@ -3418,7 +3418,7 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
DCHECK((cond == cc_always && is_int26(offset)) || is_int16(offset));
switch (cond) {
case cc_always:
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
balc(offset);
break;
case eq:
@@ -3440,11 +3440,11 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
if (rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
bltzalc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
bgtzalc(rs, offset);
} else {
if (!is_near(L, bits)) return false;
@@ -3456,14 +3456,14 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
case greater_equal:
// rs >= rt
if (rs.code() == rt.rm().code()) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
balc(offset);
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
blezalc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
bgezalc(rs, offset);
} else {
if (!is_near(L, bits)) return false;
@@ -3477,11 +3477,11 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
if (rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
bgtzalc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
bltzalc(rs, offset);
} else {
if (!is_near(L, bits)) return false;
@@ -3493,14 +3493,14 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
case less_equal:
// rs <= r2
if (rs.code() == rt.rm().code()) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
balc(offset);
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
bgezalc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
blezalc(rs, offset);
} else {
if (!is_near(L, bits)) return false;
@@ -3751,8 +3751,8 @@ void TurboAssembler::Jump(Register target, const Operand& offset,
if (IsMipsArchVariant(kMips32r6) && bd == PROTECT &&
!is_int16(offset.immediate())) {
uint32_t aui_offset, jic_offset;
- Assembler::UnpackTargetAddressUnsigned(offset.immediate(), aui_offset,
- jic_offset);
+ Assembler::UnpackTargetAddressUnsigned(offset.immediate(), &aui_offset,
+ &jic_offset);
RecordRelocInfo(RelocInfo::EXTERNAL_REFERENCE, offset.immediate());
aui(target, target, aui_offset);
if (cond == cc_always) {
@@ -3790,7 +3790,7 @@ void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
// This is not an issue, t9 is expected to be clobbered anyway.
if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
uint32_t lui_offset, jic_offset;
- UnpackTargetAddressUnsigned(target, lui_offset, jic_offset);
+ UnpackTargetAddressUnsigned(target, &lui_offset, &jic_offset);
if (MustUseReg(rmode)) {
RecordRelocInfo(rmode, target);
}
@@ -3853,10 +3853,8 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
}
void TurboAssembler::Jump(const ExternalReference& reference) {
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- li(scratch, reference);
- Jump(scratch);
+ li(t9, reference);
+ Jump(t9);
}
void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
@@ -3940,7 +3938,7 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
int32_t target_int = static_cast<int32_t>(target);
if (IsMipsArchVariant(kMips32r6) && bd == PROTECT && cond == cc_always) {
uint32_t lui_offset, jialc_offset;
- UnpackTargetAddressUnsigned(target_int, lui_offset, jialc_offset);
+ UnpackTargetAddressUnsigned(target_int, &lui_offset, &jialc_offset);
if (MustUseReg(rmode)) {
RecordRelocInfo(rmode, target_int);
}
@@ -3990,7 +3988,6 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
}
}
DCHECK(RelocInfo::IsCodeTarget(rmode));
- AllowDeferredHandleDereference embedding_raw_address;
Call(code.address(), rmode, cond, rs, rt, bd);
}
diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.h b/deps/v8/src/codegen/mips/macro-assembler-mips.h
index d9c372f868..e82c88f0b5 100644
--- a/deps/v8/src/codegen/mips/macro-assembler-mips.h
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.h
@@ -849,12 +849,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void BranchShortMSA(MSABranchDF df, Label* target, MSABranchCondition cond,
MSARegister wt, BranchDelaySlot bd = PROTECT);
- bool CalculateOffset(Label* L, int32_t& offset, // NOLINT(runtime/references)
- OffsetSize bits);
- bool CalculateOffset(Label* L, int32_t& offset, // NOLINT(runtime/references)
- OffsetSize bits,
- Register& scratch, // NOLINT(runtime/references)
- const Operand& rt);
+ // TODO(mips) Reorder parameters so out parameters come last.
+ bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits);
+ bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
+ Register* scratch, const Operand& rt);
void BranchShortHelperR6(int32_t offset, Label* L);
void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot);
diff --git a/deps/v8/src/codegen/mips64/assembler-mips64-inl.h b/deps/v8/src/codegen/mips64/assembler-mips64-inl.h
index 7b9946d16e..cacdbd8f8b 100644
--- a/deps/v8/src/codegen/mips64/assembler-mips64-inl.h
+++ b/deps/v8/src/codegen/mips64/assembler-mips64-inl.h
@@ -159,7 +159,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target,
DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
icache_flush_mode);
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) {
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
+ !FLAG_disable_write_barriers) {
WriteBarrierForCode(host(), this, target);
}
}
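The added condition skips the code-object write barrier entirely when write barriers are disabled by flag. A schematic of the guard, with stand-in flag and helpers rather than V8 internals:

#include <iostream>

bool FLAG_disable_write_barriers_sketch = false;  // stand-in for the V8 flag

void WriteBarrierForCodeSketch() { std::cout << "record slot for GC\n"; }

// Mirrors the patched condition: barrier only when requested, when a host
// object exists, and when barriers have not been globally disabled.
void SetTargetObjectSketch(bool update_write_barrier, bool has_host) {
  if (update_write_barrier && has_host && !FLAG_disable_write_barriers_sketch) {
    WriteBarrierForCodeSketch();
  }
}

int main() {
  SetTargetObjectSketch(true, true);  // emits the barrier
  FLAG_disable_write_barriers_sketch = true;
  SetTargetObjectSketch(true, true);  // skipped under the flag
  return 0;
}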
diff --git a/deps/v8/src/codegen/mips64/assembler-mips64.cc b/deps/v8/src/codegen/mips64/assembler-mips64.cc
index 801faf6306..37a05585c4 100644
--- a/deps/v8/src/codegen/mips64/assembler-mips64.cc
+++ b/deps/v8/src/codegen/mips64/assembler-mips64.cc
@@ -207,8 +207,8 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
Handle<HeapObject> object;
switch (request.kind()) {
case HeapObjectRequest::kHeapNumber:
- object = isolate->factory()->NewHeapNumber(request.heap_number(),
- AllocationType::kOld);
+ object = isolate->factory()->NewHeapNumber<AllocationType::kOld>(
+ request.heap_number());
break;
case HeapObjectRequest::kStringConstant:
const StringConstantBase* str = request.string();
@@ -1996,7 +1996,7 @@ void Assembler::dlsa(Register rd, Register rt, Register rs, uint8_t sa) {
// ------------Memory-instructions-------------
-void Assembler::AdjustBaseAndOffset(MemOperand& src,
+void Assembler::AdjustBaseAndOffset(MemOperand* src,
OffsetAccessType access_type,
int second_access_add_to_offset) {
// This method is used to adjust the base register and offset pair
@@ -2009,25 +2009,25 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
// pointer register).
// We preserve the "alignment" of 'offset' by adjusting it by a multiple of 8.
- bool doubleword_aligned = (src.offset() & (kDoubleSize - 1)) == 0;
+ bool doubleword_aligned = (src->offset() & (kDoubleSize - 1)) == 0;
bool two_accesses = static_cast<bool>(access_type) || !doubleword_aligned;
DCHECK_LE(second_access_add_to_offset, 7); // Must be <= 7.
// is_int16 must be passed a signed value, hence the static cast below.
- if (is_int16(src.offset()) &&
+ if (is_int16(src->offset()) &&
(!two_accesses || is_int16(static_cast<int32_t>(
- src.offset() + second_access_add_to_offset)))) {
+ src->offset() + second_access_add_to_offset)))) {
// Nothing to do: 'offset' (and, if needed, 'offset + 4', or other specified
// value) fits into int16_t.
return;
}
- DCHECK(src.rm() !=
+ DCHECK(src->rm() !=
at); // Must not overwrite the register 'base' while loading 'offset'.
#ifdef DEBUG
// Remember the "(mis)alignment" of 'offset', it will be checked at the end.
- uint32_t misalignment = src.offset() & (kDoubleSize - 1);
+ uint32_t misalignment = src->offset() & (kDoubleSize - 1);
#endif
// Do not load the whole 32-bit 'offset' if it can be represented as
@@ -2042,13 +2042,13 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- if (0 <= src.offset() && src.offset() <= kMaxOffsetForSimpleAdjustment) {
- daddiu(scratch, src.rm(), kMinOffsetForSimpleAdjustment);
- src.offset_ -= kMinOffsetForSimpleAdjustment;
- } else if (-kMaxOffsetForSimpleAdjustment <= src.offset() &&
- src.offset() < 0) {
- daddiu(scratch, src.rm(), -kMinOffsetForSimpleAdjustment);
- src.offset_ += kMinOffsetForSimpleAdjustment;
+ if (0 <= src->offset() && src->offset() <= kMaxOffsetForSimpleAdjustment) {
+ daddiu(scratch, src->rm(), kMinOffsetForSimpleAdjustment);
+ src->offset_ -= kMinOffsetForSimpleAdjustment;
+ } else if (-kMaxOffsetForSimpleAdjustment <= src->offset() &&
+ src->offset() < 0) {
+ daddiu(scratch, src->rm(), -kMinOffsetForSimpleAdjustment);
+ src->offset_ += kMinOffsetForSimpleAdjustment;
} else if (kArchVariant == kMips64r6) {
// On r6 take advantage of the daui instruction, e.g.:
// daui at, base, offset_high
@@ -2060,9 +2060,9 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
// daddiu at, at, 8
// lw reg_lo, (offset_low-8)(at)
// lw reg_hi, (offset_low-4)(at)
- int16_t offset_low = static_cast<uint16_t>(src.offset());
+ int16_t offset_low = static_cast<uint16_t>(src->offset());
int32_t offset_low32 = offset_low;
- int16_t offset_high = static_cast<uint16_t>(src.offset() >> 16);
+ int16_t offset_high = static_cast<uint16_t>(src->offset() >> 16);
bool increment_hi16 = offset_low < 0;
bool overflow_hi16 = false;
@@ -2070,7 +2070,7 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
offset_high++;
overflow_hi16 = (offset_high == -32768);
}
- daui(scratch, src.rm(), static_cast<uint16_t>(offset_high));
+ daui(scratch, src->rm(), static_cast<uint16_t>(offset_high));
if (overflow_hi16) {
dahi(scratch, 1);
@@ -2084,7 +2084,7 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
offset_low32 -= kDoubleSize;
}
- src.offset_ = offset_low32;
+ src->offset_ = offset_low32;
} else {
// Do not load the whole 32-bit 'offset' if it can be represented as
// a sum of three 16-bit signed offsets. This can save an instruction.
@@ -2095,33 +2095,33 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
2 * kMinOffsetForSimpleAdjustment;
constexpr int32_t kMaxOffsetForMediumAdjustment =
3 * kMinOffsetForSimpleAdjustment;
- if (0 <= src.offset() && src.offset() <= kMaxOffsetForMediumAdjustment) {
- daddiu(scratch, src.rm(), kMinOffsetForMediumAdjustment / 2);
+ if (0 <= src->offset() && src->offset() <= kMaxOffsetForMediumAdjustment) {
+ daddiu(scratch, src->rm(), kMinOffsetForMediumAdjustment / 2);
daddiu(scratch, scratch, kMinOffsetForMediumAdjustment / 2);
- src.offset_ -= kMinOffsetForMediumAdjustment;
- } else if (-kMaxOffsetForMediumAdjustment <= src.offset() &&
- src.offset() < 0) {
- daddiu(scratch, src.rm(), -kMinOffsetForMediumAdjustment / 2);
+ src->offset_ -= kMinOffsetForMediumAdjustment;
+ } else if (-kMaxOffsetForMediumAdjustment <= src->offset() &&
+ src->offset() < 0) {
+ daddiu(scratch, src->rm(), -kMinOffsetForMediumAdjustment / 2);
daddiu(scratch, scratch, -kMinOffsetForMediumAdjustment / 2);
- src.offset_ += kMinOffsetForMediumAdjustment;
+ src->offset_ += kMinOffsetForMediumAdjustment;
} else {
// Now that all shorter options have been exhausted, load the full 32-bit
// offset.
- int32_t loaded_offset = RoundDown(src.offset(), kDoubleSize);
+ int32_t loaded_offset = RoundDown(src->offset(), kDoubleSize);
lui(scratch, (loaded_offset >> kLuiShift) & kImm16Mask);
ori(scratch, scratch, loaded_offset & kImm16Mask); // Load 32-bit offset.
- daddu(scratch, scratch, src.rm());
- src.offset_ -= loaded_offset;
+ daddu(scratch, scratch, src->rm());
+ src->offset_ -= loaded_offset;
}
}
- src.rm_ = scratch;
+ src->rm_ = scratch;
- DCHECK(is_int16(src.offset()));
+ DCHECK(is_int16(src->offset()));
if (two_accesses) {
DCHECK(is_int16(
- static_cast<int32_t>(src.offset() + second_access_add_to_offset)));
+ static_cast<int32_t>(src->offset() + second_access_add_to_offset)));
}
- DCHECK(misalignment == (src.offset() & (kDoubleSize - 1)));
+ DCHECK(misalignment == (src->offset() & (kDoubleSize - 1)));
}
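On r6 the function splits the 32-bit offset into a sign-extended low half for the load/store immediate and a high half for daui, bumping the high half when the low half is negative (and emitting dahi when that bump itself overflows 16 bits). A quick standalone check of that decomposition arithmetic, with the overflow and two-access corrections left out:

#include <cassert>
#include <cstdint>

int main() {
  int32_t offset = 0x12349000;  // does not fit in one 16-bit immediate

  // Low half, sign-extended the way the 16-bit displacement will be.
  int32_t offset_low = offset & 0xFFFF;
  if (offset_low >= 0x8000) offset_low -= 0x10000;  // here: -28672

  // High half for daui; bump it when the low half went negative.
  int32_t offset_high = (offset >> 16) & 0xFFFF;  // 0x1234
  if (offset_low < 0) offset_high += 1;           // 0x1235

  // daui adds offset_high << 16 to the base; the residual offset_low is what
  // the load/store instruction itself carries.
  assert((offset_high << 16) + offset_low == offset);
  assert(offset_low >= INT16_MIN && offset_low <= INT16_MAX);
  return 0;
}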
void Assembler::lb(Register rd, const MemOperand& rs) {
@@ -3169,7 +3169,7 @@ MSA_BRANCH_LIST(MSA_BRANCH)
#define MSA_LD_ST(name, opcode) \
void Assembler::name(MSARegister wd, const MemOperand& rs) { \
MemOperand source = rs; \
- AdjustBaseAndOffset(source); \
+ AdjustBaseAndOffset(&source); \
if (is_int10(source.offset())) { \
GenInstrMsaMI10(opcode, source.offset(), source.rm(), wd); \
} else { \
diff --git a/deps/v8/src/codegen/mips64/assembler-mips64.h b/deps/v8/src/codegen/mips64/assembler-mips64.h
index 9695aa6524..48733eebea 100644
--- a/deps/v8/src/codegen/mips64/assembler-mips64.h
+++ b/deps/v8/src/codegen/mips64/assembler-mips64.h
@@ -36,7 +36,7 @@
#define V8_CODEGEN_MIPS64_ASSEMBLER_MIPS64_H_
#include <stdio.h>
-
+#include <memory>
#include <set>
#include "src/codegen/assembler.h"
@@ -1560,7 +1560,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Helper function for memory load/store using base register and offset.
void AdjustBaseAndOffset(
- MemOperand& src, // NOLINT(runtime/references)
+ MemOperand* src,
OffsetAccessType access_type = OffsetAccessType::SINGLE_ACCESS,
int second_access_add_to_offset = 4);
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
index b353786064..2ea770d224 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
@@ -1166,7 +1166,7 @@ void TurboAssembler::Ulw(Register rd, const MemOperand& rs) {
DCHECK(kMipsLwrOffset <= 3 && kMipsLwlOffset <= 3);
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 3 fits into int16_t.
- AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 3);
+ AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 3);
if (rd != source.rm()) {
lwr(rd, MemOperand(source.rm(), source.offset() + kMipsLwrOffset));
lwl(rd, MemOperand(source.rm(), source.offset() + kMipsLwlOffset));
@@ -1201,7 +1201,7 @@ void TurboAssembler::Usw(Register rd, const MemOperand& rs) {
DCHECK(kMipsSwrOffset <= 3 && kMipsSwlOffset <= 3);
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 3 fits into int16_t.
- AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 3);
+ AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 3);
swr(rd, MemOperand(source.rm(), source.offset() + kMipsSwrOffset));
swl(rd, MemOperand(source.rm(), source.offset() + kMipsSwlOffset));
}
@@ -1216,7 +1216,7 @@ void TurboAssembler::Ulh(Register rd, const MemOperand& rs) {
DCHECK_EQ(kArchVariant, kMips64r2);
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 1 fits into int16_t.
- AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1);
+ AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 1);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
if (source.rm() == scratch) {
@@ -1250,7 +1250,7 @@ void TurboAssembler::Ulhu(Register rd, const MemOperand& rs) {
DCHECK_EQ(kArchVariant, kMips64r2);
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 1 fits into int16_t.
- AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1);
+ AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 1);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
if (source.rm() == scratch) {
@@ -1286,7 +1286,7 @@ void TurboAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
DCHECK_EQ(kArchVariant, kMips64r2);
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 1 fits into int16_t.
- AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1);
+ AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 1);
if (scratch != rd) {
mov(scratch, rd);
@@ -1314,7 +1314,7 @@ void TurboAssembler::Uld(Register rd, const MemOperand& rs) {
DCHECK(kMipsLdrOffset <= 7 && kMipsLdlOffset <= 7);
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 7 fits into int16_t.
- AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 7);
+ AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 7);
if (rd != source.rm()) {
ldr(rd, MemOperand(source.rm(), source.offset() + kMipsLdrOffset));
ldl(rd, MemOperand(source.rm(), source.offset() + kMipsLdlOffset));
@@ -1349,7 +1349,7 @@ void TurboAssembler::Usd(Register rd, const MemOperand& rs) {
DCHECK(kMipsSdrOffset <= 7 && kMipsSdlOffset <= 7);
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 7 fits into int16_t.
- AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 7);
+ AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 7);
sdr(rd, MemOperand(source.rm(), source.offset() + kMipsSdrOffset));
sdl(rd, MemOperand(source.rm(), source.offset() + kMipsSdlOffset));
}
@@ -1411,91 +1411,91 @@ void TurboAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
void TurboAssembler::Lb(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
lb(rd, source);
}
void TurboAssembler::Lbu(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
lbu(rd, source);
}
void TurboAssembler::Sb(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
sb(rd, source);
}
void TurboAssembler::Lh(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
lh(rd, source);
}
void TurboAssembler::Lhu(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
lhu(rd, source);
}
void TurboAssembler::Sh(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
sh(rd, source);
}
void TurboAssembler::Lw(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
lw(rd, source);
}
void TurboAssembler::Lwu(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
lwu(rd, source);
}
void TurboAssembler::Sw(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
sw(rd, source);
}
void TurboAssembler::Ld(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
ld(rd, source);
}
void TurboAssembler::Sd(Register rd, const MemOperand& rs) {
MemOperand source = rs;
- AdjustBaseAndOffset(source);
+ AdjustBaseAndOffset(&source);
sd(rd, source);
}
void TurboAssembler::Lwc1(FPURegister fd, const MemOperand& src) {
MemOperand tmp = src;
- AdjustBaseAndOffset(tmp);
+ AdjustBaseAndOffset(&tmp);
lwc1(fd, tmp);
}
void TurboAssembler::Swc1(FPURegister fs, const MemOperand& src) {
MemOperand tmp = src;
- AdjustBaseAndOffset(tmp);
+ AdjustBaseAndOffset(&tmp);
swc1(fs, tmp);
}
void TurboAssembler::Ldc1(FPURegister fd, const MemOperand& src) {
MemOperand tmp = src;
- AdjustBaseAndOffset(tmp);
+ AdjustBaseAndOffset(&tmp);
ldc1(fd, tmp);
}
void TurboAssembler::Sdc1(FPURegister fs, const MemOperand& src) {
MemOperand tmp = src;
- AdjustBaseAndOffset(tmp);
+ AdjustBaseAndOffset(&tmp);
sdc1(fs, tmp);
}
@@ -3362,18 +3362,18 @@ Register TurboAssembler::GetRtAsRegisterHelper(const Operand& rt,
return r2;
}
-bool TurboAssembler::CalculateOffset(Label* L, int32_t& offset,
+bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset,
OffsetSize bits) {
if (!is_near(L, bits)) return false;
- offset = GetOffset(offset, L, bits);
+ *offset = GetOffset(*offset, L, bits);
return true;
}
-bool TurboAssembler::CalculateOffset(Label* L, int32_t& offset, OffsetSize bits,
- Register& scratch, const Operand& rt) {
+bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
+ Register* scratch, const Operand& rt) {
if (!is_near(L, bits)) return false;
- scratch = GetRtAsRegisterHelper(rt, scratch);
- offset = GetOffset(offset, L, bits);
+ *scratch = GetRtAsRegisterHelper(rt, *scratch);
+ *offset = GetOffset(*offset, L, bits);
return true;
}
@@ -3392,23 +3392,23 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
BlockTrampolinePoolScope block_trampoline_pool(this);
switch (cond) {
case cc_always:
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
bc(offset);
break;
case eq:
if (rt.is_reg() && rs.code() == rt.rm().code()) {
// Pre R6 beq is used here to make the code patchable. Otherwise bc
// should be used which has no condition field so is not patchable.
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
beq(rs, scratch, offset);
nop();
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
beqzc(rs, offset);
} else {
// We don't want any other register but scratch clobbered.
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
beqc(rs, scratch, offset);
}
@@ -3417,16 +3417,16 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
if (rt.is_reg() && rs.code() == rt.rm().code()) {
// Pre R6 bne is used here to make the code patchable. Otherwise we
// should not generate any instruction.
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
bne(rs, scratch, offset);
nop();
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
bnezc(rs, offset);
} else {
// We don't want any other register but scratch clobbered.
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
bnec(rs, scratch, offset);
}
@@ -3438,14 +3438,14 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
if (rt.is_reg() && rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
bltzc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
bgtzc(rs, offset);
} else {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
DCHECK(rs != scratch);
bltc(scratch, rs, offset);
@@ -3454,17 +3454,17 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
case greater_equal:
// rs >= rt
if (rt.is_reg() && rs.code() == rt.rm().code()) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
bc(offset);
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
blezc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
bgezc(rs, offset);
} else {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
DCHECK(rs != scratch);
bgec(rs, scratch, offset);
@@ -3475,14 +3475,14 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
if (rt.is_reg() && rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
bgtzc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
bltzc(rs, offset);
} else {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
DCHECK(rs != scratch);
bltc(rs, scratch, offset);
@@ -3491,17 +3491,17 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
case less_equal:
// rs <= rt
if (rt.is_reg() && rs.code() == rt.rm().code()) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
bc(offset);
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
bgezc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
blezc(rs, offset);
} else {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
DCHECK(rs != scratch);
bgec(scratch, rs, offset);
@@ -3514,14 +3514,14 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
if (rt.is_reg() && rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset21, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21, &scratch, rt))
return false;
bnezc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
bnezc(rs, offset);
} else {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
DCHECK(rs != scratch);
bltuc(scratch, rs, offset);
@@ -3530,17 +3530,17 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
case Ugreater_equal:
// rs >= rt
if (rt.is_reg() && rs.code() == rt.rm().code()) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
bc(offset);
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset21, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21, &scratch, rt))
return false;
beqzc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
bc(offset);
} else {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
DCHECK(rs != scratch);
bgeuc(rs, scratch, offset);
@@ -3551,13 +3551,13 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
if (rt.is_reg() && rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset21, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21, &scratch, rt))
return false;
bnezc(scratch, offset);
} else if (IsZero(rt)) {
break; // No code needs to be emitted.
} else {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
DCHECK(rs != scratch);
bltuc(rs, scratch, offset);
@@ -3566,17 +3566,17 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
case Uless_equal:
// rs <= rt
if (rt.is_reg() && rs.code() == rt.rm().code()) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
bc(offset);
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26, &scratch, rt))
return false;
bc(offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
beqzc(rs, offset);
} else {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
DCHECK(rs != scratch);
bgeuc(scratch, rs, offset);
@@ -3858,7 +3858,7 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
DCHECK((cond == cc_always && is_int26(offset)) || is_int16(offset));
switch (cond) {
case cc_always:
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
balc(offset);
break;
case eq:
@@ -3880,11 +3880,11 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
if (rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
bltzalc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
bgtzalc(rs, offset);
} else {
if (!is_near(L, bits)) return false;
@@ -3896,14 +3896,14 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
case greater_equal:
// rs >= rt
if (rs.code() == rt.rm().code()) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
balc(offset);
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
blezalc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
bgezalc(rs, offset);
} else {
if (!is_near(L, bits)) return false;
@@ -3917,11 +3917,11 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
if (rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
bgtzalc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
bltzalc(rs, offset);
} else {
if (!is_near(L, bits)) return false;
@@ -3933,14 +3933,14 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
case less_equal:
// rs <= r2
if (rs.code() == rt.rm().code()) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
balc(offset);
} else if (rs == zero_reg) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
return false;
bgezalc(scratch, offset);
} else if (IsZero(rt)) {
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
blezalc(rs, offset);
} else {
if (!is_near(L, bits)) return false;
@@ -4202,10 +4202,8 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
}
void TurboAssembler::Jump(const ExternalReference& reference) {
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- li(scratch, reference);
- Jump(scratch);
+ li(t9, reference);
+ Jump(t9);
}
// Note: To call gcc-compiled C code on mips, you must call through t9.
@@ -4284,7 +4282,6 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
STATIC_ASSERT(kSystemPointerSize == 8);
- STATIC_ASSERT(kSmiShiftSize == 31);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
index c2b701a5af..886d64e494 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
@@ -850,12 +850,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CallCFunctionHelper(Register function, int num_reg_arguments,
int num_double_arguments);
- bool CalculateOffset(Label* L, int32_t& offset, // NOLINT(runtime/references)
- OffsetSize bits);
- bool CalculateOffset(Label* L, int32_t& offset, // NOLINT(runtime/references)
- OffsetSize bits,
- Register& scratch, // NOLINT(runtime/references)
- const Operand& rt);
+ // TODO(mips) Reorder parameters so out parameters come last.
+ bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits);
+ bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
+ Register* scratch, const Operand& rt);
void BranchShortHelperR6(int32_t offset, Label* L);
void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot);
diff --git a/deps/v8/src/codegen/optimized-compilation-info.cc b/deps/v8/src/codegen/optimized-compilation-info.cc
index 7dc94f39cd..de89371adb 100644
--- a/deps/v8/src/codegen/optimized-compilation-info.cc
+++ b/deps/v8/src/codegen/optimized-compilation-info.cc
@@ -111,15 +111,9 @@ OptimizedCompilationInfo::~OptimizedCompilationInfo() {
}
void OptimizedCompilationInfo::set_deferred_handles(
- std::shared_ptr<DeferredHandles> deferred_handles) {
+ std::unique_ptr<DeferredHandles> deferred_handles) {
DCHECK_NULL(deferred_handles_);
- deferred_handles_.swap(deferred_handles);
-}
-
-void OptimizedCompilationInfo::set_deferred_handles(
- DeferredHandles* deferred_handles) {
- DCHECK_NULL(deferred_handles_);
- deferred_handles_.reset(deferred_handles);
+ deferred_handles_ = std::move(deferred_handles);
}
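Replacing the shared_ptr (and the raw-pointer overload) with a single unique_ptr setter makes the single-owner hand-off explicit: the caller passes the smart pointer by value and the setter moves it into the member. A minimal sketch of that idiom with placeholder types, not the V8 classes:

#include <cassert>
#include <memory>
#include <utility>

struct DeferredHandlesSketch { int payload = 0; };

class CompilationInfoSketch {
 public:
  void set_deferred_handles(std::unique_ptr<DeferredHandlesSketch> handles) {
    assert(deferred_handles_ == nullptr);    // mirrors the DCHECK_NULL
    deferred_handles_ = std::move(handles);  // transfer ownership
  }

 private:
  std::unique_ptr<DeferredHandlesSketch> deferred_handles_;
};

int main() {
  CompilationInfoSketch info;
  auto handles = std::make_unique<DeferredHandlesSketch>();
  info.set_deferred_handles(std::move(handles));  // caller gives up ownership
  assert(handles == nullptr);                     // moved-from pointer is empty
  return 0;
}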
void OptimizedCompilationInfo::ReopenHandlesInNewHandleScope(Isolate* isolate) {
@@ -132,6 +126,7 @@ void OptimizedCompilationInfo::ReopenHandlesInNewHandleScope(Isolate* isolate) {
if (!closure_.is_null()) {
closure_ = Handle<JSFunction>(*closure_, isolate);
}
+ DCHECK(code_.is_null());
}
void OptimizedCompilationInfo::AbortOptimization(BailoutReason reason) {
diff --git a/deps/v8/src/codegen/optimized-compilation-info.h b/deps/v8/src/codegen/optimized-compilation-info.h
index 624517283e..2f3afafc68 100644
--- a/deps/v8/src/codegen/optimized-compilation-info.h
+++ b/deps/v8/src/codegen/optimized-compilation-info.h
@@ -231,11 +231,7 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
osr_frame_ = osr_frame;
}
- void set_deferred_handles(std::shared_ptr<DeferredHandles> deferred_handles);
- void set_deferred_handles(DeferredHandles* deferred_handles);
- std::shared_ptr<DeferredHandles> deferred_handles() {
- return deferred_handles_;
- }
+ void set_deferred_handles(std::unique_ptr<DeferredHandles> deferred_handles);
void ReopenHandlesInNewHandleScope(Isolate* isolate);
@@ -330,7 +326,7 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
// OptimizedCompilationInfo allocates.
Zone* zone_;
- std::shared_ptr<DeferredHandles> deferred_handles_;
+ std::unique_ptr<DeferredHandles> deferred_handles_;
BailoutReason bailout_reason_ = BailoutReason::kNoReason;
diff --git a/deps/v8/src/codegen/pending-optimization-table.cc b/deps/v8/src/codegen/pending-optimization-table.cc
index b7be9c7775..84e36fc843 100644
--- a/deps/v8/src/codegen/pending-optimization-table.cc
+++ b/deps/v8/src/codegen/pending-optimization-table.cc
@@ -83,7 +83,7 @@ void PendingOptimizationTable::MarkedForOptimization(
function->ShortPrint();
PrintF(
" should be prepared for optimization with "
- "%%PrepareFunctionForOptimize before "
+ "%%PrepareFunctionForOptimization before "
"%%OptimizeFunctionOnNextCall / %%OptimizeOSR ");
UNREACHABLE();
}
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc-inl.h b/deps/v8/src/codegen/ppc/assembler-ppc-inl.h
index 166b9d4423..c55a5a9c0b 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc-inl.h
+++ b/deps/v8/src/codegen/ppc/assembler-ppc-inl.h
@@ -144,7 +144,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target,
DCHECK(IsCodeTarget(rmode_) || rmode_ == FULL_EMBEDDED_OBJECT);
Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
icache_flush_mode);
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) {
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
+ !FLAG_disable_write_barriers) {
WriteBarrierForCode(host(), this, target);
}
}
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.cc b/deps/v8/src/codegen/ppc/assembler-ppc.cc
index 2a638af070..17a3aba1b2 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.cc
@@ -200,8 +200,8 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
Handle<HeapObject> object;
switch (request.kind()) {
case HeapObjectRequest::kHeapNumber: {
- object = isolate->factory()->NewHeapNumber(request.heap_number(),
- AllocationType::kOld);
+ object = isolate->factory()->NewHeapNumber<AllocationType::kOld>(
+ request.heap_number());
break;
}
case HeapObjectRequest::kStringConstant: {
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.h b/deps/v8/src/codegen/ppc/assembler-ppc.h
index dee264a75c..42eda72d4d 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.h
@@ -41,6 +41,7 @@
#define V8_CODEGEN_PPC_ASSEMBLER_PPC_H_
#include <stdio.h>
+#include <memory>
#include <vector>
#include "src/codegen/assembler.h"
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
index 4116206333..9e41dec2a8 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
@@ -1287,12 +1287,11 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
{
// Load receiver to pass it later to DebugOnFunctionCall hook.
if (actual.is_reg()) {
- mr(r7, actual.reg());
+ ShiftLeftImm(r7, actual.reg(), Operand(kPointerSizeLog2));
+ LoadPX(r7, MemOperand(sp, r7));
} else {
- mov(r7, Operand(actual.immediate()));
+ LoadP(r7, MemOperand(sp, actual.immediate() << kPointerSizeLog2), r0);
}
- ShiftLeftImm(r7, r7, Operand(kPointerSizeLog2));
- LoadPX(r7, MemOperand(sp, r7));
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
if (expected.is_reg()) {
@@ -2409,51 +2408,51 @@ void MacroAssembler::Xor(Register ra, Register rs, const Operand& rb,
void MacroAssembler::CmpSmiLiteral(Register src1, Smi smi, Register scratch,
CRegister cr) {
-#if V8_TARGET_ARCH_PPC64
+#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
+ Cmpi(src1, Operand(smi), scratch, cr);
+#else
LoadSmiLiteral(scratch, smi);
cmp(src1, scratch, cr);
-#else
- Cmpi(src1, Operand(smi), scratch, cr);
#endif
}
void MacroAssembler::CmplSmiLiteral(Register src1, Smi smi, Register scratch,
CRegister cr) {
-#if V8_TARGET_ARCH_PPC64
+#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
+ Cmpli(src1, Operand(smi), scratch, cr);
+#else
LoadSmiLiteral(scratch, smi);
cmpl(src1, scratch, cr);
-#else
- Cmpli(src1, Operand(smi), scratch, cr);
#endif
}
void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi smi,
Register scratch) {
-#if V8_TARGET_ARCH_PPC64
+#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
+ Add(dst, src, static_cast<intptr_t>(smi.ptr()), scratch);
+#else
LoadSmiLiteral(scratch, smi);
add(dst, src, scratch);
-#else
- Add(dst, src, reinterpret_cast<intptr_t>(smi), scratch);
#endif
}
void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi smi,
Register scratch) {
-#if V8_TARGET_ARCH_PPC64
+#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
+ Add(dst, src, -(static_cast<intptr_t>(smi.ptr())), scratch);
+#else
LoadSmiLiteral(scratch, smi);
sub(dst, src, scratch);
-#else
- Add(dst, src, -(reinterpret_cast<intptr_t>(smi)), scratch);
#endif
}
void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi smi,
Register scratch, RCBit rc) {
-#if V8_TARGET_ARCH_PPC64
+#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
+ And(dst, src, Operand(smi), rc);
+#else
LoadSmiLiteral(scratch, smi);
and_(dst, src, scratch, rc);
-#else
- And(dst, src, Operand(smi), rc);
#endif
}
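
The rewritten #if conditions above select the immediate path whenever Smis are 31-bit (pointer compression or V8_31BIT_SMIS_ON_64BIT_ARCH) and fall back to materializing the literal in a scratch register when Smis occupy the upper 32 bits. A minimal standalone sketch of those two layouts; the constants and helpers below are illustrative stand-ins, not code from this change:

// smi_layout_sketch.cc -- illustrative only; assumes a 64-bit target.
#include <cassert>
#include <cstdint>

// 31-bit Smis (pointer compression / V8_31BIT_SMIS_ON_64BIT_ARCH): the
// payload sits in bits 1..31, so a tagged Smi fits a 32-bit immediate and
// can be compared or added directly (the Cmpi/Add branch above).
constexpr int64_t TagSmi31(int32_t value) {
  return static_cast<int64_t>(value) << 1;  // kSmiTagSize == 1, kSmiShift == 1
}

// Full 64-bit Smis: the payload sits in the upper 32 bits, so the literal
// has to be materialized in a scratch register first (the LoadSmiLiteral branch).
constexpr int64_t TagSmi32(int32_t value) {
  return static_cast<int64_t>(value) << 32;  // kSmiShift == 32
}

int main() {
  assert(TagSmi31(7) == 14);                  // fits in an int32 immediate
  assert(TagSmi32(7) == (int64_t{7} << 32));  // needs a 64-bit register
  return 0;
}
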
@@ -2941,14 +2940,18 @@ void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
STATIC_ASSERT(kSystemPointerSize == 8);
- STATIC_ASSERT(kSmiShiftSize == 31);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
// The builtin_index register contains the builtin index as a Smi.
// Untagging is folded into the indexing operand below.
+#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
+ ShiftLeftImm(builtin_index, builtin_index,
+ Operand(kSystemPointerSizeLog2 - kSmiShift));
+#else
ShiftRightArithImm(builtin_index, builtin_index,
kSmiShift - kSystemPointerSizeLog2);
+#endif
addi(builtin_index, builtin_index,
Operand(IsolateData::builtin_entry_table_offset()));
LoadPX(builtin_index, MemOperand(kRootRegister, builtin_index));
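
LoadEntryFromBuiltinIndex above folds the Smi untag into the scaling shift before indexing the builtin entry table. A small arithmetic sketch of that folding, assuming 8-byte table slots and the two Smi shift widths; names and values are illustrative:

// builtin_index_sketch.cc -- illustrative arithmetic; constants are assumed.
#include <cassert>
#include <cstdint>

constexpr int kSystemPointerSizeLog2 = 3;  // 8-byte entry table slots

// Smi-tagged builtin index -> byte offset into the entry table, with the
// untag folded into the scaling shift, as in LoadEntryFromBuiltinIndex.
constexpr int64_t EntryTableOffset(int64_t tagged_index, int smi_shift) {
  return smi_shift < kSystemPointerSizeLog2
             ? tagged_index << (kSystemPointerSizeLog2 - smi_shift)
             : tagged_index >> (smi_shift - kSystemPointerSizeLog2);
}

int main() {
  // Builtin #5, tagged as a 31-bit Smi (shift 1) and as a full Smi (shift 32).
  assert(EntryTableOffset(int64_t{5} << 1, 1) == 5 * 8);
  assert(EntryTableOffset(int64_t{5} << 32, 32) == 5 * 8);
  return 0;
}
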
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
index fd4cb6014b..7ff5a6bb4b 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
@@ -876,12 +876,12 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
}
void SmiToPtrArrayOffset(Register dst, Register src) {
-#if V8_TARGET_ARCH_PPC64
- STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kPointerSizeLog2);
- ShiftRightArithImm(dst, src, kSmiShift - kPointerSizeLog2);
-#else
+#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kPointerSizeLog2);
ShiftLeftImm(dst, src, Operand(kPointerSizeLog2 - kSmiShift));
+#else
+ STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kPointerSizeLog2);
+ ShiftRightArithImm(dst, src, kSmiShift - kPointerSizeLog2);
#endif
}
@@ -895,7 +895,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void AssertNotSmi(Register object);
void AssertSmi(Register object);
-#if V8_TARGET_ARCH_PPC64
+#if !defined(V8_COMPRESS_POINTERS) && !defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
// Ensure it is permissible to read/write int value directly from
// upper half of the smi.
STATIC_ASSERT(kSmiTag == 0);
diff --git a/deps/v8/src/codegen/reglist.h b/deps/v8/src/codegen/reglist.h
index 609e6b8845..4f1d35267d 100644
--- a/deps/v8/src/codegen/reglist.h
+++ b/deps/v8/src/codegen/reglist.h
@@ -25,20 +25,18 @@ constexpr int NumRegs(RegList list) {
return base::bits::CountPopulation(list);
}
+namespace detail {
// Combine two RegLists by building the union of the contained registers.
-// Implemented as a Functor to pass it to base::fold even on gcc < 5 (see
-// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=52892).
-// TODO(clemensh): Remove this once we require gcc >= 5.0.
-struct CombineRegListsFunctor {
- constexpr RegList operator()(RegList list1, RegList list2) const {
- return list1 | list2;
- }
-};
+// TODO(clemensb): Replace by constexpr lambda once we have C++17.
+constexpr RegList CombineRegListsHelper(RegList list1, RegList list2) {
+ return list1 | list2;
+}
+} // namespace detail
// Combine several RegLists by building the union of the contained registers.
template <typename... RegLists>
constexpr RegList CombineRegLists(RegLists... lists) {
- return base::fold(CombineRegListsFunctor{}, 0, lists...);
+ return base::fold(detail::CombineRegListsHelper, 0, lists...);
}
} // namespace internal
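
CombineRegLists now folds a plain constexpr function over the argument pack. A self-contained sketch of the same idea, with a simplified RegList and a local Fold standing in for base::fold:

// reglist_fold_sketch.cc -- standalone sketch; RegList and Fold are
// simplified stand-ins for the V8 types used in reglist.h.
#include <cstdint>

using RegList = uint64_t;  // one bit per register

constexpr RegList CombineTwo(RegList a, RegList b) { return a | b; }

// Minimal left fold, playing the role of base::fold.
template <typename F, typename T>
constexpr T Fold(F, T value) { return value; }
template <typename F, typename T, typename... Ts>
constexpr T Fold(F f, T value, T first, Ts... rest) {
  return Fold(f, f(value, first), rest...);
}

template <typename... RegLists>
constexpr RegList CombineRegLists(RegLists... lists) {
  return Fold(CombineTwo, RegList{0}, lists...);
}

int main() {
  constexpr RegList r0 = 1u << 0, r3 = 1u << 3, r7 = 1u << 7;
  static_assert(CombineRegLists(r0, r3, r7) == 0b10001001, "union of bits");
  return 0;
}
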
diff --git a/deps/v8/src/codegen/reloc-info.cc b/deps/v8/src/codegen/reloc-info.cc
index a889a8b9c7..039a6746b1 100644
--- a/deps/v8/src/codegen/reloc-info.cc
+++ b/deps/v8/src/codegen/reloc-info.cc
@@ -366,7 +366,7 @@ void RelocInfo::set_target_address(Address target,
Assembler::set_target_address_at(pc_, constant_pool_, target,
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
- IsCodeTargetMode(rmode_)) {
+ IsCodeTargetMode(rmode_) && !FLAG_disable_write_barriers) {
Code target_code = Code::GetCodeFromTargetAddress(target);
MarkingBarrierForCode(host(), this, target_code);
}
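
The same three-part guard now appears in each backend's relocation setters. A hypothetical condensation of that condition, with local stand-ins for the mode enum and the flag; the helper name is invented for illustration:

// write_barrier_guard_sketch.cc -- hypothetical condensation of the guard
// repeated in the hunks above; the enum and flag are local stand-ins.
#include <cassert>

enum WriteBarrierMode { SKIP_WRITE_BARRIER, UPDATE_WRITE_BARRIER };
static bool FLAG_disable_write_barriers = false;  // stand-in for the V8 flag

bool ShouldEmitWriteBarrier(WriteBarrierMode mode, bool host_is_null) {
  return mode == UPDATE_WRITE_BARRIER && !host_is_null &&
         !FLAG_disable_write_barriers;
}

int main() {
  assert(ShouldEmitWriteBarrier(UPDATE_WRITE_BARRIER, false));
  FLAG_disable_write_barriers = true;  // e.g. a no-write-barrier build
  assert(!ShouldEmitWriteBarrier(UPDATE_WRITE_BARRIER, false));
  return 0;
}
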
diff --git a/deps/v8/src/codegen/s390/assembler-s390-inl.h b/deps/v8/src/codegen/s390/assembler-s390-inl.h
index 5e7b193c8a..f911bdabf6 100644
--- a/deps/v8/src/codegen/s390/assembler-s390-inl.h
+++ b/deps/v8/src/codegen/s390/assembler-s390-inl.h
@@ -150,7 +150,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target,
DCHECK(IsCodeTarget(rmode_) || rmode_ == FULL_EMBEDDED_OBJECT);
Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
icache_flush_mode);
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) {
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
+ !FLAG_disable_write_barriers) {
WriteBarrierForCode(host(), this, target);
}
}
diff --git a/deps/v8/src/codegen/s390/assembler-s390.cc b/deps/v8/src/codegen/s390/assembler-s390.cc
index 873c0a2ad0..9de95ed508 100644
--- a/deps/v8/src/codegen/s390/assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/assembler-s390.cc
@@ -329,8 +329,8 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset();
switch (request.kind()) {
case HeapObjectRequest::kHeapNumber: {
- object = isolate->factory()->NewHeapNumber(request.heap_number(),
- AllocationType::kOld);
+ object = isolate->factory()->NewHeapNumber<AllocationType::kOld>(
+ request.heap_number());
set_target_address_at(pc, kNullAddress, object.address(),
SKIP_ICACHE_FLUSH);
break;
diff --git a/deps/v8/src/codegen/s390/assembler-s390.h b/deps/v8/src/codegen/s390/assembler-s390.h
index 0653e79b67..f1a418d1af 100644
--- a/deps/v8/src/codegen/s390/assembler-s390.h
+++ b/deps/v8/src/codegen/s390/assembler-s390.h
@@ -40,6 +40,7 @@
#ifndef V8_CODEGEN_S390_ASSEMBLER_S390_H_
#define V8_CODEGEN_S390_ASSEMBLER_S390_H_
#include <stdio.h>
+#include <memory>
#if V8_HOST_ARCH_S390
// elf.h include is required for auxv check for STFLE facility used
// for hardware detection, which is sensible only on s390 hosts.
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.cc b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
index 355d536379..4cab44d9e1 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
@@ -51,7 +51,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
}
RegList list = kJSCallerSaved & ~exclusions;
- bytes += NumRegs(list) * kPointerSize;
+ bytes += NumRegs(list) * kSystemPointerSize;
if (fp_mode == kSaveFPRegs) {
bytes += NumRegs(kCallerSavedDoubles) * kDoubleSize;
@@ -76,7 +76,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
RegList list = kJSCallerSaved & ~exclusions;
MultiPush(list);
- bytes += NumRegs(list) * kPointerSize;
+ bytes += NumRegs(list) * kSystemPointerSize;
if (fp_mode == kSaveFPRegs) {
MultiPushDoubles(kCallerSavedDoubles);
@@ -107,7 +107,7 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
RegList list = kJSCallerSaved & ~exclusions;
MultiPop(list);
- bytes += NumRegs(list) * kPointerSize;
+ bytes += NumRegs(list) * kSystemPointerSize;
return bytes;
}
@@ -116,8 +116,8 @@ void TurboAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
- const uint32_t offset =
- FixedArray::kHeaderSize + constant_index * kPointerSize - kHeapObjectTag;
+ const uint32_t offset = FixedArray::kHeaderSize +
+ constant_index * kSystemPointerSize - kHeapObjectTag;
CHECK(is_uint19(offset));
DCHECK_NE(destination, r0);
@@ -258,7 +258,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
void TurboAssembler::Drop(int count) {
if (count > 0) {
- int total = count * kPointerSize;
+ int total = count * kSystemPointerSize;
if (is_uint12(total)) {
la(sp, MemOperand(sp, total));
} else if (is_int20(total)) {
@@ -270,7 +270,7 @@ void TurboAssembler::Drop(int count) {
}
void TurboAssembler::Drop(Register count, Register scratch) {
- ShiftLeftP(scratch, count, Operand(kPointerSizeLog2));
+ ShiftLeftP(scratch, count, Operand(kSystemPointerSizeLog2));
AddP(sp, sp, scratch);
}
@@ -367,12 +367,12 @@ void TurboAssembler::BranchRelativeOnIdxHighP(Register dst, Register inc,
void TurboAssembler::MultiPush(RegList regs, Register location) {
int16_t num_to_push = base::bits::CountPopulation(regs);
- int16_t stack_offset = num_to_push * kPointerSize;
+ int16_t stack_offset = num_to_push * kSystemPointerSize;
SubP(location, location, Operand(stack_offset));
for (int16_t i = Register::kNumRegisters - 1; i >= 0; i--) {
if ((regs & (1 << i)) != 0) {
- stack_offset -= kPointerSize;
+ stack_offset -= kSystemPointerSize;
StoreP(ToRegister(i), MemOperand(location, stack_offset));
}
}
@@ -384,7 +384,7 @@ void TurboAssembler::MultiPop(RegList regs, Register location) {
for (int16_t i = 0; i < Register::kNumRegisters; i++) {
if ((regs & (1 << i)) != 0) {
LoadP(ToRegister(i), MemOperand(location, stack_offset));
- stack_offset += kPointerSize;
+ stack_offset += kSystemPointerSize;
}
}
AddP(location, location, Operand(stack_offset));
@@ -439,13 +439,13 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
// Although the object register is tagged, the offset is relative to the start
- // of the object, so so offset must be a multiple of kPointerSize.
- DCHECK(IsAligned(offset, kPointerSize));
+  // of the object, so the offset must be a multiple of kSystemPointerSize.
+ DCHECK(IsAligned(offset, kSystemPointerSize));
lay(dst, MemOperand(object, offset - kHeapObjectTag));
if (emit_debug_code()) {
Label ok;
- AndP(r0, dst, Operand(kPointerSize - 1));
+ AndP(r0, dst, Operand(kSystemPointerSize - 1));
beq(&ok, Label::kNear);
stop();
bind(&ok);
@@ -632,7 +632,7 @@ void TurboAssembler::PushCommonFrame(Register marker_reg) {
Push(r14, fp);
fp_delta = 0;
}
- la(fp, MemOperand(sp, fp_delta * kPointerSize));
+ la(fp, MemOperand(sp, fp_delta * kSystemPointerSize));
}
void TurboAssembler::PopCommonFrame(Register marker_reg) {
@@ -653,7 +653,7 @@ void TurboAssembler::PushStandardFrame(Register function_reg) {
Push(r14, fp, cp);
fp_delta = 1;
}
- la(fp, MemOperand(sp, fp_delta * kPointerSize));
+ la(fp, MemOperand(sp, fp_delta * kSystemPointerSize));
}
void TurboAssembler::RestoreFrameStateForTailCall() {
@@ -1082,9 +1082,9 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
DCHECK(frame_type == StackFrame::EXIT ||
frame_type == StackFrame::BUILTIN_EXIT);
// Set up the frame structure on the stack.
- DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
- DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
- DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
+ DCHECK_EQ(2 * kSystemPointerSize, ExitFrameConstants::kCallerSPDisplacement);
+ DCHECK_EQ(1 * kSystemPointerSize, ExitFrameConstants::kCallerPCOffset);
+ DCHECK_EQ(0 * kSystemPointerSize, ExitFrameConstants::kCallerFPOffset);
DCHECK_GT(stack_space, 0);
// This is an opportunity to build a frame to wrap
@@ -1117,7 +1117,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
// since the sp slot and code slot were pushed after the fp.
}
- lay(sp, MemOperand(sp, -stack_space * kPointerSize));
+ lay(sp, MemOperand(sp, -stack_space * kSystemPointerSize));
// Allocate and align the frame preparing for calling the runtime
// function.
@@ -1127,11 +1127,11 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
ClearRightImm(sp, sp, Operand(3)); // equivalent to &= -8
}
- lay(sp, MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize));
+ lay(sp, MemOperand(sp, -kNumRequiredStackFrameSlots * kSystemPointerSize));
StoreP(MemOperand(sp), Operand::Zero(), r0);
// Set the exit frame sp value to point just before the return address
// location.
- lay(r1, MemOperand(sp, kStackFrameSPSlot * kPointerSize));
+ lay(r1, MemOperand(sp, kStackFrameSPSlot * kSystemPointerSize));
StoreP(r1, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
@@ -1184,7 +1184,8 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
if (argument_count.is_valid()) {
if (!argument_count_is_length) {
- ShiftLeftP(argument_count, argument_count, Operand(kPointerSizeLog2));
+ ShiftLeftP(argument_count, argument_count,
+ Operand(kSystemPointerSizeLog2));
}
la(sp, MemOperand(sp, argument_count));
}
@@ -1211,22 +1212,24 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
#endif
// Calculate the end of destination area where we will put the arguments
- // after we drop current frame. We AddP kPointerSize to count the receiver
- // argument which is not included into formal parameters count.
+ // after we drop current frame. We AddP kSystemPointerSize to count the
+ // receiver argument which is not included into formal parameters count.
Register dst_reg = scratch0;
- ShiftLeftP(dst_reg, caller_args_count_reg, Operand(kPointerSizeLog2));
+ ShiftLeftP(dst_reg, caller_args_count_reg, Operand(kSystemPointerSizeLog2));
AddP(dst_reg, fp, dst_reg);
AddP(dst_reg, dst_reg,
- Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
+ Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
Register src_reg = caller_args_count_reg;
- // Calculate the end of source area. +kPointerSize is for the receiver.
+ // Calculate the end of source area. +kSystemPointerSize is for the receiver.
if (callee_args_count.is_reg()) {
- ShiftLeftP(src_reg, callee_args_count.reg(), Operand(kPointerSizeLog2));
+ ShiftLeftP(src_reg, callee_args_count.reg(),
+ Operand(kSystemPointerSizeLog2));
AddP(src_reg, sp, src_reg);
- AddP(src_reg, src_reg, Operand(kPointerSize));
+ AddP(src_reg, src_reg, Operand(kSystemPointerSize));
} else {
- mov(src_reg, Operand((callee_args_count.immediate() + 1) * kPointerSize));
+ mov(src_reg,
+ Operand((callee_args_count.immediate() + 1) * kSystemPointerSize));
AddP(src_reg, src_reg, sp);
}
@@ -1253,10 +1256,10 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
}
LoadRR(r1, tmp_reg);
bind(&loop);
- LoadP(tmp_reg, MemOperand(src_reg, -kPointerSize));
- StoreP(tmp_reg, MemOperand(dst_reg, -kPointerSize));
- lay(src_reg, MemOperand(src_reg, -kPointerSize));
- lay(dst_reg, MemOperand(dst_reg, -kPointerSize));
+ LoadP(tmp_reg, MemOperand(src_reg, -kSystemPointerSize));
+ StoreP(tmp_reg, MemOperand(dst_reg, -kSystemPointerSize));
+ lay(src_reg, MemOperand(src_reg, -kSystemPointerSize));
+ lay(dst_reg, MemOperand(dst_reg, -kSystemPointerSize));
BranchOnCount(r1, &loop);
// Leave current frame.
@@ -1342,12 +1345,12 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
{
// Load receiver to pass it later to DebugOnFunctionCall hook.
if (actual.is_reg()) {
- LoadRR(r6, actual.reg());
+ ShiftLeftP(r6, actual.reg(), Operand(kSystemPointerSizeLog2));
+ LoadP(r6, MemOperand(sp, r6));
} else {
- mov(r6, Operand(actual.immediate()));
+ LoadP(r6, MemOperand(sp, actual.immediate() << kSystemPointerSizeLog2),
+ ip);
}
- ShiftLeftP(r6, r6, Operand(kPointerSizeLog2));
- LoadP(r6, MemOperand(sp, r6));
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
if (expected.is_reg()) {
@@ -1470,8 +1473,8 @@ void MacroAssembler::MaybeDropFrames() {
void MacroAssembler::PushStackHandler() {
// Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kSystemPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kSystemPointerSize);
// Link the current handler as the next handler.
Move(r7,
@@ -1486,13 +1489,13 @@ void MacroAssembler::PushStackHandler() {
// Copy the old handler into the next handler slot.
MoveChar(MemOperand(sp, StackHandlerConstants::kNextOffset), MemOperand(r7),
- Operand(kPointerSize));
+ Operand(kSystemPointerSize));
// Set this new handler as the current one.
StoreP(sp, MemOperand(r7));
}
void MacroAssembler::PopStackHandler() {
- STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kSystemPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
// Pop the Next Handler into r3 and store it into Handler Address reference.
@@ -1839,18 +1842,19 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
int stack_passed_arguments =
CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
int stack_space = kNumRequiredStackFrameSlots;
- if (frame_alignment > kPointerSize) {
+ if (frame_alignment > kSystemPointerSize) {
// Make stack end at alignment and make room for stack arguments
// -- preserving original value of sp.
LoadRR(scratch, sp);
- lay(sp, MemOperand(sp, -(stack_passed_arguments + 1) * kPointerSize));
+ lay(sp, MemOperand(sp, -(stack_passed_arguments + 1) * kSystemPointerSize));
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
- StoreP(scratch, MemOperand(sp, (stack_passed_arguments)*kPointerSize));
+ StoreP(scratch,
+ MemOperand(sp, (stack_passed_arguments)*kSystemPointerSize));
} else {
stack_space += stack_passed_arguments;
}
- lay(sp, MemOperand(sp, (-stack_space) * kPointerSize));
+ lay(sp, MemOperand(sp, (-stack_space) * kSystemPointerSize));
}
void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
@@ -1940,11 +1944,11 @@ void TurboAssembler::CallCFunctionHelper(Register function,
int stack_passed_arguments =
CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
int stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments;
- if (ActivationFrameAlignment() > kPointerSize) {
+ if (ActivationFrameAlignment() > kSystemPointerSize) {
// Load the original stack pointer (pre-alignment) from the stack
- LoadP(sp, MemOperand(sp, stack_space * kPointerSize));
+ LoadP(sp, MemOperand(sp, stack_space * kSystemPointerSize));
} else {
- la(sp, MemOperand(sp, stack_space * kPointerSize));
+ la(sp, MemOperand(sp, stack_space * kSystemPointerSize));
}
}
@@ -1962,20 +1966,20 @@ void TurboAssembler::CheckPageFlag(
uint32_t shifted_mask = mask;
// Determine the byte offset to be tested
if (mask <= 0x80) {
- byte_offset = kPointerSize - 1;
+ byte_offset = kSystemPointerSize - 1;
} else if (mask < 0x8000) {
- byte_offset = kPointerSize - 2;
+ byte_offset = kSystemPointerSize - 2;
shifted_mask = mask >> 8;
} else if (mask < 0x800000) {
- byte_offset = kPointerSize - 3;
+ byte_offset = kSystemPointerSize - 3;
shifted_mask = mask >> 16;
} else {
- byte_offset = kPointerSize - 4;
+ byte_offset = kSystemPointerSize - 4;
shifted_mask = mask >> 24;
}
#if V8_TARGET_LITTLE_ENDIAN
// Reverse the byte_offset if emulating on little endian platform
- byte_offset = kPointerSize - byte_offset - 1;
+ byte_offset = kSystemPointerSize - byte_offset - 1;
#endif
tm(MemOperand(scratch, MemoryChunk::kFlagsOffset + byte_offset),
Operand(shifted_mask));
@@ -3415,12 +3419,12 @@ void TurboAssembler::LoadIntLiteral(Register dst, int value) {
void TurboAssembler::LoadSmiLiteral(Register dst, Smi smi) {
intptr_t value = static_cast<intptr_t>(smi.ptr());
-#if V8_TARGET_ARCH_S390X
+#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
+ llilf(dst, Operand(value));
+#else
DCHECK_EQ(value & 0xFFFFFFFF, 0);
// The smi value is loaded in upper 32-bits. Lower 32-bit are zeros.
llihf(dst, Operand(value >> 32));
-#else
- llilf(dst, Operand(value));
#endif
}
@@ -3456,16 +3460,16 @@ void TurboAssembler::LoadFloat32Literal(DoubleRegister result, float value,
}
void TurboAssembler::CmpSmiLiteral(Register src1, Smi smi, Register scratch) {
-#if V8_TARGET_ARCH_S390X
+#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
+ // CFI takes 32-bit immediate.
+ cfi(src1, Operand(smi));
+#else
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
cih(src1, Operand(static_cast<intptr_t>(smi.ptr()) >> 32));
} else {
LoadSmiLiteral(scratch, smi);
cgr(src1, scratch);
}
-#else
- // CFI takes 32-bit immediate.
- cfi(src1, Operand(smi));
#endif
}
@@ -4154,7 +4158,7 @@ void TurboAssembler::ShiftRightArith(Register dst, Register src, Register val) {
// Clear right most # of bits
void TurboAssembler::ClearRightImm(Register dst, Register src,
const Operand& val) {
- int numBitsToClear = val.immediate() % (kPointerSize * 8);
+ int numBitsToClear = val.immediate() % (kSystemPointerSize * 8);
// Try to use RISBG if possible
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
@@ -4342,14 +4346,19 @@ void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
STATIC_ASSERT(kSystemPointerSize == 8);
- STATIC_ASSERT(kSmiShiftSize == 31);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
// The builtin_index register contains the builtin index as a Smi.
// Untagging is folded into the indexing operand below.
+#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
+ STATIC_ASSERT(kSmiShiftSize == 0);
+ ShiftLeftP(builtin_index, builtin_index,
+ Operand(kSystemPointerSizeLog2 - kSmiShift));
+#else
ShiftRightArithP(builtin_index, builtin_index,
Operand(kSmiShift - kSystemPointerSizeLog2));
+#endif
AddP(builtin_index, builtin_index,
Operand(IsolateData::builtin_entry_table_offset()));
LoadP(builtin_index, MemOperand(kRootRegister, builtin_index));
@@ -4427,7 +4436,7 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
Label return_label;
larl(r14, &return_label); // Generate the return addr of call later.
- StoreP(r14, MemOperand(sp, kStackFrameRASlot * kPointerSize));
+ StoreP(r14, MemOperand(sp, kStackFrameRASlot * kSystemPointerSize));
// zLinux ABI requires caller's frame to have sufficient space for callee
// preserved register save area.
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.h b/deps/v8/src/codegen/s390/macro-assembler-s390.h
index 856e4b592e..06c26cb305 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.h
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.h
@@ -515,26 +515,26 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
}
void push(DoubleRegister src) {
- lay(sp, MemOperand(sp, -kPointerSize));
+ lay(sp, MemOperand(sp, -kSystemPointerSize));
StoreDouble(src, MemOperand(sp));
}
void push(Register src) {
- lay(sp, MemOperand(sp, -kPointerSize));
+ lay(sp, MemOperand(sp, -kSystemPointerSize));
StoreP(src, MemOperand(sp));
}
void pop(DoubleRegister dst) {
LoadDouble(dst, MemOperand(sp));
- la(sp, MemOperand(sp, kPointerSize));
+ la(sp, MemOperand(sp, kSystemPointerSize));
}
void pop(Register dst) {
LoadP(dst, MemOperand(sp));
- la(sp, MemOperand(sp, kPointerSize));
+ la(sp, MemOperand(sp, kSystemPointerSize));
}
- void pop() { la(sp, MemOperand(sp, kPointerSize)); }
+ void pop() { la(sp, MemOperand(sp, kSystemPointerSize)); }
void Push(Register src) { push(src); }
@@ -544,25 +544,25 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2) {
- lay(sp, MemOperand(sp, -kPointerSize * 2));
- StoreP(src1, MemOperand(sp, kPointerSize));
+ lay(sp, MemOperand(sp, -kSystemPointerSize * 2));
+ StoreP(src1, MemOperand(sp, kSystemPointerSize));
StoreP(src2, MemOperand(sp, 0));
}
// Push three registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3) {
- lay(sp, MemOperand(sp, -kPointerSize * 3));
- StoreP(src1, MemOperand(sp, kPointerSize * 2));
- StoreP(src2, MemOperand(sp, kPointerSize));
+ lay(sp, MemOperand(sp, -kSystemPointerSize * 3));
+ StoreP(src1, MemOperand(sp, kSystemPointerSize * 2));
+ StoreP(src2, MemOperand(sp, kSystemPointerSize));
StoreP(src3, MemOperand(sp, 0));
}
// Push four registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3, Register src4) {
- lay(sp, MemOperand(sp, -kPointerSize * 4));
- StoreP(src1, MemOperand(sp, kPointerSize * 3));
- StoreP(src2, MemOperand(sp, kPointerSize * 2));
- StoreP(src3, MemOperand(sp, kPointerSize));
+ lay(sp, MemOperand(sp, -kSystemPointerSize * 4));
+ StoreP(src1, MemOperand(sp, kSystemPointerSize * 3));
+ StoreP(src2, MemOperand(sp, kSystemPointerSize * 2));
+ StoreP(src3, MemOperand(sp, kSystemPointerSize));
StoreP(src4, MemOperand(sp, 0));
}
@@ -580,11 +580,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
DCHECK(src3 != src5);
DCHECK(src4 != src5);
- lay(sp, MemOperand(sp, -kPointerSize * 5));
- StoreP(src1, MemOperand(sp, kPointerSize * 4));
- StoreP(src2, MemOperand(sp, kPointerSize * 3));
- StoreP(src3, MemOperand(sp, kPointerSize * 2));
- StoreP(src4, MemOperand(sp, kPointerSize));
+ lay(sp, MemOperand(sp, -kSystemPointerSize * 5));
+ StoreP(src1, MemOperand(sp, kSystemPointerSize * 4));
+ StoreP(src2, MemOperand(sp, kSystemPointerSize * 3));
+ StoreP(src3, MemOperand(sp, kSystemPointerSize * 2));
+ StoreP(src4, MemOperand(sp, kSystemPointerSize));
StoreP(src5, MemOperand(sp, 0));
}
@@ -593,36 +593,36 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Pop two registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2) {
LoadP(src2, MemOperand(sp, 0));
- LoadP(src1, MemOperand(sp, kPointerSize));
- la(sp, MemOperand(sp, 2 * kPointerSize));
+ LoadP(src1, MemOperand(sp, kSystemPointerSize));
+ la(sp, MemOperand(sp, 2 * kSystemPointerSize));
}
// Pop three registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Register src3) {
LoadP(src3, MemOperand(sp, 0));
- LoadP(src2, MemOperand(sp, kPointerSize));
- LoadP(src1, MemOperand(sp, 2 * kPointerSize));
- la(sp, MemOperand(sp, 3 * kPointerSize));
+ LoadP(src2, MemOperand(sp, kSystemPointerSize));
+ LoadP(src1, MemOperand(sp, 2 * kSystemPointerSize));
+ la(sp, MemOperand(sp, 3 * kSystemPointerSize));
}
// Pop four registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Register src3, Register src4) {
LoadP(src4, MemOperand(sp, 0));
- LoadP(src3, MemOperand(sp, kPointerSize));
- LoadP(src2, MemOperand(sp, 2 * kPointerSize));
- LoadP(src1, MemOperand(sp, 3 * kPointerSize));
- la(sp, MemOperand(sp, 4 * kPointerSize));
+ LoadP(src3, MemOperand(sp, kSystemPointerSize));
+ LoadP(src2, MemOperand(sp, 2 * kSystemPointerSize));
+ LoadP(src1, MemOperand(sp, 3 * kSystemPointerSize));
+ la(sp, MemOperand(sp, 4 * kSystemPointerSize));
}
// Pop five registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Register src3, Register src4,
Register src5) {
LoadP(src5, MemOperand(sp, 0));
- LoadP(src4, MemOperand(sp, kPointerSize));
- LoadP(src3, MemOperand(sp, 2 * kPointerSize));
- LoadP(src2, MemOperand(sp, 3 * kPointerSize));
- LoadP(src1, MemOperand(sp, 4 * kPointerSize));
- la(sp, MemOperand(sp, 5 * kPointerSize));
+ LoadP(src4, MemOperand(sp, kSystemPointerSize));
+ LoadP(src3, MemOperand(sp, 2 * kSystemPointerSize));
+ LoadP(src2, MemOperand(sp, 3 * kSystemPointerSize));
+ LoadP(src1, MemOperand(sp, 4 * kSystemPointerSize));
+ la(sp, MemOperand(sp, 5 * kSystemPointerSize));
}
// Push a fixed frame, consisting of lr, fp, constant pool.
@@ -1182,12 +1182,12 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
}
void SmiToPtrArrayOffset(Register dst, Register src) {
-#if V8_TARGET_ARCH_S390X
- STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kPointerSizeLog2);
- ShiftRightArithP(dst, src, Operand(kSmiShift - kPointerSizeLog2));
+#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
+ STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kSystemPointerSizeLog2);
+ ShiftLeftP(dst, src, Operand(kSystemPointerSizeLog2 - kSmiShift));
#else
- STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kPointerSizeLog2);
- ShiftLeftP(dst, src, Operand(kPointerSizeLog2 - kSmiShift));
+ STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kSystemPointerSizeLog2);
+ ShiftRightArithP(dst, src, Operand(kSmiShift - kSystemPointerSizeLog2));
#endif
}
@@ -1201,14 +1201,14 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void AssertNotSmi(Register object);
void AssertSmi(Register object);
-#if V8_TARGET_ARCH_S390X
+#if !defined(V8_COMPRESS_POINTERS) && !defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
// Ensure it is permissible to read/write int value directly from
// upper half of the smi.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
#endif
#if V8_TARGET_LITTLE_ENDIAN
-#define SmiWordOffset(offset) (offset + kPointerSize / 2)
+#define SmiWordOffset(offset) (offset + kSystemPointerSize / 2)
#else
#define SmiWordOffset(offset) offset
#endif
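
SmiWordOffset adds half a pointer on little-endian targets because a full 64-bit Smi keeps its integer payload in the upper 32 bits. A small host-side sketch of that layout, assuming a little-endian machine; it is not code from the macro itself:

// smi_word_offset_sketch.cc -- why SmiWordOffset(offset) adds
// kSystemPointerSize / 2 on little-endian; assumes a little-endian host.
#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  // A full 64-bit Smi keeps its payload in the upper 32 bits.
  const int64_t smi = int64_t{42} << 32;
  unsigned char field[8];
  std::memcpy(field, &smi, sizeof(smi));

  // On a little-endian host the payload bytes start 4 bytes into the field,
  // which is the adjustment SmiWordOffset makes when the int half of the
  // Smi is read directly.
  int32_t payload;
  std::memcpy(&payload, field + 4, sizeof(payload));
  assert(payload == 42);
  return 0;
}
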
diff --git a/deps/v8/src/codegen/source-position-table.cc b/deps/v8/src/codegen/source-position-table.cc
index 870241eac6..ba8e5981f0 100644
--- a/deps/v8/src/codegen/source-position-table.cc
+++ b/deps/v8/src/codegen/source-position-table.cc
@@ -31,24 +31,23 @@ using MoreBit = BitField8<bool, 7, 1>;
using ValueBits = BitField8<unsigned, 0, 7>;
// Helper: Add the offsets from 'other' to 'value'. Also set is_statement.
-void AddAndSetEntry(PositionTableEntry& value, // NOLINT(runtime/references)
+void AddAndSetEntry(PositionTableEntry* value,
const PositionTableEntry& other) {
- value.code_offset += other.code_offset;
- value.source_position += other.source_position;
- value.is_statement = other.is_statement;
+ value->code_offset += other.code_offset;
+ value->source_position += other.source_position;
+ value->is_statement = other.is_statement;
}
// Helper: Subtract the offsets from 'other' from 'value'.
-void SubtractFromEntry(PositionTableEntry& value, // NOLINT(runtime/references)
+void SubtractFromEntry(PositionTableEntry* value,
const PositionTableEntry& other) {
- value.code_offset -= other.code_offset;
- value.source_position -= other.source_position;
+ value->code_offset -= other.code_offset;
+ value->source_position -= other.source_position;
}
// Helper: Encode an integer.
template <typename T>
-void EncodeInt(std::vector<byte>& bytes, // NOLINT(runtime/references)
- T value) {
+void EncodeInt(std::vector<byte>* bytes, T value) {
using unsigned_type = typename std::make_unsigned<T>::type;
// Zig-zag encoding.
static const int kShift = sizeof(T) * kBitsPerByte - 1;
@@ -60,14 +59,13 @@ void EncodeInt(std::vector<byte>& bytes, // NOLINT(runtime/references)
more = encoded > ValueBits::kMax;
byte current =
MoreBit::encode(more) | ValueBits::encode(encoded & ValueBits::kMask);
- bytes.push_back(current);
+ bytes->push_back(current);
encoded >>= ValueBits::kSize;
} while (more);
}
// Encode a PositionTableEntry.
-void EncodeEntry(std::vector<byte>& bytes, // NOLINT(runtime/references)
- const PositionTableEntry& entry) {
+void EncodeEntry(std::vector<byte>* bytes, const PositionTableEntry& entry) {
// We only accept ascending code offsets.
DCHECK_GE(entry.code_offset, 0);
// Since code_offset is not negative, we use sign to encode is_statement.
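
EncodeInt above zig-zags the value and then emits it in 7-bit groups with a continuation bit. A standalone rendering of that scheme, with kMoreBit and kValueMask standing in for the MoreBit/ValueBits bit fields:

// zigzag_sketch.cc -- the zig-zag + 7-bit varint scheme EncodeInt uses,
// written out as plain functions over int32_t.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <initializer_list>
#include <vector>

constexpr uint8_t kMoreBit = 0x80;    // bit 7: another byte follows
constexpr uint8_t kValueMask = 0x7F;  // bits 0..6: payload

void EncodeInt(std::vector<uint8_t>* bytes, int32_t value) {
  // Zig-zag: map 0, -1, 1, -2, 2, ... to 0, 1, 2, 3, 4, ...
  uint32_t encoded = (static_cast<uint32_t>(value) << 1) ^
                     static_cast<uint32_t>(value >> 31);
  do {
    uint8_t current = encoded & kValueMask;
    encoded >>= 7;
    if (encoded != 0) current |= kMoreBit;
    bytes->push_back(current);
  } while (encoded != 0);
}

int32_t DecodeInt(const std::vector<uint8_t>& bytes, size_t* index) {
  uint32_t decoded = 0;
  int shift = 0;
  uint8_t current;
  do {
    current = bytes[(*index)++];
    decoded |= static_cast<uint32_t>(current & kValueMask) << shift;
    shift += 7;
  } while (current & kMoreBit);
  // Undo the zig-zag mapping.
  return static_cast<int32_t>((decoded >> 1) ^ -(decoded & 1));
}

int main() {
  std::vector<uint8_t> bytes;
  for (int32_t v : {0, 1, -1, 300, -100000}) EncodeInt(&bytes, v);
  size_t index = 0;
  for (int32_t v : {0, 1, -1, 300, -100000})
    assert(DecodeInt(bytes, &index) == v);
  return 0;
}
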
@@ -115,17 +113,16 @@ Vector<const byte> VectorFromByteArray(ByteArray byte_array) {
}
#ifdef ENABLE_SLOW_DCHECKS
-void CheckTableEquals(
- std::vector<PositionTableEntry>& raw_entries, // NOLINT(runtime/references)
- SourcePositionTableIterator& encoded) { // NOLINT(runtime/references)
+void CheckTableEquals(const std::vector<PositionTableEntry>& raw_entries,
+ SourcePositionTableIterator* encoded) {
// Brute force testing: Record all positions and decode
// the entire table to verify they are identical.
auto raw = raw_entries.begin();
- for (; !encoded.done(); encoded.Advance(), raw++) {
+ for (; !encoded->done(); encoded->Advance(), raw++) {
DCHECK(raw != raw_entries.end());
- DCHECK_EQ(encoded.code_offset(), raw->code_offset);
- DCHECK_EQ(encoded.source_position().raw(), raw->source_position);
- DCHECK_EQ(encoded.is_statement(), raw->is_statement);
+ DCHECK_EQ(encoded->code_offset(), raw->code_offset);
+ DCHECK_EQ(encoded->source_position().raw(), raw->source_position);
+ DCHECK_EQ(encoded->is_statement(), raw->is_statement);
}
DCHECK(raw == raw_entries.end());
}
@@ -148,8 +145,8 @@ void SourcePositionTableBuilder::AddPosition(size_t code_offset,
void SourcePositionTableBuilder::AddEntry(const PositionTableEntry& entry) {
PositionTableEntry tmp(entry);
- SubtractFromEntry(tmp, previous_);
- EncodeEntry(bytes_, tmp);
+ SubtractFromEntry(&tmp, previous_);
+ EncodeEntry(&bytes_, tmp);
previous_ = entry;
#ifdef ENABLE_SLOW_DCHECKS
raw_entries_.push_back(entry);
@@ -169,7 +166,7 @@ Handle<ByteArray> SourcePositionTableBuilder::ToSourcePositionTable(
// Brute force testing: Record all positions and decode
// the entire table to verify they are identical.
SourcePositionTableIterator it(*table, SourcePositionTableIterator::kAll);
- CheckTableEquals(raw_entries_, it);
+ CheckTableEquals(raw_entries_, &it);
// No additional source positions after creating the table.
mode_ = OMIT_SOURCE_POSITIONS;
#endif
@@ -187,7 +184,7 @@ OwnedVector<byte> SourcePositionTableBuilder::ToSourcePositionTableVector() {
// the entire table to verify they are identical.
SourcePositionTableIterator it(table.as_vector(),
SourcePositionTableIterator::kAll);
- CheckTableEquals(raw_entries_, it);
+ CheckTableEquals(raw_entries_, &it);
// No additional source positions after creating the table.
mode_ = OMIT_SOURCE_POSITIONS;
#endif
@@ -232,7 +229,7 @@ void SourcePositionTableIterator::Advance() {
} else {
PositionTableEntry tmp;
DecodeEntry(bytes, &index_, &tmp);
- AddAndSetEntry(current_, tmp);
+ AddAndSetEntry(&current_, tmp);
SourcePosition p = source_position();
filter_satisfied = (filter_ == kAll) ||
(filter_ == kJavaScriptOnly && p.IsJavaScript()) ||
diff --git a/deps/v8/src/codegen/tnode.h b/deps/v8/src/codegen/tnode.h
new file mode 100644
index 0000000000..1f6c627929
--- /dev/null
+++ b/deps/v8/src/codegen/tnode.h
@@ -0,0 +1,374 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_TNODE_H_
+#define V8_CODEGEN_TNODE_H_
+
+#include "src/codegen/machine-type.h"
+
+namespace v8 {
+namespace internal {
+
+class HeapNumber;
+class BigInt;
+class Object;
+
+namespace compiler {
+
+class Node;
+
+}
+
+struct UntaggedT {};
+
+struct IntegralT : UntaggedT {};
+
+struct WordT : IntegralT {
+ static const MachineRepresentation kMachineRepresentation =
+ (kSystemPointerSize == 4) ? MachineRepresentation::kWord32
+ : MachineRepresentation::kWord64;
+};
+
+struct RawPtrT : WordT {
+ static constexpr MachineType kMachineType = MachineType::Pointer();
+};
+
+template <class To>
+struct RawPtr : RawPtrT {};
+
+struct Word32T : IntegralT {
+ static const MachineRepresentation kMachineRepresentation =
+ MachineRepresentation::kWord32;
+};
+struct Int32T : Word32T {
+ static constexpr MachineType kMachineType = MachineType::Int32();
+};
+struct Uint32T : Word32T {
+ static constexpr MachineType kMachineType = MachineType::Uint32();
+};
+struct Int16T : Int32T {
+ static constexpr MachineType kMachineType = MachineType::Int16();
+};
+struct Uint16T : Uint32T, Int32T {
+ static constexpr MachineType kMachineType = MachineType::Uint16();
+};
+struct Int8T : Int16T {
+ static constexpr MachineType kMachineType = MachineType::Int8();
+};
+struct Uint8T : Uint16T, Int16T {
+ static constexpr MachineType kMachineType = MachineType::Uint8();
+};
+
+struct Word64T : IntegralT {
+ static const MachineRepresentation kMachineRepresentation =
+ MachineRepresentation::kWord64;
+};
+struct Int64T : Word64T {
+ static constexpr MachineType kMachineType = MachineType::Int64();
+};
+struct Uint64T : Word64T {
+ static constexpr MachineType kMachineType = MachineType::Uint64();
+};
+
+struct IntPtrT : WordT {
+ static constexpr MachineType kMachineType = MachineType::IntPtr();
+};
+struct UintPtrT : WordT {
+ static constexpr MachineType kMachineType = MachineType::UintPtr();
+};
+
+struct Float32T : UntaggedT {
+ static const MachineRepresentation kMachineRepresentation =
+ MachineRepresentation::kFloat32;
+ static constexpr MachineType kMachineType = MachineType::Float32();
+};
+
+struct Float64T : UntaggedT {
+ static const MachineRepresentation kMachineRepresentation =
+ MachineRepresentation::kFloat64;
+ static constexpr MachineType kMachineType = MachineType::Float64();
+};
+
+#ifdef V8_COMPRESS_POINTERS
+using TaggedT = Int32T;
+#else
+using TaggedT = IntPtrT;
+#endif
+
+// Result of a comparison operation.
+struct BoolT : Word32T {};
+
+// Value type of a Turbofan node with two results.
+template <class T1, class T2>
+struct PairT {};
+
+inline constexpr MachineType CommonMachineType(MachineType type1,
+ MachineType type2) {
+ return (type1 == type2) ? type1
+ : ((type1.IsTagged() && type2.IsTagged())
+ ? MachineType::AnyTagged()
+ : MachineType::None());
+}
+
+template <class Type, class Enable = void>
+struct MachineTypeOf {
+ static constexpr MachineType value = Type::kMachineType;
+};
+
+template <class Type, class Enable>
+constexpr MachineType MachineTypeOf<Type, Enable>::value;
+
+template <>
+struct MachineTypeOf<Object> {
+ static constexpr MachineType value = MachineType::AnyTagged();
+};
+template <>
+struct MachineTypeOf<MaybeObject> {
+ static constexpr MachineType value = MachineType::AnyTagged();
+};
+template <>
+struct MachineTypeOf<Smi> {
+ static constexpr MachineType value = MachineType::TaggedSigned();
+};
+template <class HeapObjectSubtype>
+struct MachineTypeOf<HeapObjectSubtype,
+ typename std::enable_if<std::is_base_of<
+ HeapObject, HeapObjectSubtype>::value>::type> {
+ static constexpr MachineType value = MachineType::TaggedPointer();
+};
+
+template <class HeapObjectSubtype>
+constexpr MachineType MachineTypeOf<
+ HeapObjectSubtype, typename std::enable_if<std::is_base_of<
+ HeapObject, HeapObjectSubtype>::value>::type>::value;
+
+template <class Type, class Enable = void>
+struct MachineRepresentationOf {
+ static const MachineRepresentation value = Type::kMachineRepresentation;
+};
+template <class T>
+struct MachineRepresentationOf<
+ T, typename std::enable_if<std::is_base_of<Object, T>::value>::type> {
+ static const MachineRepresentation value =
+ MachineTypeOf<T>::value.representation();
+};
+template <class T>
+struct MachineRepresentationOf<
+ T, typename std::enable_if<std::is_base_of<MaybeObject, T>::value>::type> {
+ static const MachineRepresentation value =
+ MachineTypeOf<T>::value.representation();
+};
+template <>
+struct MachineRepresentationOf<ExternalReference> {
+ static const MachineRepresentation value = RawPtrT::kMachineRepresentation;
+};
+
+template <class T>
+struct is_valid_type_tag {
+ static const bool value = std::is_base_of<Object, T>::value ||
+ std::is_base_of<UntaggedT, T>::value ||
+ std::is_base_of<MaybeObject, T>::value ||
+ std::is_same<ExternalReference, T>::value;
+ static const bool is_tagged = std::is_base_of<Object, T>::value ||
+ std::is_base_of<MaybeObject, T>::value;
+};
+
+template <class T1, class T2>
+struct is_valid_type_tag<PairT<T1, T2>> {
+ static const bool value =
+ is_valid_type_tag<T1>::value && is_valid_type_tag<T2>::value;
+ static const bool is_tagged = false;
+};
+
+template <class T1, class T2>
+struct UnionT;
+
+template <class T1, class T2>
+struct is_valid_type_tag<UnionT<T1, T2>> {
+ static const bool is_tagged =
+ is_valid_type_tag<T1>::is_tagged && is_valid_type_tag<T2>::is_tagged;
+ static const bool value = is_tagged;
+};
+
+template <class T1, class T2>
+struct UnionT {
+ static constexpr MachineType kMachineType =
+ CommonMachineType(MachineTypeOf<T1>::value, MachineTypeOf<T2>::value);
+ static const MachineRepresentation kMachineRepresentation =
+ kMachineType.representation();
+ static_assert(kMachineRepresentation != MachineRepresentation::kNone,
+ "no common representation");
+ static_assert(is_valid_type_tag<T1>::is_tagged &&
+ is_valid_type_tag<T2>::is_tagged,
+ "union types are only possible for tagged values");
+};
+
+using AnyTaggedT = UnionT<Object, MaybeObject>;
+using Number = UnionT<Smi, HeapNumber>;
+using Numeric = UnionT<Number, BigInt>;
+
+// A pointer to a builtin function, used by Torque's function pointers.
+using BuiltinPtr = Smi;
+
+class int31_t {
+ public:
+ int31_t() : value_(0) {}
+ int31_t(int value) : value_(value) { // NOLINT(runtime/explicit)
+ DCHECK_EQ((value & 0x80000000) != 0, (value & 0x40000000) != 0);
+ }
+ int31_t& operator=(int value) {
+ DCHECK_EQ((value & 0x80000000) != 0, (value & 0x40000000) != 0);
+ value_ = value;
+ return *this;
+ }
+ int32_t value() const { return value_; }
+ operator int32_t() const { return value_; }
+
+ private:
+ int32_t value_;
+};
+
+template <class T, class U>
+struct is_subtype {
+ static const bool value = std::is_base_of<U, T>::value;
+};
+template <class T1, class T2, class U>
+struct is_subtype<UnionT<T1, T2>, U> {
+ static const bool value =
+ is_subtype<T1, U>::value && is_subtype<T2, U>::value;
+};
+template <class T, class U1, class U2>
+struct is_subtype<T, UnionT<U1, U2>> {
+ static const bool value =
+ is_subtype<T, U1>::value || is_subtype<T, U2>::value;
+};
+template <class T1, class T2, class U1, class U2>
+struct is_subtype<UnionT<T1, T2>, UnionT<U1, U2>> {
+ static const bool value =
+ (is_subtype<T1, U1>::value || is_subtype<T1, U2>::value) &&
+ (is_subtype<T2, U1>::value || is_subtype<T2, U2>::value);
+};
+
+template <class T, class U>
+struct types_have_common_values {
+ static const bool value = is_subtype<T, U>::value || is_subtype<U, T>::value;
+};
+template <class U>
+struct types_have_common_values<BoolT, U> {
+ static const bool value = types_have_common_values<Word32T, U>::value;
+};
+template <class U>
+struct types_have_common_values<Uint32T, U> {
+ static const bool value = types_have_common_values<Word32T, U>::value;
+};
+template <class U>
+struct types_have_common_values<Int32T, U> {
+ static const bool value = types_have_common_values<Word32T, U>::value;
+};
+template <class U>
+struct types_have_common_values<Uint64T, U> {
+ static const bool value = types_have_common_values<Word64T, U>::value;
+};
+template <class U>
+struct types_have_common_values<Int64T, U> {
+ static const bool value = types_have_common_values<Word64T, U>::value;
+};
+template <class U>
+struct types_have_common_values<IntPtrT, U> {
+ static const bool value = types_have_common_values<WordT, U>::value;
+};
+template <class U>
+struct types_have_common_values<UintPtrT, U> {
+ static const bool value = types_have_common_values<WordT, U>::value;
+};
+template <class T1, class T2, class U>
+struct types_have_common_values<UnionT<T1, T2>, U> {
+ static const bool value = types_have_common_values<T1, U>::value ||
+ types_have_common_values<T2, U>::value;
+};
+
+template <class T, class U1, class U2>
+struct types_have_common_values<T, UnionT<U1, U2>> {
+ static const bool value = types_have_common_values<T, U1>::value ||
+ types_have_common_values<T, U2>::value;
+};
+template <class T1, class T2, class U1, class U2>
+struct types_have_common_values<UnionT<T1, T2>, UnionT<U1, U2>> {
+ static const bool value = types_have_common_values<T1, U1>::value ||
+ types_have_common_values<T1, U2>::value ||
+ types_have_common_values<T2, U1>::value ||
+ types_have_common_values<T2, U2>::value;
+};
+
+template <class T>
+struct types_have_common_values<T, MaybeObject> {
+ static const bool value = types_have_common_values<T, Object>::value;
+};
+
+template <class T>
+struct types_have_common_values<MaybeObject, T> {
+ static const bool value = types_have_common_values<Object, T>::value;
+};
+
+// TNode<T> is an SSA value with the static type tag T, which is one of the
+// following:
+// - a subclass of internal::Object represents a tagged type
+// - a subclass of internal::UntaggedT represents an untagged type
+// - ExternalReference
+// - PairT<T1, T2> for an operation returning two values, with types T1
+// and T2
+// - UnionT<T1, T2> represents either a value of type T1 or of type T2.
+template <class T>
+class TNode {
+ public:
+ template <class U,
+ typename std::enable_if<is_subtype<U, T>::value, int>::type = 0>
+ TNode(const TNode<U>& other) : node_(other) {
+ LazyTemplateChecks();
+ }
+ TNode() : TNode(nullptr) {}
+
+ TNode operator=(TNode other) {
+ DCHECK_NOT_NULL(other.node_);
+ node_ = other.node_;
+ return *this;
+ }
+
+ bool is_null() { return node_ == nullptr; }
+
+ operator compiler::Node*() const { return node_; }
+
+ static TNode UncheckedCast(compiler::Node* node) { return TNode(node); }
+
+ protected:
+ explicit TNode(compiler::Node* node) : node_(node) { LazyTemplateChecks(); }
+
+ private:
+  // These checks should not run before TNode is actually used.
+ void LazyTemplateChecks() {
+ static_assert(is_valid_type_tag<T>::value, "invalid type tag");
+ }
+
+ compiler::Node* node_;
+};
+
+// SloppyTNode<T> is a variant of TNode<T> and allows implicit casts from
+// Node*. It is intended for function arguments as long as some call sites
+// still use untyped Node* arguments.
+// TODO(tebbi): Delete this class once transition is finished.
+template <class T>
+class SloppyTNode : public TNode<T> {
+ public:
+ SloppyTNode(compiler::Node* node) // NOLINT(runtime/explicit)
+ : TNode<T>(node) {}
+ template <class U, typename std::enable_if<is_subtype<U, T>::value,
+ int>::type = 0>
+ SloppyTNode(const TNode<U>& other) // NOLINT(runtime/explicit)
+ : TNode<T>(other) {}
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_TNODE_H_
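
A scaled-down, standalone rendering of the is_subtype and UnionT machinery defined above, using stand-in tag classes in place of the real heap-object hierarchy, to show which TNode conversions the traits permit:

// tnode_traits_sketch.cc -- stand-in tag classes; only the subtyping traits
// from tnode.h are reproduced here.
#include <type_traits>

struct Object {};
struct HeapObject : Object {};
struct Smi : Object {};
struct HeapNumber : HeapObject {};
struct BigInt : HeapObject {};

template <class T1, class T2> struct UnionT {};

template <class T, class U>
struct is_subtype { static const bool value = std::is_base_of<U, T>::value; };
template <class T1, class T2, class U>
struct is_subtype<UnionT<T1, T2>, U> {
  static const bool value = is_subtype<T1, U>::value && is_subtype<T2, U>::value;
};
template <class T, class U1, class U2>
struct is_subtype<T, UnionT<U1, U2>> {
  static const bool value = is_subtype<T, U1>::value || is_subtype<T, U2>::value;
};
template <class T1, class T2, class U1, class U2>
struct is_subtype<UnionT<T1, T2>, UnionT<U1, U2>> {
  static const bool value =
      (is_subtype<T1, U1>::value || is_subtype<T1, U2>::value) &&
      (is_subtype<T2, U1>::value || is_subtype<T2, U2>::value);
};

using Number = UnionT<Smi, HeapNumber>;
using Numeric = UnionT<Number, BigInt>;

// A TNode<Smi> converts to TNode<Number> and TNode<Object>, but a
// TNode<Number> does not convert back to TNode<Smi>.
static_assert(is_subtype<Smi, Number>::value, "Smi is a Number");
static_assert(is_subtype<Number, Object>::value, "Number is an Object");
static_assert(is_subtype<Number, Numeric>::value, "Number is a Numeric");
static_assert(!is_subtype<Number, Smi>::value, "but not the other way around");

int main() { return 0; }
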
diff --git a/deps/v8/src/codegen/turbo-assembler.h b/deps/v8/src/codegen/turbo-assembler.h
index 3a3e65a41e..c0f833b652 100644
--- a/deps/v8/src/codegen/turbo-assembler.h
+++ b/deps/v8/src/codegen/turbo-assembler.h
@@ -5,6 +5,8 @@
#ifndef V8_CODEGEN_TURBO_ASSEMBLER_H_
#define V8_CODEGEN_TURBO_ASSEMBLER_H_
+#include <memory>
+
#include "src/base/template-utils.h"
#include "src/builtins/builtins.h"
#include "src/codegen/assembler-arch.h"
@@ -100,7 +102,7 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
static bool IsAddressableThroughRootRegister(
Isolate* isolate, const ExternalReference& reference);
-#if V8_OS_WIN
+#if V8_TARGET_OS_WIN
// Minimum page size. We must touch memory once per page when expanding the
// stack, to avoid access violations.
static constexpr int kStackPageSize = 4 * KB;
diff --git a/deps/v8/src/codegen/x64/assembler-x64-inl.h b/deps/v8/src/codegen/x64/assembler-x64-inl.h
index f5d0c0ffcf..d8457d9d3e 100644
--- a/deps/v8/src/codegen/x64/assembler-x64-inl.h
+++ b/deps/v8/src/codegen/x64/assembler-x64-inl.h
@@ -218,6 +218,7 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
void Assembler::set_target_address_at(Address pc, Address constant_pool,
Address target,
ICacheFlushMode icache_flush_mode) {
+ DCHECK(is_int32(target - pc - 4));
WriteUnalignedValue(pc, static_cast<int32_t>(target - pc - 4));
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
FlushInstructionCache(pc, sizeof(int32_t));
@@ -363,7 +364,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target,
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
FlushInstructionCache(pc_, sizeof(Address));
}
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) {
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
+ !FLAG_disable_write_barriers) {
WriteBarrierForCode(host(), this, target);
}
}
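
The DCHECK added to set_target_address_at above guards the rel32 encoding: the stored operand is the displacement from the end of the 4-byte immediate, and it must fit in 32 bits. A sketch of that arithmetic with made-up addresses and a local is_int32 stand-in:

// rel32_sketch.cc -- the displacement arithmetic behind the new DCHECK;
// addresses below are made up, is_int32 is a local stand-in.
#include <cassert>
#include <cstdint>
#include <limits>

using Address = uint64_t;

bool is_int32(int64_t x) {
  return x >= std::numeric_limits<int32_t>::min() &&
         x <= std::numeric_limits<int32_t>::max();
}

// A rel32 branch operand at `pc` encodes target - (pc + 4): the offset is
// taken from the end of the 4-byte immediate, so it must fit in 32 bits.
int32_t Rel32Operand(Address pc, Address target) {
  int64_t delta = static_cast<int64_t>(target - pc - 4);
  assert(is_int32(delta));  // mirrors the DCHECK added in the hunk above
  return static_cast<int32_t>(delta);
}

int main() {
  const Address low = 0x100000, high = 0x100400;    // hypothetical addresses
  assert(Rel32Operand(low, high) == 0x400 - 4);     // forward branch
  assert(Rel32Operand(high, low) == -(0x400 + 4));  // backward branch
  return 0;
}
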
diff --git a/deps/v8/src/codegen/x64/assembler-x64.cc b/deps/v8/src/codegen/x64/assembler-x64.cc
index 1783da700b..16791a6453 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/assembler-x64.cc
@@ -327,8 +327,9 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset();
switch (request.kind()) {
case HeapObjectRequest::kHeapNumber: {
- Handle<HeapNumber> object = isolate->factory()->NewHeapNumber(
- request.heap_number(), AllocationType::kOld);
+ Handle<HeapNumber> object =
+ isolate->factory()->NewHeapNumber<AllocationType::kOld>(
+ request.heap_number());
WriteUnalignedValue(pc, object);
break;
}
@@ -1777,6 +1778,13 @@ void Assembler::emit_mov(Register dst, Immediate64 value, int size) {
}
}
+void Assembler::movq_imm64(Register dst, int64_t value) {
+ EnsureSpace ensure_space(this);
+ emit_rex(dst, kInt64Size);
+ emit(0xB8 | dst.low_bits());
+ emitq(static_cast<uint64_t>(value));
+}
+
void Assembler::movq_heap_number(Register dst, double value) {
EnsureSpace ensure_space(this);
emit_rex(dst, kInt64Size);
@@ -1963,6 +1971,13 @@ void Assembler::emit_repmovs(int size) {
emit(0xA5);
}
+void Assembler::repstosq() {
+ EnsureSpace ensure_space(this);
+ emit(0xF3);
+ emit_rex_64();
+ emit(0xAB);
+}
+
void Assembler::mull(Register src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(src);
@@ -4099,6 +4114,42 @@ void Assembler::vfmass(byte op, XMMRegister dst, XMMRegister src1,
emit_sse_operand(dst, src2);
}
+void Assembler::vfmaps(byte op, XMMRegister dst, XMMRegister src1,
+ XMMRegister src2) {
+ DCHECK(IsEnabled(FMA3));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(dst, src1, src2, kL128, k66, k0F38, kW0);
+ emit(op);
+ emit_sse_operand(dst, src2);
+}
+
+void Assembler::vfmaps(byte op, XMMRegister dst, XMMRegister src1,
+ Operand src2) {
+ DCHECK(IsEnabled(FMA3));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(dst, src1, src2, kL128, k66, k0F38, kW0);
+ emit(op);
+ emit_sse_operand(dst, src2);
+}
+
+void Assembler::vfmapd(byte op, XMMRegister dst, XMMRegister src1,
+ XMMRegister src2) {
+ DCHECK(IsEnabled(FMA3));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(dst, src1, src2, kL128, k66, k0F38, kW1);
+ emit(op);
+ emit_sse_operand(dst, src2);
+}
+
+void Assembler::vfmapd(byte op, XMMRegister dst, XMMRegister src1,
+ Operand src2) {
+ DCHECK(IsEnabled(FMA3));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(dst, src1, src2, kL128, k66, k0F38, kW1);
+ emit(op);
+ emit_sse_operand(dst, src2);
+}
+
void Assembler::vmovd(XMMRegister dst, Register src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
diff --git a/deps/v8/src/codegen/x64/assembler-x64.h b/deps/v8/src/codegen/x64/assembler-x64.h
index 7c69b4c473..74cfd0ab85 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.h
+++ b/deps/v8/src/codegen/x64/assembler-x64.h
@@ -39,6 +39,7 @@
#include <deque>
#include <map>
+#include <memory>
#include <vector>
#include "src/codegen/assembler.h"
@@ -155,7 +156,9 @@ enum ScaleFactor : int8_t {
times_4 = 2,
times_8 = 3,
times_int_size = times_4,
- times_system_pointer_size = (kSystemPointerSize == 8) ? times_8 : times_4,
+
+ times_half_system_pointer_size = times_4,
+ times_system_pointer_size = times_8,
times_tagged_size = (kTaggedSize == 8) ? times_8 : times_4,
};
@@ -513,12 +516,16 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void movq_string(Register dst, const StringConstantBase* str);
- // Loads a 64-bit immediate into a register.
+ // Loads a 64-bit immediate into a register, potentially using the constant
+ // pool.
void movq(Register dst, int64_t value) { movq(dst, Immediate64(value)); }
void movq(Register dst, uint64_t value) {
movq(dst, Immediate64(static_cast<int64_t>(value)));
}
+ // Loads a 64-bit immediate into a register without using the constant pool.
+ void movq_imm64(Register dst, int64_t value);
+
void movsxbl(Register dst, Register src);
void movsxbl(Register dst, Operand src);
void movsxbq(Register dst, Register src);
@@ -531,12 +538,14 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void movsxlq(Register dst, Operand src);
// Repeated moves.
-
void repmovsb();
void repmovsw();
void repmovsl() { emit_repmovs(kInt32Size); }
void repmovsq() { emit_repmovs(kInt64Size); }
+ // Repeated store of quadwords (fill RCX quadwords at [RDI] with RAX).
+ void repstosq();
+
// Instruction to load from an immediate 64-bit pointer into RAX.
void load_rax(Address value, RelocInfo::Mode rmode);
void load_rax(ExternalReference ext);
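
A plain-C++ model of what the emitted rep stosq does at run time, matching the comment above: fill RCX quadwords starting at [RDI] with the value in RAX:

// repstosq_sketch.cc -- behavioral model only, not the instruction encoding.
#include <cassert>
#include <cstdint>

void RepStosq(uint64_t* rdi, uint64_t rax, uint64_t rcx) {
  while (rcx--) *rdi++ = rax;  // direction flag clear: addresses go up
}

int main() {
  uint64_t block[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  RepStosq(block, 0, 8);  // e.g. zero-filling a freshly allocated region
  for (uint64_t qword : block) assert(qword == 0);
  return 0;
}
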
@@ -1295,6 +1304,36 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vfmass(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
void vfmass(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
+ void vfmadd231ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+ vfmaps(0xb8, dst, src1, src2);
+ }
+ void vfmadd231ps(XMMRegister dst, XMMRegister src1, Operand src2) {
+ vfmaps(0xb8, dst, src1, src2);
+ }
+ void vfnmadd231ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+ vfmaps(0xbc, dst, src1, src2);
+ }
+ void vfnmadd231ps(XMMRegister dst, XMMRegister src1, Operand src2) {
+ vfmaps(0xbc, dst, src1, src2);
+ }
+ void vfmaps(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
+ void vfmaps(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
+
+ void vfmadd231pd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+ vfmapd(0xb8, dst, src1, src2);
+ }
+ void vfmadd231pd(XMMRegister dst, XMMRegister src1, Operand src2) {
+ vfmapd(0xb8, dst, src1, src2);
+ }
+ void vfnmadd231pd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+ vfmapd(0xbc, dst, src1, src2);
+ }
+ void vfnmadd231pd(XMMRegister dst, XMMRegister src1, Operand src2) {
+ vfmapd(0xbc, dst, src1, src2);
+ }
+ void vfmapd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
+ void vfmapd(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
+
void vmovd(XMMRegister dst, Register src);
void vmovd(XMMRegister dst, Operand src);
void vmovd(Register dst, XMMRegister src);
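
A scalar model of the 231-form FMA semantics behind the new vfmadd231ps/vfnmadd231ps wrappers, applied lane-wise over the four floats of an XMM register and ignoring the single rounding of a real fused multiply-add:

// fma231_sketch.cc -- behavioral model of the packed-single 231 forms:
// dst = +/-(src1 * src2) + dst, per lane.
#include <cassert>
#include <cstddef>

void Vfmadd231ps(float dst[4], const float src1[4], const float src2[4]) {
  for (size_t i = 0; i < 4; ++i) dst[i] = src1[i] * src2[i] + dst[i];
}

void Vfnmadd231ps(float dst[4], const float src1[4], const float src2[4]) {
  for (size_t i = 0; i < 4; ++i) dst[i] = -(src1[i] * src2[i]) + dst[i];
}

int main() {
  float dst[4] = {1, 1, 1, 1};
  const float a[4] = {2, 2, 2, 2};
  const float b[4] = {3, 3, 3, 3};
  Vfmadd231ps(dst, a, b);   // 2 * 3 + 1
  assert(dst[0] == 7.0f);
  Vfnmadd231ps(dst, a, b);  // -(2 * 3) + 7
  assert(dst[0] == 1.0f);
  return 0;
}
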
@@ -1330,7 +1369,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
impl(opcode, dst, src1, src2); \
}
- AVX_SP_3(vsqrt, 0x51)
+ // vsqrtpd is defined by sqrtpd in SSE2_INSTRUCTION_LIST
+ AVX_S_3(vsqrt, 0x51)
+ AVX_3(vsqrtps, 0x51, vps)
AVX_S_3(vadd, 0x58)
AVX_S_3(vsub, 0x5c)
AVX_S_3(vmul, 0x59)
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.cc b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
index 4deeb1bc02..d02b95b38e 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
@@ -218,45 +218,45 @@ void TurboAssembler::CompareRoot(Operand with, RootIndex index) {
void TurboAssembler::LoadTaggedPointerField(Register destination,
Operand field_operand) {
-#ifdef V8_COMPRESS_POINTERS
- DecompressTaggedPointer(destination, field_operand);
-#else
- mov_tagged(destination, field_operand);
-#endif
+ if (COMPRESS_POINTERS_BOOL) {
+ DecompressTaggedPointer(destination, field_operand);
+ } else {
+ mov_tagged(destination, field_operand);
+ }
}
void TurboAssembler::LoadAnyTaggedField(Register destination,
Operand field_operand,
Register scratch) {
-#ifdef V8_COMPRESS_POINTERS
- DecompressAnyTagged(destination, field_operand, scratch);
-#else
- mov_tagged(destination, field_operand);
-#endif
+ if (COMPRESS_POINTERS_BOOL) {
+ DecompressAnyTagged(destination, field_operand, scratch);
+ } else {
+ mov_tagged(destination, field_operand);
+ }
}
void TurboAssembler::PushTaggedPointerField(Operand field_operand,
Register scratch) {
-#ifdef V8_COMPRESS_POINTERS
- DCHECK(!field_operand.AddressUsesRegister(scratch));
- DecompressTaggedPointer(scratch, field_operand);
- Push(scratch);
-#else
- Push(field_operand);
-#endif
+ if (COMPRESS_POINTERS_BOOL) {
+ DCHECK(!field_operand.AddressUsesRegister(scratch));
+ DecompressTaggedPointer(scratch, field_operand);
+ Push(scratch);
+ } else {
+ Push(field_operand);
+ }
}
void TurboAssembler::PushTaggedAnyField(Operand field_operand,
Register scratch1, Register scratch2) {
-#ifdef V8_COMPRESS_POINTERS
- DCHECK(!AreAliased(scratch1, scratch2));
- DCHECK(!field_operand.AddressUsesRegister(scratch1));
- DCHECK(!field_operand.AddressUsesRegister(scratch2));
- DecompressAnyTagged(scratch1, field_operand, scratch2);
- Push(scratch1);
-#else
- Push(field_operand);
-#endif
+ if (COMPRESS_POINTERS_BOOL) {
+ DCHECK(!AreAliased(scratch1, scratch2));
+ DCHECK(!field_operand.AddressUsesRegister(scratch1));
+ DCHECK(!field_operand.AddressUsesRegister(scratch2));
+ DecompressAnyTagged(scratch1, field_operand, scratch2);
+ Push(scratch1);
+ } else {
+ Push(field_operand);
+ }
}
void TurboAssembler::SmiUntagField(Register dst, Operand src) {
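The hunks above trade #ifdef V8_COMPRESS_POINTERS for a branch on COMPRESS_POINTERS_BOOL, so both arms are always parsed and type-checked while the constant condition lets the compiler drop the dead one. A hedged sketch of the pattern, with an illustrative constant standing in for V8's macro:

    // kCompressPointers stands in for COMPRESS_POINTERS_BOOL (illustrative).
    #ifdef V8_COMPRESS_POINTERS
    constexpr bool kCompressPointers = true;
    #else
    constexpr bool kCompressPointers = false;
    #endif

    int LoadTaggedFieldSketch(int compressed_field, int full_field) {
      if (kCompressPointers) {  // constant-folded, but both branches compile
        return compressed_field;
      } else {
        return full_field;
      }
    }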
@@ -265,44 +265,40 @@ void TurboAssembler::SmiUntagField(Register dst, Operand src) {
void TurboAssembler::StoreTaggedField(Operand dst_field_operand,
Immediate value) {
-#ifdef V8_COMPRESS_POINTERS
- RecordComment("[ StoreTagged");
- movl(dst_field_operand, value);
- RecordComment("]");
-#else
- movq(dst_field_operand, value);
-#endif
+ if (COMPRESS_POINTERS_BOOL) {
+ movl(dst_field_operand, value);
+ } else {
+ movq(dst_field_operand, value);
+ }
}
void TurboAssembler::StoreTaggedField(Operand dst_field_operand,
Register value) {
-#ifdef V8_COMPRESS_POINTERS
- RecordComment("[ StoreTagged");
- movl(dst_field_operand, value);
- RecordComment("]");
-#else
- movq(dst_field_operand, value);
-#endif
+ if (COMPRESS_POINTERS_BOOL) {
+ movl(dst_field_operand, value);
+ } else {
+ movq(dst_field_operand, value);
+ }
}
void TurboAssembler::DecompressTaggedSigned(Register destination,
Operand field_operand) {
RecordComment("[ DecompressTaggedSigned");
- movsxlq(destination, field_operand);
+ movl(destination, field_operand);
RecordComment("]");
}
void TurboAssembler::DecompressTaggedSigned(Register destination,
Register source) {
RecordComment("[ DecompressTaggedSigned");
- movsxlq(destination, source);
+ movl(destination, source);
RecordComment("]");
}
void TurboAssembler::DecompressTaggedPointer(Register destination,
Operand field_operand) {
RecordComment("[ DecompressTaggedPointer");
- movsxlq(destination, field_operand);
+ movl(destination, field_operand);
addq(destination, kRootRegister);
RecordComment("]");
}
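DecompressTaggedSigned/DecompressTaggedPointer now zero-extend the 32-bit compressed value (movl) instead of sign-extending it (movsxlq) before adding the isolate root held in kRootRegister. A short sketch of the resulting arithmetic (function and parameter names are illustrative):

    #include <cstdint>

    // Decompress a 32-bit compressed tagged pointer by zero-extending it and
    // adding the isolate root: movl + addq in the generated code.
    uintptr_t DecompressTaggedPointerSketch(uintptr_t isolate_root,
                                            uint32_t compressed) {
      return isolate_root + static_cast<uintptr_t>(compressed);
    }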
@@ -310,30 +306,14 @@ void TurboAssembler::DecompressTaggedPointer(Register destination,
void TurboAssembler::DecompressTaggedPointer(Register destination,
Register source) {
RecordComment("[ DecompressTaggedPointer");
- movsxlq(destination, source);
+ movl(destination, source);
addq(destination, kRootRegister);
RecordComment("]");
}
void TurboAssembler::DecompressRegisterAnyTagged(Register destination,
Register scratch) {
- if (kUseBranchlessPtrDecompressionInGeneratedCode) {
- // Branchlessly compute |masked_root|:
- // masked_root = HAS_SMI_TAG(destination) ? 0 : kRootRegister;
- STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag < 32));
- Register masked_root = scratch;
- xorq(masked_root, masked_root);
- Condition smi = CheckSmi(destination);
- cmovq(NegateCondition(smi), masked_root, kRootRegister);
- // Now this add operation will either leave the value unchanged if it is
- // a smi or add the isolate root if it is a heap object.
- addq(destination, masked_root);
- } else {
- Label done;
- JumpIfSmi(destination, &done);
- addq(destination, kRootRegister);
- bind(&done);
- }
+ addq(destination, kRootRegister);
}
void TurboAssembler::DecompressAnyTagged(Register destination,
@@ -341,7 +321,7 @@ void TurboAssembler::DecompressAnyTagged(Register destination,
Register scratch) {
DCHECK(!AreAliased(destination, scratch));
RecordComment("[ DecompressAnyTagged");
- movsxlq(destination, field_operand);
+ movl(destination, field_operand);
DecompressRegisterAnyTagged(destination, scratch);
RecordComment("]");
}
@@ -350,7 +330,7 @@ void TurboAssembler::DecompressAnyTagged(Register destination, Register source,
Register scratch) {
DCHECK(!AreAliased(destination, scratch));
RecordComment("[ DecompressAnyTagged");
- movsxlq(destination, source);
+ movl(destination, source);
DecompressRegisterAnyTagged(destination, scratch);
RecordComment("]");
}
@@ -1109,7 +1089,11 @@ Register TurboAssembler::GetSmiConstant(Smi source) {
xorl(kScratchRegister, kScratchRegister);
return kScratchRegister;
}
- Move(kScratchRegister, source);
+ if (SmiValuesAre32Bits()) {
+ Move(kScratchRegister, source);
+ } else {
+ movl(kScratchRegister, Immediate(source));
+ }
return kScratchRegister;
}
@@ -1133,20 +1117,47 @@ void TurboAssembler::Move(Register dst, ExternalReference ext) {
movq(dst, Immediate64(ext.address(), RelocInfo::EXTERNAL_REFERENCE));
}
-void MacroAssembler::SmiTag(Register dst, Register src) {
+void MacroAssembler::SmiTag(Register reg) {
STATIC_ASSERT(kSmiTag == 0);
- if (dst != src) {
+ DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
+ if (COMPRESS_POINTERS_BOOL) {
+ shll(reg, Immediate(kSmiShift));
+ } else {
+ shlq(reg, Immediate(kSmiShift));
+ }
+}
+
+void MacroAssembler::SmiTag(Register dst, Register src) {
+ DCHECK(dst != src);
+ if (COMPRESS_POINTERS_BOOL) {
+ movl(dst, src);
+ } else {
movq(dst, src);
}
+ SmiTag(dst);
+}
+
+void TurboAssembler::SmiUntag(Register reg) {
+ STATIC_ASSERT(kSmiTag == 0);
DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
- shlq(dst, Immediate(kSmiShift));
+ // TODO(v8:7703): Is there a way to avoid this sign extension when pointer
+ // compression is enabled?
+ if (COMPRESS_POINTERS_BOOL) {
+ movsxlq(reg, reg);
+ }
+ sarq(reg, Immediate(kSmiShift));
}
void TurboAssembler::SmiUntag(Register dst, Register src) {
- STATIC_ASSERT(kSmiTag == 0);
- if (dst != src) {
+ DCHECK(dst != src);
+ if (COMPRESS_POINTERS_BOOL) {
+ movsxlq(dst, src);
+ } else {
movq(dst, src);
}
+ // TODO(v8:7703): Call SmiUntag(reg) if we can find a way to avoid the extra
+ // mov when pointer compression is enabled.
+ STATIC_ASSERT(kSmiTag == 0);
DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
sarq(dst, Immediate(kSmiShift));
}
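The new single-register SmiTag/SmiUntag shift by kSmiShift, using 32-bit shifts when pointer compression (31-bit smis) is enabled and 64-bit shifts otherwise. A minimal model of the tagging math, with the shift width passed in rather than taken from V8's constants:

    #include <cstdint>

    // kSmiTag == 0, so a smi is the value shifted left by the smi shift:
    // 1 with 31-bit smis / pointer compression, 32 with 32-bit smis.
    int64_t SmiTagModel(int64_t value, int smi_shift) {
      return static_cast<int64_t>(static_cast<uint64_t>(value) << smi_shift);
    }
    int64_t SmiUntagModel(int64_t tagged, int smi_shift) {
      return tagged >> smi_shift;  // arithmetic shift, like sarq
    }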
@@ -1158,12 +1169,13 @@ void TurboAssembler::SmiUntag(Register dst, Operand src) {
movsxlq(dst, dst);
} else {
DCHECK(SmiValuesAre31Bits());
-#ifdef V8_COMPRESS_POINTERS
- movsxlq(dst, src);
-#else
- movq(dst, src);
-#endif
- sarq(dst, Immediate(kSmiShift));
+ if (COMPRESS_POINTERS_BOOL) {
+ movsxlq(dst, src);
+ sarq(dst, Immediate(kSmiShift));
+ } else {
+ movq(dst, src);
+ sarq(dst, Immediate(kSmiShift));
+ }
}
}
@@ -1283,12 +1295,9 @@ SmiIndex MacroAssembler::SmiToIndex(Register dst, Register src, int shift) {
return SmiIndex(dst, times_1);
} else {
DCHECK(SmiValuesAre31Bits());
- if (dst != src) {
- mov_tagged(dst, src);
- }
// We have to sign extend the index register to 64-bit as the SMI might
// be negative.
- movsxlq(dst, dst);
+ movsxlq(dst, src);
if (shift < kSmiShift) {
sarq(dst, Immediate(kSmiShift - shift));
} else if (shift != kSmiShift) {
@@ -1423,7 +1432,6 @@ void MacroAssembler::Negpd(XMMRegister dst) {
}
void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
- AllowDeferredHandleDereference smi_check;
if (source->IsSmi()) {
Cmp(dst, Smi::cast(*source));
} else {
@@ -1433,7 +1441,6 @@ void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
}
void MacroAssembler::Cmp(Operand dst, Handle<Object> source) {
- AllowDeferredHandleDereference smi_check;
if (source->IsSmi()) {
Cmp(dst, Smi::cast(*source));
} else {
@@ -1463,6 +1470,8 @@ void TurboAssembler::Move(Register result, Handle<HeapObject> object,
RelocInfo::Mode rmode) {
if (FLAG_embedded_builtins) {
if (root_array_available_ && options().isolate_independent_code) {
+ // TODO(v8:9706): Fix-it! This load will always uncompress the value
+ // even when we are loading a compressed embedded object.
IndirectLoadConstant(result, object);
return;
}
@@ -1605,26 +1614,20 @@ void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
}
Operand TurboAssembler::EntryFromBuiltinIndexAsOperand(Register builtin_index) {
-#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
- STATIC_ASSERT(kSmiShiftSize == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
-
- // The builtin_index register contains the builtin index as a Smi.
- // Untagging is folded into the indexing operand below (we use times_4 instead
- // of times_8 since smis are already shifted by one).
- return Operand(kRootRegister, builtin_index, times_4,
- IsolateData::builtin_entry_table_offset());
-#else // defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
- STATIC_ASSERT(kSmiShiftSize == 31);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
+ if (SmiValuesAre32Bits()) {
+ // The builtin_index register contains the builtin index as a Smi.
+ SmiUntag(builtin_index);
+ return Operand(kRootRegister, builtin_index, times_system_pointer_size,
+ IsolateData::builtin_entry_table_offset());
+ } else {
+ DCHECK(SmiValuesAre31Bits());
- // The builtin_index register contains the builtin index as a Smi.
- SmiUntag(builtin_index, builtin_index);
- return Operand(kRootRegister, builtin_index, times_8,
- IsolateData::builtin_entry_table_offset());
-#endif // defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
+ // The builtin_index register contains the builtin index as a Smi.
+ // Untagging is folded into the indexing operand below (we use
+ // times_half_system_pointer_size since smis are already shifted by one).
+ return Operand(kRootRegister, builtin_index, times_half_system_pointer_size,
+ IsolateData::builtin_entry_table_offset());
+ }
}
void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
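With 31-bit smis the untag is folded into the addressing mode: a smi already holds value << 1, so scaling it by times_half_system_pointer_size (4 on x64) produces value * 8, i.e. value * kSystemPointerSize, without an explicit shift. A small check of that arithmetic with illustrative constants:

    #include <cassert>
    #include <cstdint>

    int main() {
      const int64_t kSmiShift = 1;           // 31-bit smis
      const int64_t kSystemPointerSize = 8;  // x64
      for (int64_t index = 0; index < 16; ++index) {
        int64_t smi = index << kSmiShift;
        // Operand(kRootRegister, builtin_index, times_half_system_pointer_size, ...)
        assert(smi * (kSystemPointerSize / 2) == index * kSystemPointerSize);
      }
      return 0;
    }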
@@ -1739,7 +1742,11 @@ void TurboAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
Movd(dst, src);
return;
}
- if (CpuFeatures::IsSupported(SSE4_1)) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpextrd(dst, src, imm8);
+ return;
+ } else if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatureScope sse_scope(this, SSE4_1);
pextrd(dst, src, imm8);
return;
@@ -1749,8 +1756,38 @@ void TurboAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
shrq(dst, Immediate(32));
}
+void TurboAssembler::Pextrw(Register dst, XMMRegister src, int8_t imm8) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpextrw(dst, src, imm8);
+ return;
+ } else {
+ DCHECK(CpuFeatures::IsSupported(SSE4_1));
+ CpuFeatureScope sse_scope(this, SSE4_1);
+ pextrw(dst, src, imm8);
+ return;
+ }
+}
+
+void TurboAssembler::Pextrb(Register dst, XMMRegister src, int8_t imm8) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpextrb(dst, src, imm8);
+ return;
+ } else {
+ DCHECK(CpuFeatures::IsSupported(SSE4_1));
+ CpuFeatureScope sse_scope(this, SSE4_1);
+ pextrb(dst, src, imm8);
+ return;
+ }
+}
+
void TurboAssembler::Pinsrd(XMMRegister dst, Register src, int8_t imm8) {
- if (CpuFeatures::IsSupported(SSE4_1)) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpinsrd(dst, dst, src, imm8);
+ return;
+ } else if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatureScope sse_scope(this, SSE4_1);
pinsrd(dst, src, imm8);
return;
@@ -1765,7 +1802,11 @@ void TurboAssembler::Pinsrd(XMMRegister dst, Register src, int8_t imm8) {
}
void TurboAssembler::Pinsrd(XMMRegister dst, Operand src, int8_t imm8) {
- if (CpuFeatures::IsSupported(SSE4_1)) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpinsrd(dst, dst, src, imm8);
+ return;
+ } else if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatureScope sse_scope(this, SSE4_1);
pinsrd(dst, src, imm8);
return;
@@ -1779,6 +1820,56 @@ void TurboAssembler::Pinsrd(XMMRegister dst, Operand src, int8_t imm8) {
}
}
+void TurboAssembler::Pinsrw(XMMRegister dst, Register src, int8_t imm8) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpinsrw(dst, dst, src, imm8);
+ return;
+ } else {
+ DCHECK(CpuFeatures::IsSupported(SSE4_1));
+ CpuFeatureScope sse_scope(this, SSE4_1);
+ pinsrw(dst, src, imm8);
+ return;
+ }
+}
+
+void TurboAssembler::Pinsrw(XMMRegister dst, Operand src, int8_t imm8) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpinsrw(dst, dst, src, imm8);
+ return;
+ } else {
+ CpuFeatureScope sse_scope(this, SSE4_1);
+ pinsrw(dst, src, imm8);
+ return;
+ }
+}
+
+void TurboAssembler::Pinsrb(XMMRegister dst, Register src, int8_t imm8) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpinsrb(dst, dst, src, imm8);
+ return;
+ } else {
+ DCHECK(CpuFeatures::IsSupported(SSE4_1));
+ CpuFeatureScope sse_scope(this, SSE4_1);
+ pinsrb(dst, src, imm8);
+ return;
+ }
+}
+
+void TurboAssembler::Pinsrb(XMMRegister dst, Operand src, int8_t imm8) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpinsrb(dst, dst, src, imm8);
+ return;
+ } else {
+ CpuFeatureScope sse_scope(this, SSE4_1);
+ pinsrb(dst, src, imm8);
+ return;
+ }
+}
+
void TurboAssembler::Psllq(XMMRegister dst, byte imm8) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
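The new Pextr*/Pinsr* wrappers all follow one dispatch shape: emit the VEX-encoded AVX form when available, otherwise fall back to the SSE4.1 encoding (or DCHECK that SSE4.1 is present). A generic, hedged sketch of that pattern outside V8's CpuFeatures machinery:

    #include <cassert>

    enum class SimdLevel { kSSE41, kAVX };

    // Dispatch shape only; the emitted instruction bytes are represented by
    // comments rather than a real encoder.
    void EmitInsertLane(SimdLevel level) {
      if (level == SimdLevel::kAVX) {
        // emit the VEX form, e.g. vpinsrb dst, dst, src, imm8
      } else {
        assert(level == SimdLevel::kSSE41);
        // emit the legacy form, e.g. pinsrb dst, src, imm8
      }
    }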
@@ -1819,6 +1910,16 @@ void TurboAssembler::Psrld(XMMRegister dst, byte imm8) {
}
}
+void TurboAssembler::Pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpshufd(dst, src, shuffle);
+ } else {
+ DCHECK(!IsEnabled(AVX));
+ pshufd(dst, src, shuffle);
+ }
+}
+
void TurboAssembler::Lzcntl(Register dst, Register src) {
if (CpuFeatures::IsSupported(LZCNT)) {
CpuFeatureScope scope(this, LZCNT);
@@ -2278,7 +2379,16 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
DCHECK_IMPLIES(new_target.is_valid(), new_target == rdx);
// On function call, call into the debugger if necessary.
- CheckDebugHook(function, new_target, expected, actual);
+ Label debug_hook, continue_after_hook;
+ {
+ ExternalReference debug_hook_active =
+ ExternalReference::debug_hook_on_function_call_address(isolate());
+ Operand debug_hook_active_operand =
+ ExternalReferenceAsOperand(debug_hook_active);
+ cmpb(debug_hook_active_operand, Immediate(0));
+ j(not_equal, &debug_hook, Label::kNear);
+ }
+ bind(&continue_after_hook);
// Clear the new.target register if not given.
if (!new_target.is_valid()) {
@@ -2302,8 +2412,15 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
DCHECK(flag == JUMP_FUNCTION);
JumpCodeObject(rcx);
}
- bind(&done);
}
+ jmp(&done, Label::kNear);
+
+ // Deferred debug hook.
+ bind(&debug_hook);
+ CallDebugOnFunctionCall(function, new_target, expected, actual);
+ jmp(&continue_after_hook, Label::kNear);
+
+ bind(&done);
}
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
@@ -2368,50 +2485,38 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
}
-void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual) {
- Label skip_hook;
- ExternalReference debug_hook_active =
- ExternalReference::debug_hook_on_function_call_address(isolate());
- Operand debug_hook_active_operand =
- ExternalReferenceAsOperand(debug_hook_active);
- cmpb(debug_hook_active_operand, Immediate(0));
- j(equal, &skip_hook);
-
- {
- FrameScope frame(this,
- has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
- if (expected.is_reg()) {
- SmiTag(expected.reg(), expected.reg());
- Push(expected.reg());
- }
- if (actual.is_reg()) {
- SmiTag(actual.reg(), actual.reg());
- Push(actual.reg());
- SmiUntag(actual.reg(), actual.reg());
- }
- if (new_target.is_valid()) {
- Push(new_target);
- }
- Push(fun);
- Push(fun);
- Push(StackArgumentsAccessor(rbp, actual).GetReceiverOperand());
- CallRuntime(Runtime::kDebugOnFunctionCall);
- Pop(fun);
- if (new_target.is_valid()) {
- Pop(new_target);
- }
- if (actual.is_reg()) {
- Pop(actual.reg());
- SmiUntag(actual.reg(), actual.reg());
- }
- if (expected.is_reg()) {
- Pop(expected.reg());
- SmiUntag(expected.reg(), expected.reg());
- }
+void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual) {
+ FrameScope frame(this, has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ if (expected.is_reg()) {
+ SmiTag(expected.reg());
+ Push(expected.reg());
+ }
+ if (actual.is_reg()) {
+ SmiTag(actual.reg());
+ Push(actual.reg());
+ SmiUntag(actual.reg());
+ }
+ if (new_target.is_valid()) {
+ Push(new_target);
+ }
+ Push(fun);
+ Push(fun);
+ Push(StackArgumentsAccessor(rbp, actual).GetReceiverOperand());
+ CallRuntime(Runtime::kDebugOnFunctionCall);
+ Pop(fun);
+ if (new_target.is_valid()) {
+ Pop(new_target);
+ }
+ if (actual.is_reg()) {
+ Pop(actual.reg());
+ SmiUntag(actual.reg());
+ }
+ if (expected.is_reg()) {
+ Pop(expected.reg());
+ SmiUntag(expected.reg());
}
- bind(&skip_hook);
}
void TurboAssembler::StubPrologue(StackFrame::Type type) {
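InvokeFunctionCode now keeps only a compare-and-branch on the debug-hook flag in the main path; the register spilling and runtime call live in a deferred block emitted after the body, which jumps back when done. A hedged sketch of that control-flow layout (the labels mirror the emitted code, not V8 API):

    // Shape of the generated control flow: the slow path sits out of line,
    // after the hot path, and rejoins it once the debug hook has run.
    void InvokeShape(bool debug_hook_active) {
      if (debug_hook_active) goto debug_hook;
    continue_after_hook:
      // clear new.target if absent, check argument counts, call/jump to code
      goto done;
    debug_hook:
      // CallDebugOnFunctionCall: spill registers, CallRuntime, restore
      goto continue_after_hook;
    done:
      return;
    }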
@@ -2443,7 +2548,7 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) {
popq(rbp);
}
-#ifdef V8_OS_WIN
+#ifdef V8_TARGET_OS_WIN
void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
// In windows, we cannot increment the stack size by more than one page
// (minimum page size is 4KB) without accessing at least one byte on the
@@ -2511,7 +2616,7 @@ void MacroAssembler::EnterExitFramePrologue(bool save_rax,
void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
bool save_doubles) {
-#ifdef _WIN64
+#ifdef V8_TARGET_OS_WIN
const int kShadowSpace = 4;
arg_stack_space += kShadowSpace;
#endif
@@ -2615,7 +2720,7 @@ void MacroAssembler::LeaveExitFrameEpilogue() {
movq(c_entry_fp_operand, Immediate(0));
}
-#ifdef _WIN64
+#ifdef V8_TARGET_OS_WIN
static const int kRegisterPassedArguments = 4;
#else
static const int kRegisterPassedArguments = 6;
@@ -2634,7 +2739,7 @@ int TurboAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
// On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers
// and the caller does not reserve stack slots for them.
DCHECK_GE(num_arguments, 0);
-#ifdef _WIN64
+#ifdef V8_TARGET_OS_WIN
const int kMinimumStackSlots = kRegisterPassedArguments;
if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
return num_arguments;
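The _WIN64 and V8_OS_WIN checks in these files become V8_TARGET_OS_WIN, presumably so that codegen decisions follow the target platform rather than the build host (the two differ when cross-compiling). The values below are taken from the hunk above; the sketch only restates the ABI difference it guards:

    // Windows x64 passes the first 4 integer args in registers (plus 32 bytes
    // of shadow space); the System V AMD64 ABI passes the first 6.
    #if defined(V8_TARGET_OS_WIN)
    constexpr int kRegisterPassedArguments = 4;
    #else
    constexpr int kRegisterPassedArguments = 6;
    #endif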
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.h b/deps/v8/src/codegen/x64/macro-assembler-x64.h
index 8e7766c7e1..f38da45788 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.h
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.h
@@ -152,8 +152,26 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP(Roundsd, roundsd)
AVX_OP(Sqrtss, sqrtss)
AVX_OP(Sqrtsd, sqrtsd)
+ AVX_OP(Sqrtpd, sqrtpd)
AVX_OP(Ucomiss, ucomiss)
AVX_OP(Ucomisd, ucomisd)
+ AVX_OP(Pshufb, pshufb)
+ AVX_OP(Paddusb, paddusb)
+ AVX_OP(Psignd, psignd)
+ AVX_OP(Pand, pand)
+ AVX_OP(Por, por)
+ AVX_OP(Pxor, pxor)
+ AVX_OP(Psubd, psubd)
+ AVX_OP(Pslld, pslld)
+ AVX_OP(Psrad, psrad)
+ AVX_OP(Psrld, psrld)
+ AVX_OP(Paddd, paddd)
+ AVX_OP(Pmulld, pmulld)
+ AVX_OP(Pminsd, pminsd)
+ AVX_OP(Pminud, pminud)
+ AVX_OP(Pmaxsd, pmaxsd)
+ AVX_OP(Pmaxud, pmaxud)
+ AVX_OP(Pcmpgtd, pcmpgtd)
#undef AVX_OP
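The AVX_OP entries added above generate capitalized wrappers (Sqrtpd, Pshufb, ...) that pick the v-prefixed AVX encoding when it is available and otherwise fall back to the SSE form. A self-contained, purely illustrative sketch of that macro shape (not the real AVX_OP definition):

    #include <cstdio>

    class DemoAssembler {
     public:
      explicit DemoAssembler(bool avx) : avx_supported_(avx) {}
      void sqrtpd() { std::puts("emit sqrtpd (SSE2)"); }
      void vsqrtpd() { std::puts("emit vsqrtpd (AVX)"); }

    // Generates a capitalized wrapper that dispatches on AVX support.
    #define DEMO_AVX_OP(macro_name, name) \
      void macro_name() {                 \
        if (avx_supported_) {             \
          v##name();                      \
        } else {                          \
          name();                         \
        }                                 \
      }
      DEMO_AVX_OP(Sqrtpd, sqrtpd)
    #undef DEMO_AVX_OP

     private:
      bool avx_supported_;
    };

    int main() {
      DemoAssembler(true).Sqrtpd();   // AVX path
      DemoAssembler(false).Sqrtpd();  // SSE2 fallback
      return 0;
    }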
@@ -314,6 +332,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
RelocInfo::Mode rmode = RelocInfo::FULL_EMBEDDED_OBJECT);
// Convert smi to word-size sign-extended value.
+ void SmiUntag(Register reg);
+ // Requires dst != src
void SmiUntag(Register dst, Register src);
void SmiUntag(Register dst, Operand src);
@@ -365,14 +385,22 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Non-SSE2 instructions.
void Pextrd(Register dst, XMMRegister src, int8_t imm8);
+ void Pextrw(Register dst, XMMRegister src, int8_t imm8);
+ void Pextrb(Register dst, XMMRegister src, int8_t imm8);
void Pinsrd(XMMRegister dst, Register src, int8_t imm8);
void Pinsrd(XMMRegister dst, Operand src, int8_t imm8);
+ void Pinsrw(XMMRegister dst, Register src, int8_t imm8);
+ void Pinsrw(XMMRegister dst, Operand src, int8_t imm8);
+ void Pinsrb(XMMRegister dst, Register src, int8_t imm8);
+ void Pinsrb(XMMRegister dst, Operand src, int8_t imm8);
void Psllq(XMMRegister dst, byte imm8);
void Psrlq(XMMRegister dst, byte imm8);
void Pslld(XMMRegister dst, byte imm8);
void Psrld(XMMRegister dst, byte imm8);
+ void Pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle);
+
void CompareRoot(Register with, RootIndex index);
void CompareRoot(Operand with, RootIndex index);
@@ -414,7 +442,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// stack check, do it before calling this function because this function may
// write into the newly allocated space. It may also overwrite the given
// register's value, in the version that takes a register.
-#ifdef V8_OS_WIN
+#ifdef V8_TARGET_OS_WIN
void AllocateStackSpace(Register bytes_scratch);
void AllocateStackSpace(int bytes);
#else
@@ -647,10 +675,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag);
- // On function call, call into the debugger if necessary.
- void CheckDebugHook(Register fun, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual);
+ // On function call, call into the debugger.
+ void CallDebugOnFunctionCall(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
@@ -665,6 +693,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Conversions between tagged smi values and non-tagged integer values.
// Tag an word-size value. The result must be known to be a valid smi value.
+ void SmiTag(Register reg);
+ // Requires dst != src
void SmiTag(Register dst, Register src);
// Simple comparison of smis. Both sides must be known smis to use these,
@@ -917,7 +947,7 @@ inline Operand NativeContextOperand() {
// Provides access to exit frame stack space (not GCed).
inline Operand StackSpaceOperand(int index) {
-#ifdef _WIN64
+#ifdef V8_TARGET_OS_WIN
const int kShaddowSpace = 4;
return Operand(rsp, (index + kShaddowSpace) * kSystemPointerSize);
#else
diff --git a/deps/v8/src/codegen/x64/register-x64.h b/deps/v8/src/codegen/x64/register-x64.h
index 199571f088..181da9d9f3 100644
--- a/deps/v8/src/codegen/x64/register-x64.h
+++ b/deps/v8/src/codegen/x64/register-x64.h
@@ -88,7 +88,7 @@ constexpr int kNumJSCallerSaved = 5;
// Number of registers for which space is reserved in safepoints.
constexpr int kNumSafepointRegisters = 16;
-#ifdef _WIN64
+#ifdef V8_TARGET_OS_WIN
// Windows calling convention
constexpr Register arg_reg_1 = rcx;
constexpr Register arg_reg_2 = rdx;
@@ -100,7 +100,7 @@ constexpr Register arg_reg_1 = rdi;
constexpr Register arg_reg_2 = rsi;
constexpr Register arg_reg_3 = rdx;
constexpr Register arg_reg_4 = rcx;
-#endif // _WIN64
+#endif // V8_TARGET_OS_WIN
#define DOUBLE_REGISTERS(V) \
V(xmm0) \
diff --git a/deps/v8/src/codegen/x64/sse-instr.h b/deps/v8/src/codegen/x64/sse-instr.h
index 8ba54e85b4..8af06ae92c 100644
--- a/deps/v8/src/codegen/x64/sse-instr.h
+++ b/deps/v8/src/codegen/x64/sse-instr.h
@@ -6,6 +6,7 @@
#define V8_CODEGEN_X64_SSE_INSTR_H_
#define SSE2_INSTRUCTION_LIST(V) \
+ V(sqrtpd, 66, 0F, 51) \
V(andnpd, 66, 0F, 55) \
V(addpd, 66, 0F, 58) \
V(mulpd, 66, 0F, 59) \
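sse-instr.h gains sqrtpd with its encoding bytes (66 0F 51); as the comment added in assembler-x64.h notes, the AVX vsqrtpd form is then derived from this SSE2_INSTRUCTION_LIST entry. The list is an X-macro expanded elsewhere into assembler methods; a hedged, self-contained illustration of that expansion technique (list, struct, and entry names are invented for the example):

    #include <cstdint>
    #include <cstdio>

    // Each entry carries a mnemonic and its prefix/escape/opcode bytes.
    #define DEMO_SSE2_LIST(V)     \
      V(sqrtpd, 0x66, 0x0F, 0x51) \
      V(addpd, 0x66, 0x0F, 0x58)

    struct InstrEncoding { const char* name; uint8_t prefix, escape, opcode; };

    #define MAKE_ENTRY(name, prefix, escape, opcode) {#name, prefix, escape, opcode},
    constexpr InstrEncoding kEncodings[] = {DEMO_SSE2_LIST(MAKE_ENTRY)};
    #undef MAKE_ENTRY

    int main() {
      for (const auto& e : kEncodings)
        std::printf("%s -> %02X %02X %02X\n", e.name, e.prefix, e.escape, e.opcode);
      return 0;
    }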