path: root/deps/v8/src/arm
Diffstat (limited to 'deps/v8/src/arm')
-rw-r--r--  deps/v8/src/arm/assembler-arm-inl.h            |   55
-rw-r--r--  deps/v8/src/arm/assembler-arm.cc               |   69
-rw-r--r--  deps/v8/src/arm/assembler-arm.h                |   25
-rw-r--r--  deps/v8/src/arm/builtins-arm.cc                |  437
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.cc              |  344
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.h               |    2
-rw-r--r--  deps/v8/src/arm/codegen-arm.cc                 |    9
-rw-r--r--  deps/v8/src/arm/codegen-arm.h                  |    3
-rw-r--r--  deps/v8/src/arm/constants-arm.cc               |    2
-rw-r--r--  deps/v8/src/arm/constants-arm.h                |    6
-rw-r--r--  deps/v8/src/arm/cpu-arm.cc                     |    2
-rw-r--r--  deps/v8/src/arm/debug-arm.cc                   |  248
-rw-r--r--  deps/v8/src/arm/deoptimizer-arm.cc             |    4
-rw-r--r--  deps/v8/src/arm/disasm-arm.cc                  |    2
-rw-r--r--  deps/v8/src/arm/frames-arm.cc                  |    2
-rw-r--r--  deps/v8/src/arm/frames-arm.h                   |    6
-rw-r--r--  deps/v8/src/arm/full-codegen-arm.cc            | 5600
-rw-r--r--  deps/v8/src/arm/interface-descriptors-arm.cc   |   41
-rw-r--r--  deps/v8/src/arm/lithium-arm.cc                 |   52
-rw-r--r--  deps/v8/src/arm/lithium-arm.h                  |   52
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.cc         |  138
-rw-r--r--  deps/v8/src/arm/lithium-gap-resolver-arm.cc    |    2
-rw-r--r--  deps/v8/src/arm/lithium-gap-resolver-arm.h     |    2
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc         |   47
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.h          |   78
-rw-r--r--  deps/v8/src/arm/regexp-macro-assembler-arm.cc  | 1199
-rw-r--r--  deps/v8/src/arm/regexp-macro-assembler-arm.h   |  219
-rw-r--r--  deps/v8/src/arm/simulator-arm.cc               |   17
-rw-r--r--  deps/v8/src/arm/simulator-arm.h                |   13
29 files changed, 853 insertions, 7823 deletions
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index 4b4e1d3208..523000ec3a 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -40,7 +40,7 @@
#include "src/arm/assembler-arm.h"
#include "src/assembler.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
namespace v8 {
@@ -97,7 +97,7 @@ DwVfpRegister DwVfpRegister::FromAllocationIndex(int index) {
}
-void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
+void RelocInfo::apply(intptr_t delta) {
if (RelocInfo::IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object.
int32_t* p = reinterpret_cast<int32_t*>(pc_);
@@ -272,19 +272,18 @@ void RelocInfo::set_code_age_stub(Code* stub,
}
-Address RelocInfo::call_address() {
+Address RelocInfo::debug_call_address() {
// The 2 instructions offset assumes patched debug break slot or return
// sequence.
- DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize);
+ DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
+ return Memory::Address_at(pc_ + Assembler::kPatchDebugBreakSlotAddressOffset);
}
-void RelocInfo::set_call_address(Address target) {
- DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
+void RelocInfo::set_debug_call_address(Address target) {
+ DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
+ Memory::Address_at(pc_ + Assembler::kPatchDebugBreakSlotAddressOffset) =
+ target;
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@@ -293,23 +292,6 @@ void RelocInfo::set_call_address(Address target) {
}
-Object* RelocInfo::call_object() {
- return *call_object_address();
-}
-
-
-void RelocInfo::set_call_object(Object* target) {
- *call_object_address() = target;
-}
-
-
-Object** RelocInfo::call_object_address() {
- DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
-}
-
-
void RelocInfo::WipeOut() {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
@@ -353,11 +335,8 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
visitor->VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(this);
- } else if (((RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence())) &&
- isolate->debug()->has_break_points()) {
+ } else if (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence()) {
visitor->VisitDebugTarget(this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(this);
@@ -380,11 +359,8 @@ void RelocInfo::Visit(Heap* heap) {
StaticVisitor::VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
StaticVisitor::VisitCodeAgeSequence(heap, this);
- } else if (heap->isolate()->debug()->has_break_points() &&
- ((RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence()))) {
+ } else if (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence()) {
StaticVisitor::VisitDebugTarget(heap, this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
StaticVisitor::VisitRuntimeEntry(this);
@@ -504,11 +480,6 @@ Address Assembler::target_address_from_return_address(Address pc) {
}
-Address Assembler::break_address_from_return_address(Address pc) {
- return pc - Assembler::kPatchDebugBreakSlotReturnOffset;
-}
-
-
Address Assembler::return_address_from_call_start(Address pc) {
if (IsLdrPcImmediateOffset(Memory::int32_at(pc)) |
IsLdrPpImmediateOffset(Memory::int32_at(pc))) {
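
The renamed debug_call_address()/set_debug_call_address() above rely on the patched slot layout: `ldr ip, [pc, #0]` reads its literal from two instructions ahead (ARM's pc reads as the instruction address plus 8), so the call target lives at slot start + 2 * kInstrSize. A minimal standalone sketch of that address arithmetic, with plain buffers instead of V8's RelocInfo:

#include <cassert>
#include <cstdint>
#include <cstring>

// Layout assumed from the comments above (byte offsets from slot start):
//   +0: ldr ip, [pc, #0]   ; ARM pc reads as "this instruction + 8"
//   +4: blx ip
//   +8: <32-bit target address literal>
constexpr int kInstrSize = 4;
constexpr int kPatchDebugBreakSlotAddressOffset = 2 * kInstrSize;

// Mirrors RelocInfo::debug_call_address(): read the literal that the
// patched ldr will load.
uint32_t debug_call_address(const uint8_t* slot_pc) {
  uint32_t target;
  std::memcpy(&target, slot_pc + kPatchDebugBreakSlotAddressOffset,
              sizeof target);
  return target;
}

int main() {
  uint8_t slot[3 * kInstrSize] = {0};
  uint32_t target = 0xCAFE0000u;
  // What set_debug_call_address() does: store the target right after the
  // two patch instructions.
  std::memcpy(slot + kPatchDebugBreakSlotAddressOffset, &target,
              sizeof target);
  assert(debug_call_address(slot) == target);
  return 0;
}
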
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 481a3b5ced..633b5d12c0 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -34,8 +34,6 @@
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM
#include "src/arm/assembler-arm-inl.h"
@@ -1326,7 +1324,8 @@ int Assembler::branch_offset(Label* L) {
// Block the emission of the constant pool, since the branch instruction must
// be emitted at the pc offset recorded by the label.
- BlockConstPoolFor(1);
+ if (!is_const_pool_blocked()) BlockConstPoolFor(1);
+
return target_pos - (pc_offset() + kPcLoadDelta);
}
@@ -2573,6 +2572,12 @@ void Assembler::vmov(const DwVfpRegister dst,
double imm,
const Register scratch) {
uint32_t enc;
+ // If the embedded constant pool is disabled, we can use the normal, inline
+ // constant pool. If the embedded constant pool is enabled (via
+ // FLAG_enable_embedded_constant_pool), we can only use it where the pool
+ // pointer (pp) is valid.
+ bool can_use_pool =
+ !FLAG_enable_embedded_constant_pool || is_constant_pool_available();
if (CpuFeatures::IsSupported(VFP3) && FitsVMOVDoubleImmediate(imm, &enc)) {
// The double can be encoded in the instruction.
//
@@ -2583,7 +2588,7 @@ void Assembler::vmov(const DwVfpRegister dst,
int vd, d;
dst.split_code(&vd, &d);
emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
- } else if (FLAG_enable_vldr_imm && is_constant_pool_available()) {
+ } else if (FLAG_enable_vldr_imm && can_use_pool) {
// TODO(jfb) Temporarily turned off until we have constant blinding or
// some equivalent mitigation: an attacker can otherwise control
// generated data which also happens to be executable, a Very Bad
@@ -3588,11 +3593,10 @@ void Assembler::GrowBuffer() {
void Assembler::db(uint8_t data) {
- // No relocation info should be pending while using db. db is used
- // to write pure data with no pointers and the constant pool should
- // be emitted before using db.
- DCHECK(num_pending_32_bit_constants_ == 0);
- DCHECK(num_pending_64_bit_constants_ == 0);
+ // db is used to write raw data. The constant pool should be emitted or
+ // blocked before using db.
+ DCHECK(is_const_pool_blocked() || (num_pending_32_bit_constants_ == 0));
+ DCHECK(is_const_pool_blocked() || (num_pending_64_bit_constants_ == 0));
CheckBuffer();
*reinterpret_cast<uint8_t*>(pc_) = data;
pc_ += sizeof(uint8_t);
@@ -3600,11 +3604,10 @@ void Assembler::db(uint8_t data) {
void Assembler::dd(uint32_t data) {
- // No relocation info should be pending while using dd. dd is used
- // to write pure data with no pointers and the constant pool should
- // be emitted before using dd.
- DCHECK(num_pending_32_bit_constants_ == 0);
- DCHECK(num_pending_64_bit_constants_ == 0);
+ // dd is used to write raw data. The constant pool should be emitted or
+ // blocked before using dd.
+ DCHECK(is_const_pool_blocked() || (num_pending_32_bit_constants_ == 0));
+ DCHECK(is_const_pool_blocked() || (num_pending_64_bit_constants_ == 0));
CheckBuffer();
*reinterpret_cast<uint32_t*>(pc_) = data;
pc_ += sizeof(uint32_t);
@@ -3612,11 +3615,10 @@ void Assembler::dd(uint32_t data) {
void Assembler::dq(uint64_t value) {
- // No relocation info should be pending while using dq. dq is used
- // to write pure data with no pointers and the constant pool should
- // be emitted before using dd.
- DCHECK(num_pending_32_bit_constants_ == 0);
- DCHECK(num_pending_64_bit_constants_ == 0);
+ // dq is used to write raw data. The constant pool should be emitted or
+ // blocked before using dq.
+ DCHECK(is_const_pool_blocked() || (num_pending_32_bit_constants_ == 0));
+ DCHECK(is_const_pool_blocked() || (num_pending_64_bit_constants_ == 0));
CheckBuffer();
*reinterpret_cast<uint64_t*>(pc_) = value;
pc_ += sizeof(uint64_t);
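
The relaxed DCHECKs above permit raw-data emission while the constant pool is merely blocked, not only when it is empty. A toy model of the new invariant (MiniAssembler and its fields are illustrative stand-ins, not V8's Assembler interface):

#include <cassert>
#include <cstdint>
#include <vector>

struct MiniAssembler {
  int num_pending_32_bit_constants = 0;
  int const_pool_blocked_nesting = 0;
  std::vector<uint8_t> buffer;

  bool is_const_pool_blocked() const { return const_pool_blocked_nesting > 0; }

  void db(uint8_t data) {
    // The relaxed invariant: pending constants are fine as long as the pool
    // cannot be emitted into the middle of the raw data.
    assert(is_const_pool_blocked() || num_pending_32_bit_constants == 0);
    buffer.push_back(data);
  }
};

int main() {
  MiniAssembler masm;
  masm.num_pending_32_bit_constants = 3;  // constants awaiting a pool...
  masm.const_pool_blocked_nesting = 1;    // ...but emission is blocked,
  masm.db(0x90);                          // so raw data is now permitted
  return 0;
}
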
@@ -3755,11 +3757,13 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
int size_up_to_marker = jump_instr + kInstrSize;
int estimated_size_after_marker =
num_pending_32_bit_constants_ * kPointerSize;
+ bool has_int_values = (num_pending_32_bit_constants_ > 0);
bool has_fp_values = (num_pending_64_bit_constants_ > 0);
bool require_64_bit_align = false;
if (has_fp_values) {
- require_64_bit_align = IsAligned(
- reinterpret_cast<intptr_t>(pc_ + size_up_to_marker), kDoubleAlignment);
+ require_64_bit_align =
+ !IsAligned(reinterpret_cast<intptr_t>(pc_ + size_up_to_marker),
+ kDoubleAlignment);
if (require_64_bit_align) {
estimated_size_after_marker += kInstrSize;
}
@@ -3776,9 +3780,11 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// * the instruction doesn't require a jump after itself to jump over the
// constant pool, and we're getting close to running out of range.
if (!force_emit) {
- DCHECK((first_const_pool_32_use_ >= 0) || (first_const_pool_64_use_ >= 0));
+ DCHECK(has_fp_values || has_int_values);
bool need_emit = false;
if (has_fp_values) {
+ // The 64-bit constants are always emitted before the 32-bit constants, so
+ // we can ignore the effect of the 32-bit constants on estimated_size.
int dist64 = pc_offset() + estimated_size -
num_pending_32_bit_constants_ * kPointerSize -
first_const_pool_64_use_;
@@ -3787,10 +3793,12 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
need_emit = true;
}
}
- int dist32 = pc_offset() + estimated_size - first_const_pool_32_use_;
- if ((dist32 >= kMaxDistToIntPool - kCheckPoolInterval) ||
- (!require_jump && (dist32 >= kMaxDistToIntPool / 2))) {
- need_emit = true;
+ if (has_int_values) {
+ int dist32 = pc_offset() + estimated_size - first_const_pool_32_use_;
+ if ((dist32 >= kMaxDistToIntPool - kCheckPoolInterval) ||
+ (!require_jump && (dist32 >= kMaxDistToIntPool / 2))) {
+ need_emit = true;
+ }
}
if (!need_emit) return;
}
@@ -3839,7 +3847,10 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
bind(&size_check);
// Emit jump over constant pool if necessary.
- if (require_jump) b(size - kPcLoadDelta);
+ Label after_pool;
+ if (require_jump) {
+ b(&after_pool);
+ }
// Put down constant pool marker "Undefined instruction".
// The data size helps disassembly know what to print.
@@ -3923,6 +3934,10 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
RecordComment("]");
DCHECK_EQ(size, SizeOfCodeGeneratedSince(&size_check));
+
+ if (after_pool.is_linked()) {
+ bind(&after_pool);
+ }
}
// Since a constant pool was just emitted, move the check offset forward by
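
The emission decision above now guards the 32-bit distance check behind has_int_values, so a pool holding only 64-bit constants never consults the unset first_const_pool_32_use_. A compilable sketch of that logic; the distance constants are made-up, not V8's actual limits:

#include <cstdio>

constexpr int kPointerSize = 4;
constexpr int kMaxDistToFPPool = 1024;   // illustrative
constexpr int kMaxDistToIntPool = 4096;  // illustrative
constexpr int kCheckPoolInterval = 32;   // illustrative

bool NeedEmit(int pc_offset, int estimated_size, bool require_jump,
              int num_32, int num_64, int first_use_32, int first_use_64) {
  bool need_emit = false;
  if (num_64 > 0) {
    // 64-bit entries precede 32-bit ones, so subtract the 32-bit footprint.
    int dist64 = pc_offset + estimated_size - num_32 * kPointerSize -
                 first_use_64;
    if ((dist64 >= kMaxDistToFPPool - kCheckPoolInterval) ||
        (!require_jump && (dist64 >= kMaxDistToFPPool / 2))) {
      need_emit = true;
    }
  }
  if (num_32 > 0) {  // the new guard: only measured when 32-bit entries exist
    int dist32 = pc_offset + estimated_size - first_use_32;
    if ((dist32 >= kMaxDistToIntPool - kCheckPoolInterval) ||
        (!require_jump && (dist32 >= kMaxDistToIntPool / 2))) {
      need_emit = true;
    }
  }
  return need_emit;
}

int main() {
  // Only 64-bit entries pending: dist32 is never computed, so the unset
  // first_const_pool_32_use_ (-1 here) cannot force a spurious emission.
  std::printf("need_emit = %d\n", NeedEmit(2000, 64, true, 0, 2, -1, 1200));
  return 0;
}
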
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index 5d66c39a77..d0fcac206e 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -737,9 +737,6 @@ class Assembler : public AssemblerBase {
// in the instruction stream that the call will return from.
INLINE(static Address return_address_from_call_start(Address pc));
- // Return the code target address of the patch debug break slot
- INLINE(static Address break_address_from_return_address(Address pc));
-
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
@@ -758,30 +755,18 @@ class Assembler : public AssemblerBase {
// Size of an instruction.
static const int kInstrSize = sizeof(Instr);
- // Distance between start of patched return sequence and the emitted address
- // to jump to.
- // Patched return sequence is:
- // ldr ip, [pc, #0] @ emited address and start
- // blx ip
- static const int kPatchReturnSequenceAddressOffset = 0 * kInstrSize;
-
// Distance between start of patched debug break slot and the emitted address
// to jump to.
// Patched debug break slot code is:
// ldr ip, [pc, #0] @ emited address and start
// blx ip
- static const int kPatchDebugBreakSlotAddressOffset = 0 * kInstrSize;
-
- static const int kPatchDebugBreakSlotReturnOffset = 2 * kInstrSize;
+ static const int kPatchDebugBreakSlotAddressOffset = 2 * kInstrSize;
// Difference between address of current opcode and value read from pc
// register.
static const int kPcLoadDelta = 8;
- static const int kJSReturnSequenceInstructions = 4;
- static const int kJSReturnSequenceLength =
- kJSReturnSequenceInstructions * kInstrSize;
- static const int kDebugBreakSlotInstructions = 3;
+ static const int kDebugBreakSlotInstructions = 4;
static const int kDebugBreakSlotLength =
kDebugBreakSlotInstructions * kInstrSize;
@@ -1354,11 +1339,11 @@ class Assembler : public AssemblerBase {
// Debugging
- // Mark address of the ExitJSFrame code.
- void RecordJSReturn();
+ // Mark generator continuation.
+ void RecordGeneratorContinuation();
// Mark address of a debug break slot.
- void RecordDebugBreakSlot();
+ void RecordDebugBreakSlot(RelocInfo::Mode mode, int argc = 0);
// Record the AST id of the CallIC being compiled, so that it can be placed
// in the relocation information.
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index 2859f97dc4..cf91753e1a 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -2,14 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM
#include "src/codegen.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/deoptimizer.h"
-#include "src/full-codegen.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -311,39 +309,8 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
}
-static void Generate_Runtime_NewObject(MacroAssembler* masm,
- bool create_memento,
- Register original_constructor,
- Label* count_incremented,
- Label* allocated) {
- if (create_memento) {
- // Get the cell or allocation site.
- __ ldr(r2, MemOperand(sp, 2 * kPointerSize));
- __ push(r2);
- }
-
- __ push(r1); // argument for Runtime_NewObject
- __ push(original_constructor); // original constructor
- if (create_memento) {
- __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 3);
- } else {
- __ CallRuntime(Runtime::kNewObject, 2);
- }
- __ mov(r4, r0);
-
- // Runtime_NewObjectWithAllocationSite increments allocation count.
- // Skip the increment.
- if (create_memento) {
- __ jmp(count_incremented);
- } else {
- __ jmp(allocated);
- }
-}
-
-
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
- bool use_new_target,
bool create_memento) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
@@ -363,32 +330,18 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
{
FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
- if (create_memento) {
- __ AssertUndefinedOrAllocationSite(r2, r4);
- __ push(r2);
- }
-
// Preserve the incoming parameters on the stack.
+ __ AssertUndefinedOrAllocationSite(r2, r4);
+ __ push(r2);
__ SmiTag(r0);
__ push(r0);
__ push(r1);
- if (use_new_target) {
- __ push(r3);
- }
-
- Label rt_call, allocated, normal_new, count_incremented;
- __ cmp(r1, r3);
- __ b(eq, &normal_new);
-
- // Original constructor and function are different.
- Generate_Runtime_NewObject(masm, create_memento, r3, &count_incremented,
- &allocated);
- __ bind(&normal_new);
+ __ push(r3);
// Try to allocate the object without transitioning into C code. If any of
// the preconditions is not met, the code bails out to the runtime call.
+ Label rt_call, allocated;
if (FLAG_inline_new) {
- Label undo_allocation;
ExternalReference debug_step_in_fp =
ExternalReference::debug_step_in_fp_address(isolate);
__ mov(r2, Operand(debug_step_in_fp));
@@ -396,11 +349,15 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ tst(r2, r2);
__ b(ne, &rt_call);
+ // Fall back to runtime if the original constructor and function differ.
+ __ cmp(r1, r3);
+ __ b(ne, &rt_call);
+
// Load the initial map and verify that it is in fact a map.
// r1: constructor function
__ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
__ JumpIfSmi(r2, &rt_call);
- __ CompareObjectType(r2, r3, r4, MAP_TYPE);
+ __ CompareObjectType(r2, r5, r4, MAP_TYPE);
__ b(ne, &rt_call);
// Check that the constructor is not constructing a JSFunction (see
@@ -408,7 +365,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// initial map's instance type would be JS_FUNCTION_TYPE.
// r1: constructor function
// r2: initial map
- __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
+ __ CompareInstanceType(r2, r5, JS_FUNCTION_TYPE);
__ b(eq, &rt_call);
if (!is_api_function) {
@@ -439,12 +396,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Now allocate the JSObject on the heap.
// r1: constructor function
// r2: initial map
+ Label rt_call_reload_new_target;
__ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
if (create_memento) {
__ add(r3, r3, Operand(AllocationMemento::kSize / kPointerSize));
}
- __ Allocate(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS);
+ __ Allocate(r3, r4, r5, r6, &rt_call_reload_new_target, SIZE_IN_WORDS);
// Allocated the JSObject, now initialize the fields. Map is set to
// initial map and properties and elements are set to empty fixed array.
@@ -481,8 +439,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Allocate object with a slack.
__ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
- __ Ubfx(r0, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
+ __ Ubfx(r0, r0, Map::kInObjectPropertiesOrConstructorFunctionIndexByte *
+ kBitsPerByte,
+ kBitsPerByte);
+ __ ldr(r2, FieldMemOperand(r2, Map::kInstanceAttributesOffset));
+ __ Ubfx(r2, r2, Map::kUnusedPropertyFieldsByte * kBitsPerByte,
kBitsPerByte);
+ __ sub(r0, r0, Operand(r2));
__ add(r0, r5, Operand(r0, LSL, kPointerSizeLog2));
// r0: offset of first field after pre-allocated fields
if (FLAG_debug_code) {
@@ -509,7 +472,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
DCHECK_EQ(0 * kPointerSize, AllocationMemento::kMapOffset);
__ str(r6, MemOperand(r5, kPointerSize, PostIndex));
// Load the AllocationSite
- __ ldr(r6, MemOperand(sp, 2 * kPointerSize));
+ __ ldr(r6, MemOperand(sp, 3 * kPointerSize));
+ __ AssertUndefinedOrAllocationSite(r6, r0);
DCHECK_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset);
__ str(r6, MemOperand(r5, kPointerSize, PostIndex));
} else {
@@ -518,104 +482,50 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on. Any
- // failures need to undo the allocation, so that the heap is in a
- // consistent state and verifiable.
+ // and jump into the continuation code at any time from now on.
__ add(r4, r4, Operand(kHeapObjectTag));
- // Check if a non-empty properties array is needed. Continue with
- // allocated object if not; allocate and initialize a FixedArray if yes.
- // r1: constructor function
- // r4: JSObject
- // r5: start of next object (not tagged)
- __ ldrb(r3, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset));
- // The field instance sizes contains both pre-allocated property fields
- // and in-object properties.
- __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
- __ Ubfx(r6, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
- kBitsPerByte);
- __ add(r3, r3, Operand(r6));
- __ Ubfx(r6, r0, Map::kInObjectPropertiesByte * kBitsPerByte,
- kBitsPerByte);
- __ sub(r3, r3, Operand(r6), SetCC);
-
- // Done if no extra properties are to be allocated.
- __ b(eq, &allocated);
- __ Assert(pl, kPropertyAllocationCountFailed);
-
- // Scale the number of elements by pointer size and add the header for
- // FixedArrays to the start of the next object calculation from above.
- // r1: constructor
- // r3: number of elements in properties array
- // r4: JSObject
- // r5: start of next object
- __ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize));
- __ Allocate(
- r0,
- r5,
- r6,
- r2,
- &undo_allocation,
- static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
-
- // Initialize the FixedArray.
- // r1: constructor
- // r3: number of elements in properties array
- // r4: JSObject
- // r5: FixedArray (not tagged)
- __ LoadRoot(r6, Heap::kFixedArrayMapRootIndex);
- __ mov(r2, r5);
- DCHECK_EQ(0 * kPointerSize, JSObject::kMapOffset);
- __ str(r6, MemOperand(r2, kPointerSize, PostIndex));
- DCHECK_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
- __ SmiTag(r0, r3);
- __ str(r0, MemOperand(r2, kPointerSize, PostIndex));
-
- // Initialize the fields to undefined.
- // r1: constructor function
- // r2: First element of FixedArray (not tagged)
- // r3: number of elements in properties array
- // r4: JSObject
- // r5: FixedArray (not tagged)
- __ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
- DCHECK_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- __ InitializeFieldsWithFiller(r2, r6, r0);
-
- // Store the initialized FixedArray into the properties field of
- // the JSObject
- // r1: constructor function
- // r4: JSObject
- // r5: FixedArray (not tagged)
- __ add(r5, r5, Operand(kHeapObjectTag)); // Add the heap tag.
- __ str(r5, FieldMemOperand(r4, JSObject::kPropertiesOffset));
-
// Continue with JSObject being successfully allocated
- // r1: constructor function
// r4: JSObject
__ jmp(&allocated);
- // Undo the setting of the new top so that the heap is verifiable. For
- // example, the map's unused properties potentially do not match the
- // allocated objects unused properties.
- // r4: JSObject (previous new top)
- __ bind(&undo_allocation);
- __ UndoAllocationInNewSpace(r4, r5);
+ // Reload the original constructor and fall-through.
+ __ bind(&rt_call_reload_new_target);
+ __ ldr(r3, MemOperand(sp, 0 * kPointerSize));
}
// Allocate the new receiver object using the runtime call.
// r1: constructor function
+ // r3: original constructor
__ bind(&rt_call);
- Generate_Runtime_NewObject(masm, create_memento, r1, &count_incremented,
- &allocated);
+ if (create_memento) {
+ // Get the cell or allocation site.
+ __ ldr(r2, MemOperand(sp, 3 * kPointerSize));
+ __ push(r2); // argument 1: allocation site
+ }
+
+ __ push(r1); // argument 2/1: constructor function
+ __ push(r3); // argument 3/2: original constructor
+ if (create_memento) {
+ __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 3);
+ } else {
+ __ CallRuntime(Runtime::kNewObject, 2);
+ }
+ __ mov(r4, r0);
+
+ // Runtime_NewObjectWithAllocationSite increments allocation count.
+ // Skip the increment.
+ Label count_incremented;
+ if (create_memento) {
+ __ jmp(&count_incremented);
+ }
// Receiver for constructor call allocated.
// r4: JSObject
__ bind(&allocated);
if (create_memento) {
- int offset = (use_new_target ? 3 : 2) * kPointerSize;
- __ ldr(r2, MemOperand(sp, offset));
+ __ ldr(r2, MemOperand(sp, 3 * kPointerSize));
__ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
__ cmp(r2, r5);
__ b(eq, &count_incremented);
@@ -630,9 +540,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Restore the parameters.
- if (use_new_target) {
- __ pop(r3);
- }
+ __ pop(r3);
__ pop(r1);
// Retrieve smi-tagged arguments count from the stack.
@@ -641,9 +549,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Push new.target onto the construct frame. This is stored just below the
// receiver on the stack.
- if (use_new_target) {
- __ push(r3);
- }
+ __ push(r3);
__ push(r4);
__ push(r4);
@@ -657,8 +563,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r3: number of arguments (smi-tagged)
// sp[0]: receiver
// sp[1]: receiver
- // sp[2]: new.target (if used)
- // sp[2/3]: number of arguments (smi-tagged)
+ // sp[2]: new.target
+ // sp[3]: number of arguments (smi-tagged)
Label loop, entry;
__ SmiTag(r3, r0);
__ b(&entry);
@@ -683,17 +589,15 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Store offset of return address for deoptimizer.
- // TODO(arv): Remove the "!use_new_target" before supporting optimization
- // of functions that reference new.target
- if (!is_api_function && !use_new_target) {
+ if (!is_api_function) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
// Restore context from the frame.
// r0: result
// sp[0]: receiver
- // sp[1]: new.target (if used)
- // sp[1/2]: number of arguments (smi-tagged)
+ // sp[1]: new.target
+ // sp[2]: number of arguments (smi-tagged)
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// If the result is an object (in the ECMA sense), we should get rid
@@ -703,9 +607,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// If the result is a smi, it is *not* an object in the ECMA sense.
// r0: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: new.target (if used)
- // sp[1/2]: number of arguments (smi-tagged)
+ // sp[0]: receiver
+ // sp[1]: new.target
+ // sp[2]: number of arguments (smi-tagged)
__ JumpIfSmi(r0, &use_receiver);
// If the type of the result (stored in its map) is less than
@@ -723,10 +627,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&exit);
// r0: result
// sp[0]: receiver (newly allocated object)
- // sp[1]: new.target (if used)
- // sp[1/2]: number of arguments (smi-tagged)
- int offset = (use_new_target ? 2 : 1) * kPointerSize;
- __ ldr(r1, MemOperand(sp, offset));
+ // sp[1]: new.target (original constructor)
+ // sp[2]: number of arguments (smi-tagged)
+ __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
// Leave construct frame.
}
@@ -739,17 +642,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new);
+ Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false, false);
-}
-
-
-void Builtins::Generate_JSConstructStubNewTarget(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true, FLAG_pretenuring_call_new);
+ Generate_JSConstructStubHelper(masm, true, false);
}
@@ -763,12 +661,12 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
// -- sp[...]: constructor arguments
// -----------------------------------
- // TODO(dslomov): support pretenuring
- CHECK(!FLAG_pretenuring_call_new);
-
{
FrameScope frame_scope(masm, StackFrame::CONSTRUCT);
+ __ AssertUndefinedOrAllocationSite(r2, r4);
+ __ push(r2);
+
__ mov(r4, r0);
__ SmiTag(r4);
__ push(r4); // Smi-tagged arguments count.
@@ -970,6 +868,147 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
}
+// Generate code for entering a JS function with the interpreter.
+// On entry to the function the receiver and arguments have been pushed on the
+// stack left to right. The actual argument count matches the formal parameter
+// count expected by the function.
+//
+// The live registers are:
+// o r1: the JS function object being called.
+// o cp: our context
+// o pp: the caller's constant pool pointer (if enabled)
+// o fp: the caller's frame pointer
+// o sp: stack pointer
+// o lr: return address
+//
+// The function builds a JS frame. Please see JavaScriptFrameConstants in
+// frames-arm.h for its layout.
+// TODO(rmcilroy): We will need to include the current bytecode pointer in the
+// frame.
+void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done below).
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ PushFixedFrame(r1);
+ __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+
+ // Get the bytecode array from the function object and load the pointer to the
+ // first entry into kInterpreterBytecodeRegister.
+ __ ldr(r0, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(r0, SharedFunctionInfo::kFunctionDataOffset));
+
+ if (FLAG_debug_code) {
+ // Check function data field is actually a BytecodeArray object.
+ __ SmiTst(kInterpreterBytecodeArrayRegister);
+ __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ CompareObjectType(kInterpreterBytecodeArrayRegister, r0, no_reg,
+ BYTECODE_ARRAY_TYPE);
+ __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ }
+
+ // Allocate the local and temporary register file on the stack.
+ {
+ // Load frame size from the BytecodeArray object.
+ __ ldr(r4, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kFrameSizeOffset));
+
+ // Do a stack check to ensure we don't go over the limit.
+ Label ok;
+ __ sub(r9, sp, Operand(r4));
+ __ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
+ __ cmp(r9, Operand(r2));
+ __ b(hs, &ok);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ bind(&ok);
+
+ // If ok, push undefined as the initial value for all register file entries.
+ Label loop_header;
+ Label loop_check;
+ __ LoadRoot(r9, Heap::kUndefinedValueRootIndex);
+ __ b(&loop_check, al);
+ __ bind(&loop_header);
+ // TODO(rmcilroy): Consider doing more than one push per loop iteration.
+ __ push(r9);
+ // Continue loop if not done.
+ __ bind(&loop_check);
+ __ sub(r4, r4, Operand(kPointerSize), SetCC);
+ __ b(&loop_header, ge);
+ }
+
+ // TODO(rmcilroy): List of things not currently dealt with here but done in
+ // fullcodegen's prologue:
+ // - Support profiler (specifically profiling_counter).
+ // - Call ProfileEntryHookStub when isolate has a function_entry_hook.
+ // - Allow simulator stop operations if FLAG_stop_at is set.
+ // - Deal with sloppy mode functions which need to replace the
+ // receiver with the global proxy when called as functions (without an
+ // explicit receiver object).
+ // - Code aging of the BytecodeArray object.
+ // - Supporting FLAG_trace.
+ //
+ // The following items are also not done here, and will probably be done using
+ // explicit bytecodes instead:
+ // - Allocating a new local context if applicable.
+ // - Setting up a local binding to the this function, which is used in
+ // derived constructors with super calls.
+ // - Setting new.target if required.
+ // - Dealing with REST parameters (only if
+ // https://codereview.chromium.org/1235153006 doesn't land by then).
+ // - Dealing with argument objects.
+
+ // Perform stack guard check.
+ {
+ Label ok;
+ __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ __ cmp(sp, Operand(ip));
+ __ b(hs, &ok);
+ __ CallRuntime(Runtime::kStackGuard, 0);
+ __ bind(&ok);
+ }
+
+ // Load accumulator, register file, bytecode offset, dispatch table into
+ // registers.
+ __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
+ __ sub(kInterpreterRegisterFileRegister, fp,
+ Operand(kPointerSize + StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ mov(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ LoadRoot(kInterpreterDispatchTableRegister,
+ Heap::kInterpreterTableRootIndex);
+ __ add(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+
+ // Dispatch to the first bytecode handler for the function.
+ __ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister));
+ __ ldr(ip, MemOperand(kInterpreterDispatchTableRegister, r1, LSL,
+ kPointerSizeLog2));
+ // TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
+ // and header removal.
+ __ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(ip);
+}
+
+
+void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
+ // TODO(rmcilroy): List of things not currently dealt with here but done in
+ // fullcodegen's EmitReturnSequence.
+ // - Supporting FLAG_trace for Runtime::TraceExit.
+ // - Support profiler (specifically decrementing profiling_counter
+ // appropriately and calling out to HandleInterrupts if necessary).
+
+ // The return value is in accumulator, which is already in r0.
+
+ // Leave the frame (also dropping the register file).
+ __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+ // Drop receiver + arguments.
+ __ Drop(1); // TODO(rmcilroy): Get number of arguments from BytecodeArray.
+ __ Jump(lr);
+}
+
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
CallRuntimePassFunction(masm, Runtime::kCompileLazy);
GenerateTailCallToReturnedCode(masm);
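
The dispatch at the end of Generate_InterpreterEntryTrampoline above amounts to: fetch one byte of bytecode, index the dispatch table by it, and call the handler. A rough C++ model of that loop; the handlers here are hypothetical ordinary functions, whereas the real table holds generated code objects:

#include <cstdint>
#include <cstdio>

using Handler = void (*)(uint8_t bytecode);

void HandleLdaZero(uint8_t) { std::puts("LdaZero"); }
void HandleReturn(uint8_t) { std::puts("Return"); }

int main() {
  // Hypothetical two-entry dispatch table, indexed by bytecode value.
  Handler dispatch_table[] = {HandleLdaZero, HandleReturn};
  uint8_t bytecode_array[] = {0, 1};
  for (uint8_t bc : bytecode_array) {
    // ldrb r1, [bytecode_array, offset]; ldr ip, [table, r1 LSL 2]; Call(ip)
    dispatch_table[bc](bc);
  }
  return 0;
}
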
@@ -1282,8 +1321,9 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ SmiTag(r0);
__ push(r0);
- __ push(r2);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ mov(r0, r2);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ mov(r2, r0);
__ pop(r0);
@@ -1396,6 +1436,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
static void Generate_PushAppliedArguments(MacroAssembler* masm,
+ const int vectorOffset,
const int argumentsOffset,
const int indexOffset,
const int limitOffset) {
@@ -1413,12 +1454,9 @@ static void Generate_PushAppliedArguments(MacroAssembler* masm,
__ ldr(receiver, MemOperand(fp, argumentsOffset));
// Use inline caching to speed up access to arguments.
- FeedbackVectorSpec spec(0, Code::KEYED_LOAD_IC);
- Handle<TypeFeedbackVector> feedback_vector =
- masm->isolate()->factory()->NewTypeFeedbackVector(&spec);
- int index = feedback_vector->GetIndex(FeedbackVectorICSlot(0));
- __ mov(slot, Operand(Smi::FromInt(index)));
- __ Move(vector, feedback_vector);
+ int slot_index = TypeFeedbackVector::PushAppliedArgumentsIndex();
+ __ mov(slot, Operand(Smi::FromInt(slot_index)));
+ __ ldr(vector, MemOperand(fp, vectorOffset));
Handle<Code> ic =
KeyedLoadICStub(masm->isolate(), LoadICState(kNoExtraICState)).GetCode();
__ Call(ic, RelocInfo::CODE_TARGET);
@@ -1453,6 +1491,13 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize;
const int kReceiverOffset = kArgumentsOffset + kPointerSize;
const int kFunctionOffset = kReceiverOffset + kPointerSize;
+ const int kVectorOffset =
+ InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
+
+ // Push the vector.
+ __ ldr(r1, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r1, FieldMemOperand(r1, SharedFunctionInfo::kFeedbackVectorOffset));
+ __ Push(r1);
__ ldr(r0, MemOperand(fp, kFunctionOffset)); // get the function
__ push(r0);
@@ -1467,10 +1512,8 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
Generate_CheckStackOverflow(masm, kFunctionOffset, r0, kArgcIsSmiTagged);
// Push current limit and index.
- const int kIndexOffset =
- StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
- const int kLimitOffset =
- StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
+ const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
+ const int kLimitOffset = kVectorOffset - (1 * kPointerSize);
__ push(r0); // limit
__ mov(r1, Operand::Zero()); // initial index
__ push(r1);
@@ -1519,8 +1562,8 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
// Convert the receiver to a regular object.
// r0: receiver
__ bind(&call_to_object);
- __ push(r0);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ b(&push_receiver);
__ bind(&use_global_proxy);
@@ -1533,8 +1576,8 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
__ push(r0);
// Copy all arguments from the array to the stack.
- Generate_PushAppliedArguments(
- masm, kArgumentsOffset, kIndexOffset, kLimitOffset);
+ Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
+ kIndexOffset, kLimitOffset);
// Call the function.
Label call_proxy;
@@ -1573,6 +1616,13 @@ static void Generate_ConstructHelper(MacroAssembler* masm) {
const int kNewTargetOffset = kFPOnStackSize + kPCOnStackSize;
const int kArgumentsOffset = kNewTargetOffset + kPointerSize;
const int kFunctionOffset = kArgumentsOffset + kPointerSize;
+ static const int kVectorOffset =
+ InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
+
+ // Push the vector.
+ __ ldr(r1, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r1, FieldMemOperand(r1, SharedFunctionInfo::kFeedbackVectorOffset));
+ __ Push(r1);
// If newTarget is not supplied, set it to constructor
Label validate_arguments;
@@ -1595,33 +1645,28 @@ static void Generate_ConstructHelper(MacroAssembler* masm) {
Generate_CheckStackOverflow(masm, kFunctionOffset, r0, kArgcIsSmiTagged);
// Push current limit and index.
- const int kIndexOffset =
- StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
- const int kLimitOffset =
- StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
+ const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
+ const int kLimitOffset = kVectorOffset - (1 * kPointerSize);
__ push(r0); // limit
__ mov(r1, Operand::Zero()); // initial index
__ push(r1);
- // Push newTarget and callee functions
- __ ldr(r0, MemOperand(fp, kNewTargetOffset));
- __ push(r0);
+ // Push the constructor function as callee.
__ ldr(r0, MemOperand(fp, kFunctionOffset));
__ push(r0);
// Copy all arguments from the array to the stack.
- Generate_PushAppliedArguments(
- masm, kArgumentsOffset, kIndexOffset, kLimitOffset);
+ Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
+ kIndexOffset, kLimitOffset);
// Use undefined feedback vector
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
__ ldr(r1, MemOperand(fp, kFunctionOffset));
+ __ ldr(r4, MemOperand(fp, kNewTargetOffset));
// Call the function.
CallConstructStub stub(masm->isolate(), SUPER_CONSTRUCTOR_CALL);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
- __ Drop(1);
-
// Leave internal frame.
}
__ add(sp, sp, Operand(kStackSize * kPointerSize));
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index 005fb97513..8193816c84 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM
#include "src/base/bits.h"
@@ -14,8 +12,8 @@
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
-#include "src/jsregexp.h"
-#include "src/regexp-macro-assembler.h"
+#include "src/regexp/jsregexp.h"
+#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -33,7 +31,7 @@ static void InitializeArrayConstructorDescriptor(
JS_FUNCTION_STUB_MODE);
} else {
descriptor->Initialize(r0, deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
+ JS_FUNCTION_STUB_MODE);
}
}
@@ -49,7 +47,7 @@ static void InitializeInternalArrayConstructorDescriptor(
JS_FUNCTION_STUB_MODE);
} else {
descriptor->Initialize(r0, deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
+ JS_FUNCTION_STUB_MODE);
}
}
@@ -255,6 +253,9 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
// Call runtime on identical symbols since we need to throw a TypeError.
__ cmp(r4, Operand(SYMBOL_TYPE));
__ b(eq, slow);
+ // Call runtime on identical SIMD values since we must throw a TypeError.
+ __ cmp(r4, Operand(SIMD128_VALUE_TYPE));
+ __ b(eq, slow);
if (is_strong(strength)) {
// Call the runtime on anything that is converted in the semantics, since
// we need to throw a TypeError. Smis have already been ruled out.
@@ -273,6 +274,9 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
// Call runtime on identical symbols since we need to throw a TypeError.
__ cmp(r4, Operand(SYMBOL_TYPE));
__ b(eq, slow);
+ // Call runtime on identical SIMD values since we must throw a TypeError.
+ __ cmp(r4, Operand(SIMD128_VALUE_TYPE));
+ __ b(eq, slow);
if (is_strong(strength)) {
// Call the runtime on anything that is converted in the semantics,
// since we need to throw a TypeError. Smis and heap numbers have
@@ -675,26 +679,30 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ Push(lhs, rhs);
// Figure out which native to call and setup the arguments.
- Builtins::JavaScript native;
- if (cc == eq) {
- native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ if (cc == eq && strict()) {
+ __ TailCallRuntime(Runtime::kStrictEquals, 2, 1);
} else {
- native =
- is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
- int ncr; // NaN compare result
- if (cc == lt || cc == le) {
- ncr = GREATER;
+ Builtins::JavaScript native;
+ if (cc == eq) {
+ native = Builtins::EQUALS;
} else {
- DCHECK(cc == gt || cc == ge); // remaining cases
- ncr = LESS;
+ native =
+ is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
+ int ncr; // NaN compare result
+ if (cc == lt || cc == le) {
+ ncr = GREATER;
+ } else {
+ DCHECK(cc == gt || cc == ge); // remaining cases
+ ncr = LESS;
+ }
+ __ mov(r0, Operand(Smi::FromInt(ncr)));
+ __ push(r0);
}
- __ mov(r0, Operand(Smi::FromInt(ncr)));
- __ push(r0);
- }
- // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ InvokeBuiltin(native, JUMP_FUNCTION);
+ // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ InvokeBuiltin(native, JUMP_FUNCTION);
+ }
__ bind(&miss);
GenerateMiss(masm);
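
The ncr ("NaN compare result") pushed above encodes the fallback needed for NaN operands: any ordered comparison involving NaN must come out false, so the runtime is told to return the value that fails the tested condition. A small demonstration of that choice:

#include <cassert>

// Values per the comment above: -1 (less), 0 (equal), 1 (greater).
constexpr int LESS = -1, GREATER = 1;

int NaNCompareResult(bool cc_is_lt_or_le) {
  // "a < b" must be false when either operand is NaN; returning GREATER
  // makes the subsequent "result < 0" test fail. Symmetrically for gt/ge.
  return cc_is_lt_or_le ? GREATER : LESS;
}

int main() {
  assert(NaNCompareResult(true) == GREATER);  // lt/le against NaN -> false
  assert(NaNCompareResult(false) == LESS);    // gt/ge against NaN -> false
  return 0;
}
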
@@ -1583,7 +1591,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// by calling the runtime system.
__ bind(&slow);
__ push(r1);
- __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
+ __ TailCallRuntime(Runtime::kArguments, 1, 1);
}
@@ -1831,10 +1839,7 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
__ Push(receiver, key); // Receiver, key.
// Perform tail call to the entry.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kLoadElementWithInterceptor),
- masm->isolate()),
- 2, 1);
+ __ TailCallRuntime(Runtime::kLoadElementWithInterceptor, 2, 1);
__ bind(&slow);
PropertyAccessCompiler::TailCallBuiltin(
@@ -2378,32 +2383,41 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
}
-static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
+static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
+ bool is_super) {
// r0 : number of arguments to the construct function
- // r2 : Feedback vector
- // r3 : slot in feedback vector (Smi)
// r1 : the function to call
+ // r2 : feedback vector
+ // r3 : slot in feedback vector (Smi)
+ // r4 : original constructor (for IsSuperConstructorCall)
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Number-of-arguments register must be smi-tagged to call out.
__ SmiTag(r0);
__ Push(r3, r2, r1, r0);
+ if (is_super) {
+ __ Push(r4);
+ }
__ CallStub(stub);
+ if (is_super) {
+ __ Pop(r4);
+ }
__ Pop(r3, r2, r1, r0);
__ SmiUntag(r0);
}
-static void GenerateRecordCallTarget(MacroAssembler* masm) {
+static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// r0 : number of arguments to the construct function
// r1 : the function to call
- // r2 : Feedback vector
+ // r2 : feedback vector
// r3 : slot in feedback vector (Smi)
+ // r4 : original constructor (for IsSuperConstructorCall)
Label initialize, done, miss, megamorphic, not_array_function;
DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
@@ -2411,23 +2425,23 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
masm->isolate()->heap()->uninitialized_symbol());
- // Load the cache state into r4.
- __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
- __ ldr(r4, FieldMemOperand(r4, FixedArray::kHeaderSize));
+ // Load the cache state into r5.
+ __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
+ __ ldr(r5, FieldMemOperand(r5, FixedArray::kHeaderSize));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
- // We don't know if r4 is a WeakCell or a Symbol, but it's harmless to read at
+ // We don't know if r5 is a WeakCell or a Symbol, but it's harmless to read at
// this position in a symbol (see static asserts in type-feedback-vector.h).
Label check_allocation_site;
- Register feedback_map = r5;
- Register weak_value = r6;
- __ ldr(weak_value, FieldMemOperand(r4, WeakCell::kValueOffset));
+ Register feedback_map = r6;
+ Register weak_value = r9;
+ __ ldr(weak_value, FieldMemOperand(r5, WeakCell::kValueOffset));
__ cmp(r1, weak_value);
__ b(eq, &done);
- __ CompareRoot(r4, Heap::kmegamorphic_symbolRootIndex);
+ __ CompareRoot(r5, Heap::kmegamorphic_symbolRootIndex);
__ b(eq, &done);
- __ ldr(feedback_map, FieldMemOperand(r4, HeapObject::kMapOffset));
+ __ ldr(feedback_map, FieldMemOperand(r5, HeapObject::kMapOffset));
__ CompareRoot(feedback_map, Heap::kWeakCellMapRootIndex);
__ b(ne, FLAG_pretenuring_call_new ? &miss : &check_allocation_site);
@@ -2445,8 +2459,8 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
__ b(ne, &miss);
// Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4);
- __ cmp(r1, r4);
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r5);
+ __ cmp(r1, r5);
__ b(ne, &megamorphic);
__ jmp(&done);
}
@@ -2455,14 +2469,14 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// A monomorphic miss (i.e, here the cache is not uninitialized) goes
// megamorphic.
- __ CompareRoot(r4, Heap::kuninitialized_symbolRootIndex);
+ __ CompareRoot(r5, Heap::kuninitialized_symbolRootIndex);
__ b(eq, &initialize);
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ bind(&megamorphic);
- __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
+ __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
__ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
- __ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize));
+ __ str(ip, FieldMemOperand(r5, FixedArray::kHeaderSize));
__ jmp(&done);
// An uninitialized cache is patched with the function
@@ -2470,22 +2484,22 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
if (!FLAG_pretenuring_call_new) {
// Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4);
- __ cmp(r1, r4);
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r5);
+ __ cmp(r1, r5);
__ b(ne, &not_array_function);
// The target function is the Array constructor,
// Create an AllocationSite if we don't already have it, store it in the
// slot.
CreateAllocationSiteStub create_stub(masm->isolate());
- CallStubInRecordCallTarget(masm, &create_stub);
+ CallStubInRecordCallTarget(masm, &create_stub, is_super);
__ b(&done);
__ bind(&not_array_function);
}
CreateWeakCellStub create_stub(masm->isolate());
- CallStubInRecordCallTarget(masm, &create_stub);
+ CallStubInRecordCallTarget(masm, &create_stub, is_super);
__ bind(&done);
}
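
GenerateRecordCallTarget above implements a three-state feedback cache: uninitialized, monomorphic (a WeakCell holding the callee), and megamorphic. A toy state machine showing those transitions; the types are stand-ins for V8's heap objects:

#include <cassert>

enum class SlotState { kUninitialized, kMonomorphic, kMegamorphic };

struct FeedbackSlot {
  SlotState state = SlotState::kUninitialized;
  const void* target = nullptr;  // WeakCell analogue

  void Record(const void* fn) {
    if (state == SlotState::kUninitialized) {
      state = SlotState::kMonomorphic;  // CreateWeakCellStub path
      target = fn;
    } else if (state == SlotState::kMonomorphic && target != fn) {
      state = SlotState::kMegamorphic;  // patch in the megamorphic sentinel
      target = nullptr;
    }  // monomorphic hit or already megamorphic: no state change
  }
};

int main() {
  int f, g;
  FeedbackSlot slot;
  slot.Record(&f);  // first call initializes
  assert(slot.state == SlotState::kMonomorphic);
  slot.Record(&f);  // monomorphic cache hit
  assert(slot.state == SlotState::kMonomorphic);
  slot.Record(&g);  // different target -> megamorphic
  assert(slot.state == SlotState::kMegamorphic);
  return 0;
}
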
@@ -2535,8 +2549,10 @@ static void EmitSlowCase(MacroAssembler* masm,
static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
// Wrap the receiver and patch it back onto the stack.
{ FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
- __ Push(r1, r3);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ push(r1);
+ __ mov(r0, r3);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ pop(r1);
}
__ str(r0, MemOperand(sp, argc * kPointerSize));
@@ -2607,18 +2623,18 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// r0 : number of arguments
// r1 : the function to call
// r2 : feedback vector
- // r3 : (only if r2 is not the megamorphic symbol) slot in feedback
- // vector (Smi)
+ // r3 : slot in feedback vector (Smi, for RecordCallTarget)
+ // r4 : original constructor (for IsSuperConstructorCall)
Label slow, non_function_call;
// Check that the function is not a smi.
__ JumpIfSmi(r1, &non_function_call);
// Check that the function is a JSFunction.
- __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
+ __ CompareObjectType(r1, r5, r5, JS_FUNCTION_TYPE);
__ b(ne, &slow);
if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm);
+ GenerateRecordCallTarget(masm, IsSuperConstructorCall());
__ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
if (FLAG_pretenuring_call_new) {
@@ -2642,9 +2658,7 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// Pass function as original constructor.
if (IsSuperConstructorCall()) {
- __ mov(r4, Operand(1 * kPointerSize));
- __ add(r4, r4, Operand(r0, LSL, kPointerSizeLog2));
- __ ldr(r3, MemOperand(sp, r4));
+ __ mov(r3, r4);
} else {
__ mov(r3, r1);
}
@@ -2658,10 +2672,10 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// r0: number of arguments
// r1: called object
- // r4: object type
+ // r5: object type
Label do_call;
__ bind(&slow);
- __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ cmp(r5, Operand(JS_FUNCTION_PROXY_TYPE));
__ b(ne, &non_function_call);
__ GetBuiltinFunction(r1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
__ jmp(&do_call);
@@ -2898,11 +2912,10 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(r1, r2, r3);
// Call the entry.
- IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
- : IC::kCallIC_Customization_Miss;
-
- ExternalReference miss = ExternalReference(IC_Utility(id), masm->isolate());
- __ CallExternalReference(miss, 3);
+ Runtime::FunctionId id = GetICState() == DEFAULT
+ ? Runtime::kCallIC_Miss
+ : Runtime::kCallIC_Customization_Miss;
+ __ CallRuntime(id, 3);
// Move result to edi and exit the internal frame.
__ mov(r1, r0);
@@ -3014,10 +3027,9 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
// Fast case of Heap::LookupSingleCharacterStringFromCode.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiShiftSize == 0);
- DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCode + 1));
- __ tst(code_,
- Operand(kSmiTagMask |
- ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
+ DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCodeU + 1));
+ __ tst(code_, Operand(kSmiTagMask |
+ ((~String::kMaxOneByteCharCodeU) << kSmiTagSize)));
__ b(ne, &slow_case_);
__ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
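
The single tst above folds two checks into one mask: the value must be a Smi (low tag bit clear) and its untagged payload must not exceed kMaxOneByteCharCode. The switch to the unsigned kMaxOneByteCharCodeU presumably keeps the bitwise complement well-defined on an unsigned type. A sketch of the mask arithmetic under assumed constant values:

#include <cassert>
#include <cstdint>

// Assumed: Smis carry a low 0 tag bit, one-byte char codes are 0..0xff.
constexpr uint32_t kSmiTagSize = 1;
constexpr uint32_t kSmiTagMask = 1;
constexpr uint32_t kMaxOneByteCharCodeU = 0xff;

bool IsOneByteCharCodeSmi(uint32_t value) {
  // Zero iff the tag bit is clear AND no payload bit above 0xff is set:
  // exactly what the tst above checks in one instruction.
  uint32_t mask = kSmiTagMask | ((~kMaxOneByteCharCodeU) << kSmiTagSize);
  return (value & mask) == 0;
}

int main() {
  assert(IsOneByteCharCodeSmi(0x41u << kSmiTagSize));         // Smi('A'): fast
  assert(!IsOneByteCharCodeSmi(0x100u << kSmiTagSize));       // too big: slow
  assert(!IsOneByteCharCodeSmi((0x41u << kSmiTagSize) | 1));  // not a Smi
  return 0;
}
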
@@ -3294,7 +3306,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubStringRT, 3, 1);
+ __ TailCallRuntime(Runtime::kSubString, 3, 1);
__ bind(&single_char);
// r0: original string
@@ -3481,7 +3493,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
@@ -3762,7 +3774,7 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
- __ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
__ bind(&miss);
@@ -3814,15 +3826,12 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
void CompareICStub::GenerateMiss(MacroAssembler* masm) {
{
// Call the runtime system in a fresh internal frame.
- ExternalReference miss =
- ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
-
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(r1, r0);
__ Push(lr, r1, r0);
__ mov(ip, Operand(Smi::FromInt(op())));
__ push(ip);
- __ CallExternalReference(miss, 3);
+ __ CallRuntime(Runtime::kCompareIC_Miss, 3);
// Compute the entry point of the rewritten stub.
__ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore registers.
@@ -3883,7 +3892,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
Register entity_name = scratch0;
// Having undefined at this place means the name is not contained.
- DCHECK_EQ(kSmiTagSize, 1);
+ STATIC_ASSERT(kSmiTagSize == 1);
Register tmp = properties;
__ add(tmp, properties, Operand(index, LSL, 1));
__ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
@@ -3973,8 +3982,8 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
}
__ and_(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
- // Scale the index by multiplying by the element size.
- DCHECK(NameDictionary::kEntrySize == 3);
+ // Scale the index by multiplying by the entry size.
+ STATIC_ASSERT(NameDictionary::kEntrySize == 3);
// scratch2 = scratch2 * 3.
__ add(scratch2, scratch2, Operand(scratch2, LSL, 1));
@@ -4058,10 +4067,10 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ and_(index, mask, Operand(index, LSR, Name::kHashShift));
// Scale the index by multiplying by the entry size.
- DCHECK(NameDictionary::kEntrySize == 3);
+ STATIC_ASSERT(NameDictionary::kEntrySize == 3);
__ add(index, index, Operand(index, LSL, 1)); // index *= 3.
- DCHECK_EQ(kSmiTagSize, 1);
+ STATIC_ASSERT(kSmiTagSize == 1);
__ add(index, dictionary, Operand(index, LSL, 2));
__ ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
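
Scaling the probe index by NameDictionary::kEntrySize == 3 uses an add with a shifted operand instead of a multiply, i.e. index + (index << 1). A one-line check of that identity:

#include <cassert>

int main() {
  // add(index, index, Operand(index, LSL, 1)) computes x + (x << 1) == x * 3.
  for (int x = 0; x < 1000; ++x) {
    assert(x + (x << 1) == x * 3);
  }
  return 0;
}
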
@@ -4528,7 +4537,7 @@ void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::LOAD_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
- false, receiver, name, feedback,
+ receiver, name, feedback,
receiver_map, scratch1, r9);
__ bind(&miss);
@@ -4667,8 +4676,9 @@ void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
- int code_size = masm->CallStubSize(&stub) + 2 * Assembler::kInstrSize;
- PredictableCodeSizeScope predictable(masm, code_size);
+ PredictableCodeSizeScope predictable(masm);
+ predictable.ExpectSize(masm->CallStubSize(&stub) +
+ 2 * Assembler::kInstrSize);
__ push(lr);
__ CallStub(&stub);
__ pop(lr);
@@ -4772,12 +4782,12 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
// sp[0] - last argument
Label normal_sequence;
if (mode == DONT_OVERRIDE) {
- DCHECK(FAST_SMI_ELEMENTS == 0);
- DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1);
- DCHECK(FAST_ELEMENTS == 2);
- DCHECK(FAST_HOLEY_ELEMENTS == 3);
- DCHECK(FAST_DOUBLE_ELEMENTS == 4);
- DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
+ STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
   // Is the low bit set? If so, we are holey and that is good.
__ tst(r3, Operand(1));
@@ -5051,6 +5061,158 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
+void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
+ Register context = cp;
+ Register result = r0;
+ Register slot = r2;
+
+ // Go up the context chain to the script context.
+ for (int i = 0; i < depth(); ++i) {
+ __ ldr(result, ContextOperand(context, Context::PREVIOUS_INDEX));
+ context = result;
+ }
+
+ // Load the PropertyCell value at the specified slot.
+ __ add(result, context, Operand(slot, LSL, kPointerSizeLog2));
+ __ ldr(result, ContextOperand(result));
+ __ ldr(result, FieldMemOperand(result, PropertyCell::kValueOffset));
+
+ // If the result is not the_hole, return. Otherwise, handle in the runtime.
+ __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+ __ Ret(ne);
+
+  // Fall back to the runtime.
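+  // The slot index is passed to the runtime as a smi.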
+ __ SmiTag(slot);
+ __ push(slot);
+ __ TailCallRuntime(Runtime::kLoadGlobalViaContext, 1, 1);
+}
+
+
+void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
+ Register value = r0;
+ Register slot = r2;
+
+ Register cell = r1;
+ Register cell_details = r4;
+ Register cell_value = r5;
+ Register cell_value_map = r6;
+ Register scratch = r9;
+
+ Register context = cp;
+ Register context_temp = cell;
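+  // context_temp reuses the cell register; cell is not live until after the
+  // context chain walk below.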
+
+ Label fast_heapobject_case, fast_smi_case, slow_case;
+
+ if (FLAG_debug_code) {
+ __ CompareRoot(value, Heap::kTheHoleValueRootIndex);
+ __ Check(ne, kUnexpectedValue);
+ }
+
+ // Go up the context chain to the script context.
+ for (int i = 0; i < depth(); i++) {
+ __ ldr(context_temp, ContextOperand(context, Context::PREVIOUS_INDEX));
+ context = context_temp;
+ }
+
+ // Load the PropertyCell at the specified slot.
+ __ add(cell, context, Operand(slot, LSL, kPointerSizeLog2));
+ __ ldr(cell, ContextOperand(cell));
+
+  // Load PropertyDetails for the cell (actually only the cell_type, kind and
+  // read-only attribute).
+ __ ldr(cell_details, FieldMemOperand(cell, PropertyCell::kDetailsOffset));
+ __ SmiUntag(cell_details);
+ __ and_(cell_details, cell_details,
+ Operand(PropertyDetails::PropertyCellTypeField::kMask |
+ PropertyDetails::KindField::kMask |
+ PropertyDetails::kAttributesReadOnlyMask));
+
+ // Check if PropertyCell holds mutable data.
+ Label not_mutable_data;
+ __ cmp(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kMutable) |
+ PropertyDetails::KindField::encode(kData)));
+ __ b(ne, &not_mutable_data);
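+  // Mutable cells accept any value: smis are stored directly below, while
+  // heap objects go through the write-barrier path.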
+ __ JumpIfSmi(value, &fast_smi_case);
+
+ __ bind(&fast_heapobject_case);
+ __ str(value, FieldMemOperand(cell, PropertyCell::kValueOffset));
+ // RecordWriteField clobbers the value register, so we copy it before the
+ // call.
+ __ mov(r4, Operand(value));
+ __ RecordWriteField(cell, PropertyCell::kValueOffset, r4, scratch,
+ kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ Ret();
+
+ __ bind(&not_mutable_data);
+ // Check if PropertyCell value matches the new value (relevant for Constant,
+ // ConstantType and Undefined cells).
+ Label not_same_value;
+ __ ldr(cell_value, FieldMemOperand(cell, PropertyCell::kValueOffset));
+ __ cmp(cell_value, value);
+ __ b(ne, &not_same_value);
+
+ // Make sure the PropertyCell is not marked READ_ONLY.
+ __ tst(cell_details, Operand(PropertyDetails::kAttributesReadOnlyMask));
+ __ b(ne, &slow_case);
+
+ if (FLAG_debug_code) {
+ Label done;
+ // This can only be true for Constant, ConstantType and Undefined cells,
+ // because we never store the_hole via this stub.
+ __ cmp(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kConstant) |
+ PropertyDetails::KindField::encode(kData)));
+ __ b(eq, &done);
+ __ cmp(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kConstantType) |
+ PropertyDetails::KindField::encode(kData)));
+ __ b(eq, &done);
+ __ cmp(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kUndefined) |
+ PropertyDetails::KindField::encode(kData)));
+ __ Check(eq, kUnexpectedValue);
+ __ bind(&done);
+ }
+ __ Ret();
+ __ bind(&not_same_value);
+
+ // Check if PropertyCell contains data with constant type (and is not
+ // READ_ONLY).
+ __ cmp(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kConstantType) |
+ PropertyDetails::KindField::encode(kData)));
+ __ b(ne, &slow_case);
+
+  // Now either both old and new values must be smis or both must be heap
+  // objects with the same map.
+ Label value_is_heap_object;
+ __ JumpIfNotSmi(value, &value_is_heap_object);
+ __ JumpIfNotSmi(cell_value, &slow_case);
+ // Old and new values are smis, no need for a write barrier here.
+ __ bind(&fast_smi_case);
+ __ str(value, FieldMemOperand(cell, PropertyCell::kValueOffset));
+ __ Ret();
+
+ __ bind(&value_is_heap_object);
+ __ JumpIfSmi(cell_value, &slow_case);
+
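+  // Both values are heap objects here. The store is only safe if the new
+  // value has the same map as the current cell value (the ConstantType case).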
+ __ ldr(cell_value_map, FieldMemOperand(cell_value, HeapObject::kMapOffset));
+ __ ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ cmp(cell_value_map, scratch);
+ __ b(eq, &fast_heapobject_case);
+
+  // Fall back to the runtime.
+ __ bind(&slow_case);
+ __ SmiTag(slot);
+ __ Push(slot, value);
+ __ TailCallRuntime(is_strict(language_mode())
+ ? Runtime::kStoreGlobalViaContext_Strict
+ : Runtime::kStoreGlobalViaContext_Sloppy,
+ 2, 1);
+}
+
+
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
return ref0.address() - ref1.address();
}
diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h
index ddea33a34b..b2b2c08cd8 100644
--- a/deps/v8/src/arm/code-stubs-arm.h
+++ b/deps/v8/src/arm/code-stubs-arm.h
@@ -5,6 +5,8 @@
#ifndef V8_ARM_CODE_STUBS_ARM_H_
#define V8_ARM_CODE_STUBS_ARM_H_
+#include "src/arm/frames-arm.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index a456996a27..6a9f4677f6 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM
#include "src/arm/simulator-arm.h"
@@ -888,10 +886,9 @@ CodeAgingHelper::CodeAgingHelper() {
// to avoid overloading the stack in stress conditions.
// DONT_FLUSH is used because the CodeAgingHelper is initialized early in
 // the process, before the ARM simulator ICache is set up.
- SmartPointer<CodePatcher> patcher(
- new CodePatcher(young_sequence_.start(),
- young_sequence_.length() / Assembler::kInstrSize,
- CodePatcher::DONT_FLUSH));
+ base::SmartPointer<CodePatcher> patcher(new CodePatcher(
+ young_sequence_.start(), young_sequence_.length() / Assembler::kInstrSize,
+ CodePatcher::DONT_FLUSH));
PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
patcher->masm()->PushFixedFrame(r1);
patcher->masm()->nop(ip.code());
diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h
index 4c7c7688fd..d36ce59d66 100644
--- a/deps/v8/src/arm/codegen-arm.h
+++ b/deps/v8/src/arm/codegen-arm.h
@@ -12,9 +12,6 @@ namespace v8 {
namespace internal {
-enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-
-
class StringCharLoadGenerator : public AllStatic {
public:
// Generates the code for handling different string types and loading the
diff --git a/deps/v8/src/arm/constants-arm.cc b/deps/v8/src/arm/constants-arm.cc
index 0749356909..9fefc3140a 100644
--- a/deps/v8/src/arm/constants-arm.cc
+++ b/deps/v8/src/arm/constants-arm.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM
#include "src/arm/constants-arm.h"
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index 7b8529c4bb..6d544f3f36 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -5,6 +5,12 @@
#ifndef V8_ARM_CONSTANTS_ARM_H_
#define V8_ARM_CONSTANTS_ARM_H_
+#include <stdint.h>
+
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+#include "src/globals.h"
+
// ARM EABI is required.
#if defined(__arm__) && !defined(__ARM_EABI__)
#error ARM EABI support is required.
diff --git a/deps/v8/src/arm/cpu-arm.cc b/deps/v8/src/arm/cpu-arm.cc
index dd2d13c686..f291ba92ca 100644
--- a/deps/v8/src/arm/cpu-arm.cc
+++ b/deps/v8/src/arm/cpu-arm.cc
@@ -12,8 +12,6 @@
#endif
#endif
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM
#include "src/assembler.h"
diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc
deleted file mode 100644
index 7d9313200b..0000000000
--- a/deps/v8/src/arm/debug-arm.cc
+++ /dev/null
@@ -1,248 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#if V8_TARGET_ARCH_ARM
-
-#include "src/codegen.h"
-#include "src/debug.h"
-
-namespace v8 {
-namespace internal {
-
-void BreakLocation::SetDebugBreakAtReturn() {
- // Patch the code changing the return from JS function sequence from
- // mov sp, fp
- // ldmia sp!, {fp, lr}
- // add sp, sp, #4
- // bx lr
- // to a call to the debug break return code.
- // ldr ip, [pc, #0]
- // blx ip
- // <debug break return code entry point address>
- // bkpt 0
- CodePatcher patcher(pc(), Assembler::kJSReturnSequenceInstructions);
- patcher.masm()->ldr(v8::internal::ip, MemOperand(v8::internal::pc, 0));
- patcher.masm()->blx(v8::internal::ip);
- patcher.Emit(
- debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry());
- patcher.masm()->bkpt(0);
-}
-
-
-void BreakLocation::SetDebugBreakAtSlot() {
- DCHECK(IsDebugBreakSlot());
- // Patch the code changing the debug break slot code from
- // mov r2, r2
- // mov r2, r2
- // mov r2, r2
- // to a call to the debug break slot code.
- // ldr ip, [pc, #0]
- // blx ip
- // <debug break slot code entry point address>
- CodePatcher patcher(pc(), Assembler::kDebugBreakSlotInstructions);
- patcher.masm()->ldr(v8::internal::ip, MemOperand(v8::internal::pc, 0));
- patcher.masm()->blx(v8::internal::ip);
- patcher.Emit(
- debug_info_->GetIsolate()->builtins()->Slot_DebugBreak()->entry());
-}
-
-
-#define __ ACCESS_MASM(masm)
-
-
-static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
- RegList object_regs,
- RegList non_object_regs) {
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-
- // Load padding words on stack.
- __ mov(ip, Operand(Smi::FromInt(LiveEdit::kFramePaddingValue)));
- for (int i = 0; i < LiveEdit::kFramePaddingInitialSize; i++) {
- __ push(ip);
- }
- __ mov(ip, Operand(Smi::FromInt(LiveEdit::kFramePaddingInitialSize)));
- __ push(ip);
-
- // Store the registers containing live values on the expression stack to
-    // make sure that these are correctly updated during GC. Non-object values
-    // are stored as smis, which leaves them untouched by GC.
- DCHECK((object_regs & ~kJSCallerSaved) == 0);
- DCHECK((non_object_regs & ~kJSCallerSaved) == 0);
- DCHECK((object_regs & non_object_regs) == 0);
- if ((object_regs | non_object_regs) != 0) {
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if ((non_object_regs & (1 << r)) != 0) {
- if (FLAG_debug_code) {
- __ tst(reg, Operand(0xc0000000));
- __ Assert(eq, kUnableToEncodeValueAsSmi);
- }
- __ SmiTag(reg);
- }
- }
- __ stm(db_w, sp, object_regs | non_object_regs);
- }
-
-#ifdef DEBUG
- __ RecordComment("// Calling from debug break to runtime - come in - over");
-#endif
- __ mov(r0, Operand::Zero()); // no arguments
- __ mov(r1, Operand(ExternalReference::debug_break(masm->isolate())));
-
- CEntryStub ceb(masm->isolate(), 1);
- __ CallStub(&ceb);
-
- // Restore the register values from the expression stack.
- if ((object_regs | non_object_regs) != 0) {
- __ ldm(ia_w, sp, object_regs | non_object_regs);
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if ((non_object_regs & (1 << r)) != 0) {
- __ SmiUntag(reg);
- }
- if (FLAG_debug_code &&
- (((object_regs |non_object_regs) & (1 << r)) == 0)) {
- __ mov(reg, Operand(kDebugZapValue));
- }
- }
- }
-
- // Don't bother removing padding bytes pushed on the stack
- // as the frame is going to be restored right away.
-
- // Leave the internal frame.
- }
-
- // Now that the break point has been handled, resume normal execution by
-  // jumping to the target address intended by the caller, which was
- // overwritten by the address of DebugBreakXXX.
- ExternalReference after_break_target =
- ExternalReference::debug_after_break_target_address(masm->isolate());
- __ mov(ip, Operand(after_break_target));
- __ ldr(ip, MemOperand(ip));
- __ Jump(ip);
-}
-
-
-void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
- // Register state for CallICStub
- // ----------- S t a t e -------------
- // -- r1 : function
- // -- r3 : slot in feedback array (smi)
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, r1.bit() | r3.bit(), 0);
-}
-
-
-void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) {
- // In places other than IC call sites it is expected that r0 is TOS which
- // is an object - this is not generally the case so this should be used with
- // care.
- Generate_DebugBreakCallHelper(masm, r0.bit(), 0);
-}
-
-
-void DebugCodegen::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
- // Register state for CallFunctionStub (from code-stubs-arm.cc).
- // ----------- S t a t e -------------
- // -- r1 : function
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, r1.bit(), 0);
-}
-
-
-void DebugCodegen::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
- // Calling convention for CallConstructStub (from code-stubs-arm.cc)
- // ----------- S t a t e -------------
- // -- r0 : number of arguments (not smi)
- // -- r1 : constructor function
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, r1.bit(), r0.bit());
-}
-
-
-void DebugCodegen::GenerateCallConstructStubRecordDebugBreak(
- MacroAssembler* masm) {
- // Calling convention for CallConstructStub (from code-stubs-arm.cc)
- // ----------- S t a t e -------------
- // -- r0 : number of arguments (not smi)
- // -- r1 : constructor function
- // -- r2 : feedback array
- // -- r3 : feedback slot (smi)
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit() | r3.bit(), r0.bit());
-}
-
-
-void DebugCodegen::GenerateSlot(MacroAssembler* masm) {
-  // Generate enough nops to make space for a call instruction. Avoid emitting
- // the constant pool in the debug break slot code.
- Assembler::BlockConstPoolScope block_const_pool(masm);
- Label check_codesize;
- __ bind(&check_codesize);
- __ RecordDebugBreakSlot();
- for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
- __ nop(MacroAssembler::DEBUG_BREAK_NOP);
- }
- DCHECK_EQ(Assembler::kDebugBreakSlotInstructions,
- masm->InstructionsGeneratedSince(&check_codesize));
-}
-
-
-void DebugCodegen::GenerateSlotDebugBreak(MacroAssembler* masm) {
- // In the places where a debug break slot is inserted no registers can contain
- // object pointers.
- Generate_DebugBreakCallHelper(masm, 0, 0);
-}
-
-
-void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- __ Ret();
-}
-
-
-void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- ExternalReference restarter_frame_function_slot =
- ExternalReference::debug_restarter_frame_function_pointer_address(
- masm->isolate());
- __ mov(ip, Operand(restarter_frame_function_slot));
- __ mov(r1, Operand::Zero());
- __ str(r1, MemOperand(ip, 0));
-
- // Load the function pointer off of our current stack frame.
- __ ldr(r1, MemOperand(fp,
- StandardFrameConstants::kConstantPoolOffset - kPointerSize));
-
- // Pop return address, frame and constant pool pointer (if
- // FLAG_enable_embedded_constant_pool).
- __ LeaveFrame(StackFrame::INTERNAL);
-
- { ConstantPoolUnavailableScope constant_pool_unavailable(masm);
- // Load context from the function.
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-
- // Get function code.
- __ ldr(ip, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(ip, FieldMemOperand(ip, SharedFunctionInfo::kCodeOffset));
- __ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- // Re-run JSFunction, r1 is function, cp is context.
- __ Jump(ip);
- }
-}
-
-
-const bool LiveEdit::kFrameDropperSupported = true;
-
-#undef __
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index a9bcea9726..312bb00df3 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -2,11 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/codegen.h"
#include "src/deoptimizer.h"
-#include "src/full-codegen.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/safepoint-table.h"
namespace v8 {
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index 1c1516d168..0cc24e00af 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -28,8 +28,6 @@
#include <stdio.h>
#include <string.h>
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM
#include "src/arm/constants-arm.h"
diff --git a/deps/v8/src/arm/frames-arm.cc b/deps/v8/src/arm/frames-arm.cc
index 3f3c4f04c2..2004ff6ae9 100644
--- a/deps/v8/src/arm/frames-arm.cc
+++ b/deps/v8/src/arm/frames-arm.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM
#include "src/assembler.h"
diff --git a/deps/v8/src/arm/frames-arm.h b/deps/v8/src/arm/frames-arm.h
index db6a9e52e0..dcba34f017 100644
--- a/deps/v8/src/arm/frames-arm.h
+++ b/deps/v8/src/arm/frames-arm.h
@@ -128,12 +128,6 @@ class JavaScriptFrameConstants : public AllStatic {
};
-inline Object* JavaScriptFrame::function_slot_object() const {
- const int offset = JavaScriptFrameConstants::kFunctionOffset;
- return Memory::Object_at(fp() + offset);
-}
-
-
} } // namespace v8::internal
#endif // V8_ARM_FRAMES_ARM_H_
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
deleted file mode 100644
index e4b7cf34ee..0000000000
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ /dev/null
@@ -1,5600 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#if V8_TARGET_ARCH_ARM
-
-#include "src/code-factory.h"
-#include "src/code-stubs.h"
-#include "src/codegen.h"
-#include "src/compiler.h"
-#include "src/debug.h"
-#include "src/full-codegen.h"
-#include "src/ic/ic.h"
-#include "src/parser.h"
-#include "src/scopes.h"
-
-#include "src/arm/code-stubs-arm.h"
-#include "src/arm/macro-assembler-arm.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm_)
-
-
-// A patch site is a location in the code that can be patched later. This
-// class has a number of methods to emit the patchable code and the method
-// EmitPatchInfo to record a marker back to the patchable code. The marker
-// is a cmp rx, #yyy instruction, where x * 0x00000fff + yyy (using the raw
-// 12-bit immediate value) is the delta from the pc to the first instruction
-// of the patchable code.
-class JumpPatchSite BASE_EMBEDDED {
- public:
- explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
-#ifdef DEBUG
- info_emitted_ = false;
-#endif
- }
-
- ~JumpPatchSite() {
- DCHECK(patch_site_.is_bound() == info_emitted_);
- }
-
-  // When initially emitting this, ensure that a jump is always generated to
-  // skip the inlined smi code.
- void EmitJumpIfNotSmi(Register reg, Label* target) {
- DCHECK(!patch_site_.is_bound() && !info_emitted_);
- Assembler::BlockConstPoolScope block_const_pool(masm_);
- __ bind(&patch_site_);
- __ cmp(reg, Operand(reg));
- __ b(eq, target); // Always taken before patched.
- }
-
-  // When initially emitting this, ensure that a jump is never generated to
-  // skip the inlined smi code.
- void EmitJumpIfSmi(Register reg, Label* target) {
- DCHECK(!patch_site_.is_bound() && !info_emitted_);
- Assembler::BlockConstPoolScope block_const_pool(masm_);
- __ bind(&patch_site_);
- __ cmp(reg, Operand(reg));
- __ b(ne, target); // Never taken before patched.
- }
-
- void EmitPatchInfo() {
- // Block literal pool emission whilst recording patch site information.
- Assembler::BlockConstPoolScope block_const_pool(masm_);
- if (patch_site_.is_bound()) {
- int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
- Register reg;
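-      // Encode the delta as described in the class comment: the register code
-      // carries delta / kOff12Mask and the raw 12-bit immediate the remainder.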
- reg.set_code(delta_to_patch_site / kOff12Mask);
- __ cmp_raw_immediate(reg, delta_to_patch_site % kOff12Mask);
-#ifdef DEBUG
- info_emitted_ = true;
-#endif
- } else {
- __ nop(); // Signals no inlined code.
- }
- }
-
- private:
- MacroAssembler* masm_;
- Label patch_site_;
-#ifdef DEBUG
- bool info_emitted_;
-#endif
-};
-
-
-// Generate code for a JS function. On entry to the function the receiver
-// and arguments have been pushed on the stack left to right. The actual
-// argument count matches the formal parameter count expected by the
-// function.
-//
-// The live registers are:
-// o r1: the JS function object being called (i.e., ourselves)
-// o cp: our context
-// o pp: our caller's constant pool pointer (if enabled)
-// o fp: our caller's frame pointer
-// o sp: stack pointer
-// o lr: return address
-//
-// The function builds a JS frame. Please see JavaScriptFrameConstants in
-// frames-arm.h for its layout.
-void FullCodeGenerator::Generate() {
- CompilationInfo* info = info_;
- profiling_counter_ = isolate()->factory()->NewCell(
- Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
- SetFunctionPosition(function());
- Comment cmnt(masm_, "[ function compiled by full code generator");
-
- ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
- __ stop("stop-at");
- }
-#endif
-
- // Sloppy mode functions and builtins need to replace the receiver with the
- // global proxy when called as functions (without an explicit receiver
- // object).
- if (is_sloppy(info->language_mode()) && !info->is_native() &&
- info->MayUseThis() && info->scope()->has_this_declaration()) {
- Label ok;
- int receiver_offset = info->scope()->num_parameters() * kPointerSize;
- __ ldr(r2, MemOperand(sp, receiver_offset));
- __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
- __ b(ne, &ok);
-
- __ ldr(r2, GlobalObjectOperand());
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalProxyOffset));
-
- __ str(r2, MemOperand(sp, receiver_offset));
-
- __ bind(&ok);
- }
-
- // Open a frame scope to indicate that there is a frame on the stack. The
- // MANUAL indicates that the scope shouldn't actually generate code to set up
- // the frame (that is done below).
- FrameScope frame_scope(masm_, StackFrame::MANUAL);
-
- info->set_prologue_offset(masm_->pc_offset());
- __ Prologue(info->IsCodePreAgingActive());
- info->AddNoFrameRange(0, masm_->pc_offset());
-
- { Comment cmnt(masm_, "[ Allocate locals");
- int locals_count = info->scope()->num_stack_slots();
- // Generators allocate locals, if any, in context slots.
- DCHECK(!IsGeneratorFunction(info->function()->kind()) || locals_count == 0);
- if (locals_count > 0) {
- if (locals_count >= 128) {
- Label ok;
- __ sub(r9, sp, Operand(locals_count * kPointerSize));
- __ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
- __ cmp(r9, Operand(r2));
- __ b(hs, &ok);
- __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
- __ bind(&ok);
- }
- __ LoadRoot(r9, Heap::kUndefinedValueRootIndex);
- int kMaxPushes = FLAG_optimize_for_size ? 4 : 32;
- if (locals_count >= kMaxPushes) {
- int loop_iterations = locals_count / kMaxPushes;
- __ mov(r2, Operand(loop_iterations));
- Label loop_header;
- __ bind(&loop_header);
- // Do pushes.
- for (int i = 0; i < kMaxPushes; i++) {
- __ push(r9);
- }
- // Continue loop if not done.
- __ sub(r2, r2, Operand(1), SetCC);
- __ b(&loop_header, ne);
- }
- int remaining = locals_count % kMaxPushes;
- // Emit the remaining pushes.
- for (int i = 0; i < remaining; i++) {
- __ push(r9);
- }
- }
- }
-
- bool function_in_register = true;
-
- // Possibly allocate a local context.
- if (info->scope()->num_heap_slots() > 0) {
- // Argument to NewContext is the function, which is still in r1.
- Comment cmnt(masm_, "[ Allocate context");
- bool need_write_barrier = true;
- int slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (info->scope()->is_script_scope()) {
- __ push(r1);
- __ Push(info->scope()->GetScopeInfo(info->isolate()));
- __ CallRuntime(Runtime::kNewScriptContext, 2);
- } else if (slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), slots);
- __ CallStub(&stub);
- // Result of FastNewContextStub is always in new space.
- need_write_barrier = false;
- } else {
- __ push(r1);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
- }
- function_in_register = false;
- // Context is returned in r0. It replaces the context passed to us.
- // It's saved in the stack and kept live in cp.
- __ mov(cp, r0);
- __ str(r0, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // Copy any necessary parameters into the context.
- int num_parameters = info->scope()->num_parameters();
- int first_parameter = info->scope()->has_this_declaration() ? -1 : 0;
- for (int i = first_parameter; i < num_parameters; i++) {
- Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
- if (var->IsContextSlot()) {
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ ldr(r0, MemOperand(fp, parameter_offset));
- // Store it in the context.
- MemOperand target = ContextOperand(cp, var->index());
- __ str(r0, target);
-
- // Update the write barrier.
- if (need_write_barrier) {
- __ RecordWriteContextSlot(
- cp, target.offset(), r0, r3, kLRHasBeenSaved, kDontSaveFPRegs);
- } else if (FLAG_debug_code) {
- Label done;
- __ JumpIfInNewSpace(cp, r0, &done);
- __ Abort(kExpectedNewSpaceObject);
- __ bind(&done);
- }
- }
- }
- }
-
- // Possibly set up a local binding to the this function which is used in
- // derived constructors with super calls.
- Variable* this_function_var = scope()->this_function_var();
- if (this_function_var != nullptr) {
- Comment cmnt(masm_, "[ This function");
- if (!function_in_register) {
- __ ldr(r1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-      // The write barrier clobbers the register again; keep it marked as such.
- }
- SetVar(this_function_var, r1, r0, r2);
- }
-
- Variable* new_target_var = scope()->new_target_var();
- if (new_target_var != nullptr) {
- Comment cmnt(masm_, "[ new.target");
-
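-    // Walk past a possible arguments adaptor frame to reach the frame that
-    // may be a construct frame.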
- __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r1, MemOperand(r2, StandardFrameConstants::kContextOffset));
- __ cmp(r1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset), eq);
- __ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
- __ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
- Label non_construct_frame, done;
-
- __ b(ne, &non_construct_frame);
- __ ldr(r0,
- MemOperand(r2, ConstructFrameConstants::kOriginalConstructorOffset));
- __ b(&done);
-
- __ bind(&non_construct_frame);
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- __ bind(&done);
-
- SetVar(new_target_var, r0, r2, r3);
- }
-
- // Possibly allocate RestParameters
- int rest_index;
- Variable* rest_param = scope()->rest_parameter(&rest_index);
- if (rest_param) {
- Comment cmnt(masm_, "[ Allocate rest parameter array");
-
- int num_parameters = info->scope()->num_parameters();
- int offset = num_parameters * kPointerSize;
-
- __ add(r3, fp, Operand(StandardFrameConstants::kCallerSPOffset + offset));
- __ mov(r2, Operand(Smi::FromInt(num_parameters)));
- __ mov(r1, Operand(Smi::FromInt(rest_index)));
- __ mov(r0, Operand(Smi::FromInt(language_mode())));
- __ Push(r3, r2, r1, r0);
-
- RestParamAccessStub stub(isolate());
- __ CallStub(&stub);
-
- SetVar(rest_param, r0, r1, r2);
- }
-
- Variable* arguments = scope()->arguments();
- if (arguments != NULL) {
- // Function uses arguments object.
- Comment cmnt(masm_, "[ Allocate arguments object");
- if (!function_in_register) {
- // Load this again, if it's used by the local context below.
- __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- } else {
- __ mov(r3, r1);
- }
- // Receiver is just before the parameters on the caller's stack.
- int num_parameters = info->scope()->num_parameters();
- int offset = num_parameters * kPointerSize;
-
- __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset + offset));
- __ mov(r1, Operand(Smi::FromInt(num_parameters)));
- __ Push(r3, r2, r1);
-
- // Arguments to ArgumentsAccessStub:
- // function, receiver address, parameter count.
-    // The stub will rewrite receiver and parameter count if the previous
- // stack frame was an arguments adapter frame.
- ArgumentsAccessStub::Type type;
- if (is_strict(language_mode()) || !is_simple_parameter_list()) {
- type = ArgumentsAccessStub::NEW_STRICT;
- } else if (function()->has_duplicate_parameters()) {
- type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
- } else {
- type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
- }
- ArgumentsAccessStub stub(isolate(), type);
- __ CallStub(&stub);
-
- SetVar(arguments, r0, r1, r2);
- }
-
-
- if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
-
- // Visit the declarations and body unless there is an illegal
- // redeclaration.
- if (scope()->HasIllegalRedeclaration()) {
- Comment cmnt(masm_, "[ Declarations");
- scope()->VisitIllegalRedeclaration(this);
-
- } else {
- PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
- { Comment cmnt(masm_, "[ Declarations");
- // For named function expressions, declare the function name as a
- // constant.
- if (scope()->is_function_scope() && scope()->function() != NULL) {
- VariableDeclaration* function = scope()->function();
- DCHECK(function->proxy()->var()->mode() == CONST ||
- function->proxy()->var()->mode() == CONST_LEGACY);
- DCHECK(!function->proxy()->var()->IsUnallocatedOrGlobalSlot());
- VisitVariableDeclaration(function);
- }
- VisitDeclarations(scope()->declarations());
- }
-
- { Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
- Label ok;
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmp(sp, Operand(ip));
- __ b(hs, &ok);
- Handle<Code> stack_check = isolate()->builtins()->StackCheck();
- PredictableCodeSizeScope predictable(masm_,
- masm_->CallSize(stack_check, RelocInfo::CODE_TARGET));
- __ Call(stack_check, RelocInfo::CODE_TARGET);
- __ bind(&ok);
- }
-
- { Comment cmnt(masm_, "[ Body");
- DCHECK(loop_depth() == 0);
- VisitStatements(function()->body());
- DCHECK(loop_depth() == 0);
- }
- }
-
- // Always emit a 'return undefined' in case control fell off the end of
- // the body.
- { Comment cmnt(masm_, "[ return <undefined>;");
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- }
- EmitReturnSequence();
-
- // Force emit the constant pool, so it doesn't get emitted in the middle
- // of the back edge table.
- masm()->CheckConstPool(true, false);
-}
-
-
-void FullCodeGenerator::ClearAccumulator() {
- __ mov(r0, Operand(Smi::FromInt(0)));
-}
-
-
-void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
- __ mov(r2, Operand(profiling_counter_));
- __ ldr(r3, FieldMemOperand(r2, Cell::kValueOffset));
- __ sub(r3, r3, Operand(Smi::FromInt(delta)), SetCC);
- __ str(r3, FieldMemOperand(r2, Cell::kValueOffset));
-}
-
-
-#ifdef CAN_USE_ARMV7_INSTRUCTIONS
-static const int kProfileCounterResetSequenceLength = 5 * Assembler::kInstrSize;
-#else
-static const int kProfileCounterResetSequenceLength = 7 * Assembler::kInstrSize;
-#endif
-
-
-void FullCodeGenerator::EmitProfilingCounterReset() {
- Assembler::BlockConstPoolScope block_const_pool(masm_);
- PredictableCodeSizeScope predictable_code_size_scope(
- masm_, kProfileCounterResetSequenceLength);
- Label start;
- __ bind(&start);
- int reset_value = FLAG_interrupt_budget;
- if (info_->is_debug()) {
- // Detect debug break requests as soon as possible.
- reset_value = FLAG_interrupt_budget >> 4;
- }
- __ mov(r2, Operand(profiling_counter_));
-  // The mov above expands to between 1 and 3 instructions on ARMv7, or between
-  // 1 and 5 instructions on ARMv6, depending on whether an extended constant
-  // pool is used; insert nops to compensate.
- int expected_instr_count =
- (kProfileCounterResetSequenceLength / Assembler::kInstrSize) - 2;
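-  // The subtracted 2 accounts for the final mov/str pair emitted below.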
- DCHECK(masm_->InstructionsGeneratedSince(&start) <= expected_instr_count);
- while (masm_->InstructionsGeneratedSince(&start) != expected_instr_count) {
- __ nop();
- }
- __ mov(r3, Operand(Smi::FromInt(reset_value)));
- __ str(r3, FieldMemOperand(r2, Cell::kValueOffset));
-}
-
-
-void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
- Label* back_edge_target) {
- Comment cmnt(masm_, "[ Back edge bookkeeping");
- // Block literal pools whilst emitting back edge code.
- Assembler::BlockConstPoolScope block_const_pool(masm_);
- Label ok;
-
- DCHECK(back_edge_target->is_bound());
- int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
- int weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kCodeSizeMultiplier));
- EmitProfilingCounterDecrement(weight);
- __ b(pl, &ok);
- __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
-
- // Record a mapping of this PC offset to the OSR id. This is used to find
- // the AST id from the unoptimized code in order to use it as a key into
- // the deoptimization input data found in the optimized code.
- RecordBackEdge(stmt->OsrEntryId());
-
- EmitProfilingCounterReset();
-
- __ bind(&ok);
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
- // Record a mapping of the OSR id to this PC. This is used if the OSR
- // entry becomes the target of a bailout. We don't expect it to be, but
- // we want it to work if it is.
- PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
-}
-
-
-void FullCodeGenerator::EmitReturnSequence() {
- Comment cmnt(masm_, "[ Return sequence");
- if (return_label_.is_bound()) {
- __ b(&return_label_);
- } else {
- __ bind(&return_label_);
- if (FLAG_trace) {
- // Push the return value on the stack as the parameter.
- // Runtime::TraceExit returns its parameter in r0.
- __ push(r0);
- __ CallRuntime(Runtime::kTraceExit, 1);
- }
- // Pretend that the exit is a backwards jump to the entry.
- int weight = 1;
- if (info_->ShouldSelfOptimize()) {
- weight = FLAG_interrupt_budget / FLAG_self_opt_count;
- } else {
- int distance = masm_->pc_offset();
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kCodeSizeMultiplier));
- }
- EmitProfilingCounterDecrement(weight);
- Label ok;
- __ b(pl, &ok);
- __ push(r0);
- __ Call(isolate()->builtins()->InterruptCheck(),
- RelocInfo::CODE_TARGET);
- __ pop(r0);
- EmitProfilingCounterReset();
- __ bind(&ok);
-
-#ifdef DEBUG
- // Add a label for checking the size of the code used for returning.
- Label check_exit_codesize;
- __ bind(&check_exit_codesize);
-#endif
- // Make sure that the constant pool is not emitted inside of the return
- // sequence.
- { Assembler::BlockConstPoolScope block_const_pool(masm_);
- int32_t arg_count = info_->scope()->num_parameters() + 1;
- int32_t sp_delta = arg_count * kPointerSize;
- SetReturnPosition(function());
- // TODO(svenpanne) The code below is sometimes 4 words, sometimes 5!
- PredictableCodeSizeScope predictable(masm_, -1);
- __ RecordJSReturn();
- int no_frame_start = __ LeaveFrame(StackFrame::JAVA_SCRIPT);
- { ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
- __ add(sp, sp, Operand(sp_delta));
- __ Jump(lr);
- info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
- }
- }
-
-#ifdef DEBUG
- // Check that the size of the code used for returning is large enough
- // for the debugger's requirements.
- DCHECK(Assembler::kJSReturnSequenceInstructions <=
- masm_->InstructionsGeneratedSince(&check_exit_codesize));
-#endif
- }
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- codegen()->GetVar(result_register(), var);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- codegen()->GetVar(result_register(), var);
- __ push(result_register());
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Variable* var) const {
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- // For simplicity we always test the accumulator register.
- codegen()->GetVar(result_register(), var);
- codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
- codegen()->DoTest(this);
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
- Heap::RootListIndex index) const {
- __ LoadRoot(result_register(), index);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(
- Heap::RootListIndex index) const {
- __ LoadRoot(result_register(), index);
- __ push(result_register());
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
- false_label_);
- if (index == Heap::kUndefinedValueRootIndex ||
- index == Heap::kNullValueRootIndex ||
- index == Heap::kFalseValueRootIndex) {
- if (false_label_ != fall_through_) __ b(false_label_);
- } else if (index == Heap::kTrueValueRootIndex) {
- if (true_label_ != fall_through_) __ b(true_label_);
- } else {
- __ LoadRoot(result_register(), index);
- codegen()->DoTest(this);
- }
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
- Handle<Object> lit) const {
- __ mov(result_register(), Operand(lit));
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
- // Immediates cannot be pushed directly.
- __ mov(result_register(), Operand(lit));
- __ push(result_register());
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
- false_label_);
- DCHECK(!lit->IsUndetectableObject()); // There are no undetectable literals.
- if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
- if (false_label_ != fall_through_) __ b(false_label_);
- } else if (lit->IsTrue() || lit->IsJSObject()) {
- if (true_label_ != fall_through_) __ b(true_label_);
- } else if (lit->IsString()) {
- if (String::cast(*lit)->length() == 0) {
- if (false_label_ != fall_through_) __ b(false_label_);
- } else {
- if (true_label_ != fall_through_) __ b(true_label_);
- }
- } else if (lit->IsSmi()) {
- if (Smi::cast(*lit)->value() == 0) {
- if (false_label_ != fall_through_) __ b(false_label_);
- } else {
- if (true_label_ != fall_through_) __ b(true_label_);
- }
- } else {
- // For simplicity we always test the accumulator register.
- __ mov(result_register(), Operand(lit));
- codegen()->DoTest(this);
- }
-}
-
-
-void FullCodeGenerator::EffectContext::DropAndPlug(int count,
- Register reg) const {
- DCHECK(count > 0);
- __ Drop(count);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
- int count,
- Register reg) const {
- DCHECK(count > 0);
- __ Drop(count);
- __ Move(result_register(), reg);
-}
-
-
-void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
- Register reg) const {
- DCHECK(count > 0);
- if (count > 1) __ Drop(count - 1);
- __ str(reg, MemOperand(sp, 0));
-}
-
-
-void FullCodeGenerator::TestContext::DropAndPlug(int count,
- Register reg) const {
- DCHECK(count > 0);
- // For simplicity we always test the accumulator register.
- __ Drop(count);
- __ Move(result_register(), reg);
- codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
- codegen()->DoTest(this);
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
- Label* materialize_false) const {
- DCHECK(materialize_true == materialize_false);
- __ bind(materialize_true);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
- Label* materialize_true,
- Label* materialize_false) const {
- Label done;
- __ bind(materialize_true);
- __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
- __ jmp(&done);
- __ bind(materialize_false);
- __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
- __ bind(&done);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(
- Label* materialize_true,
- Label* materialize_false) const {
- Label done;
- __ bind(materialize_true);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ jmp(&done);
- __ bind(materialize_false);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ bind(&done);
- __ push(ip);
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
- Label* materialize_false) const {
- DCHECK(materialize_true == true_label_);
- DCHECK(materialize_false == false_label_);
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(bool flag) const {
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
- Heap::RootListIndex value_root_index =
- flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
- __ LoadRoot(result_register(), value_root_index);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
- Heap::RootListIndex value_root_index =
- flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
- __ LoadRoot(ip, value_root_index);
- __ push(ip);
-}
-
-
-void FullCodeGenerator::TestContext::Plug(bool flag) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
- false_label_);
- if (flag) {
- if (true_label_ != fall_through_) __ b(true_label_);
- } else {
- if (false_label_ != fall_through_) __ b(false_label_);
- }
-}
-
-
-void FullCodeGenerator::DoTest(Expression* condition,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
- CallIC(ic, condition->test_id());
- __ tst(result_register(), result_register());
- Split(ne, if_true, if_false, fall_through);
-}
-
-
-void FullCodeGenerator::Split(Condition cond,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- if (if_false == fall_through) {
- __ b(cond, if_true);
- } else if (if_true == fall_through) {
- __ b(NegateCondition(cond), if_false);
- } else {
- __ b(cond, if_true);
- __ b(if_false);
- }
-}
-
-
-MemOperand FullCodeGenerator::StackOperand(Variable* var) {
- DCHECK(var->IsStackAllocated());
- // Offset is negative because higher indexes are at lower addresses.
- int offset = -var->index() * kPointerSize;
- // Adjust by a (parameter or local) base offset.
- if (var->IsParameter()) {
- offset += (info_->scope()->num_parameters() + 1) * kPointerSize;
- } else {
- offset += JavaScriptFrameConstants::kLocal0Offset;
- }
- return MemOperand(fp, offset);
-}
-
-
-MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
- DCHECK(var->IsContextSlot() || var->IsStackAllocated());
- if (var->IsContextSlot()) {
- int context_chain_length = scope()->ContextChainLength(var->scope());
- __ LoadContext(scratch, context_chain_length);
- return ContextOperand(scratch, var->index());
- } else {
- return StackOperand(var);
- }
-}
-
-
-void FullCodeGenerator::GetVar(Register dest, Variable* var) {
- // Use destination as scratch.
- MemOperand location = VarOperand(var, dest);
- __ ldr(dest, location);
-}
-
-
-void FullCodeGenerator::SetVar(Variable* var,
- Register src,
- Register scratch0,
- Register scratch1) {
- DCHECK(var->IsContextSlot() || var->IsStackAllocated());
- DCHECK(!scratch0.is(src));
- DCHECK(!scratch0.is(scratch1));
- DCHECK(!scratch1.is(src));
- MemOperand location = VarOperand(var, scratch0);
- __ str(src, location);
-
- // Emit the write barrier code if the location is in the heap.
- if (var->IsContextSlot()) {
- __ RecordWriteContextSlot(scratch0,
- location.offset(),
- src,
- scratch1,
- kLRHasBeenSaved,
- kDontSaveFPRegs);
- }
-}
-
-
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
- bool should_normalize,
- Label* if_true,
- Label* if_false) {
- // Only prepare for bailouts before splits if we're in a test
- // context. Otherwise, we let the Visit function deal with the
- // preparation to avoid preparing with the same AST id twice.
- if (!context()->IsTest() || !info_->IsOptimizable()) return;
-
- Label skip;
- if (should_normalize) __ b(&skip);
- PrepareForBailout(expr, TOS_REG);
- if (should_normalize) {
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r0, ip);
- Split(eq, if_true, if_false, NULL);
- __ bind(&skip);
- }
-}
-
-
-void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
- // The variable in the declaration always resides in the current function
- // context.
- DCHECK_EQ(0, scope()->ContextChainLength(variable->scope()));
- if (generate_debug_code_) {
- // Check that we're not inside a with or catch context.
- __ ldr(r1, FieldMemOperand(cp, HeapObject::kMapOffset));
- __ CompareRoot(r1, Heap::kWithContextMapRootIndex);
- __ Check(ne, kDeclarationInWithContext);
- __ CompareRoot(r1, Heap::kCatchContextMapRootIndex);
- __ Check(ne, kDeclarationInCatchContext);
- }
-}
-
-
-void FullCodeGenerator::VisitVariableDeclaration(
- VariableDeclaration* declaration) {
- // If it was not possible to allocate the variable at compile time, we
- // need to "declare" it at runtime to make sure it actually exists in the
- // local context.
- VariableProxy* proxy = declaration->proxy();
- VariableMode mode = declaration->mode();
- Variable* variable = proxy->var();
- bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
- switch (variable->location()) {
- case VariableLocation::GLOBAL:
- case VariableLocation::UNALLOCATED:
- globals_->Add(variable->name(), zone());
- globals_->Add(variable->binding_needs_init()
- ? isolate()->factory()->the_hole_value()
- : isolate()->factory()->undefined_value(),
- zone());
- break;
-
- case VariableLocation::PARAMETER:
- case VariableLocation::LOCAL:
- if (hole_init) {
- Comment cmnt(masm_, "[ VariableDeclaration");
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ str(ip, StackOperand(variable));
- }
- break;
-
- case VariableLocation::CONTEXT:
- if (hole_init) {
- Comment cmnt(masm_, "[ VariableDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ str(ip, ContextOperand(cp, variable->index()));
- // No write barrier since the_hole_value is in old space.
- PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
- }
- break;
-
- case VariableLocation::LOOKUP: {
- Comment cmnt(masm_, "[ VariableDeclaration");
- __ mov(r2, Operand(variable->name()));
- // Declaration nodes are always introduced in one of four modes.
- DCHECK(IsDeclaredVariableMode(mode));
- PropertyAttributes attr =
- IsImmutableVariableMode(mode) ? READ_ONLY : NONE;
- __ mov(r1, Operand(Smi::FromInt(attr)));
- // Push initial value, if any.
- // Note: For variables we must not push an initial value (such as
- // 'undefined') because we may have a (legal) redeclaration and we
- // must not destroy the current value.
- if (hole_init) {
- __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
- __ Push(cp, r2, r1, r0);
- } else {
- __ mov(r0, Operand(Smi::FromInt(0))); // Indicates no initial value.
- __ Push(cp, r2, r1, r0);
- }
- __ CallRuntime(Runtime::kDeclareLookupSlot, 4);
- break;
- }
- }
-}
-
-
-void FullCodeGenerator::VisitFunctionDeclaration(
- FunctionDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- Variable* variable = proxy->var();
- switch (variable->location()) {
- case VariableLocation::GLOBAL:
- case VariableLocation::UNALLOCATED: {
- globals_->Add(variable->name(), zone());
- Handle<SharedFunctionInfo> function =
- Compiler::GetSharedFunctionInfo(declaration->fun(), script(), info_);
- // Check for stack-overflow exception.
- if (function.is_null()) return SetStackOverflow();
- globals_->Add(function, zone());
- break;
- }
-
- case VariableLocation::PARAMETER:
- case VariableLocation::LOCAL: {
- Comment cmnt(masm_, "[ FunctionDeclaration");
- VisitForAccumulatorValue(declaration->fun());
- __ str(result_register(), StackOperand(variable));
- break;
- }
-
- case VariableLocation::CONTEXT: {
- Comment cmnt(masm_, "[ FunctionDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- VisitForAccumulatorValue(declaration->fun());
- __ str(result_register(), ContextOperand(cp, variable->index()));
- int offset = Context::SlotOffset(variable->index());
- // We know that we have written a function, which is not a smi.
- __ RecordWriteContextSlot(cp,
- offset,
- result_register(),
- r2,
- kLRHasBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
- break;
- }
-
- case VariableLocation::LOOKUP: {
- Comment cmnt(masm_, "[ FunctionDeclaration");
- __ mov(r2, Operand(variable->name()));
- __ mov(r1, Operand(Smi::FromInt(NONE)));
- __ Push(cp, r2, r1);
- // Push initial value for function declaration.
- VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareLookupSlot, 4);
- break;
- }
- }
-}
-
-
-void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- Variable* variable = proxy->var();
- switch (variable->location()) {
- case VariableLocation::GLOBAL:
- case VariableLocation::UNALLOCATED:
- // TODO(rossberg)
- break;
-
- case VariableLocation::CONTEXT: {
- Comment cmnt(masm_, "[ ImportDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- // TODO(rossberg)
- break;
- }
-
- case VariableLocation::PARAMETER:
- case VariableLocation::LOCAL:
- case VariableLocation::LOOKUP:
- UNREACHABLE();
- }
-}
-
-
-void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
- // TODO(rossberg)
-}
-
-
-void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- // Call the runtime to declare the globals.
- // The context is the first argument.
- __ mov(r1, Operand(pairs));
- __ mov(r0, Operand(Smi::FromInt(DeclareGlobalsFlags())));
- __ Push(cp, r1, r0);
- __ CallRuntime(Runtime::kDeclareGlobals, 3);
- // Return value is ignored.
-}
-
-
-void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
- // Call the runtime to declare the modules.
- __ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
- // Return value is ignored.
-}
-
-
-void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
- Comment cmnt(masm_, "[ SwitchStatement");
- Breakable nested_statement(this, stmt);
- SetStatementPosition(stmt);
-
- // Keep the switch value on the stack until a case matches.
- VisitForStackValue(stmt->tag());
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
-
- ZoneList<CaseClause*>* clauses = stmt->cases();
- CaseClause* default_clause = NULL; // Can occur anywhere in the list.
-
- Label next_test; // Recycled for each test.
- // Compile all the tests with branches to their bodies.
- for (int i = 0; i < clauses->length(); i++) {
- CaseClause* clause = clauses->at(i);
- clause->body_target()->Unuse();
-
- // The default is not a test, but remember it as final fall through.
- if (clause->is_default()) {
- default_clause = clause;
- continue;
- }
-
- Comment cmnt(masm_, "[ Case comparison");
- __ bind(&next_test);
- next_test.Unuse();
-
- // Compile the label expression.
- VisitForAccumulatorValue(clause->label());
-
- // Perform the comparison as if via '==='.
- __ ldr(r1, MemOperand(sp, 0)); // Switch value.
- bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
- JumpPatchSite patch_site(masm_);
- if (inline_smi_code) {
- Label slow_case;
- __ orr(r2, r1, r0);
- patch_site.EmitJumpIfNotSmi(r2, &slow_case);
-
- __ cmp(r1, r0);
- __ b(ne, &next_test);
- __ Drop(1); // Switch value is no longer needed.
- __ b(clause->body_target());
- __ bind(&slow_case);
- }
-
- // Record position before stub call for type feedback.
- SetExpressionPosition(clause);
- Handle<Code> ic = CodeFactory::CompareIC(isolate(), Token::EQ_STRICT,
- strength(language_mode())).code();
- CallIC(ic, clause->CompareId());
- patch_site.EmitPatchInfo();
-
- Label skip;
- __ b(&skip);
- PrepareForBailout(clause, TOS_REG);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r0, ip);
- __ b(ne, &next_test);
- __ Drop(1);
- __ jmp(clause->body_target());
- __ bind(&skip);
-
- __ cmp(r0, Operand::Zero());
- __ b(ne, &next_test);
- __ Drop(1); // Switch value is no longer needed.
- __ b(clause->body_target());
- }
-
- // Discard the test value and jump to the default if present, otherwise to
- // the end of the statement.
- __ bind(&next_test);
- __ Drop(1); // Switch value is no longer needed.
- if (default_clause == NULL) {
- __ b(nested_statement.break_label());
- } else {
- __ b(default_clause->body_target());
- }
-
- // Compile all the case bodies.
- for (int i = 0; i < clauses->length(); i++) {
- Comment cmnt(masm_, "[ Case body");
- CaseClause* clause = clauses->at(i);
- __ bind(clause->body_target());
- PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
- VisitStatements(clause->statements());
- }
-
- __ bind(nested_statement.break_label());
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
-}
-
-
-void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
- Comment cmnt(masm_, "[ ForInStatement");
- SetStatementPosition(stmt, SKIP_BREAK);
-
- FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
-
- Label loop, exit;
- ForIn loop_statement(this, stmt);
- increment_loop_depth();
-
- // Get the object to enumerate over. If the object is null or undefined, skip
- // over the loop. See ECMA-262 version 5, section 12.6.4.
- SetExpressionAsStatementPosition(stmt->enumerable());
- VisitForAccumulatorValue(stmt->enumerable());
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, ip);
- __ b(eq, &exit);
- Register null_value = r5;
- __ LoadRoot(null_value, Heap::kNullValueRootIndex);
- __ cmp(r0, null_value);
- __ b(eq, &exit);
-
- PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
-
- // Convert the object to a JS object.
- Label convert, done_convert;
- __ JumpIfSmi(r0, &convert);
- __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
- __ b(ge, &done_convert);
- __ bind(&convert);
- __ push(r0);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ bind(&done_convert);
- PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
- __ push(r0);
-
- // Check for proxies.
- Label call_runtime;
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
- __ b(le, &call_runtime);
-
- // Check cache validity in generated code. This is a fast case for
- // the JSObject::IsSimpleEnum cache validity checks. If we cannot
- // guarantee cache validity, call the runtime system to check cache
- // validity or get the property names in a fixed array.
- __ CheckEnumCache(null_value, &call_runtime);
-
- // The enum cache is valid. Load the map of the object being
- // iterated over and use the cache for the iteration.
- Label use_cache;
- __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ b(&use_cache);
-
- // Get the set of properties to enumerate.
- __ bind(&call_runtime);
- __ push(r0); // Duplicate the enumerable object on the stack.
- __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
- PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
-
- // If we got a map from the runtime call, we can do a fast
- // modification check. Otherwise, we got a fixed array, and we have
- // to do a slow check.
- Label fixed_array;
- __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kMetaMapRootIndex);
- __ cmp(r2, ip);
- __ b(ne, &fixed_array);
-
- // We got a map in register r0. Get the enumeration cache from it.
- Label no_descriptors;
- __ bind(&use_cache);
-
- __ EnumLength(r1, r0);
- __ cmp(r1, Operand(Smi::FromInt(0)));
- __ b(eq, &no_descriptors);
-
- __ LoadInstanceDescriptors(r0, r2);
- __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheOffset));
- __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheBridgeCacheOffset));
-
- // Set up the four remaining stack slots.
- __ push(r0); // Map.
- __ mov(r0, Operand(Smi::FromInt(0)));
- // Push enumeration cache, enumeration cache length (as smi) and zero.
- __ Push(r2, r1, r0);
- __ jmp(&loop);
-
- __ bind(&no_descriptors);
- __ Drop(1);
- __ jmp(&exit);
-
- // We got a fixed array in register r0. Iterate through that.
- Label non_proxy;
- __ bind(&fixed_array);
-
- __ Move(r1, FeedbackVector());
- __ mov(r2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
- int vector_index = FeedbackVector()->GetIndex(slot);
- __ str(r2, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(vector_index)));
-
- __ mov(r1, Operand(Smi::FromInt(1))); // Smi indicates slow check
- __ ldr(r2, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CompareObjectType(r2, r3, r3, LAST_JS_PROXY_TYPE);
- __ b(gt, &non_proxy);
- __ mov(r1, Operand(Smi::FromInt(0))); // Zero indicates proxy
- __ bind(&non_proxy);
- __ Push(r1, r0); // Smi and array
- __ ldr(r1, FieldMemOperand(r0, FixedArray::kLengthOffset));
- __ mov(r0, Operand(Smi::FromInt(0)));
- __ Push(r1, r0); // Fixed array length (as smi) and initial index.
-
- // Generate code for doing the condition check.
- PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
- __ bind(&loop);
- SetExpressionAsStatementPosition(stmt->each());
-
- // Load the current count to r0, load the length to r1.
- __ Ldrd(r0, r1, MemOperand(sp, 0 * kPointerSize));
- __ cmp(r0, r1); // Compare to the array length.
- __ b(hs, loop_statement.break_label());
-
- // Get the current entry of the array into register r3.
- __ ldr(r2, MemOperand(sp, 2 * kPointerSize));
- __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ldr(r3, MemOperand::PointerAddressFromSmiKey(r2, r0));
-
- // Get the expected map from the stack, or a smi in the
- // permanent slow case, into register r2.
- __ ldr(r2, MemOperand(sp, 3 * kPointerSize));
-
- // Check if the expected map still matches that of the enumerable.
- // If not, we may have to filter the key.
- Label update_each;
- __ ldr(r1, MemOperand(sp, 4 * kPointerSize));
- __ ldr(r4, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ cmp(r4, Operand(r2));
- __ b(eq, &update_each);
-
- // For proxies, no filtering is done.
- // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
- __ cmp(r2, Operand(Smi::FromInt(0)));
- __ b(eq, &update_each);
-
- // Convert the entry to a string or (smi) 0 if it isn't a property
- // any more. If the property has been removed while iterating, we
- // just skip it.
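- // For example, in
- //   for (var k in o) { delete o.b; }
- // once 'b' is deleted, ForInFilter yields undefined for it and the loop
- // advances to the next key.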
- __ push(r1); // Enumerable.
- __ push(r3); // Current entry.
- __ CallRuntime(Runtime::kForInFilter, 2);
- PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
- __ mov(r3, Operand(r0));
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, ip);
- __ b(eq, loop_statement.continue_label());
-
- // Update the 'each' property or variable from the possibly filtered
- // entry in register r3.
- __ bind(&update_each);
- __ mov(result_register(), r3);
- // Perform the assignment as if via '='.
- { EffectContext context(this);
- EmitAssignment(stmt->each(), stmt->EachFeedbackSlot());
- PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
- }
-
- // Generate code for the body of the loop.
- Visit(stmt->body());
-
- // Generate code for going to the next element by incrementing
- // the index (smi) stored on top of the stack.
- __ bind(loop_statement.continue_label());
- __ pop(r0);
- __ add(r0, r0, Operand(Smi::FromInt(1)));
- __ push(r0);
-
- EmitBackEdgeBookkeeping(stmt, &loop);
- __ b(&loop);
-
- // Remove the pointers stored on the stack.
- __ bind(loop_statement.break_label());
- __ Drop(5);
-
- // Exit and decrement the loop depth.
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
- __ bind(&exit);
- decrement_loop_depth();
-}
-
-
-void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
- bool pretenure) {
- // Use the fast-case closure allocation code that allocates in new
- // space for nested functions that do not need literal cloning. If
- // we're running with the --always-opt or the --prepare-always-opt
- // flag, we need to use the runtime function so that the new function
- // we are creating here gets a chance to have its code optimized and
- // doesn't just get a copy of the existing unoptimized code.
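- // E.g. a plain nested function such as
- //   function outer() { return function inner() {}; }
- // can take the stub path, while pretenured or literal-carrying closures
- // fall back to Runtime::kNewClosure.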
- if (!FLAG_always_opt &&
- !FLAG_prepare_always_opt &&
- !pretenure &&
- scope()->is_function_scope() &&
- info->num_literals() == 0) {
- FastNewClosureStub stub(isolate(), info->language_mode(), info->kind());
- __ mov(r2, Operand(info));
- __ CallStub(&stub);
- } else {
- __ mov(r0, Operand(info));
- __ LoadRoot(r1, pretenure ? Heap::kTrueValueRootIndex
- : Heap::kFalseValueRootIndex);
- __ Push(cp, r0, r1);
- __ CallRuntime(Runtime::kNewClosure, 3);
- }
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
- Comment cmnt(masm_, "[ VariableProxy");
- EmitVariableLoad(expr);
-}
-
-
-void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
- int offset,
- FeedbackVectorICSlot slot) {
- if (NeedsHomeObject(initializer)) {
- __ ldr(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
- __ mov(StoreDescriptor::NameRegister(),
- Operand(isolate()->factory()->home_object_symbol()));
- __ ldr(StoreDescriptor::ValueRegister(),
- MemOperand(sp, offset * kPointerSize));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
- CallStoreIC();
- }
-}
-
-
-void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
- TypeofState typeof_state,
- Label* slow) {
- Register current = cp;
- Register next = r1;
- Register temp = r2;
-
- Scope* s = scope();
- while (s != NULL) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_sloppy_eval()) {
- // Check that extension is NULL.
- __ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX));
- __ tst(temp, temp);
- __ b(ne, slow);
- }
- // Load next context in chain.
- __ ldr(next, ContextOperand(current, Context::PREVIOUS_INDEX));
- // Walk the rest of the chain without clobbering cp.
- current = next;
- }
- // If no outer scope calls eval, we do not need to check more
- // context extensions.
- if (!s->outer_scope_calls_sloppy_eval() || s->is_eval_scope()) break;
- s = s->outer_scope();
- }
-
- if (s->is_eval_scope()) {
- Label loop, fast;
- if (!current.is(next)) {
- __ Move(next, current);
- }
- __ bind(&loop);
- // Terminate at native context.
- __ ldr(temp, FieldMemOperand(next, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kNativeContextMapRootIndex);
- __ cmp(temp, ip);
- __ b(eq, &fast);
- // Check that extension is NULL.
- __ ldr(temp, ContextOperand(next, Context::EXTENSION_INDEX));
- __ tst(temp, temp);
- __ b(ne, slow);
- // Load next context in chain.
- __ ldr(next, ContextOperand(next, Context::PREVIOUS_INDEX));
- __ b(&loop);
- __ bind(&fast);
- }
-
- // All extension objects were empty and it is safe to use the normal
- // global load machinery.
- EmitGlobalVariableLoad(proxy, typeof_state);
-}
-
-
-MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
- Label* slow) {
- DCHECK(var->IsContextSlot());
- Register context = cp;
- Register next = r3;
- Register temp = r4;
-
- for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_sloppy_eval()) {
- // Check that extension is NULL.
- __ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX));
- __ tst(temp, temp);
- __ b(ne, slow);
- }
- __ ldr(next, ContextOperand(context, Context::PREVIOUS_INDEX));
- // Walk the rest of the chain without clobbering cp.
- context = next;
- }
- }
- // Check that last extension is NULL.
- __ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX));
- __ tst(temp, temp);
- __ b(ne, slow);
-
- // This function is used only for loads, not stores, so it's safe to
- // return a cp-based operand (the write barrier cannot be allowed to
- // destroy the cp register).
- return ContextOperand(context, var->index());
-}
-
-
-void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
- TypeofState typeof_state,
- Label* slow,
- Label* done) {
- // Generate fast-case code for variables that might be shadowed by
- // eval-introduced variables. Eval is used a lot without
- // introducing variables. In those cases, we do not want to
- // perform a runtime call for all variables in the scope
- // containing the eval.
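- // For example, eval('x + 1') introduces no bindings, so surrounding loads
- // of 'x' can stay on the fast path guarded by the extension checks.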
- Variable* var = proxy->var();
- if (var->mode() == DYNAMIC_GLOBAL) {
- EmitLoadGlobalCheckExtensions(proxy, typeof_state, slow);
- __ jmp(done);
- } else if (var->mode() == DYNAMIC_LOCAL) {
- Variable* local = var->local_if_not_shadowed();
- __ ldr(r0, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == LET || local->mode() == CONST ||
- local->mode() == CONST_LEGACY) {
- __ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
- if (local->mode() == CONST_LEGACY) {
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
- } else { // LET || CONST
- __ b(ne, done);
- __ mov(r0, Operand(var->name()));
- __ push(r0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- }
- }
- __ jmp(done);
- }
-}
-
-
-void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
- TypeofState typeof_state) {
- Variable* var = proxy->var();
- DCHECK(var->IsUnallocatedOrGlobalSlot() ||
- (var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
- __ ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
- __ mov(LoadDescriptor::NameRegister(), Operand(var->name()));
- __ mov(LoadDescriptor::SlotRegister(),
- Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
- // Inside typeof use a regular load, not a contextual load, to avoid
- // a reference error.
- CallLoadIC(typeof_state == NOT_INSIDE_TYPEOF ? CONTEXTUAL : NOT_CONTEXTUAL);
-}
-
-
-void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
- TypeofState typeof_state) {
- // Record position before possible IC call.
- SetExpressionPosition(proxy);
- PrepareForBailoutForId(proxy->BeforeId(), NO_REGISTERS);
- Variable* var = proxy->var();
-
- // Three cases: global variables, lookup variables, and all other types of
- // variables.
- switch (var->location()) {
- case VariableLocation::GLOBAL:
- case VariableLocation::UNALLOCATED: {
- Comment cmnt(masm_, "[ Global variable");
- EmitGlobalVariableLoad(proxy, typeof_state);
- context()->Plug(r0);
- break;
- }
-
- case VariableLocation::PARAMETER:
- case VariableLocation::LOCAL:
- case VariableLocation::CONTEXT: {
- DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_state);
- Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
- : "[ Stack variable");
- if (var->binding_needs_init()) {
- // var->scope() may be NULL when the proxy is located in eval code and
- // refers to a potential outside binding. Currently those bindings are
- // always looked up dynamically, i.e. in that case
- // var->location() == LOOKUP always holds.
- DCHECK(var->scope() != NULL);
-
- // Check if the binding really needs an initialization check. The check
- // can be skipped in the following situation: we have a LET or CONST
- // binding in harmony mode, both the Variable and the VariableProxy have
- // the same declaration scope (i.e. they are both in global code, in the
- // same function or in the same eval code) and the VariableProxy is in
- // the source physically located after the initializer of the variable.
- //
- // We cannot skip any initialization checks for CONST in non-harmony
- // mode because const variables may be declared but never initialized:
- // if (false) { const x; }; var y = x;
- //
- // The condition on the declaration scopes is a conservative check for
- // nested functions that access a binding and are called before the
- // binding is initialized:
- // function() { f(); let x = 1; function f() { x = 2; } }
- //
- bool skip_init_check;
- if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
- skip_init_check = false;
- } else if (var->is_this()) {
- CHECK(info_->function() != nullptr &&
- (info_->function()->kind() & kSubclassConstructor) != 0);
- // TODO(dslomov): implement 'this' hole check elimination.
- skip_init_check = false;
- } else {
- // Check that we always have valid source position.
- DCHECK(var->initializer_position() != RelocInfo::kNoPosition);
- DCHECK(proxy->position() != RelocInfo::kNoPosition);
- skip_init_check = var->mode() != CONST_LEGACY &&
- var->initializer_position() < proxy->position();
- }
-
- if (!skip_init_check) {
- // Let and const need a read barrier.
- GetVar(r0, var);
- __ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
- if (var->mode() == LET || var->mode() == CONST) {
- // Throw a reference error when using an uninitialized let/const
- // binding in harmony mode.
- Label done;
- __ b(ne, &done);
- __ mov(r0, Operand(var->name()));
- __ push(r0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- __ bind(&done);
- } else {
- // Uninitialized const bindings outside of harmony mode are unholed.
- DCHECK(var->mode() == CONST_LEGACY);
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
- }
- context()->Plug(r0);
- break;
- }
- }
- context()->Plug(var);
- break;
- }
-
- case VariableLocation::LOOKUP: {
- Comment cmnt(masm_, "[ Lookup variable");
- Label done, slow;
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy, typeof_state, &slow, &done);
- __ bind(&slow);
- __ mov(r1, Operand(var->name()));
- __ Push(cp, r1); // Context and name.
- Runtime::FunctionId function_id =
- typeof_state == NOT_INSIDE_TYPEOF
- ? Runtime::kLoadLookupSlot
- : Runtime::kLoadLookupSlotNoReferenceError;
- __ CallRuntime(function_id, 2);
- __ bind(&done);
- context()->Plug(r0);
- }
- }
-}
-
-
-void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
- Comment cmnt(masm_, "[ RegExpLiteral");
- Label materialized;
- // Registers will be used as follows:
- // r5 = materialized value (RegExp literal)
- // r4 = JS function, literals array
- // r3 = literal index
- // r2 = RegExp pattern
- // r1 = RegExp flags
- // r0 = RegExp literal clone
- __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r4, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
- int literal_offset =
- FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
- __ ldr(r5, FieldMemOperand(r4, literal_offset));
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r5, ip);
- __ b(ne, &materialized);
-
- // Create regexp literal using runtime function.
- // Result will be in r0.
- __ mov(r3, Operand(Smi::FromInt(expr->literal_index())));
- __ mov(r2, Operand(expr->pattern()));
- __ mov(r1, Operand(expr->flags()));
- __ Push(r4, r3, r2, r1);
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- __ mov(r5, r0);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
- __ Allocate(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ mov(r0, Operand(Smi::FromInt(size)));
- __ Push(r5, r0);
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ pop(r5);
-
- __ bind(&allocated);
- // After this, registers are used as follows:
- // r0: Newly allocated regexp.
- // r5: Materialized regexp.
- // r2: temp.
- __ CopyFields(r0, r5, d0, size / kPointerSize);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitAccessor(Expression* expression) {
- if (expression == NULL) {
- __ LoadRoot(r1, Heap::kNullValueRootIndex);
- __ push(r1);
- } else {
- VisitForStackValue(expression);
- }
-}
-
-
-void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
- Comment cmnt(masm_, "[ ObjectLiteral");
-
- Handle<FixedArray> constant_properties = expr->constant_properties();
- __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
- __ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
- __ mov(r1, Operand(constant_properties));
- int flags = expr->ComputeFlags();
- __ mov(r0, Operand(Smi::FromInt(flags)));
- if (MustCreateObjectLiteralWithRuntime(expr)) {
- __ Push(r3, r2, r1, r0);
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else {
- FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
- __ CallStub(&stub);
- }
- PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
-
- // If result_saved is true the result is on top of the stack. If
- // result_saved is false the result is in r0.
- bool result_saved = false;
-
- AccessorTable accessor_table(zone());
- int property_index = 0;
- // store_slot_index points to the vector IC slot for the next store IC used.
- // ObjectLiteral::ComputeFeedbackRequirements controls the allocation of slots
- // and must be updated if the number of store ICs emitted here changes.
- int store_slot_index = 0;
- for (; property_index < expr->properties()->length(); property_index++) {
- ObjectLiteral::Property* property = expr->properties()->at(property_index);
- if (property->is_computed_name()) break;
- if (property->IsCompileTimeValue()) continue;
-
- Literal* key = property->key()->AsLiteral();
- Expression* value = property->value();
- if (!result_saved) {
- __ push(r0); // Save result on stack
- result_saved = true;
- }
- switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- UNREACHABLE();
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- DCHECK(!CompileTimeValue::IsCompileTimeValue(property->value()));
- // Fall through.
- case ObjectLiteral::Property::COMPUTED:
- // It is safe to use [[Put]] here because the boilerplate already
- // contains computed properties with an uninitialized value.
- if (key->value()->IsInternalizedString()) {
- if (property->emit_store()) {
- VisitForAccumulatorValue(value);
- DCHECK(StoreDescriptor::ValueRegister().is(r0));
- __ mov(StoreDescriptor::NameRegister(), Operand(key->value()));
- __ ldr(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->GetNthSlot(store_slot_index++));
- CallStoreIC();
- } else {
- CallStoreIC(key->LiteralFeedbackId());
- }
- PrepareForBailoutForId(key->id(), NO_REGISTERS);
-
- if (NeedsHomeObject(value)) {
- __ Move(StoreDescriptor::ReceiverRegister(), r0);
- __ mov(StoreDescriptor::NameRegister(),
- Operand(isolate()->factory()->home_object_symbol()));
- __ ldr(StoreDescriptor::ValueRegister(), MemOperand(sp));
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->GetNthSlot(store_slot_index++));
- }
- CallStoreIC();
- }
- } else {
- VisitForEffect(value);
- }
- break;
- }
- // Duplicate receiver on stack.
- __ ldr(r0, MemOperand(sp));
- __ push(r0);
- VisitForStackValue(key);
- VisitForStackValue(value);
- if (property->emit_store()) {
- EmitSetHomeObjectIfNeeded(
- value, 2, expr->SlotForHomeObject(value, &store_slot_index));
- __ mov(r0, Operand(Smi::FromInt(SLOPPY))); // PropertyAttributes
- __ push(r0);
- __ CallRuntime(Runtime::kSetProperty, 4);
- } else {
- __ Drop(3);
- }
- break;
- case ObjectLiteral::Property::PROTOTYPE:
- // Duplicate receiver on stack.
- __ ldr(r0, MemOperand(sp));
- __ push(r0);
- VisitForStackValue(value);
- DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype, 2);
- break;
-
- case ObjectLiteral::Property::GETTER:
- if (property->emit_store()) {
- accessor_table.lookup(key)->second->getter = value;
- }
- break;
- case ObjectLiteral::Property::SETTER:
- if (property->emit_store()) {
- accessor_table.lookup(key)->second->setter = value;
- }
- break;
- }
- }
-
- // Emit code to define accessors, using only a single call to the runtime for
- // each pair of corresponding getters and setters.
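- // E.g. { get x() {}, set x(v) {} } results in a single
- // kDefineAccessorPropertyUnchecked call covering both functions.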
- for (AccessorTable::Iterator it = accessor_table.begin();
- it != accessor_table.end();
- ++it) {
- __ ldr(r0, MemOperand(sp)); // Duplicate receiver.
- __ push(r0);
- VisitForStackValue(it->first);
- EmitAccessor(it->second->getter);
- EmitSetHomeObjectIfNeeded(
- it->second->getter, 2,
- expr->SlotForHomeObject(it->second->getter, &store_slot_index));
- EmitAccessor(it->second->setter);
- EmitSetHomeObjectIfNeeded(
- it->second->setter, 3,
- expr->SlotForHomeObject(it->second->setter, &store_slot_index));
- __ mov(r0, Operand(Smi::FromInt(NONE)));
- __ push(r0);
- __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
- }
-
- // Object literals have two parts. The "static" part on the left contains no
- // computed property names, and so we can compute its map ahead of time; see
- // runtime.cc::CreateObjectLiteralBoilerplate. The second "dynamic" part
- // starts with the first computed property name, and continues with all
- // properties to its right. All the code from above initializes the static
- // component of the object literal, and arranges for the map of the result to
- // reflect the static order in which the keys appear. For the dynamic
- // properties, we compile them into a series of "SetOwnProperty" runtime
- // calls. This will preserve insertion order.
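- // For example, in
- //   var o = {a: 1, [f()]: 2, b: 3};
- // 'a' belongs to the static part baked into the boilerplate map, while
- // [f()] and 'b' form the dynamic part defined property by property.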
- for (; property_index < expr->properties()->length(); property_index++) {
- ObjectLiteral::Property* property = expr->properties()->at(property_index);
-
- Expression* value = property->value();
- if (!result_saved) {
- __ push(r0); // Save result on the stack
- result_saved = true;
- }
-
- __ ldr(r0, MemOperand(sp)); // Duplicate receiver.
- __ push(r0);
-
- if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
- DCHECK(!property->is_computed_name());
- VisitForStackValue(value);
- DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype, 2);
- } else {
- EmitPropertyKey(property, expr->GetIdForProperty(property_index));
- VisitForStackValue(value);
- EmitSetHomeObjectIfNeeded(
- value, 2, expr->SlotForHomeObject(value, &store_slot_index));
-
- switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- case ObjectLiteral::Property::COMPUTED:
- if (property->emit_store()) {
- __ mov(r0, Operand(Smi::FromInt(NONE)));
- __ push(r0);
- __ CallRuntime(Runtime::kDefineDataPropertyUnchecked, 4);
- } else {
- __ Drop(3);
- }
- break;
-
- case ObjectLiteral::Property::PROTOTYPE:
- UNREACHABLE();
- break;
-
- case ObjectLiteral::Property::GETTER:
- __ mov(r0, Operand(Smi::FromInt(NONE)));
- __ push(r0);
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
- break;
-
- case ObjectLiteral::Property::SETTER:
- __ mov(r0, Operand(Smi::FromInt(NONE)));
- __ push(r0);
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
- break;
- }
- }
- }
-
- if (expr->has_function()) {
- DCHECK(result_saved);
- __ ldr(r0, MemOperand(sp));
- __ push(r0);
- __ CallRuntime(Runtime::kToFastProperties, 1);
- }
-
- if (result_saved) {
- context()->PlugTOS();
- } else {
- context()->Plug(r0);
- }
-
- // Verify that compilation exactly consumed the number of store ic slots that
- // the ObjectLiteral node had to offer.
- DCHECK(!FLAG_vector_stores || store_slot_index == expr->slot_count());
-}
-
-
-void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
- Comment cmnt(masm_, "[ ArrayLiteral");
-
- expr->BuildConstantElements(isolate());
-
- Handle<FixedArray> constant_elements = expr->constant_elements();
- bool has_fast_elements =
- IsFastObjectElementsKind(expr->constant_elements_kind());
- Handle<FixedArrayBase> constant_elements_values(
- FixedArrayBase::cast(constant_elements->get(1)));
-
- AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
- if (has_fast_elements && !FLAG_allocation_site_pretenuring) {
- // If the only customer of allocation sites is transitioning, then
- // we can turn it off if we don't have anywhere else to transition to.
- allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
- }
-
- __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
- __ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
- __ mov(r1, Operand(constant_elements));
- if (MustCreateArrayLiteralWithRuntime(expr)) {
- __ mov(r0, Operand(Smi::FromInt(expr->ComputeFlags())));
- __ Push(r3, r2, r1, r0);
- __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
- } else {
- FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
- __ CallStub(&stub);
- }
- PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
-
- bool result_saved = false; // Is the result saved to the stack?
- ZoneList<Expression*>* subexprs = expr->values();
- int length = subexprs->length();
-
- // Emit code to evaluate all the non-constant subexpressions and to store
- // them into the newly cloned array.
- int array_index = 0;
- for (; array_index < length; array_index++) {
- Expression* subexpr = subexprs->at(array_index);
- if (subexpr->IsSpread()) break;
-
- // If the subexpression is a literal or a simple materialized literal it
- // is already set in the cloned array.
- if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
-
- if (!result_saved) {
- __ push(r0);
- __ Push(Smi::FromInt(expr->literal_index()));
- result_saved = true;
- }
- VisitForAccumulatorValue(subexpr);
-
- if (has_fast_elements) {
- int offset = FixedArray::kHeaderSize + (array_index * kPointerSize);
- __ ldr(r6, MemOperand(sp, kPointerSize)); // Copy of array literal.
- __ ldr(r1, FieldMemOperand(r6, JSObject::kElementsOffset));
- __ str(result_register(), FieldMemOperand(r1, offset));
- // Update the write barrier for the array store.
- __ RecordWriteField(r1, offset, result_register(), r2,
- kLRHasBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
- } else {
- __ mov(r3, Operand(Smi::FromInt(array_index)));
- StoreArrayLiteralElementStub stub(isolate());
- __ CallStub(&stub);
- }
-
- PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
- }
-
- // In case the array literal contains spread expressions it has two parts.
- // The first part is the "static" array, which has a literal index and is
- // handled above. The second part starts at the first spread expression
- // (inclusive), and its elements get appended to the array. Note that the
- // number of elements an iterable produces is unknown ahead of time.
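- // For example, [1, 2, ...iter, 3] clones [1, 2] from the boilerplate and
- // then appends each element produced by 'iter', followed by 3.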
- if (array_index < length && result_saved) {
- __ pop(); // literal index
- __ Pop(r0);
- result_saved = false;
- }
- for (; array_index < length; array_index++) {
- Expression* subexpr = subexprs->at(array_index);
-
- __ Push(r0);
- if (subexpr->IsSpread()) {
- VisitForStackValue(subexpr->AsSpread()->expression());
- __ InvokeBuiltin(Builtins::CONCAT_ITERABLE_TO_ARRAY, CALL_FUNCTION);
- } else {
- VisitForStackValue(subexpr);
- __ CallRuntime(Runtime::kAppendElement, 2);
- }
-
- PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
- }
-
- if (result_saved) {
- __ pop(); // literal index
- context()->PlugTOS();
- } else {
- context()->Plug(r0);
- }
-}
-
-
-void FullCodeGenerator::VisitAssignment(Assignment* expr) {
- DCHECK(expr->target()->IsValidReferenceExpression());
-
- Comment cmnt(masm_, "[ Assignment");
- SetExpressionPosition(expr, INSERT_BREAK);
-
- Property* property = expr->target()->AsProperty();
- LhsKind assign_type = Property::GetAssignType(property);
-
- // Evaluate LHS expression.
- switch (assign_type) {
- case VARIABLE:
- // Nothing to do here.
- break;
- case NAMED_PROPERTY:
- if (expr->is_compound()) {
- // We need the receiver both on the stack and in the register.
- VisitForStackValue(property->obj());
- __ ldr(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
- } else {
- VisitForStackValue(property->obj());
- }
- break;
- case NAMED_SUPER_PROPERTY:
- VisitForStackValue(
- property->obj()->AsSuperPropertyReference()->this_var());
- VisitForAccumulatorValue(
- property->obj()->AsSuperPropertyReference()->home_object());
- __ Push(result_register());
- if (expr->is_compound()) {
- const Register scratch = r1;
- __ ldr(scratch, MemOperand(sp, kPointerSize));
- __ Push(scratch);
- __ Push(result_register());
- }
- break;
- case KEYED_SUPER_PROPERTY:
- VisitForStackValue(
- property->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- property->obj()->AsSuperPropertyReference()->home_object());
- VisitForAccumulatorValue(property->key());
- __ Push(result_register());
- if (expr->is_compound()) {
- const Register scratch = r1;
- __ ldr(scratch, MemOperand(sp, 2 * kPointerSize));
- __ Push(scratch);
- __ ldr(scratch, MemOperand(sp, 2 * kPointerSize));
- __ Push(scratch);
- __ Push(result_register());
- }
- break;
- case KEYED_PROPERTY:
- if (expr->is_compound()) {
- VisitForStackValue(property->obj());
- VisitForStackValue(property->key());
- __ ldr(LoadDescriptor::ReceiverRegister(),
- MemOperand(sp, 1 * kPointerSize));
- __ ldr(LoadDescriptor::NameRegister(), MemOperand(sp, 0));
- } else {
- VisitForStackValue(property->obj());
- VisitForStackValue(property->key());
- }
- break;
- }
-
- // For compound assignments we need another deoptimization point after the
- // variable/property load.
- if (expr->is_compound()) {
- { AccumulatorValueContext context(this);
- switch (assign_type) {
- case VARIABLE:
- EmitVariableLoad(expr->target()->AsVariableProxy());
- PrepareForBailout(expr->target(), TOS_REG);
- break;
- case NAMED_PROPERTY:
- EmitNamedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
- break;
- case NAMED_SUPER_PROPERTY:
- EmitNamedSuperPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
- break;
- case KEYED_SUPER_PROPERTY:
- EmitKeyedSuperPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
- break;
- case KEYED_PROPERTY:
- EmitKeyedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
- break;
- }
- }
-
- Token::Value op = expr->binary_op();
- __ push(r0); // Left operand goes on the stack.
- VisitForAccumulatorValue(expr->value());
-
- AccumulatorValueContext context(this);
- if (ShouldInlineSmiCase(op)) {
- EmitInlineSmiBinaryOp(expr->binary_operation(),
- op,
- expr->target(),
- expr->value());
- } else {
- EmitBinaryOp(expr->binary_operation(), op);
- }
-
- // Deoptimization point in case the binary operation may have side effects.
- PrepareForBailout(expr->binary_operation(), TOS_REG);
- } else {
- VisitForAccumulatorValue(expr->value());
- }
-
- SetExpressionPosition(expr);
-
- // Store the value.
- switch (assign_type) {
- case VARIABLE:
- EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
- expr->op(), expr->AssignmentSlot());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(r0);
- break;
- case NAMED_PROPERTY:
- EmitNamedPropertyAssignment(expr);
- break;
- case NAMED_SUPER_PROPERTY:
- EmitNamedSuperPropertyStore(property);
- context()->Plug(r0);
- break;
- case KEYED_SUPER_PROPERTY:
- EmitKeyedSuperPropertyStore(property);
- context()->Plug(r0);
- break;
- case KEYED_PROPERTY:
- EmitKeyedPropertyAssignment(expr);
- break;
- }
-}
-
-
-void FullCodeGenerator::VisitYield(Yield* expr) {
- Comment cmnt(masm_, "[ Yield");
- SetExpressionPosition(expr);
-
- // Evaluate yielded value first; the initial iterator definition depends on
- // this. It stays on the stack while we update the iterator.
- VisitForStackValue(expr->expression());
-
- switch (expr->yield_kind()) {
- case Yield::kSuspend:
- // Pop value from top-of-stack slot; box result into result register.
- EmitCreateIteratorResult(false);
- __ push(result_register());
- // Fall through.
- case Yield::kInitial: {
- Label suspend, continuation, post_runtime, resume;
-
- __ jmp(&suspend);
-
- __ bind(&continuation);
- __ jmp(&resume);
-
- __ bind(&suspend);
- VisitForAccumulatorValue(expr->generator_object());
- DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
- __ mov(r1, Operand(Smi::FromInt(continuation.pos())));
- __ str(r1, FieldMemOperand(r0, JSGeneratorObject::kContinuationOffset));
- __ str(cp, FieldMemOperand(r0, JSGeneratorObject::kContextOffset));
- __ mov(r1, cp);
- __ RecordWriteField(r0, JSGeneratorObject::kContextOffset, r1, r2,
- kLRHasBeenSaved, kDontSaveFPRegs);
- __ add(r1, fp, Operand(StandardFrameConstants::kExpressionsOffset));
- __ cmp(sp, r1);
- __ b(eq, &post_runtime);
- __ push(r0); // generator object
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ bind(&post_runtime);
- __ pop(result_register());
- EmitReturnSequence();
-
- __ bind(&resume);
- context()->Plug(result_register());
- break;
- }
-
- case Yield::kFinal: {
- VisitForAccumulatorValue(expr->generator_object());
- __ mov(r1, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
- __ str(r1, FieldMemOperand(result_register(),
- JSGeneratorObject::kContinuationOffset));
- // Pop value from top-of-stack slot, box result into result register.
- EmitCreateIteratorResult(true);
- EmitUnwindBeforeReturn();
- EmitReturnSequence();
- break;
- }
-
- case Yield::kDelegating: {
- VisitForStackValue(expr->generator_object());
-
- // Initial stack layout is as follows:
- // [sp + 1 * kPointerSize] iter
- // [sp + 0 * kPointerSize] g
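- // A rough sketch (ours, not the exact semantics) of what the code below
- // implements for 'yield* iter':
- //   var received = undefined;
- //   while (true) {
- //     var result = iter.next(received);  // iter.throw(e) after a throw
- //     if (result.done) break;
- //     received = yield result;           // re-yielded without re-boxing
- //   }
- //   result.value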
-
- Label l_catch, l_try, l_suspend, l_continuation, l_resume;
- Label l_next, l_call, l_loop;
- Register load_receiver = LoadDescriptor::ReceiverRegister();
- Register load_name = LoadDescriptor::NameRegister();
-
- // Initial send value is undefined.
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- __ b(&l_next);
-
- // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
- __ bind(&l_catch);
- __ LoadRoot(load_name, Heap::kthrow_stringRootIndex); // "throw"
- __ ldr(r3, MemOperand(sp, 1 * kPointerSize)); // iter
- __ Push(load_name, r3, r0); // "throw", iter, except
- __ jmp(&l_call);
-
- // try { received = %yield result }
- // Shuffle the received result above a try handler and yield it without
- // re-boxing.
- __ bind(&l_try);
- __ pop(r0); // result
- int handler_index = NewHandlerTableEntry();
- EnterTryBlock(handler_index, &l_catch);
- const int try_block_size = TryCatch::kElementCount * kPointerSize;
- __ push(r0); // result
- __ jmp(&l_suspend);
- __ bind(&l_continuation);
- __ jmp(&l_resume);
- __ bind(&l_suspend);
- const int generator_object_depth = kPointerSize + try_block_size;
- __ ldr(r0, MemOperand(sp, generator_object_depth));
- __ push(r0); // g
- __ Push(Smi::FromInt(handler_index)); // handler-index
- DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
- __ mov(r1, Operand(Smi::FromInt(l_continuation.pos())));
- __ str(r1, FieldMemOperand(r0, JSGeneratorObject::kContinuationOffset));
- __ str(cp, FieldMemOperand(r0, JSGeneratorObject::kContextOffset));
- __ mov(r1, cp);
- __ RecordWriteField(r0, JSGeneratorObject::kContextOffset, r1, r2,
- kLRHasBeenSaved, kDontSaveFPRegs);
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 2);
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ pop(r0); // result
- EmitReturnSequence();
- __ bind(&l_resume); // received in r0
- ExitTryBlock(handler_index);
-
- // receiver = iter; f = 'next'; arg = received;
- __ bind(&l_next);
-
- __ LoadRoot(load_name, Heap::knext_stringRootIndex); // "next"
- __ ldr(r3, MemOperand(sp, 1 * kPointerSize)); // iter
- __ Push(load_name, r3, r0); // "next", iter, received
-
- // result = receiver[f](arg);
- __ bind(&l_call);
- __ ldr(load_receiver, MemOperand(sp, kPointerSize));
- __ ldr(load_name, MemOperand(sp, 2 * kPointerSize));
- __ mov(LoadDescriptor::SlotRegister(),
- Operand(SmiFromSlot(expr->KeyedLoadFeedbackSlot())));
- Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), SLOPPY).code();
- CallIC(ic, TypeFeedbackId::None());
- __ mov(r1, r0);
- __ str(r1, MemOperand(sp, 2 * kPointerSize));
- CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
- __ CallStub(&stub);
-
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ Drop(1); // The function is still on the stack; drop it.
-
- // if (!result.done) goto l_try;
- __ bind(&l_loop);
- __ Move(load_receiver, r0);
-
- __ push(load_receiver); // save result
- __ LoadRoot(load_name, Heap::kdone_stringRootIndex); // "done"
- __ mov(LoadDescriptor::SlotRegister(),
- Operand(SmiFromSlot(expr->DoneFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL); // r0=result.done
- Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
- CallIC(bool_ic);
- __ cmp(r0, Operand(0));
- __ b(eq, &l_try);
-
- // result.value
- __ pop(load_receiver); // result
- __ LoadRoot(load_name, Heap::kvalue_stringRootIndex); // "value"
- __ mov(LoadDescriptor::SlotRegister(),
- Operand(SmiFromSlot(expr->ValueFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL); // r0=result.value
- context()->DropAndPlug(2, r0); // drop iter and g
- break;
- }
- }
-}
-
-
-void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
- Expression *value,
- JSGeneratorObject::ResumeMode resume_mode) {
- // The value stays in r0, and is ultimately read by the resumed generator, as
- // if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
- // is read to throw the value when the resumed generator is already closed.
- // r1 will hold the generator object until the activation has been resumed.
- VisitForStackValue(generator);
- VisitForAccumulatorValue(value);
- __ pop(r1);
-
- // Load suspended function and context.
- __ ldr(cp, FieldMemOperand(r1, JSGeneratorObject::kContextOffset));
- __ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
-
- // Load receiver and store as the first argument.
- __ ldr(r2, FieldMemOperand(r1, JSGeneratorObject::kReceiverOffset));
- __ push(r2);
-
- // Push holes for the rest of the arguments to the generator function.
- __ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r3,
- FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
- __ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
- Label push_argument_holes, push_frame;
- __ bind(&push_argument_holes);
- __ sub(r3, r3, Operand(Smi::FromInt(1)), SetCC);
- __ b(mi, &push_frame);
- __ push(r2);
- __ jmp(&push_argument_holes);
-
- // Enter a new JavaScript frame, and initialize its slots as they were when
- // the generator was suspended.
- Label resume_frame, done;
- __ bind(&push_frame);
- __ bl(&resume_frame);
- __ jmp(&done);
- __ bind(&resume_frame);
- // lr = return address.
- // fp = caller's frame pointer.
- // pp = caller's constant pool (if FLAG_enable_embedded_constant_pool),
- // cp = callee's context,
- // r4 = callee's JS function.
- __ PushFixedFrame(r4);
- // Adjust FP to point to saved FP.
- __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
-
- // Load the operand stack size.
- __ ldr(r3, FieldMemOperand(r1, JSGeneratorObject::kOperandStackOffset));
- __ ldr(r3, FieldMemOperand(r3, FixedArray::kLengthOffset));
- __ SmiUntag(r3);
-
- // If we are sending a value and there is no operand stack, we can jump back
- // in directly.
- if (resume_mode == JSGeneratorObject::NEXT) {
- Label slow_resume;
- __ cmp(r3, Operand(0));
- __ b(ne, &slow_resume);
- __ ldr(r3, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
-
- { ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
- if (FLAG_enable_embedded_constant_pool) {
- // Load the new code object's constant pool pointer.
- __ LoadConstantPoolPointerRegisterFromCodeTargetAddress(r3);
- }
-
- __ ldr(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
- __ SmiUntag(r2);
- __ add(r3, r3, r2);
- __ mov(r2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
- __ str(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
- __ Jump(r3);
- }
- __ bind(&slow_resume);
- }
-
- // Otherwise, we push holes for the operand stack and call the runtime to fix
- // up the stack and the handlers.
- Label push_operand_holes, call_resume;
- __ bind(&push_operand_holes);
- __ sub(r3, r3, Operand(1), SetCC);
- __ b(mi, &call_resume);
- __ push(r2);
- __ b(&push_operand_holes);
- __ bind(&call_resume);
- DCHECK(!result_register().is(r1));
- __ Push(r1, result_register());
- __ Push(Smi::FromInt(resume_mode));
- __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
- // Not reached: the runtime call returns elsewhere.
- __ stop("not-reached");
-
- __ bind(&done);
- context()->Plug(result_register());
-}
-
-
-void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
- Label gc_required;
- Label allocated;
-
- const int instance_size = 5 * kPointerSize;
- DCHECK_EQ(isolate()->native_context()->iterator_result_map()->instance_size(),
- instance_size);
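- // The object allocated here is what generator clients observe as the
- // iterator result, i.e. { value: ..., done: true/false }.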
-
- __ Allocate(instance_size, r0, r2, r3, &gc_required, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&gc_required);
- __ Push(Smi::FromInt(instance_size));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ ldr(context_register(),
- MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- __ bind(&allocated);
- __ ldr(r1, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ ldr(r1, FieldMemOperand(r1, GlobalObject::kNativeContextOffset));
- __ ldr(r1, ContextOperand(r1, Context::ITERATOR_RESULT_MAP_INDEX));
- __ pop(r2);
- __ mov(r3, Operand(isolate()->factory()->ToBoolean(done)));
- __ mov(r4, Operand(isolate()->factory()->empty_fixed_array()));
- __ str(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
- __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
- __ str(r2,
- FieldMemOperand(r0, JSGeneratorObject::kResultValuePropertyOffset));
- __ str(r3,
- FieldMemOperand(r0, JSGeneratorObject::kResultDonePropertyOffset));
-
- // Only the value field needs a write barrier, as the other values are in the
- // root set.
- __ RecordWriteField(r0, JSGeneratorObject::kResultValuePropertyOffset,
- r2, r3, kLRHasBeenSaved, kDontSaveFPRegs);
-}
-
-
-void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
- SetExpressionPosition(prop);
- Literal* key = prop->key()->AsLiteral();
- DCHECK(!prop->IsSuperAccess());
-
- __ mov(LoadDescriptor::NameRegister(), Operand(key->value()));
- __ mov(LoadDescriptor::SlotRegister(),
- Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL, language_mode());
-}
-
-
-void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
- // Stack: receiver, home_object.
- SetExpressionPosition(prop);
- Literal* key = prop->key()->AsLiteral();
- DCHECK(!key->value()->IsSmi());
- DCHECK(prop->IsSuperAccess());
-
- __ Push(key->value());
- __ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kLoadFromSuper, 4);
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
- SetExpressionPosition(prop);
- Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), language_mode()).code();
- __ mov(LoadDescriptor::SlotRegister(),
- Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
- CallIC(ic);
-}
-
-
-void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
- // Stack: receiver, home_object, key.
- SetExpressionPosition(prop);
- __ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
-}
-
-
-void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
- Token::Value op,
- Expression* left_expr,
- Expression* right_expr) {
- Label done, smi_case, stub_call;
-
- Register scratch1 = r2;
- Register scratch2 = r3;
-
- // Get the arguments.
- Register left = r1;
- Register right = r0;
- __ pop(left);
-
- // Perform combined smi check on both operands.
- __ orr(scratch1, left, Operand(right));
- STATIC_ASSERT(kSmiTag == 0);
- JumpPatchSite patch_site(masm_);
- patch_site.EmitJumpIfSmi(scratch1, &smi_case);
-
- __ bind(&stub_call);
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
- CallIC(code, expr->BinaryOperationFeedbackId());
- patch_site.EmitPatchInfo();
- __ jmp(&done);
-
- __ bind(&smi_case);
- // Smi case. This code works the same way as the smi-smi case in the
- // type-recording binary operation stub.
- switch (op) {
- case Token::SAR:
- __ GetLeastBitsFromSmi(scratch1, right, 5);
- __ mov(right, Operand(left, ASR, scratch1));
- __ bic(right, right, Operand(kSmiTagMask));
- break;
- case Token::SHL: {
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ mov(scratch1, Operand(scratch1, LSL, scratch2));
- __ TrySmiTag(right, scratch1, &stub_call);
- break;
- }
- case Token::SHR: {
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ mov(scratch1, Operand(scratch1, LSR, scratch2));
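- // The unsigned shift result must be a non-negative value that fits in a
- // 31-bit smi; e.g. -1 >>> 0 is 4294967295 and must go to the stub.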
- __ tst(scratch1, Operand(0xc0000000));
- __ b(ne, &stub_call);
- __ SmiTag(right, scratch1);
- break;
- }
- case Token::ADD:
- __ add(scratch1, left, Operand(right), SetCC);
- __ b(vs, &stub_call);
- __ mov(right, scratch1);
- break;
- case Token::SUB:
- __ sub(scratch1, left, Operand(right), SetCC);
- __ b(vs, &stub_call);
- __ mov(right, scratch1);
- break;
- case Token::MUL: {
- __ SmiUntag(ip, right);
- __ smull(scratch1, scratch2, left, ip);
- __ mov(ip, Operand(scratch1, ASR, 31));
- __ cmp(ip, Operand(scratch2));
- __ b(ne, &stub_call);
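- // A zero product may need to be -0, which a smi cannot represent (e.g.
- // -3 * 0); the sign of left + right distinguishes -0 from +0, sending
- // the -0 case to the stub.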
- __ cmp(scratch1, Operand::Zero());
- __ mov(right, Operand(scratch1), LeaveCC, ne);
- __ b(ne, &done);
- __ add(scratch2, right, Operand(left), SetCC);
- __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl);
- __ b(mi, &stub_call);
- break;
- }
- case Token::BIT_OR:
- __ orr(right, left, Operand(right));
- break;
- case Token::BIT_AND:
- __ and_(right, left, Operand(right));
- break;
- case Token::BIT_XOR:
- __ eor(right, left, Operand(right));
- break;
- default:
- UNREACHABLE();
- }
-
- __ bind(&done);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit,
- int* used_store_slots) {
- // Constructor is in r0.
- DCHECK(lit != NULL);
- __ push(r0);
-
- // No access check is needed here since the constructor is created by the
- // class literal.
- Register scratch = r1;
- __ ldr(scratch,
- FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
- __ push(scratch);
-
- for (int i = 0; i < lit->properties()->length(); i++) {
- ObjectLiteral::Property* property = lit->properties()->at(i);
- Expression* value = property->value();
-
- if (property->is_static()) {
- __ ldr(scratch, MemOperand(sp, kPointerSize)); // constructor
- } else {
- __ ldr(scratch, MemOperand(sp, 0)); // prototype
- }
- __ push(scratch);
- EmitPropertyKey(property, lit->GetIdForProperty(i));
-
- // The static prototype property is read-only. We handle the non-computed
- // property name case in the parser. Since this is the only case where we
- // need to check for an own read-only property, we special-case it here
- // instead of performing the check for every property.
- if (property->is_static() && property->is_computed_name()) {
- __ CallRuntime(Runtime::kThrowIfStaticPrototype, 1);
- __ push(r0);
- }
-
- VisitForStackValue(value);
- EmitSetHomeObjectIfNeeded(value, 2,
- lit->SlotForHomeObject(value, used_store_slots));
-
- switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- case ObjectLiteral::Property::PROTOTYPE:
- UNREACHABLE();
- case ObjectLiteral::Property::COMPUTED:
- __ CallRuntime(Runtime::kDefineClassMethod, 3);
- break;
-
- case ObjectLiteral::Property::GETTER:
- __ mov(r0, Operand(Smi::FromInt(DONT_ENUM)));
- __ push(r0);
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
- break;
-
- case ObjectLiteral::Property::SETTER:
- __ mov(r0, Operand(Smi::FromInt(DONT_ENUM)));
- __ push(r0);
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
- break;
-
- default:
- UNREACHABLE();
- }
- }
-
- // prototype
- __ CallRuntime(Runtime::kToFastProperties, 1);
-
- // constructor
- __ CallRuntime(Runtime::kToFastProperties, 1);
-}
-
-
-void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
- __ pop(r1);
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
- JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(code, expr->BinaryOperationFeedbackId());
- patch_site.EmitPatchInfo();
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitAssignment(Expression* expr,
- FeedbackVectorICSlot slot) {
- DCHECK(expr->IsValidReferenceExpression());
-
- Property* prop = expr->AsProperty();
- LhsKind assign_type = Property::GetAssignType(prop);
-
- switch (assign_type) {
- case VARIABLE: {
- Variable* var = expr->AsVariableProxy()->var();
- EffectContext context(this);
- EmitVariableAssignment(var, Token::ASSIGN, slot);
- break;
- }
- case NAMED_PROPERTY: {
- __ push(r0); // Preserve value.
- VisitForAccumulatorValue(prop->obj());
- __ Move(StoreDescriptor::ReceiverRegister(), r0);
- __ pop(StoreDescriptor::ValueRegister()); // Restore value.
- __ mov(StoreDescriptor::NameRegister(),
- Operand(prop->key()->AsLiteral()->value()));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
- CallStoreIC();
- break;
- }
- case NAMED_SUPER_PROPERTY: {
- __ Push(r0);
- VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForAccumulatorValue(
- prop->obj()->AsSuperPropertyReference()->home_object());
- // stack: value, this; r0: home_object
- Register scratch = r2;
- Register scratch2 = r3;
- __ mov(scratch, result_register()); // home_object
- __ ldr(r0, MemOperand(sp, kPointerSize)); // value
- __ ldr(scratch2, MemOperand(sp, 0)); // this
- __ str(scratch2, MemOperand(sp, kPointerSize)); // this
- __ str(scratch, MemOperand(sp, 0)); // home_object
- // stack: this, home_object; r0: value
- EmitNamedSuperPropertyStore(prop);
- break;
- }
- case KEYED_SUPER_PROPERTY: {
- __ Push(r0);
- VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- prop->obj()->AsSuperPropertyReference()->home_object());
- VisitForAccumulatorValue(prop->key());
- Register scratch = r2;
- Register scratch2 = r3;
- __ ldr(scratch2, MemOperand(sp, 2 * kPointerSize)); // value
- // stack: value, this, home_object; r0: key, r3: value
- __ ldr(scratch, MemOperand(sp, kPointerSize)); // this
- __ str(scratch, MemOperand(sp, 2 * kPointerSize));
- __ ldr(scratch, MemOperand(sp, 0)); // home_object
- __ str(scratch, MemOperand(sp, kPointerSize));
- __ str(r0, MemOperand(sp, 0));
- __ Move(r0, scratch2);
- // stack: this, home_object, key; r0: value.
- EmitKeyedSuperPropertyStore(prop);
- break;
- }
- case KEYED_PROPERTY: {
- __ push(r0); // Preserve value.
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- __ Move(StoreDescriptor::NameRegister(), r0);
- __ Pop(StoreDescriptor::ValueRegister(),
- StoreDescriptor::ReceiverRegister());
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- CallIC(ic);
- break;
- }
- }
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
- Variable* var, MemOperand location) {
- __ str(result_register(), location);
- if (var->IsContextSlot()) {
- // RecordWrite may destroy all its register arguments.
- __ mov(r3, result_register());
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(
- r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
- }
-}
-
-
-void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
- FeedbackVectorICSlot slot) {
- if (var->IsUnallocatedOrGlobalSlot()) {
- // Global var, const, or let.
- __ mov(StoreDescriptor::NameRegister(), Operand(var->name()));
- __ ldr(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
- CallStoreIC();
-
- } else if (var->mode() == LET && op != Token::INIT_LET) {
- // Non-initializing assignment to let variable needs a write barrier.
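- // The hole check below implements the TDZ: e.g. in { x = 1; let x; } the
- // slot still holds the hole when the assignment runs, so a
- // ReferenceError is thrown.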
- DCHECK(!var->IsLookupSlot());
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- Label assign;
- MemOperand location = VarOperand(var, r1);
- __ ldr(r3, location);
- __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
- __ b(ne, &assign);
- __ mov(r3, Operand(var->name()));
- __ push(r3);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- // Perform the assignment.
- __ bind(&assign);
- EmitStoreToStackLocalOrContextSlot(var, location);
-
- } else if (var->mode() == CONST && op != Token::INIT_CONST) {
- // Assignment to const variable needs a write barrier.
- DCHECK(!var->IsLookupSlot());
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- Label const_error;
- MemOperand location = VarOperand(var, r1);
- __ ldr(r3, location);
- __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
- __ b(ne, &const_error);
- __ mov(r3, Operand(var->name()));
- __ push(r3);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- __ bind(&const_error);
- __ CallRuntime(Runtime::kThrowConstAssignError, 0);
-
- } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
- if (var->IsLookupSlot()) {
- // Assignment to var.
- __ push(r0); // Value.
- __ mov(r1, Operand(var->name()));
- __ mov(r0, Operand(Smi::FromInt(language_mode())));
- __ Push(cp, r1, r0); // Context, name, language mode.
- __ CallRuntime(Runtime::kStoreLookupSlot, 4);
- } else {
- // Assignment to var or initializing assignment to let/const in harmony
- // mode.
- DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
- MemOperand location = VarOperand(var, r1);
- if (generate_debug_code_ && op == Token::INIT_LET) {
- // Check for an uninitialized let binding.
- __ ldr(r2, location);
- __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
- __ Check(eq, kLetBindingReInitialization);
- }
- EmitStoreToStackLocalOrContextSlot(var, location);
- }
-
- } else if (op == Token::INIT_CONST_LEGACY) {
- // Const initializers need a write barrier.
- DCHECK(var->mode() == CONST_LEGACY);
- DCHECK(!var->IsParameter()); // No const parameters.
- if (var->IsLookupSlot()) {
- __ push(r0);
- __ mov(r0, Operand(var->name()));
- __ Push(cp, r0); // Context and name.
- __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
- } else {
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- Label skip;
- MemOperand location = VarOperand(var, r1);
- __ ldr(r2, location);
- __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
- __ b(ne, &skip);
- EmitStoreToStackLocalOrContextSlot(var, location);
- __ bind(&skip);
- }
-
- } else {
- DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT_CONST_LEGACY);
- if (is_strict(language_mode())) {
- __ CallRuntime(Runtime::kThrowConstAssignError, 0);
- }
- // Silently ignore store in sloppy mode.
- }
-}
-
-
-void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
- // Assignment to a property, using a named store IC.
- Property* prop = expr->target()->AsProperty();
- DCHECK(prop != NULL);
- DCHECK(prop->key()->IsLiteral());
-
- __ mov(StoreDescriptor::NameRegister(),
- Operand(prop->key()->AsLiteral()->value()));
- __ pop(StoreDescriptor::ReceiverRegister());
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallStoreIC();
- } else {
- CallStoreIC(expr->AssignmentFeedbackId());
- }
-
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
- // Assignment to named property of super.
- // r0 : value
- // stack : receiver ('this'), home_object
- DCHECK(prop != NULL);
- Literal* key = prop->key()->AsLiteral();
- DCHECK(key != NULL);
-
- __ Push(key->value());
- __ Push(r0);
- __ CallRuntime((is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy),
- 4);
-}
-
-
-void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
- // Assignment to keyed property of super.
- // r0 : value
- // stack : receiver ('this'), home_object, key
- DCHECK(prop != NULL);
-
- __ Push(r0);
- __ CallRuntime(
- (is_strict(language_mode()) ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy),
- 4);
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
- // Assignment to a property, using a keyed store IC.
- __ Pop(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister());
- DCHECK(StoreDescriptor::ValueRegister().is(r0));
-
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallIC(ic);
- } else {
- CallIC(ic, expr->AssignmentFeedbackId());
- }
-
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::VisitProperty(Property* expr) {
- Comment cmnt(masm_, "[ Property");
- SetExpressionPosition(expr);
-
- Expression* key = expr->key();
-
- if (key->IsPropertyName()) {
- if (!expr->IsSuperAccess()) {
- VisitForAccumulatorValue(expr->obj());
- __ Move(LoadDescriptor::ReceiverRegister(), r0);
- EmitNamedPropertyLoad(expr);
- } else {
- VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- expr->obj()->AsSuperPropertyReference()->home_object());
- EmitNamedSuperPropertyLoad(expr);
- }
- } else {
- if (!expr->IsSuperAccess()) {
- VisitForStackValue(expr->obj());
- VisitForAccumulatorValue(expr->key());
- __ Move(LoadDescriptor::NameRegister(), r0);
- __ pop(LoadDescriptor::ReceiverRegister());
- EmitKeyedPropertyLoad(expr);
- } else {
- VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- expr->obj()->AsSuperPropertyReference()->home_object());
- VisitForStackValue(expr->key());
- EmitKeyedSuperPropertyLoad(expr);
- }
- }
- PrepareForBailoutForId(expr->LoadId(), TOS_REG);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::CallIC(Handle<Code> code,
- TypeFeedbackId ast_id) {
- ic_total_count_++;
- // All calls must have a predictable size in full-codegen code to ensure that
- // the debugger can patch them correctly.
- __ Call(code, RelocInfo::CODE_TARGET, ast_id, al,
- NEVER_INLINE_TARGET_ADDRESS);
-}
-
-
-// Code common for calls using the IC.
-void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
- Expression* callee = expr->expression();
-
- CallICState::CallType call_type =
- callee->IsVariableProxy() ? CallICState::FUNCTION : CallICState::METHOD;
-
- // Get the target function.
- if (call_type == CallICState::FUNCTION) {
- { StackValueContext context(this);
- EmitVariableLoad(callee->AsVariableProxy());
- PrepareForBailout(callee, NO_REGISTERS);
- }
- // Push undefined as receiver. This is patched in the method prologue if it
- // is a sloppy mode method.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ push(ip);
- } else {
- // Load the function from the receiver.
- DCHECK(callee->IsProperty());
- DCHECK(!callee->AsProperty()->IsSuperAccess());
- __ ldr(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
- EmitNamedPropertyLoad(callee->AsProperty());
- PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
- // Push the target function under the receiver.
- __ ldr(ip, MemOperand(sp, 0));
- __ push(ip);
- __ str(r0, MemOperand(sp, kPointerSize));
- }
-
- EmitCall(expr, call_type);
-}
-
-
-void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
- Expression* callee = expr->expression();
- DCHECK(callee->IsProperty());
- Property* prop = callee->AsProperty();
- DCHECK(prop->IsSuperAccess());
- SetExpressionPosition(prop);
-
- Literal* key = prop->key()->AsLiteral();
- DCHECK(!key->value()->IsSmi());
- // Load the function from the receiver.
- const Register scratch = r1;
- SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
- VisitForStackValue(super_ref->home_object());
- VisitForAccumulatorValue(super_ref->this_var());
- __ Push(r0);
- __ Push(r0);
- __ ldr(scratch, MemOperand(sp, kPointerSize * 2));
- __ Push(scratch);
- __ Push(key->value());
- __ Push(Smi::FromInt(language_mode()));
-
- // Stack here:
- // - home_object
- // - this (receiver)
- // - this (receiver) <-- LoadFromSuper will pop here and below.
- // - home_object
- // - key
- // - language_mode
- __ CallRuntime(Runtime::kLoadFromSuper, 4);
-
- // Replace home_object with target function.
- __ str(r0, MemOperand(sp, kPointerSize));
-
- // Stack here:
- // - target function
- // - this (receiver)
- EmitCall(expr, CallICState::METHOD);
-}
-
-
-// Code common for calls using the IC.
-void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
- Expression* key) {
- // Load the key.
- VisitForAccumulatorValue(key);
-
- Expression* callee = expr->expression();
-
- // Load the function from the receiver.
- DCHECK(callee->IsProperty());
- __ ldr(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
- __ Move(LoadDescriptor::NameRegister(), r0);
- EmitKeyedPropertyLoad(callee->AsProperty());
- PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
-
- // Push the target function under the receiver.
- __ ldr(ip, MemOperand(sp, 0));
- __ push(ip);
- __ str(r0, MemOperand(sp, kPointerSize));
-
- EmitCall(expr, CallICState::METHOD);
-}
-
-
-void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
- Expression* callee = expr->expression();
- DCHECK(callee->IsProperty());
- Property* prop = callee->AsProperty();
- DCHECK(prop->IsSuperAccess());
-
- SetExpressionPosition(prop);
- // Load the function from the receiver.
- const Register scratch = r1;
- SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
- VisitForStackValue(super_ref->home_object());
- VisitForAccumulatorValue(super_ref->this_var());
- __ Push(r0);
- __ Push(r0);
- __ ldr(scratch, MemOperand(sp, kPointerSize * 2));
- __ Push(scratch);
- VisitForStackValue(prop->key());
- __ Push(Smi::FromInt(language_mode()));
-
- // Stack here:
- // - home_object
- // - this (receiver)
- // - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
- // - home_object
- // - key
- // - language_mode
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
-
- // Replace home_object with target function.
- __ str(r0, MemOperand(sp, kPointerSize));
-
- // Stack here:
- // - target function
- // - this (receiver)
- EmitCall(expr, CallICState::METHOD);
-}
-
-
-void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
- // Load the arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- SetExpressionPosition(expr);
- Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, call_type).code();
- __ mov(r3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
- __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- // Don't assign a type feedback id to the IC, since type feedback is provided
- // by the vector above.
- CallIC(ic);
-
- RecordJSReturnSite(expr);
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, r0);
-}
-
-
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
- // r4: copy of the first argument or undefined if it doesn't exist.
- if (arg_count > 0) {
- __ ldr(r4, MemOperand(sp, arg_count * kPointerSize));
- } else {
- __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
- }
-
- // r3: the receiver of the enclosing function.
- __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-
- // r2: language mode.
- __ mov(r2, Operand(Smi::FromInt(language_mode())));
-
-  // r1: the start position of the scope the call resides in.
- __ mov(r1, Operand(Smi::FromInt(scope()->start_position())));
-
- // Do the runtime call.
- __ Push(r4, r3, r2, r1);
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
-}
-
-
-void FullCodeGenerator::EmitInitializeThisAfterSuper(
- SuperCallReference* super_ref, FeedbackVectorICSlot slot) {
- Variable* this_var = super_ref->this_var()->var();
- GetVar(r1, this_var);
- __ CompareRoot(r1, Heap::kTheHoleValueRootIndex);
- Label uninitialized_this;
- __ b(eq, &uninitialized_this);
- __ mov(r0, Operand(this_var->name()));
- __ Push(r0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- __ bind(&uninitialized_this);
-
- EmitVariableAssignment(this_var, Token::INIT_CONST, slot);
-}
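
A host-side sketch of the guard above (hypothetical sentinel; a C++ exception stands in for the thrown ReferenceError): 'this' must still be uninitialized when it is assigned after super() returns:

#include <cstdint>
#include <stdexcept>

static const intptr_t kHole = -1;  // hypothetical stand-in for the hole value

static void InitializeThisAfterSuper(intptr_t* this_slot, intptr_t value) {
  if (*this_slot != kHole) {
    // Corresponds to the Runtime::kThrowReferenceError call above.
    throw std::runtime_error("ReferenceError: 'this' already initialized");
  }
  *this_slot = value;  // the Token::INIT_CONST assignment
}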
-
-
-// See http://www.ecma-international.org/ecma-262/6.0/#sec-function-calls.
-void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
- VariableProxy* callee = expr->expression()->AsVariableProxy();
- if (callee->var()->IsLookupSlot()) {
- Label slow, done;
- SetExpressionPosition(callee);
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLookupFastCase(callee, NOT_INSIDE_TYPEOF, &slow, &done);
-
- __ bind(&slow);
-    // Call the runtime to find the function to call (returned in r0)
-    // and the object holding it (returned in r1).
- DCHECK(!context_register().is(r2));
- __ mov(r2, Operand(callee->name()));
- __ Push(context_register(), r2);
- __ CallRuntime(Runtime::kLoadLookupSlot, 2);
- __ Push(r0, r1); // Function, receiver.
- PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
-
- // If fast case code has been generated, emit code to push the
- // function and receiver and have the slow path jump around this
- // code.
- if (done.is_linked()) {
- Label call;
- __ b(&call);
- __ bind(&done);
- // Push function.
- __ push(r0);
-      // The receiver is implicitly the global receiver. Indicate this
-      // by passing undefined to the call function stub.
- __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
- __ push(r1);
- __ bind(&call);
- }
- } else {
- VisitForStackValue(callee);
- // refEnv.WithBaseObject()
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ push(r2); // Reserved receiver slot.
- }
-}
-
-
-void FullCodeGenerator::VisitCall(Call* expr) {
-#ifdef DEBUG
- // We want to verify that RecordJSReturnSite gets called on all paths
- // through this function. Avoid early returns.
- expr->return_is_recorded_ = false;
-#endif
-
- Comment cmnt(masm_, "[ Call");
- Expression* callee = expr->expression();
- Call::CallType call_type = expr->GetCallType(isolate());
-
- if (call_type == Call::POSSIBLY_EVAL_CALL) {
-    // In a call to eval, we first call Runtime::kResolvePossiblyDirectEval
-    // to resolve the function we need to call.  Then we call the resolved
-    // function using the given arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
-
- PushCalleeAndWithBaseObject(expr);
-
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Push a copy of the function (found below the arguments) and
- // resolve eval.
- __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ push(r1);
- EmitResolvePossiblyDirectEval(arg_count);
-
- // Touch up the stack with the resolved function.
- __ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize));
-
- PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
-
- // Record source position for debugger.
- SetExpressionPosition(expr);
- CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
- __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
- RecordJSReturnSite(expr);
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, r0);
- } else if (call_type == Call::GLOBAL_CALL) {
- EmitCallWithLoadIC(expr);
-
- } else if (call_type == Call::LOOKUP_SLOT_CALL) {
- // Call to a lookup slot (dynamically introduced variable).
- PushCalleeAndWithBaseObject(expr);
- EmitCall(expr);
- } else if (call_type == Call::PROPERTY_CALL) {
- Property* property = callee->AsProperty();
- bool is_named_call = property->key()->IsPropertyName();
- if (property->IsSuperAccess()) {
- if (is_named_call) {
- EmitSuperCallWithLoadIC(expr);
- } else {
- EmitKeyedSuperCallWithLoadIC(expr);
- }
- } else {
- VisitForStackValue(property->obj());
- if (is_named_call) {
- EmitCallWithLoadIC(expr);
- } else {
- EmitKeyedCallWithLoadIC(expr, property->key());
- }
- }
- } else if (call_type == Call::SUPER_CALL) {
- EmitSuperConstructorCall(expr);
- } else {
- DCHECK(call_type == Call::OTHER_CALL);
- // Call to an arbitrary expression not handled specially above.
- VisitForStackValue(callee);
- __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
- __ push(r1);
- // Emit function call.
- EmitCall(expr);
- }
-
-#ifdef DEBUG
- // RecordJSReturnSite should have been called.
- DCHECK(expr->return_is_recorded_);
-#endif
-}
-
-
-void FullCodeGenerator::VisitCallNew(CallNew* expr) {
- Comment cmnt(masm_, "[ CallNew");
- // According to ECMA-262, section 11.2.2, page 44, the function
- // expression in new calls must be evaluated before the
- // arguments.
-
- // Push constructor on the stack. If it's not a function it's used as
- // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
- // ignored.
- DCHECK(!expr->expression()->IsSuperPropertyReference());
- VisitForStackValue(expr->expression());
-
- // Push the arguments ("left-to-right") on the stack.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Call the construct call builtin that handles allocation and
- // constructor invocation.
- SetExpressionPosition(expr);
-
- // Load function and argument count into r1 and r0.
- __ mov(r0, Operand(arg_count));
- __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
-
- // Record call targets in unoptimized code.
- if (FLAG_pretenuring_call_new) {
- EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
- DCHECK(expr->AllocationSiteFeedbackSlot().ToInt() ==
- expr->CallNewFeedbackSlot().ToInt() + 1);
- }
-
- __ Move(r2, FeedbackVector());
- __ mov(r3, Operand(SmiFromSlot(expr->CallNewFeedbackSlot())));
-
- CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
- PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
- SuperCallReference* super_call_ref =
- expr->expression()->AsSuperCallReference();
- DCHECK_NOT_NULL(super_call_ref);
-
- VariableProxy* new_target_proxy = super_call_ref->new_target_var();
- VisitForStackValue(new_target_proxy);
-
- EmitLoadSuperConstructor(super_call_ref);
- __ push(result_register());
-
- // Push the arguments ("left-to-right") on the stack.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Call the construct call builtin that handles allocation and
- // constructor invocation.
- SetExpressionPosition(expr);
-
- // Load function and argument count into r1 and r0.
- __ mov(r0, Operand(arg_count));
- __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
-
- // Record call targets in unoptimized code.
- if (FLAG_pretenuring_call_new) {
- UNREACHABLE();
- /* TODO(dslomov): support pretenuring.
- EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
- DCHECK(expr->AllocationSiteFeedbackSlot().ToInt() ==
- expr->CallNewFeedbackSlot().ToInt() + 1);
- */
- }
-
- __ Move(r2, FeedbackVector());
- __ mov(r3, Operand(SmiFromSlot(expr->CallFeedbackSlot())));
-
- CallConstructStub stub(isolate(), SUPER_CALL_RECORD_TARGET);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
-
- __ Drop(1);
-
- RecordJSReturnSite(expr);
-
- EmitInitializeThisAfterSuper(super_call_ref, expr->CallFeedbackICSlot());
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ SmiTst(r0);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ NonNegativeSmiTst(r0);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
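
On 32-bit ARM a smi is the integer shifted left by one with a zero tag bit, so both tag tests above reduce to bit masks; a host-side sketch:

#include <cstdint>

static bool IsSmi(int32_t value) {
  return (value & 1) == 0;  // the smi tag bit (bit 0) must be clear
}

static bool IsNonNegativeSmi(int32_t value) {
  // Additionally require the sign bit to be clear, as NonNegativeSmiTst does.
  return (static_cast<uint32_t>(value) & 0x80000001u) == 0;
}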
-
-
-void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(r0, if_false);
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(r0, ip);
- __ b(eq, if_true);
- __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
- // Undetectable objects behave like undefined when tested with typeof.
- __ ldrb(r1, FieldMemOperand(r2, Map::kBitFieldOffset));
- __ tst(r1, Operand(1 << Map::kIsUndetectable));
- __ b(ne, if_false);
- __ ldrb(r1, FieldMemOperand(r2, Map::kInstanceTypeOffset));
- __ cmp(r1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ b(lt, if_false);
- __ cmp(r1, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(le, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
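
The %_IsObject test above has three steps: null counts as an object, undetectable objects do not, and everything else must land in the non-callable spec-object type range. A boolean sketch with the range bounds as placeholders for the real instance-type constants:

static bool IsObjectIntrinsic(bool is_smi, bool is_null, bool is_undetectable,
                              int instance_type, int first_noncallable,
                              int last_noncallable) {
  if (is_smi) return false;
  if (is_null) return true;           // typeof null is "object"
  if (is_undetectable) return false;  // behaves like undefined under typeof
  return first_noncallable <= instance_type &&
         instance_type <= last_noncallable;
}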
-
-
-void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(r0, if_false);
- __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(ge, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(r0, if_false);
- __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
- __ tst(r1, Operand(1 << Map::kIsUndetectable));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(ne, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
- CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false, skip_lookup;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ AssertNotSmi(r0);
-
- __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldrb(ip, FieldMemOperand(r1, Map::kBitField2Offset));
- __ tst(ip, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ b(ne, &skip_lookup);
-
- // Check for fast case object. Generate false result for slow case object.
- __ ldr(r2, FieldMemOperand(r0, JSObject::kPropertiesOffset));
- __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(r2, ip);
- __ b(eq, if_false);
-
- // Look for valueOf name in the descriptor array, and indicate false if
- // found. Since we omit an enumeration index check, if it is added via a
- // transition that shares its descriptor array, this is a false positive.
- Label entry, loop, done;
-
- // Skip loop if no descriptors are valid.
- __ NumberOfOwnDescriptors(r3, r1);
- __ cmp(r3, Operand::Zero());
- __ b(eq, &done);
-
- __ LoadInstanceDescriptors(r1, r4);
- // r4: descriptor array.
- // r3: valid entries in the descriptor array.
- __ mov(ip, Operand(DescriptorArray::kDescriptorSize));
- __ mul(r3, r3, ip);
- // Calculate location of the first key name.
- __ add(r4, r4, Operand(DescriptorArray::kFirstOffset - kHeapObjectTag));
- // Calculate the end of the descriptor array.
- __ mov(r2, r4);
- __ add(r2, r2, Operand(r3, LSL, kPointerSizeLog2));
-
- // Loop through all the keys in the descriptor array. If one of these is the
- // string "valueOf" the result is false.
- // The use of ip to store the valueOf string assumes that it is not otherwise
- // used in the loop below.
- __ mov(ip, Operand(isolate()->factory()->value_of_string()));
- __ jmp(&entry);
- __ bind(&loop);
- __ ldr(r3, MemOperand(r4, 0));
- __ cmp(r3, ip);
- __ b(eq, if_false);
- __ add(r4, r4, Operand(DescriptorArray::kDescriptorSize * kPointerSize));
- __ bind(&entry);
- __ cmp(r4, Operand(r2));
- __ b(ne, &loop);
-
- __ bind(&done);
-
- // Set the bit in the map to indicate that there is no local valueOf field.
- __ ldrb(r2, FieldMemOperand(r1, Map::kBitField2Offset));
- __ orr(r2, r2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ strb(r2, FieldMemOperand(r1, Map::kBitField2Offset));
-
- __ bind(&skip_lookup);
-
-  // If a valueOf property is not found on the object, check that its
-  // prototype is the unmodified String prototype. If not, the result is false.
- __ ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
- __ JumpIfSmi(r2, if_false);
- __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
- __ ldr(r3, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ ldr(r3, FieldMemOperand(r3, GlobalObject::kNativeContextOffset));
- __ ldr(r3, ContextOperand(r3, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
- __ cmp(r2, r3);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(r0, if_false);
- __ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, if_false, DO_SMI_CHECK);
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- __ ldr(r1, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ cmp(r2, Operand(0x80000000));
- __ cmp(r1, Operand(0x00000000), eq);
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
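
The paired compares implement the IEEE 754 bit test for -0.0: the upper word must be exactly 0x80000000 (sign bit set, everything else clear) and the lower word zero. An equivalent host-side check:

#include <cstdint>
#include <cstring>

static bool IsMinusZero(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);
  return static_cast<uint32_t>(bits >> 32) == 0x80000000u &&  // sign word
         static_cast<uint32_t>(bits) == 0u;                   // mantissa word
}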
-
-
-void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(r0, if_false);
- __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsTypedArray(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
- &if_false, &fall_through);
-
- __ JumpIfSmi(r0, if_false);
- __ CompareObjectType(r0, r1, r1, JS_TYPED_ARRAY_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(r0, if_false);
- __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
- &if_false, &fall_through);
-
- __ JumpIfSmi(r0, if_false);
- Register map = r1;
- Register type_reg = r2;
- __ ldr(map, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ sub(type_reg, type_reg, Operand(FIRST_JS_PROXY_TYPE));
- __ cmp(type_reg, Operand(LAST_JS_PROXY_TYPE - FIRST_JS_PROXY_TYPE));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(ls, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
- DCHECK(expr->arguments()->length() == 0);
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- // Get the frame pointer for the calling frame.
- __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- __ ldr(r1, MemOperand(r2, StandardFrameConstants::kContextOffset));
- __ cmp(r1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset), eq);
-
- // Check the marker in the calling frame.
- __ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
- __ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
-
- // Load the two objects into registers and perform the comparison.
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ pop(r1);
- __ cmp(r0, r1);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- // ArgumentsAccessStub expects the key in edx and the formal
- // parameter count in r0.
- VisitForAccumulatorValue(args->at(0));
- __ mov(r1, r0);
- __ mov(r0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
- ArgumentsAccessStub stub(isolate(), ArgumentsAccessStub::READ_ELEMENT);
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
- DCHECK(expr->arguments()->length() == 0);
-
- // Get the number of formal parameters.
- __ mov(r0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
-
- // Check if the calling frame is an arguments adaptor frame.
- __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
- __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Arguments adaptor case: Read the arguments length from the
- // adaptor frame.
- __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset), eq);
-
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
- Label done, null, function, non_function_constructor;
-
- VisitForAccumulatorValue(args->at(0));
-
- // If the object is a smi, we return null.
- __ JumpIfSmi(r0, &null);
-
- // Check that the object is a JS object but take special care of JS
- // functions to make sure they have 'Function' as their class.
- // Assume that there are only two callable types, and one of them is at
- // either end of the type range for JS object types. Saves extra comparisons.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ CompareObjectType(r0, r0, r1, FIRST_SPEC_OBJECT_TYPE);
- // Map is now in r0.
- __ b(lt, &null);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- __ b(eq, &function);
-
- __ cmp(r1, Operand(LAST_SPEC_OBJECT_TYPE));
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- __ b(eq, &function);
- // Assume that there is no larger type.
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
-
- // Check if the constructor in the map is a JS function.
- Register instance_type = r2;
- __ GetMapConstructor(r0, r0, r1, instance_type);
- __ cmp(instance_type, Operand(JS_FUNCTION_TYPE));
- __ b(ne, &non_function_constructor);
-
- // r0 now contains the constructor function. Grab the
- // instance class name from there.
- __ ldr(r0, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r0, FieldMemOperand(r0, SharedFunctionInfo::kInstanceClassNameOffset));
- __ b(&done);
-
- // Functions have class 'Function'.
- __ bind(&function);
- __ LoadRoot(r0, Heap::kFunction_stringRootIndex);
- __ jmp(&done);
-
- // Objects with a non-function constructor have class 'Object'.
- __ bind(&non_function_constructor);
- __ LoadRoot(r0, Heap::kObject_stringRootIndex);
- __ jmp(&done);
-
- // Non-JS objects have class null.
- __ bind(&null);
- __ LoadRoot(r0, Heap::kNullValueRootIndex);
-
- // All done.
- __ bind(&done);
-
- context()->Plug(r0);
-}
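
The classification leans on the callable types sitting at both ends of the spec-object range, so two equality checks identify functions before the map's constructor is consulted. A sketch with placeholder type constants (not v8's real values):

static const char* ClassOf(int instance_type, int first_spec_object,
                           int last_spec_object, bool ctor_is_js_function,
                           const char* ctor_instance_class_name) {
  if (instance_type < first_spec_object) return nullptr;  // "null" class
  if (instance_type == first_spec_object ||
      instance_type == last_spec_object) {
    return "Function";  // the two callable types sit at the range ends
  }
  if (!ctor_is_js_function) return "Object";
  return ctor_instance_class_name;  // read from the SharedFunctionInfo
}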
-
-
-void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
- // Load the arguments on the stack and call the stub.
- SubStringStub stub(isolate());
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
- // Load the arguments on the stack and call the stub.
- RegExpExecStub stub(isolate());
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 4);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- VisitForStackValue(args->at(3));
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Label done;
- // If the object is a smi return the object.
- __ JumpIfSmi(r0, &done);
- // If the object is not a value type, return the object.
- __ CompareObjectType(r0, r1, r1, JS_VALUE_TYPE);
- __ ldr(r0, FieldMemOperand(r0, JSValue::kValueOffset), eq);
-
- __ bind(&done);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitIsDate(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = nullptr;
- Label* if_false = nullptr;
- Label* fall_through = nullptr;
- context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
- &if_false, &fall_through);
-
- __ JumpIfSmi(r0, if_false);
- __ CompareObjectType(r0, r1, r1, JS_DATE_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- DCHECK_NOT_NULL(args->at(1)->AsLiteral());
- Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
-
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Register object = r0;
- Register result = r0;
- Register scratch0 = r9;
- Register scratch1 = r1;
-
- if (index->value() == 0) {
- __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
- } else {
- Label runtime, done;
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ mov(scratch1, Operand(stamp));
- __ ldr(scratch1, MemOperand(scratch1));
- __ ldr(scratch0, FieldMemOperand(object, JSDate::kCacheStampOffset));
- __ cmp(scratch1, scratch0);
- __ b(ne, &runtime);
- __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ jmp(&done);
- }
- __ bind(&runtime);
- __ PrepareCallCFunction(2, scratch1);
- __ mov(r1, Operand(index));
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ bind(&done);
- }
-
- context()->Plug(result);
-}
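
The fast path is a stamp-validated cache: each date object records the stamp of the date cache that filled its fields, and a cached field is usable only while that stamp still matches the isolate-wide one. A hypothetical sketch (layout and names are illustrative, not v8's):

struct DateLike {
  double value;      // field 0: the time value itself, never cached
  int cache_stamp;   // stamp recorded when the cached fields were computed
  double fields[8];  // cached year, month, day, ... (illustrative layout)
};

static double GetDateField(const DateLike& date, int index, int global_stamp,
                           double (*slow)(const DateLike&, int)) {
  if (index == 0) return date.value;
  if (date.cache_stamp == global_stamp) return date.fields[index - 1];
  return slow(date, index);  // the runtime recomputes and refreshes the cache
}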
-
-
-void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(3, args->length());
-
- Register string = r0;
- Register index = r1;
- Register value = r2;
-
- VisitForStackValue(args->at(0)); // index
- VisitForStackValue(args->at(1)); // value
- VisitForAccumulatorValue(args->at(2)); // string
- __ Pop(index, value);
-
- if (FLAG_debug_code) {
- __ SmiTst(value);
- __ Check(eq, kNonSmiValue);
- __ SmiTst(index);
- __ Check(eq, kNonSmiIndex);
- __ SmiUntag(index, index);
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- __ EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
- __ SmiTag(index, index);
- }
-
- __ SmiUntag(value, value);
- __ add(ip,
- string,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ strb(value, MemOperand(ip, index, LSR, kSmiTagSize));
- context()->Plug(string);
-}
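
Index and value both arrive as smis (payload shifted left by one), so the store untags on the fly: the byte offset is index >> kSmiTagSize and the stored byte is the untagged value. A host-side sketch:

#include <cstdint>

static void OneByteSeqStringSetChar(uint8_t* chars, int32_t smi_index,
                                    int32_t smi_value) {
  const int kSmiTagSize = 1;  // 32-bit smi: the payload is shifted left by one
  chars[smi_index >> kSmiTagSize] =
      static_cast<uint8_t>(smi_value >> kSmiTagSize);
}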
-
-
-void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(3, args->length());
-
- Register string = r0;
- Register index = r1;
- Register value = r2;
-
- VisitForStackValue(args->at(0)); // index
- VisitForStackValue(args->at(1)); // value
- VisitForAccumulatorValue(args->at(2)); // string
- __ Pop(index, value);
-
- if (FLAG_debug_code) {
- __ SmiTst(value);
- __ Check(eq, kNonSmiValue);
- __ SmiTst(index);
- __ Check(eq, kNonSmiIndex);
- __ SmiUntag(index, index);
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
- __ SmiTag(index, index);
- }
-
- __ SmiUntag(value, value);
- __ add(ip,
- string,
- Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ strh(value, MemOperand(ip, index));
- context()->Plug(string);
-}
-
-
-void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
- // Load the arguments on the stack and call the runtime function.
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- MathPowStub stub(isolate(), MathPowStub::ON_STACK);
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- VisitForStackValue(args->at(0)); // Load the object.
- VisitForAccumulatorValue(args->at(1)); // Load the value.
- __ pop(r1); // r0 = value. r1 = object.
-
- Label done;
- // If the object is a smi, return the value.
- __ JumpIfSmi(r1, &done);
-
- // If the object is not a value type, return the value.
- __ CompareObjectType(r1, r2, r2, JS_VALUE_TYPE);
- __ b(ne, &done);
-
- // Store the value.
- __ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
- // Update the write barrier. Save the value as it will be
- // overwritten by the write barrier code and is needed afterward.
- __ mov(r2, r0);
- __ RecordWriteField(
- r1, JSValue::kValueOffset, r2, r3, kLRHasBeenSaved, kDontSaveFPRegs);
-
- __ bind(&done);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(args->length(), 1);
- // Load the argument into r0 and call the stub.
- VisitForAccumulatorValue(args->at(0));
-
- NumberToStringStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
- VisitForAccumulatorValue(args->at(0));
-
- Label done;
- StringCharFromCodeGenerator generator(r0, r1);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
-
- __ bind(&done);
- context()->Plug(r1);
-}
-
-
-void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Register object = r1;
- Register index = r0;
- Register result = r3;
-
- __ pop(object);
-
- Label need_conversion;
- Label index_out_of_range;
- Label done;
- StringCharCodeAtGenerator generator(object,
- index,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- __ bind(&index_out_of_range);
- // When the index is out of range, the spec requires us to return
- // NaN.
- __ LoadRoot(result, Heap::kNanValueRootIndex);
- __ jmp(&done);
-
- __ bind(&need_conversion);
- // Load the undefined value into the result register, which will
- // trigger conversion.
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
-
- __ bind(&done);
- context()->Plug(result);
-}
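
The two slow-path labels mirror the spec for String.prototype.charCodeAt: an out-of-range numeric index yields NaN, and a non-number index is converted first. A simplified host-side sketch for one-byte strings:

#include <cmath>
#include <string>

static double CharCodeAt(const std::string& s, double index) {
  if (index < 0 || index >= static_cast<double>(s.size())) {
    return std::nan("");  // out-of-range index produces NaN per the spec
  }
  return static_cast<unsigned char>(s[static_cast<size_t>(index)]);
}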
-
-
-void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Register object = r1;
- Register index = r0;
- Register scratch = r3;
- Register result = r0;
-
- __ pop(object);
-
- Label need_conversion;
- Label index_out_of_range;
- Label done;
- StringCharAtGenerator generator(object,
- index,
- scratch,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- __ bind(&index_out_of_range);
- // When the index is out of range, the spec requires us to return
- // the empty string.
- __ LoadRoot(result, Heap::kempty_stringRootIndex);
- __ jmp(&done);
-
- __ bind(&need_conversion);
- // Move smi zero into the result register, which will trigger
- // conversion.
- __ mov(result, Operand(Smi::FromInt(0)));
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
-
- __ bind(&done);
- context()->Plug(result);
-}
-
-
-void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- __ pop(r1);
- StringAddStub stub(isolate(), STRING_ADD_CHECK_BOTH, NOT_TENURED);
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- StringCompareStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() >= 2);
-
- int arg_count = args->length() - 2; // 2 ~ receiver and function.
- for (int i = 0; i < arg_count + 1; i++) {
- VisitForStackValue(args->at(i));
- }
- VisitForAccumulatorValue(args->last()); // Function.
-
- Label runtime, done;
- // Check for non-function argument (including proxy).
- __ JumpIfSmi(r0, &runtime);
- __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
- __ b(ne, &runtime);
-
- // InvokeFunction requires the function in r1. Move it in there.
- __ mov(r1, result_register());
- ParameterCount count(arg_count);
- __ InvokeFunction(r1, count, CALL_FUNCTION, NullCallWrapper());
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ jmp(&done);
-
- __ bind(&runtime);
- __ push(r0);
- __ CallRuntime(Runtime::kCall, args->length());
- __ bind(&done);
-
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
-
- // new.target
- VisitForStackValue(args->at(0));
-
- // .this_function
- VisitForStackValue(args->at(1));
- __ CallRuntime(Runtime::kGetPrototype, 1);
- __ Push(result_register());
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, args_set_up, runtime;
- __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
- __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(eq, &adaptor_frame);
-  // Default constructor has no arguments, so no adaptor frame means no args.
- __ mov(r0, Operand::Zero());
- __ b(&args_set_up);
-
- // Copy arguments from adaptor frame.
- {
- __ bind(&adaptor_frame);
- __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(r1, r1);
- __ mov(r0, r1);
-
- // Get arguments pointer in r2.
- __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2));
- __ add(r2, r2, Operand(StandardFrameConstants::kCallerSPOffset));
- Label loop;
- __ bind(&loop);
- // Pre-decrement r2 with kPointerSize on each iteration.
- // Pre-decrement in order to skip receiver.
- __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex));
- __ Push(r3);
- __ sub(r1, r1, Operand(1));
- __ cmp(r1, Operand::Zero());
- __ b(ne, &loop);
- }
-
- __ bind(&args_set_up);
- __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
-
- CallConstructStub stub(isolate(), SUPER_CONSTRUCTOR_CALL);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
-
- __ Drop(1);
-
- context()->Plug(result_register());
-}
-
-
-void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
- RegExpConstructResultStub stub(isolate());
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForAccumulatorValue(args->at(2));
- __ pop(r1);
- __ pop(r2);
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
- DCHECK_NOT_NULL(args->at(0)->AsLiteral());
- int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
-
- Handle<FixedArray> jsfunction_result_caches(
- isolate()->native_context()->jsfunction_result_caches());
- if (jsfunction_result_caches->length() <= cache_id) {
- __ Abort(kAttemptToUseUndefinedCache);
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- context()->Plug(r0);
- return;
- }
-
- VisitForAccumulatorValue(args->at(1));
-
- Register key = r0;
- Register cache = r1;
- __ ldr(cache, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ ldr(cache, FieldMemOperand(cache, GlobalObject::kNativeContextOffset));
- __ ldr(cache, ContextOperand(cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
- __ ldr(cache,
- FieldMemOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
-
- Label done, not_found;
- __ ldr(r2, FieldMemOperand(cache, JSFunctionResultCache::kFingerOffset));
- // r2 now holds finger offset as a smi.
- __ add(r3, cache, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // r3 now points to the start of fixed array elements.
- __ ldr(r2, MemOperand::PointerAddressFromSmiKey(r3, r2, PreIndex));
- // Note side effect of PreIndex: r3 now points to the key of the pair.
- __ cmp(key, r2);
- __ b(ne, &not_found);
-
- __ ldr(r0, MemOperand(r3, kPointerSize));
- __ b(&done);
-
- __ bind(&not_found);
- // Call runtime to perform the lookup.
- __ Push(cache, key);
- __ CallRuntime(Runtime::kGetFromCacheRT, 2);
-
- __ bind(&done);
- context()->Plug(r0);
-}
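
Only a single slot is probed inline: the cache keeps a "finger" at the most recently used key/value pair and everything else goes to the runtime. A hypothetical flat-array sketch:

#include <cstddef>

struct ResultCache {
  std::size_t finger;       // index of the most recently hit key slot
  const void* entries[32];  // alternating key/value pairs
};

// Returns the cached value on a finger hit, or nullptr on a miss (the
// generated code calls Runtime::kGetFromCacheRT in that case).
static const void* ProbeCache(const ResultCache& c, const void* key) {
  return c.entries[c.finger] == key ? c.entries[c.finger + 1] : nullptr;
}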
-
-
-void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset));
- __ tst(r0, Operand(String::kContainsCachedArrayIndexMask));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
- VisitForAccumulatorValue(args->at(0));
-
- __ AssertString(r0);
-
- __ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset));
- __ IndexFromHash(r0, r0);
-
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
- Label bailout, done, one_char_separator, long_separator, non_trivial_array,
- not_size_one_array, loop, empty_separator_loop, one_char_separator_loop,
- one_char_separator_loop_entry, long_separator_loop;
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- VisitForStackValue(args->at(1));
- VisitForAccumulatorValue(args->at(0));
-
- // All aliases of the same register have disjoint lifetimes.
- Register array = r0;
- Register elements = no_reg; // Will be r0.
- Register result = no_reg; // Will be r0.
- Register separator = r1;
- Register array_length = r2;
- Register result_pos = no_reg; // Will be r2
- Register string_length = r3;
- Register string = r4;
- Register element = r5;
- Register elements_end = r6;
- Register scratch = r9;
-
- // Separator operand is on the stack.
- __ pop(separator);
-
- // Check that the array is a JSArray.
- __ JumpIfSmi(array, &bailout);
- __ CompareObjectType(array, scratch, array_length, JS_ARRAY_TYPE);
- __ b(ne, &bailout);
-
- // Check that the array has fast elements.
- __ CheckFastElements(scratch, array_length, &bailout);
-
- // If the array has length zero, return the empty string.
- __ ldr(array_length, FieldMemOperand(array, JSArray::kLengthOffset));
- __ SmiUntag(array_length, SetCC);
- __ b(ne, &non_trivial_array);
- __ LoadRoot(r0, Heap::kempty_stringRootIndex);
- __ b(&done);
-
- __ bind(&non_trivial_array);
-
- // Get the FixedArray containing array's elements.
- elements = array;
- __ ldr(elements, FieldMemOperand(array, JSArray::kElementsOffset));
- array = no_reg; // End of array's live range.
-
- // Check that all array elements are sequential one-byte strings, and
- // accumulate the sum of their lengths, as a smi-encoded value.
- __ mov(string_length, Operand::Zero());
- __ add(element,
- elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
- // Loop condition: while (element < elements_end).
- // Live values in registers:
- // elements: Fixed array of strings.
- // array_length: Length of the fixed array of strings (not smi)
- // separator: Separator string
- // string_length: Accumulated sum of string lengths (smi).
- // element: Current array element.
- // elements_end: Array end.
- if (generate_debug_code_) {
- __ cmp(array_length, Operand::Zero());
- __ Assert(gt, kNoEmptyArraysHereInEmitFastOneByteArrayJoin);
- }
- __ bind(&loop);
- __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
- __ JumpIfSmi(string, &bailout);
- __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch, scratch, &bailout);
- __ ldr(scratch, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
- __ add(string_length, string_length, Operand(scratch), SetCC);
- __ b(vs, &bailout);
- __ cmp(element, elements_end);
- __ b(lt, &loop);
-
- // If array_length is 1, return elements[0], a string.
- __ cmp(array_length, Operand(1));
- __ b(ne, &not_size_one_array);
- __ ldr(r0, FieldMemOperand(elements, FixedArray::kHeaderSize));
- __ b(&done);
-
- __ bind(&not_size_one_array);
-
- // Live values in registers:
- // separator: Separator string
- // array_length: Length of the array.
- // string_length: Sum of string lengths (smi).
- // elements: FixedArray of strings.
-
- // Check that the separator is a flat one-byte string.
- __ JumpIfSmi(separator, &bailout);
- __ ldr(scratch, FieldMemOperand(separator, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch, scratch, &bailout);
-
-  // Add (separator length times array_length) - separator length to
-  // string_length to get the length of the result string. array_length is
-  // not a smi but the other values are, so the result is a smi.
- __ ldr(scratch, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
- __ sub(string_length, string_length, Operand(scratch));
- __ smull(scratch, ip, array_length, scratch);
- // Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
- // zero.
- __ cmp(ip, Operand::Zero());
- __ b(ne, &bailout);
- __ tst(scratch, Operand(0x80000000));
- __ b(ne, &bailout);
- __ add(string_length, string_length, Operand(scratch), SetCC);
- __ b(vs, &bailout);
- __ SmiUntag(string_length);
-
- // Get first element in the array to free up the elements register to be used
- // for the result.
- __ add(element,
- elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- result = elements; // End of live range for elements.
- elements = no_reg;
- // Live values in registers:
- // element: First array element
- // separator: Separator string
- // string_length: Length of result string (not smi)
- // array_length: Length of the array.
- __ AllocateOneByteString(result, string_length, scratch,
- string, // used as scratch
- elements_end, // used as scratch
- &bailout);
- // Prepare for looping. Set up elements_end to end of the array. Set
- // result_pos to the position of the result where to write the first
- // character.
- __ add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
- result_pos = array_length; // End of live range for array_length.
- array_length = no_reg;
- __ add(result_pos,
- result,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-
- // Check the length of the separator.
- __ ldr(scratch, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
- __ cmp(scratch, Operand(Smi::FromInt(1)));
- __ b(eq, &one_char_separator);
- __ b(gt, &long_separator);
-
- // Empty separator case
- __ bind(&empty_separator_loop);
- // Live values in registers:
- // result_pos: the position to which we are currently copying characters.
- // element: Current array element.
- // elements_end: Array end.
-
- // Copy next array element to the result.
- __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
- __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
- __ SmiUntag(string_length);
- __ add(string,
- string,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch);
- __ cmp(element, elements_end);
- __ b(lt, &empty_separator_loop); // End while (element < elements_end).
- DCHECK(result.is(r0));
- __ b(&done);
-
- // One-character separator case
- __ bind(&one_char_separator);
- // Replace separator with its one-byte character value.
- __ ldrb(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
-  // Jump into the loop after the code that copies the separator, so the
-  // first element is not preceded by a separator.
- __ jmp(&one_char_separator_loop_entry);
-
- __ bind(&one_char_separator_loop);
- // Live values in registers:
- // result_pos: the position to which we are currently copying characters.
- // element: Current array element.
- // elements_end: Array end.
- // separator: Single separator one-byte char (in lower byte).
-
- // Copy the separator character to the result.
- __ strb(separator, MemOperand(result_pos, 1, PostIndex));
-
- // Copy next array element to the result.
- __ bind(&one_char_separator_loop_entry);
- __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
- __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
- __ SmiUntag(string_length);
- __ add(string,
- string,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch);
- __ cmp(element, elements_end);
- __ b(lt, &one_char_separator_loop); // End while (element < elements_end).
- DCHECK(result.is(r0));
- __ b(&done);
-
- // Long separator case (separator is more than one character). Entry is at the
- // label long_separator below.
- __ bind(&long_separator_loop);
- // Live values in registers:
- // result_pos: the position to which we are currently copying characters.
- // element: Current array element.
- // elements_end: Array end.
- // separator: Separator string.
-
- // Copy the separator to the result.
- __ ldr(string_length, FieldMemOperand(separator, String::kLengthOffset));
- __ SmiUntag(string_length);
- __ add(string,
- separator,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch);
-
- __ bind(&long_separator);
- __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
- __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
- __ SmiUntag(string_length);
- __ add(string,
- string,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch);
- __ cmp(element, elements_end);
- __ b(lt, &long_separator_loop); // End while (element < elements_end).
- DCHECK(result.is(r0));
- __ b(&done);
-
- __ bind(&bailout);
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- __ bind(&done);
- context()->Plug(r0);
-}
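
Structurally the fast path is two passes: the first validates every element as a flat one-byte string while summing lengths (anything unexpected bails out to undefined so the caller takes the generic path), the second does one allocation and a copy loop specialised on separator length. The same shape in host C++:

#include <cstddef>
#include <string>
#include <vector>

static std::string JoinOneByte(const std::vector<std::string>& parts,
                               const std::string& sep) {
  if (parts.empty()) return std::string();
  std::size_t total = sep.size() * (parts.size() - 1);
  for (const std::string& p : parts) total += p.size();  // length pass
  std::string result;
  result.reserve(total);  // one allocation, like AllocateOneByteString
  for (std::size_t i = 0; i < parts.size(); ++i) {
    if (i != 0) result += sep;  // no separator before the first element
    result += parts[i];
  }
  return result;
}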
-
-
-void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
- DCHECK(expr->arguments()->length() == 0);
- ExternalReference debug_is_active =
- ExternalReference::debug_is_active_address(isolate());
- __ mov(ip, Operand(debug_is_active));
- __ ldrb(r0, MemOperand(ip));
- __ SmiTag(r0);
- context()->Plug(r0);
-}
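SmiTag/SmiUntag above rely on V8's 32-bit small-integer encoding, where the low bit is the tag and smis keep it clear. A minimal sketch of the tag and untag operations, assuming 32-bit smis:

```cpp
#include <cstdint>

// Tagging is a left shift by one; untagging an arithmetic shift right.
constexpr int32_t SmiTag(int32_t value) { return value << 1; }
constexpr int32_t SmiUntag(int32_t smi) { return smi >> 1; }

static_assert(SmiTag(1) == 2, "the debug_is_active byte 1 tags to smi 2");
static_assert(SmiUntag(SmiTag(21)) == 21, "tagging round-trips");
```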
-
-
-void FullCodeGenerator::EmitCallSuperWithSpread(CallRuntime* expr) {
- // Assert: expr === CallRuntime("ReflectConstruct")
- DCHECK_EQ(1, expr->arguments()->length());
- CallRuntime* call = expr->arguments()->at(0)->AsCallRuntime();
-
- ZoneList<Expression*>* args = call->arguments();
- DCHECK_EQ(3, args->length());
-
- SuperCallReference* super_call_ref = args->at(0)->AsSuperCallReference();
- DCHECK_NOT_NULL(super_call_ref);
-
- // Load ReflectConstruct function
- EmitLoadJSRuntimeFunction(call);
-
- // Push the target function under the receiver.
- __ ldr(ip, MemOperand(sp, 0));
- __ push(ip);
- __ str(r0, MemOperand(sp, kPointerSize));
-
- // Push super constructor
- EmitLoadSuperConstructor(super_call_ref);
- __ Push(result_register());
-
- // Push arguments array
- VisitForStackValue(args->at(1));
-
- // Push NewTarget
- DCHECK(args->at(2)->IsVariableProxy());
- VisitForStackValue(args->at(2));
-
- EmitCallJSRuntimeFunction(call);
-
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, r0);
-
- // TODO(mvstanton): with FLAG_vector_stores this needs a slot id.
- EmitInitializeThisAfterSuper(super_call_ref);
-}
-
-
-void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
- // Push the builtins object as the receiver.
- Register receiver = LoadDescriptor::ReceiverRegister();
- __ ldr(receiver, GlobalObjectOperand());
- __ ldr(receiver, FieldMemOperand(receiver, GlobalObject::kBuiltinsOffset));
- __ push(receiver);
-
- // Load the function from the receiver.
- __ mov(LoadDescriptor::NameRegister(), Operand(expr->name()));
- __ mov(LoadDescriptor::SlotRegister(),
- Operand(SmiFromSlot(expr->CallRuntimeFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL);
-}
-
-
-void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
-
- SetExpressionPosition(expr);
- CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
- __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
-}
-
-
-void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
-
- if (expr->is_jsruntime()) {
- Comment cmnt(masm_, "[ CallRuntime");
- EmitLoadJSRuntimeFunction(expr);
-
- // Push the target function under the receiver.
- __ ldr(ip, MemOperand(sp, 0));
- __ push(ip);
- __ str(r0, MemOperand(sp, kPointerSize));
-
- // Push the arguments ("left-to-right").
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
- EmitCallJSRuntimeFunction(expr);
-
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- context()->DropAndPlug(1, r0);
-
- } else {
- const Runtime::Function* function = expr->function();
- switch (function->function_id) {
-#define CALL_INTRINSIC_GENERATOR(Name) \
- case Runtime::kInline##Name: { \
- Comment cmnt(masm_, "[ Inline" #Name); \
- return Emit##Name(expr); \
- }
- FOR_EACH_FULL_CODE_INTRINSIC(CALL_INTRINSIC_GENERATOR)
-#undef CALL_INTRINSIC_GENERATOR
- default: {
- Comment cmnt(masm_, "[ CallRuntime for unhandled intrinsic");
- // Push the arguments ("left-to-right").
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Call the C runtime function.
- PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
- __ CallRuntime(expr->function(), arg_count);
- context()->Plug(r0);
- }
- }
- }
-}
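The switch above is generated by an X-macro: FOR_EACH_FULL_CODE_INTRINSIC expands one case per intrinsic, and anything not in the list falls through to the C runtime call. A hedged stand-alone model of that dispatch with a made-up intrinsic list:

```cpp
#include <cstdio>

// Hypothetical two-entry intrinsic list; the real list is much longer.
#define FOR_EACH_DEMO_INTRINSIC(V) \
  V(IsSmi)                         \
  V(DebugIsActive)

enum class IntrinsicId { kIsSmi, kDebugIsActive, kUnknown };

void EmitIsSmi() { std::puts("emit inline IsSmi"); }
void EmitDebugIsActive() { std::puts("emit inline DebugIsActive"); }

void Dispatch(IntrinsicId id) {
  switch (id) {
#define CASE(Name)           \
  case IntrinsicId::k##Name: \
    return Emit##Name();
    FOR_EACH_DEMO_INTRINSIC(CASE)
#undef CASE
    default:
      std::puts("push args and call the C runtime");  // unhandled intrinsics
  }
}
```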
-
-
-void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
- switch (expr->op()) {
- case Token::DELETE: {
- Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
- Property* property = expr->expression()->AsProperty();
- VariableProxy* proxy = expr->expression()->AsVariableProxy();
-
- if (property != NULL) {
- VisitForStackValue(property->obj());
- VisitForStackValue(property->key());
- __ mov(r1, Operand(Smi::FromInt(language_mode())));
- __ push(r1);
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
- context()->Plug(r0);
- } else if (proxy != NULL) {
- Variable* var = proxy->var();
- // Delete of an unqualified identifier is disallowed in strict mode but
- // "delete this" is allowed.
- bool is_this = var->HasThisName(isolate());
- DCHECK(is_sloppy(language_mode()) || is_this);
- if (var->IsUnallocatedOrGlobalSlot()) {
- __ ldr(r2, GlobalObjectOperand());
- __ mov(r1, Operand(var->name()));
- __ mov(r0, Operand(Smi::FromInt(SLOPPY)));
- __ Push(r2, r1, r0);
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
- context()->Plug(r0);
- } else if (var->IsStackAllocated() || var->IsContextSlot()) {
- // Result of deleting non-global, non-dynamic variables is false.
- // The subexpression does not have side effects.
- context()->Plug(is_this);
- } else {
- // Non-global variable. Call the runtime to try to delete from the
- // context where the variable was introduced.
- DCHECK(!context_register().is(r2));
- __ mov(r2, Operand(var->name()));
- __ Push(context_register(), r2);
- __ CallRuntime(Runtime::kDeleteLookupSlot, 2);
- context()->Plug(r0);
- }
- } else {
- // Result of deleting non-property, non-variable reference is true.
- // The subexpression may have side effects.
- VisitForEffect(expr->expression());
- context()->Plug(true);
- }
- break;
- }
-
- case Token::VOID: {
- Comment cmnt(masm_, "[ UnaryOperation (VOID)");
- VisitForEffect(expr->expression());
- context()->Plug(Heap::kUndefinedValueRootIndex);
- break;
- }
-
- case Token::NOT: {
- Comment cmnt(masm_, "[ UnaryOperation (NOT)");
- if (context()->IsEffect()) {
- // Unary NOT has no side effects so it's only necessary to visit the
- // subexpression. Match the optimizing compiler by not branching.
- VisitForEffect(expr->expression());
- } else if (context()->IsTest()) {
- const TestContext* test = TestContext::cast(context());
- // The labels are swapped for the recursive call.
- VisitForControl(expr->expression(),
- test->false_label(),
- test->true_label(),
- test->fall_through());
- context()->Plug(test->true_label(), test->false_label());
- } else {
- // We handle value contexts explicitly rather than simply visiting
- // for control and plugging the control flow into the context,
- // because we need to prepare a pair of extra administrative AST ids
- // for the optimizing compiler.
- DCHECK(context()->IsAccumulatorValue() || context()->IsStackValue());
- Label materialize_true, materialize_false, done;
- VisitForControl(expr->expression(),
- &materialize_false,
- &materialize_true,
- &materialize_true);
- __ bind(&materialize_true);
- PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
- __ LoadRoot(r0, Heap::kTrueValueRootIndex);
- if (context()->IsStackValue()) __ push(r0);
- __ jmp(&done);
- __ bind(&materialize_false);
- PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
- __ LoadRoot(r0, Heap::kFalseValueRootIndex);
- if (context()->IsStackValue()) __ push(r0);
- __ bind(&done);
- }
- break;
- }
-
- case Token::TYPEOF: {
- Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
- {
- AccumulatorValueContext context(this);
- VisitForTypeofValue(expr->expression());
- }
- __ mov(r3, r0);
- TypeofStub typeof_stub(isolate());
- __ CallStub(&typeof_stub);
- context()->Plug(r0);
- break;
- }
-
- default:
- UNREACHABLE();
- }
-}
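Compiling unary NOT in a test context needs no code of its own: the recursive visit simply swaps the true and false targets. A small illustrative sketch of that label swap:

```cpp
#include <cstdio>

struct Label { const char* name; };

// Stand-in for the recursive visit: here we "branch" by printing.
void VisitForControl(bool value, Label* if_true, Label* if_false) {
  std::printf("jump to %s\n", value ? if_true->name : if_false->name);
}

// Compiling !e for control flow exchanges the targets; no code is emitted.
void VisitNotForControl(bool value, Label* if_true, Label* if_false) {
  VisitForControl(value, if_false, if_true);
}
```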
-
-
-void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
- DCHECK(expr->expression()->IsValidReferenceExpression());
-
- Comment cmnt(masm_, "[ CountOperation");
-
- Property* prop = expr->expression()->AsProperty();
- LhsKind assign_type = Property::GetAssignType(prop);
-
- // Evaluate expression and get value.
- if (assign_type == VARIABLE) {
- DCHECK(expr->expression()->AsVariableProxy()->var() != NULL);
- AccumulatorValueContext context(this);
- EmitVariableLoad(expr->expression()->AsVariableProxy());
- } else {
- // Reserve space for result of postfix operation.
- if (expr->is_postfix() && !context()->IsEffect()) {
- __ mov(ip, Operand(Smi::FromInt(0)));
- __ push(ip);
- }
- switch (assign_type) {
- case NAMED_PROPERTY: {
- // Put the object both on the stack and in the register.
- VisitForStackValue(prop->obj());
- __ ldr(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
- EmitNamedPropertyLoad(prop);
- break;
- }
-
- case NAMED_SUPER_PROPERTY: {
- VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForAccumulatorValue(
- prop->obj()->AsSuperPropertyReference()->home_object());
- __ Push(result_register());
- const Register scratch = r1;
- __ ldr(scratch, MemOperand(sp, kPointerSize));
- __ Push(scratch);
- __ Push(result_register());
- EmitNamedSuperPropertyLoad(prop);
- break;
- }
-
- case KEYED_SUPER_PROPERTY: {
- VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- prop->obj()->AsSuperPropertyReference()->home_object());
- VisitForAccumulatorValue(prop->key());
- __ Push(result_register());
- const Register scratch = r1;
- __ ldr(scratch, MemOperand(sp, 2 * kPointerSize));
- __ Push(scratch);
- __ ldr(scratch, MemOperand(sp, 2 * kPointerSize));
- __ Push(scratch);
- __ Push(result_register());
- EmitKeyedSuperPropertyLoad(prop);
- break;
- }
-
- case KEYED_PROPERTY: {
- VisitForStackValue(prop->obj());
- VisitForStackValue(prop->key());
- __ ldr(LoadDescriptor::ReceiverRegister(),
- MemOperand(sp, 1 * kPointerSize));
- __ ldr(LoadDescriptor::NameRegister(), MemOperand(sp, 0));
- EmitKeyedPropertyLoad(prop);
- break;
- }
-
- case VARIABLE:
- UNREACHABLE();
- }
- }
-
- // We need a second deoptimization point after loading the value
- // in case evaluating the property load may have a side effect.
- if (assign_type == VARIABLE) {
- PrepareForBailout(expr->expression(), TOS_REG);
- } else {
- PrepareForBailoutForId(prop->LoadId(), TOS_REG);
- }
-
- // Inline smi case if we are in a loop.
- Label stub_call, done;
- JumpPatchSite patch_site(masm_);
-
- int count_value = expr->op() == Token::INC ? 1 : -1;
- if (ShouldInlineSmiCase(expr->op())) {
- Label slow;
- patch_site.EmitJumpIfNotSmi(r0, &slow);
-
- // Save result for postfix expressions.
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- // Save the result on the stack. If we have a named or keyed property
- // we store the result under the receiver that is currently on top
- // of the stack.
- switch (assign_type) {
- case VARIABLE:
- __ push(r0);
- break;
- case NAMED_PROPERTY:
- __ str(r0, MemOperand(sp, kPointerSize));
- break;
- case NAMED_SUPER_PROPERTY:
- __ str(r0, MemOperand(sp, 2 * kPointerSize));
- break;
- case KEYED_PROPERTY:
- __ str(r0, MemOperand(sp, 2 * kPointerSize));
- break;
- case KEYED_SUPER_PROPERTY:
- __ str(r0, MemOperand(sp, 3 * kPointerSize));
- break;
- }
- }
- }
-
- __ add(r0, r0, Operand(Smi::FromInt(count_value)), SetCC);
- __ b(vc, &done);
- // Call stub. Undo operation first.
- __ sub(r0, r0, Operand(Smi::FromInt(count_value)));
- __ jmp(&stub_call);
- __ bind(&slow);
- }
- if (!is_strong(language_mode())) {
- ToNumberStub convert_stub(isolate());
- __ CallStub(&convert_stub);
- PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
- }
-
- // Save result for postfix expressions.
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- // Save the result on the stack. If we have a named or keyed property
- // we store the result under the receiver that is currently on top
- // of the stack.
- switch (assign_type) {
- case VARIABLE:
- __ push(r0);
- break;
- case NAMED_PROPERTY:
- __ str(r0, MemOperand(sp, kPointerSize));
- break;
- case NAMED_SUPER_PROPERTY:
- __ str(r0, MemOperand(sp, 2 * kPointerSize));
- break;
- case KEYED_PROPERTY:
- __ str(r0, MemOperand(sp, 2 * kPointerSize));
- break;
- case KEYED_SUPER_PROPERTY:
- __ str(r0, MemOperand(sp, 3 * kPointerSize));
- break;
- }
- }
- }
-
-
- __ bind(&stub_call);
- __ mov(r1, r0);
- __ mov(r0, Operand(Smi::FromInt(count_value)));
-
- SetExpressionPosition(expr);
-
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), Token::ADD,
- strength(language_mode())).code();
- CallIC(code, expr->CountBinOpFeedbackId());
- patch_site.EmitPatchInfo();
- __ bind(&done);
-
- if (is_strong(language_mode())) {
- PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
- }
- // Store the value returned in r0.
- switch (assign_type) {
- case VARIABLE:
- if (expr->is_postfix()) {
- { EffectContext context(this);
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN, expr->CountSlot());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context.Plug(r0);
- }
- // For all contexts except EffectContext we have the result on
- // top of the stack.
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN, expr->CountSlot());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(r0);
- }
- break;
- case NAMED_PROPERTY: {
- __ mov(StoreDescriptor::NameRegister(),
- Operand(prop->key()->AsLiteral()->value()));
- __ pop(StoreDescriptor::ReceiverRegister());
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->CountSlot());
- CallStoreIC();
- } else {
- CallStoreIC(expr->CountStoreFeedbackId());
- }
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(r0);
- }
- break;
- }
- case NAMED_SUPER_PROPERTY: {
- EmitNamedSuperPropertyStore(prop);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(r0);
- }
- break;
- }
- case KEYED_SUPER_PROPERTY: {
- EmitKeyedSuperPropertyStore(prop);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(r0);
- }
- break;
- }
- case KEYED_PROPERTY: {
- __ Pop(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister());
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->CountSlot());
- CallIC(ic);
- } else {
- CallIC(ic, expr->CountStoreFeedbackId());
- }
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(r0);
- }
- break;
- }
- }
-}
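The inline smi case above adds the tagged count with SetCC and branches on the overflow flag; on overflow it undoes the add and calls the generic BinaryOpIC. A sketch of that fast/slow split using the GCC/Clang __builtin_add_overflow intrinsic, with SlowAdd as a hypothetical stand-in for the stub call:

```cpp
#include <cstdint>

// Stand-in for the BinaryOpIC stub: in V8 this path would allocate a
// HeapNumber; here we just widen so the overflow case stays representable.
int64_t SlowAdd(int32_t tagged, int32_t tagged_count) {
  return static_cast<int64_t>(tagged) + tagged_count;
}

int64_t CountOperation(int32_t tagged_smi, int count_value /* +1 or -1 */) {
  int32_t tagged_count = count_value * 2;  // Smi::FromInt(count_value)
  int32_t result;
  // add r0, r0, Operand(Smi::FromInt(count_value)), SetCC;  b(vc, &done)
  if (!__builtin_add_overflow(tagged_smi, tagged_count, &result)) {
    return result;  // fast path: still a valid smi
  }
  // Overflow: the assembly undoes the add and falls through to stub_call.
  return SlowAdd(tagged_smi, tagged_count);
}
```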
-
-
-void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
- Expression* sub_expr,
- Handle<String> check) {
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- { AccumulatorValueContext context(this);
- VisitForTypeofValue(sub_expr);
- }
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-
- Factory* factory = isolate()->factory();
- if (String::Equals(check, factory->number_string())) {
- __ JumpIfSmi(r0, if_true);
- __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(r0, ip);
- Split(eq, if_true, if_false, fall_through);
- } else if (String::Equals(check, factory->string_string())) {
- __ JumpIfSmi(r0, if_false);
- // Check for undetectable objects => false.
- __ CompareObjectType(r0, r0, r1, FIRST_NONSTRING_TYPE);
- __ b(ge, if_false);
- __ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset));
- __ tst(r1, Operand(1 << Map::kIsUndetectable));
- Split(eq, if_true, if_false, fall_through);
- } else if (String::Equals(check, factory->symbol_string())) {
- __ JumpIfSmi(r0, if_false);
- __ CompareObjectType(r0, r0, r1, SYMBOL_TYPE);
- Split(eq, if_true, if_false, fall_through);
- } else if (String::Equals(check, factory->boolean_string())) {
- __ CompareRoot(r0, Heap::kTrueValueRootIndex);
- __ b(eq, if_true);
- __ CompareRoot(r0, Heap::kFalseValueRootIndex);
- Split(eq, if_true, if_false, fall_through);
- } else if (String::Equals(check, factory->undefined_string())) {
- __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
- __ b(eq, if_true);
- __ JumpIfSmi(r0, if_false);
- // Check for undetectable objects => true.
- __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset));
- __ tst(r1, Operand(1 << Map::kIsUndetectable));
- Split(ne, if_true, if_false, fall_through);
-
- } else if (String::Equals(check, factory->function_string())) {
- __ JumpIfSmi(r0, if_false);
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ CompareObjectType(r0, r0, r1, JS_FUNCTION_TYPE);
- __ b(eq, if_true);
- __ cmp(r1, Operand(JS_FUNCTION_PROXY_TYPE));
- Split(eq, if_true, if_false, fall_through);
- } else if (String::Equals(check, factory->object_string())) {
- __ JumpIfSmi(r0, if_false);
- __ CompareRoot(r0, Heap::kNullValueRootIndex);
- __ b(eq, if_true);
- // Check for JS objects => true.
- __ CompareObjectType(r0, r0, r1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ b(lt, if_false);
- __ CompareInstanceType(r0, r1, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ b(gt, if_false);
- // Check for undetectable objects => false.
- __ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset));
- __ tst(r1, Operand(1 << Map::kIsUndetectable));
- Split(eq, if_true, if_false, fall_through);
- } else {
- if (if_false != fall_through) __ jmp(if_false);
- }
- context()->Plug(if_true, if_false);
-}
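EmitLiteralCompareTypeof resolves the typeof literal once at compile time and emits only the matching type check; an unrecognized string compiles to an unconditional jump to the false label. A sketch of that selection step (names illustrative):

```cpp
#include <string>

enum class TypeofCheck {
  kNumber, kString, kSymbol, kBoolean, kUndefined, kFunction, kObject,
  kAlwaysFalse
};

// One comparison per literal at compile time; only the winner's check
// is emitted into the generated code.
TypeofCheck SelectTypeofCheck(const std::string& check) {
  if (check == "number") return TypeofCheck::kNumber;
  if (check == "string") return TypeofCheck::kString;
  if (check == "symbol") return TypeofCheck::kSymbol;
  if (check == "boolean") return TypeofCheck::kBoolean;
  if (check == "undefined") return TypeofCheck::kUndefined;
  if (check == "function") return TypeofCheck::kFunction;
  if (check == "object") return TypeofCheck::kObject;
  return TypeofCheck::kAlwaysFalse;  // typeof x == "noSuchType" never matches
}
```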
-
-
-void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
- Comment cmnt(masm_, "[ CompareOperation");
- SetExpressionPosition(expr);
-
- // First we try a fast inlined version of the compare when one of
- // the operands is a literal.
- if (TryLiteralCompare(expr)) return;
-
- // Always perform the comparison for its control flow. Pack the result
- // into the expression's context after the comparison is performed.
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- Token::Value op = expr->op();
- VisitForStackValue(expr->left());
- switch (op) {
- case Token::IN:
- VisitForStackValue(expr->right());
- __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
- PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r0, ip);
- Split(eq, if_true, if_false, fall_through);
- break;
-
- case Token::INSTANCEOF: {
- VisitForStackValue(expr->right());
- InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
- __ CallStub(&stub);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- // The stub returns 0 for true.
- __ tst(r0, r0);
- Split(eq, if_true, if_false, fall_through);
- break;
- }
-
- default: {
- VisitForAccumulatorValue(expr->right());
- Condition cond = CompareIC::ComputeCondition(op);
- __ pop(r1);
-
- bool inline_smi_code = ShouldInlineSmiCase(op);
- JumpPatchSite patch_site(masm_);
- if (inline_smi_code) {
- Label slow_case;
- __ orr(r2, r0, Operand(r1));
- patch_site.EmitJumpIfNotSmi(r2, &slow_case);
- __ cmp(r1, r0);
- Split(cond, if_true, if_false, NULL);
- __ bind(&slow_case);
- }
-
- Handle<Code> ic = CodeFactory::CompareIC(
- isolate(), op, strength(language_mode())).code();
- CallIC(ic, expr->CompareOperationFeedbackId());
- patch_site.EmitPatchInfo();
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ cmp(r0, Operand::Zero());
- Split(cond, if_true, if_false, fall_through);
- }
- }
-
- // Convert the result of the comparison into one expected for this
- // expression's context.
- context()->Plug(if_true, if_false);
-}
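The inline smi case above tests both tags in one instruction: orr-ing the operands leaves the low bit clear only when both values are smis. The same check as a one-line C++ helper:

```cpp
#include <cstdint>

// orr r2, r0, Operand(r1); EmitJumpIfNotSmi(r2, ...): the OR of two values
// has its low bit clear only if both low bits are clear (kSmiTagMask == 1).
inline bool BothSmis(int32_t a, int32_t b) { return ((a | b) & 1) == 0; }
```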
-
-
-void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
- Expression* sub_expr,
- NilValue nil) {
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- VisitForAccumulatorValue(sub_expr);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- if (expr->op() == Token::EQ_STRICT) {
- Heap::RootListIndex nil_value = nil == kNullValue ?
- Heap::kNullValueRootIndex :
- Heap::kUndefinedValueRootIndex;
- __ LoadRoot(r1, nil_value);
- __ cmp(r0, r1);
- Split(eq, if_true, if_false, fall_through);
- } else {
- Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
- CallIC(ic, expr->CompareOperationFeedbackId());
- __ cmp(r0, Operand(0));
- Split(ne, if_true, if_false, fall_through);
- }
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
- __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- context()->Plug(r0);
-}
-
-
-Register FullCodeGenerator::result_register() {
- return r0;
-}
-
-
-Register FullCodeGenerator::context_register() {
- return cp;
-}
-
-
-void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
- DCHECK_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
- __ str(value, MemOperand(fp, frame_offset));
-}
-
-
-void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
- __ ldr(dst, ContextOperand(cp, context_index));
-}
-
-
-void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
- Scope* declaration_scope = scope()->DeclarationScope();
- if (declaration_scope->is_script_scope() ||
- declaration_scope->is_module_scope()) {
- // Contexts nested in the native context have a canonical empty function
- // as their closure, not the anonymous closure containing the global
- // code. Pass a smi sentinel and let the runtime look up the empty
- // function.
- __ mov(ip, Operand(Smi::FromInt(0)));
- } else if (declaration_scope->is_eval_scope()) {
- // Contexts created by a call to eval have the same closure as the
- // context calling eval, not the anonymous closure containing the eval
- // code. Fetch it from the context.
- __ ldr(ip, ContextOperand(cp, Context::CLOSURE_INDEX));
- } else {
- DCHECK(declaration_scope->is_function_scope());
- __ ldr(ip, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- }
- __ push(ip);
-}
-
-
-// ----------------------------------------------------------------------------
-// Non-local control flow support.
-
-void FullCodeGenerator::EnterFinallyBlock() {
- DCHECK(!result_register().is(r1));
- // Store result register while executing finally block.
- __ push(result_register());
- // Cook return address in link register to stack (smi encoded Code* delta)
- __ sub(r1, lr, Operand(masm_->CodeObject()));
- __ SmiTag(r1);
-
- // Store cooked return address while executing finally block.
- __ push(r1);
-
- // Store pending message while executing finally block.
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ mov(ip, Operand(pending_message_obj));
- __ ldr(r1, MemOperand(ip));
- __ push(r1);
-
- ClearPendingMessage();
-}
-
-
-void FullCodeGenerator::ExitFinallyBlock() {
- DCHECK(!result_register().is(r1));
- // Restore pending message from stack.
- __ pop(r1);
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ mov(ip, Operand(pending_message_obj));
- __ str(r1, MemOperand(ip));
-
- // Restore result register from stack.
- __ pop(r1);
-
- // Uncook return address and return.
- __ pop(result_register());
- __ SmiUntag(r1);
- __ add(pc, r1, Operand(masm_->CodeObject()));
-}
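EnterFinallyBlock "cooks" the return address by saving it as a smi-tagged delta from the code object, so the stack slot stays valid even if the GC moves the code; ExitFinallyBlock reverses the transformation. A sketch with plain integer addresses (illustrative only):

```cpp
#include <cstdint>

// sub r1, lr, CodeObject; SmiTag(r1): store a relocatable offset, not a pc.
inline int32_t CookReturnAddress(uintptr_t lr, uintptr_t code_object) {
  return static_cast<int32_t>(lr - code_object) << 1;
}

// SmiUntag(r1); add pc, r1, CodeObject: rebase against the (possibly moved)
// code object and resume.
inline uintptr_t UncookReturnAddress(int32_t cooked, uintptr_t code_object) {
  return code_object + (cooked >> 1);
}
```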
-
-
-void FullCodeGenerator::ClearPendingMessage() {
- DCHECK(!result_register().is(r1));
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
- __ mov(ip, Operand(pending_message_obj));
- __ str(r1, MemOperand(ip));
-}
-
-
-void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorICSlot slot) {
- DCHECK(FLAG_vector_stores && !slot.IsInvalid());
- __ mov(VectorStoreICTrampolineDescriptor::SlotRegister(),
- Operand(SmiFromSlot(slot)));
-}
-
-
-#undef __
-
-
-static Address GetInterruptImmediateLoadAddress(Address pc) {
- Address load_address = pc - 2 * Assembler::kInstrSize;
- if (!FLAG_enable_embedded_constant_pool) {
- DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(load_address)));
- } else if (Assembler::IsLdrPpRegOffset(Memory::int32_at(load_address))) {
- // This is an extended constant pool lookup.
- if (CpuFeatures::IsSupported(ARMv7)) {
- load_address -= 2 * Assembler::kInstrSize;
- DCHECK(Assembler::IsMovW(Memory::int32_at(load_address)));
- DCHECK(Assembler::IsMovT(
- Memory::int32_at(load_address + Assembler::kInstrSize)));
- } else {
- load_address -= 4 * Assembler::kInstrSize;
- DCHECK(Assembler::IsMovImmed(Memory::int32_at(load_address)));
- DCHECK(Assembler::IsOrrImmed(
- Memory::int32_at(load_address + Assembler::kInstrSize)));
- DCHECK(Assembler::IsOrrImmed(
- Memory::int32_at(load_address + 2 * Assembler::kInstrSize)));
- DCHECK(Assembler::IsOrrImmed(
- Memory::int32_at(load_address + 3 * Assembler::kInstrSize)));
- }
- } else if (CpuFeatures::IsSupported(ARMv7) &&
- Assembler::IsMovT(Memory::int32_at(load_address))) {
- // This is a movw / movt immediate load.
- load_address -= Assembler::kInstrSize;
- DCHECK(Assembler::IsMovW(Memory::int32_at(load_address)));
- } else if (!CpuFeatures::IsSupported(ARMv7) &&
- Assembler::IsOrrImmed(Memory::int32_at(load_address))) {
- // This is a mov / orr immediate load.
- load_address -= 3 * Assembler::kInstrSize;
- DCHECK(Assembler::IsMovImmed(Memory::int32_at(load_address)));
- DCHECK(Assembler::IsOrrImmed(
- Memory::int32_at(load_address + Assembler::kInstrSize)));
- DCHECK(Assembler::IsOrrImmed(
- Memory::int32_at(load_address + 2 * Assembler::kInstrSize)));
- } else {
- // This is a small constant pool lookup.
- DCHECK(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(load_address)));
- }
- return load_address;
-}
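On ARMv7 the extended and immediate-load cases above walk back over a movw/movt pair, which materializes a 32-bit constant in two halves; per the ARM encoding, each instruction carries imm16 as instr[19:16]:instr[11:0]. A small illustrative decoder under that assumption:

```cpp
#include <cstdint>

// imm16[15:12] lives in instruction bits 19:16, imm16[11:0] in bits 11:0.
inline uint16_t DecodeMovImm16(uint32_t instr) {
  return static_cast<uint16_t>(((instr >> 4) & 0xF000) | (instr & 0x0FFF));
}

// movw loads the low half, movt the high half of the 32-bit constant.
inline uint32_t DecodeMovwMovtPair(uint32_t movw, uint32_t movt) {
  return DecodeMovImm16(movw) |
         (static_cast<uint32_t>(DecodeMovImm16(movt)) << 16);
}
```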
-
-
-void BackEdgeTable::PatchAt(Code* unoptimized_code,
- Address pc,
- BackEdgeState target_state,
- Code* replacement_code) {
- Address pc_immediate_load_address = GetInterruptImmediateLoadAddress(pc);
- Address branch_address = pc_immediate_load_address - Assembler::kInstrSize;
- CodePatcher patcher(branch_address, 1);
- switch (target_state) {
- case INTERRUPT:
- {
- // <decrement profiling counter>
- // bpl ok
- // ; load interrupt stub address into ip - either of (for ARMv7):
- // ; <small cp load> | <extended cp load> | <immediate load>
- // ldr ip, [pc/pp, #imm] | movw ip, #imm | movw ip, #imm
- // | movt ip, #imm | movw ip, #imm
- // | ldr ip, [pp, ip]
- // ; or (for ARMv6):
- // ; <small cp load> | <extended cp load> | <immediate load>
- // ldr ip, [pc/pp, #imm] | mov ip, #imm | mov ip, #imm
- // | orr ip, ip, #imm> | orr ip, ip, #imm
- // | orr ip, ip, #imm> | orr ip, ip, #imm
- // | orr ip, ip, #imm> | orr ip, ip, #imm
- // blx ip
- // <reset profiling counter>
- // ok-label
-
- // Calculate branch offset to the ok-label - this is the difference
- // between the branch address and |pc| (which points at <blx ip>) plus
- // kProfileCounterResetSequenceLength bytes.
- int branch_offset = pc - Instruction::kPCReadOffset - branch_address +
- kProfileCounterResetSequenceLength;
- patcher.masm()->b(branch_offset, pl);
- break;
- }
- case ON_STACK_REPLACEMENT:
- case OSR_AFTER_STACK_CHECK:
- // <decrement profiling counter>
- // mov r0, r0 (NOP)
- // ; load on-stack replacement address into ip - either of (for ARMv7):
- // ; <small cp load> | <extended cp load> | <immediate load>
- // ldr ip, [pc/pp, #imm] | movw ip, #imm | movw ip, #imm
- // | movt ip, #imm> | movw ip, #imm
- // | ldr ip, [pp, ip]
- // ; or (for ARMv6):
- // ; <small cp load> | <extended cp load> | <immediate load>
- // ldr ip, [pc/pp, #imm] | mov ip, #imm | mov ip, #imm
- // | orr ip, ip, #imm> | orr ip, ip, #imm
- // | orr ip, ip, #imm> | orr ip, ip, #imm
- // | orr ip, ip, #imm> | orr ip, ip, #imm
- // blx ip
- // <reset profiling counter>
- // ok-label
- patcher.masm()->nop();
- break;
- }
-
- // Replace the call address.
- Assembler::set_target_address_at(pc_immediate_load_address, unoptimized_code,
- replacement_code->entry());
-
- unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, pc_immediate_load_address, replacement_code);
-}
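PatchAt toggles one instruction slot between a conditional branch over the call (interrupt check armed) and a nop (on-stack replacement armed), then rewrites the constant the load materializes. A hedged model of that two-field protocol, not the real patcher:

```cpp
#include <cstdint>

enum BackEdgeState { INTERRUPT, ON_STACK_REPLACEMENT };

// The two mutable pieces of a back-edge site in the model above.
struct BackEdgeSite {
  bool branch_is_live;    // bpl ok  versus  mov r0, r0 (NOP)
  uintptr_t call_target;  // interrupt stub or OSR builtin entry
};

void PatchSite(BackEdgeSite* site, BackEdgeState state, uintptr_t target) {
  site->branch_is_live = (state == INTERRUPT);
  site->call_target = target;  // Assembler::set_target_address_at(...)
}
```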
-
-
-BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
- Isolate* isolate,
- Code* unoptimized_code,
- Address pc) {
- DCHECK(Assembler::IsBlxIp(Memory::int32_at(pc - Assembler::kInstrSize)));
-
- Address pc_immediate_load_address = GetInterruptImmediateLoadAddress(pc);
- Address branch_address = pc_immediate_load_address - Assembler::kInstrSize;
- Address interrupt_address = Assembler::target_address_at(
- pc_immediate_load_address, unoptimized_code);
-
- if (Assembler::IsBranch(Assembler::instr_at(branch_address))) {
- DCHECK(interrupt_address ==
- isolate->builtins()->InterruptCheck()->entry());
- return INTERRUPT;
- }
-
- DCHECK(Assembler::IsNop(Assembler::instr_at(branch_address)));
-
- if (interrupt_address ==
- isolate->builtins()->OnStackReplacement()->entry()) {
- return ON_STACK_REPLACEMENT;
- }
-
- DCHECK(interrupt_address ==
- isolate->builtins()->OsrAfterStackCheck()->entry());
- return OSR_AFTER_STACK_CHECK;
-}
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/interface-descriptors-arm.cc b/deps/v8/src/arm/interface-descriptors-arm.cc
index 67f65f5cb3..f26b62ccaa 100644
--- a/deps/v8/src/arm/interface-descriptors-arm.cc
+++ b/deps/v8/src/arm/interface-descriptors-arm.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM
#include "src/interface-descriptors.h"
@@ -36,7 +34,11 @@ const Register VectorStoreICDescriptor::VectorRegister() { return r3; }
const Register StoreTransitionDescriptor::MapRegister() { return r3; }
-const Register ElementTransitionAndStoreDescriptor::MapRegister() { return r3; }
+const Register LoadGlobalViaContextDescriptor::SlotRegister() { return r2; }
+
+
+const Register StoreGlobalViaContextDescriptor::SlotRegister() { return r2; }
+const Register StoreGlobalViaContextDescriptor::ValueRegister() { return r0; }
const Register InstanceofDescriptor::left() { return r0; }
@@ -62,6 +64,14 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return r0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r3; }
+void StoreTransitionDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
+ MapRegister()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r2};
@@ -83,6 +93,10 @@ void ToNumberDescriptor::InitializePlatformSpecific(
}
+// static
+const Register ToObjectDescriptor::ReceiverRegister() { return r0; }
+
+
void NumberToStringDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r0};
@@ -158,11 +172,11 @@ void CallConstructDescriptor::InitializePlatformSpecific(
// r0 : number of arguments
// r1 : the function to call
// r2 : feedback vector
- // r3 : (only if r2 is not the megamorphic symbol) slot in feedback
- // vector (Smi)
+ // r3 : slot in feedback vector (Smi, for RecordCallTarget)
+ // r4 : original constructor (for IsSuperConstructorCall)
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
- Register registers[] = {r0, r1, r2};
+ Register registers[] = {r0, r1, r4, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -353,11 +367,22 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
}
-void MathRoundVariantDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
+void MathRoundVariantCallFromUnoptimizedCodeDescriptor::
+ InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ r1, // math rounding function
+ r3, // vector slot id
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void MathRoundVariantCallFromOptimizedCodeDescriptor::
+ InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
Register registers[] = {
r1, // math rounding function
r3, // vector slot id
+ r4, // type vector
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
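Each descriptor in the hunks above pins its parameters to fixed machine registers and hands the array to InitializePlatformSpecific. A stand-alone model of the pattern, with a hypothetical data class in place of V8's CallInterfaceDescriptorData, shown for the new optimized-code MathRoundVariant layout:

```cpp
#include <cstddef>
#include <vector>

struct Register { int code; };

// Hypothetical stand-in for CallInterfaceDescriptorData.
struct DescriptorData {
  std::vector<Register> registers;
  void InitializePlatformSpecific(size_t count, const Register* regs) {
    registers.assign(regs, regs + count);
  }
};

void InitializeMathRoundVariantOptimized(DescriptorData* data) {
  const Register registers[] = {
      {1},  // r1: math rounding function
      {3},  // r3: vector slot id
      {4},  // r4: type vector (optimized-code variant only)
  };
  data->InitializePlatformSpecific(sizeof(registers) / sizeof(registers[0]),
                                   registers);
}
```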
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc
index 55e501762c..1c04ba7ee7 100644
--- a/deps/v8/src/arm/lithium-arm.cc
+++ b/deps/v8/src/arm/lithium-arm.cc
@@ -4,8 +4,6 @@
#include <sstream>
-#include "src/v8.h"
-
#include "src/arm/lithium-codegen-arm.h"
#include "src/hydrogen-osr.h"
#include "src/lithium-inl.h"
@@ -330,6 +328,11 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
}
+void LLoadGlobalViaContext::PrintDataTo(StringStream* stream) {
+ stream->Add("depth:%d slot:%d", depth(), slot_index());
+}
+
+
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
std::ostringstream os;
@@ -348,6 +351,12 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
+void LStoreGlobalViaContext::PrintDataTo(StringStream* stream) {
+ stream->Add("depth:%d slot:%d <- ", depth(), slot_index());
+ value()->PrintTo(stream);
+}
+
+
void LLoadKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
@@ -1661,8 +1670,7 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
}
return result;
} else if (instr->representation().IsExternal()) {
- DCHECK(instr->left()->representation().IsExternal());
- DCHECK(instr->right()->representation().IsInteger32());
+ DCHECK(instr->IsConsistentExternalRepresentation());
DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
@@ -2150,6 +2158,15 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
}
+LInstruction* LChunkBuilder::DoLoadGlobalViaContext(
+ HLoadGlobalViaContext* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ DCHECK(instr->slot_index() > 0);
+ LLoadGlobalViaContext* result = new (zone()) LLoadGlobalViaContext(context);
+ return MarkAsCall(DefineFixed(result, r0), instr);
+}
+
+
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
@@ -2218,7 +2235,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LInstruction* result = NULL;
- if (!instr->is_typed_elements()) {
+ if (!instr->is_fixed_typed_array()) {
LOperand* obj = NULL;
if (instr->representation().IsDouble()) {
obj = UseRegister(instr->elements());
@@ -2238,10 +2255,9 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
}
bool needs_environment;
- if (instr->is_external() || instr->is_fixed_typed_array()) {
+ if (instr->is_fixed_typed_array()) {
// see LCodeGen::DoLoadKeyedExternalArray
- needs_environment = (elements_kind == EXTERNAL_UINT32_ELEMENTS ||
- elements_kind == UINT32_ELEMENTS) &&
+ needs_environment = elements_kind == UINT32_ELEMENTS &&
!instr->CheckFlag(HInstruction::kUint32);
} else {
// see LCodeGen::DoLoadKeyedFixedDoubleArray and
@@ -2276,7 +2292,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
- if (!instr->is_typed_elements()) {
+ if (!instr->is_fixed_typed_array()) {
DCHECK(instr->elements()->representation().IsTagged());
bool needs_write_barrier = instr->NeedsWriteBarrier();
LOperand* object = NULL;
@@ -2308,10 +2324,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
!IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
(instr->value()->representation().IsDouble() &&
IsDoubleOrFloatElementsKind(instr->elements_kind())));
- DCHECK((instr->is_fixed_typed_array() &&
- instr->elements()->representation().IsTagged()) ||
- (instr->is_external() &&
- instr->elements()->representation().IsExternal()));
+ DCHECK(instr->elements()->representation().IsExternal());
LOperand* val = UseRegister(instr->value());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LOperand* backing_store = UseRegister(instr->elements());
@@ -2437,6 +2450,19 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
}
+LInstruction* LChunkBuilder::DoStoreGlobalViaContext(
+ HStoreGlobalViaContext* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* value = UseFixed(instr->value(),
+ StoreGlobalViaContextDescriptor::ValueRegister());
+ DCHECK(instr->slot_index() > 0);
+
+ LStoreGlobalViaContext* result =
+ new (zone()) LStoreGlobalViaContext(context, value);
+ return MarkAsCall(result, instr);
+}
+
+
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseFixed(instr->left(), r1);
diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h
index d61c3d4c0d..eea9ece5ae 100644
--- a/deps/v8/src/arm/lithium-arm.h
+++ b/deps/v8/src/arm/lithium-arm.h
@@ -102,6 +102,7 @@ class LCodeGen;
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalGeneric) \
+ V(LoadGlobalViaContext) \
V(LoadKeyed) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
@@ -143,6 +144,7 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreFrameContext) \
+ V(StoreGlobalViaContext) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -1645,15 +1647,9 @@ class LLoadKeyed final : public LTemplateInstruction<1, 2, 0> {
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
- bool is_external() const {
- return hydrogen()->is_external();
- }
bool is_fixed_typed_array() const {
return hydrogen()->is_fixed_typed_array();
}
- bool is_typed_elements() const {
- return is_external() || is_fixed_typed_array();
- }
DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
@@ -1700,7 +1696,23 @@ class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
Handle<Object> name() const { return hydrogen()->name(); }
- bool for_typeof() const { return hydrogen()->for_typeof(); }
+ TypeofMode typeof_mode() const { return hydrogen()->typeof_mode(); }
+};
+
+
+class LLoadGlobalViaContext final : public LTemplateInstruction<1, 1, 1> {
+ public:
+ explicit LLoadGlobalViaContext(LOperand* context) { inputs_[0] = context; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalViaContext, "load-global-via-context")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalViaContext)
+
+ void PrintDataTo(StringStream* stream) override;
+
+ LOperand* context() { return inputs_[0]; }
+
+ int depth() const { return hydrogen()->depth(); }
+ int slot_index() const { return hydrogen()->slot_index(); }
};
@@ -2205,6 +2217,28 @@ class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
};
+class LStoreGlobalViaContext final : public LTemplateInstruction<0, 2, 0> {
+ public:
+ LStoreGlobalViaContext(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreGlobalViaContext,
+ "store-global-via-context")
+ DECLARE_HYDROGEN_ACCESSOR(StoreGlobalViaContext)
+
+ void PrintDataTo(StringStream* stream) override;
+
+ int depth() { return hydrogen()->depth(); }
+ int slot_index() { return hydrogen()->slot_index(); }
+ LanguageMode language_mode() { return hydrogen()->language_mode(); }
+};
+
+
class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
@@ -2213,13 +2247,9 @@ class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
inputs_[2] = value;
}
- bool is_external() const { return hydrogen()->is_external(); }
bool is_fixed_typed_array() const {
return hydrogen()->is_fixed_typed_array();
}
- bool is_typed_elements() const {
- return is_external() || is_fixed_typed_array();
- }
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
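LLoadGlobalViaContext and LStoreGlobalViaContext above instantiate LTemplateInstruction<R, I, T>, whose template arguments fix the result, input, and temp operand counts. A minimal sketch of that shape (zero counts padded to one slot purely to keep the sketch legal C++):

```cpp
struct LOperand {};

// R results, I inputs, T temps; real V8 handles zero counts differently.
template <int R, int I, int T>
struct LTemplateInstruction {
  LOperand* results_[R == 0 ? 1 : R];
  LOperand* inputs_[I == 0 ? 1 : I];
  LOperand* temps_[T == 0 ? 1 : T];
};

// <0, 2, 0>: no result, two fixed inputs (context, value), no temps.
struct StoreGlobalViaContextSketch : LTemplateInstruction<0, 2, 0> {
  LOperand* context() { return inputs_[0]; }
  LOperand* value() { return inputs_[1]; }
};
```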
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index 747730b3f5..606721f2da 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/arm/lithium-codegen-arm.h"
#include "src/arm/lithium-gap-resolver-arm.h"
#include "src/base/bits.h"
@@ -106,7 +104,7 @@ bool LCodeGen::GeneratePrologue() {
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
- info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+ info_->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
__ stop("stop_at");
}
#endif
@@ -427,6 +425,7 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
Handle<Object> literal = constant->handle(isolate());
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
+ AllowDeferredHandleDereference get_number;
DCHECK(literal->IsNumber());
__ mov(scratch, Operand(static_cast<int32_t>(literal->Number())));
} else if (r.IsDouble()) {
@@ -648,15 +647,23 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
}
if (op->IsStackSlot()) {
+ int index = op->index();
+ if (index >= 0) {
+ index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
+ }
if (is_tagged) {
- translation->StoreStackSlot(op->index());
+ translation->StoreStackSlot(index);
} else if (is_uint32) {
- translation->StoreUint32StackSlot(op->index());
+ translation->StoreUint32StackSlot(index);
} else {
- translation->StoreInt32StackSlot(op->index());
+ translation->StoreInt32StackSlot(index);
}
} else if (op->IsDoubleStackSlot()) {
- translation->StoreDoubleStackSlot(op->index());
+ int index = op->index();
+ if (index >= 0) {
+ index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
+ }
+ translation->StoreDoubleStackSlot(index);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
if (is_tagged) {
@@ -2267,6 +2274,12 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ b(eq, instr->TrueLabel(chunk_));
}
+ if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
+ // SIMD value -> true.
+ __ CompareInstanceType(map, ip, SIMD128_VALUE_TYPE);
+ __ b(eq, instr->TrueLabel(chunk_));
+ }
+
if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
// heap number -> false iff +0, -0, or NaN.
DwVfpRegister dbl_scratch = double_scratch0();
@@ -2969,13 +2982,31 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
__ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
- ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode, SLOPPY,
- PREMONOMORPHIC).code();
+ Handle<Code> ic =
+ CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
+ SLOPPY, PREMONOMORPHIC).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
+void LCodeGen::DoLoadGlobalViaContext(LLoadGlobalViaContext* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->result()).is(r0));
+
+ int const slot = instr->slot_index();
+ int const depth = instr->depth();
+ if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
+ __ mov(LoadGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
+ Handle<Code> stub =
+ CodeFactory::LoadGlobalViaContext(isolate(), depth).code();
+ CallCode(stub, RelocInfo::CODE_TARGET, instr);
+ } else {
+ __ Push(Smi::FromInt(slot));
+ __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
+ }
+}
+
+
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -3068,7 +3099,7 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
Handle<Code> ic =
CodeFactory::LoadICInOptimizedCode(
- isolate(), NOT_CONTEXTUAL, instr->hydrogen()->language_mode(),
+ isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@@ -3162,17 +3193,13 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
? (element_size_shift - kSmiTagSize) : element_size_shift;
int base_offset = instr->base_offset();
- if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
- elements_kind == FLOAT32_ELEMENTS ||
- elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
- elements_kind == FLOAT64_ELEMENTS) {
+ if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
DwVfpRegister result = ToDoubleRegister(instr->result());
Operand operand = key_is_constant
? Operand(constant_key << element_size_shift)
: Operand(key, LSL, shift_size);
__ add(scratch0(), external_pointer, operand);
- if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
- elements_kind == FLOAT32_ELEMENTS) {
+ if (elements_kind == FLOAT32_ELEMENTS) {
__ vldr(double_scratch0().low(), scratch0(), base_offset);
__ vcvt_f64_f32(result, double_scratch0().low());
} else { // i.e. elements_kind == FLOAT64_ELEMENTS
@@ -3184,29 +3211,22 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
key, external_pointer, key_is_constant, constant_key,
element_size_shift, shift_size, base_offset);
switch (elements_kind) {
- case EXTERNAL_INT8_ELEMENTS:
case INT8_ELEMENTS:
__ ldrsb(result, mem_operand);
break;
- case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
- case EXTERNAL_UINT8_ELEMENTS:
case UINT8_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS:
__ ldrb(result, mem_operand);
break;
- case EXTERNAL_INT16_ELEMENTS:
case INT16_ELEMENTS:
__ ldrsh(result, mem_operand);
break;
- case EXTERNAL_UINT16_ELEMENTS:
case UINT16_ELEMENTS:
__ ldrh(result, mem_operand);
break;
- case EXTERNAL_INT32_ELEMENTS:
case INT32_ELEMENTS:
__ ldr(result, mem_operand);
break;
- case EXTERNAL_UINT32_ELEMENTS:
case UINT32_ELEMENTS:
__ ldr(result, mem_operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
@@ -3216,8 +3236,6 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
break;
case FLOAT32_ELEMENTS:
case FLOAT64_ELEMENTS:
- case EXTERNAL_FLOAT32_ELEMENTS:
- case EXTERNAL_FLOAT64_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
@@ -3327,7 +3345,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
- if (instr->is_typed_elements()) {
+ if (instr->is_fixed_typed_array()) {
DoLoadKeyedExternalArray(instr);
} else if (instr->hydrogen()->representation().IsDouble()) {
DoLoadKeyedFixedDoubleArray(instr);
@@ -3570,12 +3588,11 @@ void LCodeGen::DoContext(LContext* instr) {
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- __ push(cp); // The context is the first argument.
__ Move(scratch0(), instr->hydrogen()->pairs());
__ push(scratch0());
__ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
__ push(scratch0());
- CallRuntime(Runtime::kDeclareGlobals, 3, instr);
+ CallRuntime(Runtime::kDeclareGlobals, 2, instr);
}
@@ -4220,6 +4237,30 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
+void LCodeGen::DoStoreGlobalViaContext(LStoreGlobalViaContext* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->value())
+ .is(StoreGlobalViaContextDescriptor::ValueRegister()));
+
+ int const slot = instr->slot_index();
+ int const depth = instr->depth();
+ if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
+ __ mov(StoreGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
+ Handle<Code> stub = CodeFactory::StoreGlobalViaContext(
+ isolate(), depth, instr->language_mode())
+ .code();
+ CallCode(stub, RelocInfo::CODE_TARGET, instr);
+ } else {
+ __ Push(Smi::FromInt(slot));
+ __ push(StoreGlobalViaContextDescriptor::ValueRegister());
+ __ CallRuntime(is_strict(instr->language_mode())
+ ? Runtime::kStoreGlobalViaContext_Strict
+ : Runtime::kStoreGlobalViaContext_Sloppy,
+ 2);
+ }
+}
+
+
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
if (instr->index()->IsConstantOperand()) {
@@ -4262,10 +4303,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
? (element_size_shift - kSmiTagSize) : element_size_shift;
int base_offset = instr->base_offset();
- if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
- elements_kind == FLOAT32_ELEMENTS ||
- elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
- elements_kind == FLOAT64_ELEMENTS) {
+ if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
Register address = scratch0();
DwVfpRegister value(ToDoubleRegister(instr->value()));
if (key_is_constant) {
@@ -4278,8 +4316,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
} else {
__ add(address, external_pointer, Operand(key, LSL, shift_size));
}
- if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
- elements_kind == FLOAT32_ELEMENTS) {
+ if (elements_kind == FLOAT32_ELEMENTS) {
__ vcvt_f32_f64(double_scratch0().low(), value);
__ vstr(double_scratch0().low(), address, base_offset);
} else { // Storing doubles, not floats.
@@ -4292,30 +4329,21 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
element_size_shift, shift_size,
base_offset);
switch (elements_kind) {
- case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
- case EXTERNAL_INT8_ELEMENTS:
- case EXTERNAL_UINT8_ELEMENTS:
case UINT8_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS:
case INT8_ELEMENTS:
__ strb(value, mem_operand);
break;
- case EXTERNAL_INT16_ELEMENTS:
- case EXTERNAL_UINT16_ELEMENTS:
case INT16_ELEMENTS:
case UINT16_ELEMENTS:
__ strh(value, mem_operand);
break;
- case EXTERNAL_INT32_ELEMENTS:
- case EXTERNAL_UINT32_ELEMENTS:
case INT32_ELEMENTS:
case UINT32_ELEMENTS:
__ str(value, mem_operand);
break;
case FLOAT32_ELEMENTS:
case FLOAT64_ELEMENTS:
- case EXTERNAL_FLOAT32_ELEMENTS:
- case EXTERNAL_FLOAT64_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
@@ -4421,7 +4449,7 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
// By cases: external, fast double
- if (instr->is_typed_elements()) {
+ if (instr->is_fixed_typed_array()) {
DoStoreKeyedExternalArray(instr);
} else if (instr->hydrogen()->value()->representation().IsDouble()) {
DoStoreKeyedFixedDoubleArray(instr);
@@ -5630,10 +5658,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
} else if (String::Equals(type_name, factory->string_string())) {
__ JumpIfSmi(input, false_label);
__ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
- __ b(ge, false_label);
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ tst(scratch, Operand(1 << Map::kIsUndetectable));
- final_branch_condition = eq;
+ final_branch_condition = lt;
} else if (String::Equals(type_name, factory->symbol_string())) {
__ JumpIfSmi(input, false_label);
@@ -5680,6 +5705,17 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
__ tst(scratch, Operand(1 << Map::kIsUndetectable));
final_branch_condition = eq;
+// clang-format off
+#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
+ } else if (String::Equals(type_name, factory->type##_string())) { \
+ __ JumpIfSmi(input, false_label); \
+ __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); \
+ __ CompareRoot(scratch, Heap::k##Type##MapRootIndex); \
+ final_branch_condition = eq;
+ SIMD128_TYPES(SIMD128_TYPE)
+#undef SIMD128_TYPE
+ // clang-format on
+
} else {
__ b(false_label);
}
@@ -5800,8 +5836,8 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
__ cmp(sp, Operand(ip));
__ b(hs, &done);
Handle<Code> stack_check = isolate()->builtins()->StackCheck();
- PredictableCodeSizeScope predictable(masm(),
- CallCodeSize(stack_check, RelocInfo::CODE_TARGET));
+ PredictableCodeSizeScope predictable(masm());
+ predictable.ExpectSize(CallCodeSize(stack_check, RelocInfo::CODE_TARGET));
DCHECK(instr->context()->IsRegister());
DCHECK(ToRegister(instr->context()).is(cp));
CallCode(stack_check, RelocInfo::CODE_TARGET, instr);
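DoLoadGlobalViaContext and DoStoreGlobalViaContext in the hunks above share one dispatch shape: context chains up to the stub's kMaximumDepth take a depth-specialized stub, while deeper chains push the slot and call the runtime. A sketch of that split (constants and names illustrative):

```cpp
#include <cstdio>

constexpr int kMaximumStubDepth = 4;  // stand-in for the stub's kMaximumDepth

void LoadGlobalViaContext(int depth, int slot) {
  if (depth <= kMaximumStubDepth) {
    // Shallow chain: the slot goes in a fixed register, the depth is baked
    // into the chosen stub.
    std::printf("mov r2, #%d; call LoadGlobalViaContextStub(depth=%d)\n",
                slot, depth);
  } else {
    // Deep chain: fall back to the generic runtime entry.
    std::printf("push Smi(%d); call Runtime::kLoadGlobalViaContext\n", slot);
  }
}
```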
diff --git a/deps/v8/src/arm/lithium-gap-resolver-arm.cc b/deps/v8/src/arm/lithium-gap-resolver-arm.cc
index f8e4a7f680..31feb11edc 100644
--- a/deps/v8/src/arm/lithium-gap-resolver-arm.cc
+++ b/deps/v8/src/arm/lithium-gap-resolver-arm.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/arm/lithium-codegen-arm.h"
#include "src/arm/lithium-gap-resolver-arm.h"
diff --git a/deps/v8/src/arm/lithium-gap-resolver-arm.h b/deps/v8/src/arm/lithium-gap-resolver-arm.h
index 55206d3e60..88f1a7bb67 100644
--- a/deps/v8/src/arm/lithium-gap-resolver-arm.h
+++ b/deps/v8/src/arm/lithium-gap-resolver-arm.h
@@ -5,8 +5,6 @@
#ifndef V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
#define V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
-#include "src/v8.h"
-
#include "src/lithium.h"
namespace v8 {
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 61e484bd85..4034fa95a4 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -4,8 +4,6 @@
#include <limits.h> // For LONG_MIN, LONG_MAX.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM
#include "src/base/bits.h"
@@ -13,7 +11,7 @@
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/cpu-profiler.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -1434,10 +1432,11 @@ void MacroAssembler::IsObjectNameType(Register object,
void MacroAssembler::DebugBreak() {
mov(r0, Operand::Zero());
- mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
+ mov(r1,
+ Operand(ExternalReference(Runtime::kHandleDebuggerStatement, isolate())));
CEntryStub ces(isolate(), 1);
DCHECK(AllowThisStubCall(&ces));
- Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
+ Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
}
@@ -1875,26 +1874,6 @@ void MacroAssembler::Allocate(Register object_size,
}
-void MacroAssembler::UndoAllocationInNewSpace(Register object,
- Register scratch) {
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
-
- // Make sure the object has no tag before resetting top.
- and_(object, object, Operand(~kHeapObjectTagMask));
-#ifdef DEBUG
- // Check that the object un-allocated is below the current top.
- mov(scratch, Operand(new_space_allocation_top));
- ldr(scratch, MemOperand(scratch));
- cmp(object, scratch);
- Check(lt, kUndoAllocationOfNonAllocatedMemory);
-#endif
- // Write the address of the object to un-allocate as the current top.
- mov(scratch, Operand(new_space_allocation_top));
- str(object, MemOperand(scratch));
-}
-
-
void MacroAssembler::AllocateTwoByteString(Register result,
Register length,
Register scratch1,
@@ -3809,23 +3788,35 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
Register scratch1,
Label* found) {
DCHECK(!scratch1.is(scratch0));
- Factory* factory = isolate()->factory();
Register current = scratch0;
- Label loop_again;
+ Label loop_again, end;
// scratch contained elements pointer.
mov(current, object);
+ ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
+ ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
+ CompareRoot(current, Heap::kNullValueRootIndex);
+ b(eq, &end);
// Loop based on the map going up the prototype chain.
bind(&loop_again);
ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
+
+ STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
+ STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
+ ldrb(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
+ cmp(scratch1, Operand(JS_OBJECT_TYPE));
+ b(lo, found);
+
ldr(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
DecodeField<Map::ElementsKindBits>(scratch1);
cmp(scratch1, Operand(DICTIONARY_ELEMENTS));
b(eq, found);
ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
- cmp(current, Operand(factory->null_value()));
+ CompareRoot(current, Heap::kNullValueRootIndex);
b(ne, &loop_again);
+
+ bind(&end);
}
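
The rewritten walk now also branches to found for anything below JS_OBJECT_TYPE (proxies and wrapper values, per the STATIC_ASSERTs) and compares against the null root instead of materializing the factory's null handle. A rough C++ sketch of the control flow the assembly implements, with accessor names standing in for the raw field loads above:

    // Sketch only: mirrors the loads in JumpIfDictionaryInPrototypeChain.
    // null_value is a stand-in for the Heap::kNullValueRootIndex root.
    static bool PrototypeChainNeedsSlowPath(JSObject* object) {
      HeapObject* current = object->map()->prototype();  // receiver skipped
      while (current != null_value) {
        Map* map = current->map();
        if (map->instance_type() < JS_OBJECT_TYPE) return true;  // proxy/value
        if (map->elements_kind() == DICTIONARY_ELEMENTS) return true;
        current = map->prototype();
      }
      return false;  // reached null: chain is clean
    }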
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 7ece4b2fa6..5ec2bd3f8b 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -13,6 +13,19 @@
namespace v8 {
namespace internal {
+// Give alias names to registers for calling conventions.
+const Register kReturnRegister0 = {kRegister_r0_Code};
+const Register kReturnRegister1 = {kRegister_r1_Code};
+const Register kJSFunctionRegister = {kRegister_r1_Code};
+const Register kContextRegister = {kRegister_r7_Code};
+const Register kInterpreterAccumulatorRegister = {kRegister_r0_Code};
+const Register kInterpreterRegisterFileRegister = {kRegister_r4_Code};
+const Register kInterpreterBytecodeOffsetRegister = {kRegister_r5_Code};
+const Register kInterpreterBytecodeArrayRegister = {kRegister_r6_Code};
+const Register kInterpreterDispatchTableRegister = {kRegister_r8_Code};
+const Register kRuntimeCallFunctionRegister = {kRegister_r1_Code};
+const Register kRuntimeCallArgCountRegister = {kRegister_r0_Code};
+
// ----------------------------------------------------------------------------
// Static helper functions
@@ -250,7 +263,7 @@ class MacroAssembler: public Assembler {
// |object| is the object being stored into, |value| is the object being
// stored. value and scratch registers are clobbered by the operation.
// The offset is the offset from the start of the object, not the offset from
- // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
+ // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
void RecordWriteField(
Register object,
int offset,
@@ -325,9 +338,7 @@ class MacroAssembler: public Assembler {
// Push three registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3, Condition cond = al) {
- DCHECK(!src1.is(src2));
- DCHECK(!src2.is(src3));
- DCHECK(!src1.is(src3));
+ DCHECK(!AreAliased(src1, src2, src3));
if (src1.code() > src2.code()) {
if (src2.code() > src3.code()) {
stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
@@ -347,12 +358,7 @@ class MacroAssembler: public Assembler {
Register src3,
Register src4,
Condition cond = al) {
- DCHECK(!src1.is(src2));
- DCHECK(!src2.is(src3));
- DCHECK(!src1.is(src3));
- DCHECK(!src1.is(src4));
- DCHECK(!src2.is(src4));
- DCHECK(!src3.is(src4));
+ DCHECK(!AreAliased(src1, src2, src3, src4));
if (src1.code() > src2.code()) {
if (src2.code() > src3.code()) {
if (src3.code() > src4.code()) {
@@ -374,6 +380,36 @@ class MacroAssembler: public Assembler {
}
}
+ // Push five registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2, Register src3, Register src4,
+ Register src5, Condition cond = al) {
+ DCHECK(!AreAliased(src1, src2, src3, src4, src5));
+ if (src1.code() > src2.code()) {
+ if (src2.code() > src3.code()) {
+ if (src3.code() > src4.code()) {
+ if (src4.code() > src5.code()) {
+ stm(db_w, sp,
+ src1.bit() | src2.bit() | src3.bit() | src4.bit() | src5.bit(),
+ cond);
+ } else {
+ stm(db_w, sp, src1.bit() | src2.bit() | src3.bit() | src4.bit(),
+ cond);
+ str(src5, MemOperand(sp, 4, NegPreIndex), cond);
+ }
+ } else {
+ stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
+ Push(src4, src5, cond);
+ }
+ } else {
+ stm(db_w, sp, src1.bit() | src2.bit(), cond);
+ Push(src3, src4, src5, cond);
+ }
+ } else {
+ str(src1, MemOperand(sp, 4, NegPreIndex), cond);
+ Push(src2, src3, src4, src5, cond);
+ }
+ }
+
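As with the existing three- and four-register overloads, a single stm is only possible when the register codes strictly descend left to right, because the hardware stores higher-numbered registers at higher addresses regardless of list order; otherwise the code falls back to str with a pre-decremented sp plus a smaller Push. Illustrative calls (register choice is arbitrary):

    __ Push(r4, r3, r2, r1, r0);  // codes descend: emitted as one stm
    __ Push(r0, r1, r2, r3, r4);  // falls back to str + four-register Push
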
// Pop two registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Condition cond = al) {
DCHECK(!src1.is(src2));
@@ -387,9 +423,7 @@ class MacroAssembler: public Assembler {
// Pop three registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Register src3, Condition cond = al) {
- DCHECK(!src1.is(src2));
- DCHECK(!src2.is(src3));
- DCHECK(!src1.is(src3));
+ DCHECK(!AreAliased(src1, src2, src3));
if (src1.code() > src2.code()) {
if (src2.code() > src3.code()) {
ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
@@ -409,12 +443,7 @@ class MacroAssembler: public Assembler {
Register src3,
Register src4,
Condition cond = al) {
- DCHECK(!src1.is(src2));
- DCHECK(!src2.is(src3));
- DCHECK(!src1.is(src3));
- DCHECK(!src1.is(src4));
- DCHECK(!src2.is(src4));
- DCHECK(!src3.is(src4));
+ DCHECK(!AreAliased(src1, src2, src3, src4));
if (src1.code() > src2.code()) {
if (src2.code() > src3.code()) {
if (src3.code() > src4.code()) {
@@ -745,13 +774,6 @@ class MacroAssembler: public Assembler {
Label* gc_required,
AllocationFlags flags);
- // Undo allocation in new space. The object passed and objects allocated after
- // it will no longer be allocated. The caller must make sure that no pointers
- // are left to the object(s) no longer allocated as they would be invalid when
- // allocation is undone.
- void UndoAllocationInNewSpace(Register object, Register scratch);
-
-
void AllocateTwoByteString(Register result,
Register length,
Register scratch1,
@@ -1513,7 +1535,7 @@ class CodePatcher {
CodePatcher(byte* address,
int instructions,
FlushICache flush_cache = FLUSH);
- virtual ~CodePatcher();
+ ~CodePatcher();
// Macro assembler to emit code.
MacroAssembler* masm() { return &masm_; }
@@ -1539,7 +1561,7 @@ class CodePatcher {
// -----------------------------------------------------------------------------
// Static helper functions.
-inline MemOperand ContextOperand(Register context, int index) {
+inline MemOperand ContextOperand(Register context, int index = 0) {
return MemOperand(context, Context::SlotOffset(index));
}
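
With the new default argument, ContextOperand(cp) becomes shorthand for slot 0 (cp being ARM's context register alias). A trivial sketch of the equivalence:

    MemOperand a = ContextOperand(cp);     // now compiles
    MemOperand b = ContextOperand(cp, 0);  // identical operand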
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
deleted file mode 100644
index 9f4b4af42d..0000000000
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc
+++ /dev/null
@@ -1,1199 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#if V8_TARGET_ARCH_ARM
-
-#include "src/code-stubs.h"
-#include "src/cpu-profiler.h"
-#include "src/log.h"
-#include "src/macro-assembler.h"
-#include "src/regexp-macro-assembler.h"
-#include "src/regexp-stack.h"
-#include "src/unicode.h"
-
-#include "src/arm/regexp-macro-assembler-arm.h"
-
-namespace v8 {
-namespace internal {
-
-#ifndef V8_INTERPRETED_REGEXP
-/*
- * This assembler uses the following register assignment convention
- * - r4 : Temporarily stores the index of capture start after a matching pass
- * for a global regexp.
- * - r5 : Pointer to current code object (Code*) including heap object tag.
- * - r6 : Current position in input, as negative offset from end of string.
- * Please notice that this is the byte offset, not the character offset!
- * - r7 : Currently loaded character. Must be loaded using
- * LoadCurrentCharacter before using any of the dispatch methods.
- * - r8 : Points to tip of backtrack stack
- * - r9 : Unused, might be used by C code and expected unchanged.
- * - r10 : End of input (points to byte after last character in input).
- * - r11 : Frame pointer. Used to access arguments, local variables and
- * RegExp registers.
- * - r12 : IP register, used by assembler. Very volatile.
- * - r13/sp : Points to tip of C stack.
- *
- * The remaining registers are free for computations.
- * Each call to a public method should retain this convention.
- *
- * The stack will have the following structure:
- * - fp[56] Isolate* isolate (address of the current isolate)
- * - fp[52] direct_call (if 1, direct call from JavaScript code,
- * if 0, call through the runtime system).
- * - fp[48] stack_area_base (high end of the memory area to use as
- * backtracking stack).
- * - fp[44] capture array size (may fit multiple sets of matches)
- * - fp[40] int* capture_array (int[num_saved_registers_], for output).
- * - fp[36] secondary link/return address used by native call.
- * --- sp when called ---
- * - fp[32] return address (lr).
- * - fp[28] old frame pointer (r11).
- * - fp[0..24] backup of registers r4..r10.
- * --- frame pointer ----
- * - fp[-4] end of input (address of end of string).
- * - fp[-8] start of input (address of first character in string).
- * - fp[-12] start index (character index of start).
- * - fp[-16] void* input_string (location of a handle containing the string).
- * - fp[-20] success counter (only for global regexps to count matches).
- * - fp[-24] Offset of location before start of input (effectively character
- * position -1). Used to initialize capture registers to a
- * non-position.
- * - fp[-28] register 0 (Only positions must be stored in the first
- * - fp[-32] register 1 num_saved_registers_ registers)
- * - ...
- * - register num_registers-1
- * --- sp ---
- *
- * The first num_saved_registers_ registers are initialized to point to
- * "character -1" in the string (i.e., char_size() bytes before the first
- * character of the string). The remaining registers start out as garbage.
- *
- * The data up to the return address must be placed there by the calling
- * code and the remaining arguments are passed in registers, e.g. by calling the
- * code entry as cast to a function with the signature:
- * int (*match)(String* input_string,
- * int start_index,
- * Address start,
- * Address end,
- * Address secondary_return_address, // Only used by native call.
- * int* capture_output_array,
- * byte* stack_area_base,
- * bool direct_call = false)
- * The call is performed by NativeRegExpMacroAssembler::Execute()
- * (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
- * in arm/simulator-arm.h.
- * When calling as a non-direct call (i.e., from C++ code), the return address
- * area is overwritten with the LR register by the RegExp code. When doing a
- * direct call from generated code, the return address is placed there by
- * the calling code, as in a normal exit frame.
- */
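
For reference, a sketch of a direct invocation under the signature documented above; the variable names are placeholders, and real callers go through NativeRegExpMacroAssembler::Execute() and the CALL_GENERATED_REGEXP_CODE macro instead:

    typedef int (*RegExpMatch)(String* input_string, int start_index,
                               Address start, Address end,
                               Address secondary_return_address,
                               int* capture_output_array,
                               byte* stack_area_base, bool direct_call);
    RegExpMatch match = reinterpret_cast<RegExpMatch>(code_entry);
    int result = match(input, index, start_addr, end_addr, return_slot,
                       captures, stack_base, false);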
-
-#define __ ACCESS_MASM(masm_)
-
-RegExpMacroAssemblerARM::RegExpMacroAssemblerARM(Isolate* isolate, Zone* zone,
- Mode mode,
- int registers_to_save)
- : NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize)),
- mode_(mode),
- num_registers_(registers_to_save),
- num_saved_registers_(registers_to_save),
- entry_label_(),
- start_label_(),
- success_label_(),
- backtrack_label_(),
- exit_label_() {
- DCHECK_EQ(0, registers_to_save % 2);
- __ jmp(&entry_label_); // We'll write the entry code later.
- __ bind(&start_label_); // And then continue from here.
-}
-
-
-RegExpMacroAssemblerARM::~RegExpMacroAssemblerARM() {
- delete masm_;
- // Unuse labels in case we throw away the assembler without calling GetCode.
- entry_label_.Unuse();
- start_label_.Unuse();
- success_label_.Unuse();
- backtrack_label_.Unuse();
- exit_label_.Unuse();
- check_preempt_label_.Unuse();
- stack_overflow_label_.Unuse();
-}
-
-
-int RegExpMacroAssemblerARM::stack_limit_slack() {
- return RegExpStack::kStackLimitSlack;
-}
-
-
-void RegExpMacroAssemblerARM::AdvanceCurrentPosition(int by) {
- if (by != 0) {
- __ add(current_input_offset(),
- current_input_offset(), Operand(by * char_size()));
- }
-}
-
-
-void RegExpMacroAssemblerARM::AdvanceRegister(int reg, int by) {
- DCHECK(reg >= 0);
- DCHECK(reg < num_registers_);
- if (by != 0) {
- __ ldr(r0, register_location(reg));
- __ add(r0, r0, Operand(by));
- __ str(r0, register_location(reg));
- }
-}
-
-
-void RegExpMacroAssemblerARM::Backtrack() {
- CheckPreemption();
- // Pop Code* offset from backtrack stack, add Code* and jump to location.
- Pop(r0);
- __ add(pc, r0, Operand(code_pointer()));
-}
-
-
-void RegExpMacroAssemblerARM::Bind(Label* label) {
- __ bind(label);
-}
-
-
-void RegExpMacroAssemblerARM::CheckCharacter(uint32_t c, Label* on_equal) {
- __ cmp(current_character(), Operand(c));
- BranchOrBacktrack(eq, on_equal);
-}
-
-
-void RegExpMacroAssemblerARM::CheckCharacterGT(uc16 limit, Label* on_greater) {
- __ cmp(current_character(), Operand(limit));
- BranchOrBacktrack(gt, on_greater);
-}
-
-
-void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) {
- Label not_at_start;
- // Did we start the match at the start of the string at all?
- __ ldr(r0, MemOperand(frame_pointer(), kStartIndex));
- __ cmp(r0, Operand::Zero());
- BranchOrBacktrack(ne, &not_at_start);
-
- // If we did, are we still at the start of the input?
- __ ldr(r1, MemOperand(frame_pointer(), kInputStart));
- __ add(r0, end_of_input_address(), Operand(current_input_offset()));
- __ cmp(r0, r1);
- BranchOrBacktrack(eq, on_at_start);
- __ bind(&not_at_start);
-}
-
-
-void RegExpMacroAssemblerARM::CheckNotAtStart(Label* on_not_at_start) {
- // Did we start the match at the start of the string at all?
- __ ldr(r0, MemOperand(frame_pointer(), kStartIndex));
- __ cmp(r0, Operand::Zero());
- BranchOrBacktrack(ne, on_not_at_start);
- // If we did, are we still at the start of the input?
- __ ldr(r1, MemOperand(frame_pointer(), kInputStart));
- __ add(r0, end_of_input_address(), Operand(current_input_offset()));
- __ cmp(r0, r1);
- BranchOrBacktrack(ne, on_not_at_start);
-}
-
-
-void RegExpMacroAssemblerARM::CheckCharacterLT(uc16 limit, Label* on_less) {
- __ cmp(current_character(), Operand(limit));
- BranchOrBacktrack(lt, on_less);
-}
-
-
-void RegExpMacroAssemblerARM::CheckGreedyLoop(Label* on_equal) {
- __ ldr(r0, MemOperand(backtrack_stackpointer(), 0));
- __ cmp(current_input_offset(), r0);
- __ add(backtrack_stackpointer(),
- backtrack_stackpointer(), Operand(kPointerSize), LeaveCC, eq);
- BranchOrBacktrack(eq, on_equal);
-}
-
-
-void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
- int start_reg,
- Label* on_no_match) {
- Label fallthrough;
- __ ldr(r0, register_location(start_reg)); // Index of start of capture
- __ ldr(r1, register_location(start_reg + 1)); // Index of end of capture
- __ sub(r1, r1, r0, SetCC); // Length of capture.
-
- // If length is zero, either the capture is empty or it is not participating.
- // In either case succeed immediately.
- __ b(eq, &fallthrough);
-
- // Check that there are enough characters left in the input.
- __ cmn(r1, Operand(current_input_offset()));
- BranchOrBacktrack(gt, on_no_match);
-
- if (mode_ == LATIN1) {
- Label success;
- Label fail;
- Label loop_check;
-
- // r0 - offset of start of capture
- // r1 - length of capture
- __ add(r0, r0, Operand(end_of_input_address()));
- __ add(r2, end_of_input_address(), Operand(current_input_offset()));
- __ add(r1, r0, Operand(r1));
-
- // r0 - Address of start of capture.
- // r1 - Address of end of capture
- // r2 - Address of current input position.
-
- Label loop;
- __ bind(&loop);
- __ ldrb(r3, MemOperand(r0, char_size(), PostIndex));
- __ ldrb(r4, MemOperand(r2, char_size(), PostIndex));
- __ cmp(r4, r3);
- __ b(eq, &loop_check);
-
- // Mismatch, try case-insensitive match (converting letters to lower-case).
- __ orr(r3, r3, Operand(0x20)); // Convert capture character to lower-case.
- __ orr(r4, r4, Operand(0x20)); // Also convert input character.
- __ cmp(r4, r3);
- __ b(ne, &fail);
- __ sub(r3, r3, Operand('a'));
- __ cmp(r3, Operand('z' - 'a')); // Is r3 a lowercase letter?
- __ b(ls, &loop_check); // In range 'a'-'z'.
- // Latin-1: Check for values in range [224,254] but not 247.
- __ sub(r3, r3, Operand(224 - 'a'));
- __ cmp(r3, Operand(254 - 224));
- __ b(hi, &fail); // Weren't Latin-1 letters.
- __ cmp(r3, Operand(247 - 224)); // Check for 247.
- __ b(eq, &fail);
-
- __ bind(&loop_check);
- __ cmp(r0, r1);
- __ b(lt, &loop);
- __ jmp(&success);
-
- __ bind(&fail);
- BranchOrBacktrack(al, on_no_match);
-
- __ bind(&success);
- // Compute new value of character position after the matched part.
- __ sub(current_input_offset(), r2, end_of_input_address());
- } else {
- DCHECK(mode_ == UC16);
- int argument_count = 4;
- __ PrepareCallCFunction(argument_count, r2);
-
- // r0 - offset of start of capture
- // r1 - length of capture
-
- // Put arguments into arguments registers.
- // Parameters are
- // r0: Address byte_offset1 - Address captured substring's start.
- // r1: Address byte_offset2 - Address of current character position.
- // r2: size_t byte_length - length of capture in bytes(!)
- // r3: Isolate* isolate
-
- // Address of start of capture.
- __ add(r0, r0, Operand(end_of_input_address()));
- // Length of capture.
- __ mov(r2, Operand(r1));
- // Save length in callee-save register for use on return.
- __ mov(r4, Operand(r1));
- // Address of current input position.
- __ add(r1, current_input_offset(), Operand(end_of_input_address()));
- // Isolate.
- __ mov(r3, Operand(ExternalReference::isolate_address(isolate())));
-
- {
- AllowExternalCallThatCantCauseGC scope(masm_);
- ExternalReference function =
- ExternalReference::re_case_insensitive_compare_uc16(isolate());
- __ CallCFunction(function, argument_count);
- }
-
- // Check if function returned non-zero for success or zero for failure.
- __ cmp(r0, Operand::Zero());
- BranchOrBacktrack(eq, on_no_match);
- // On success, increment position by length of capture.
- __ add(current_input_offset(), current_input_offset(), Operand(r4));
- }
-
- __ bind(&fallthrough);
-}
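
The Latin-1 slow path folds case with a bitwise OR of 0x20, accepts ASCII 'a'..'z', and then accepts the Latin-1 letters 224..254 except 247 (0xF7, the division sign, which is the case-folded image of 0xD7, the multiplication sign, and not a letter). A scalar sketch of the predicate the loop implements, assuming uint8_t inputs:

    static bool CanonicalLatin1Equal(uint8_t capture_ch, uint8_t input_ch) {
      if (capture_ch == input_ch) return true;
      uint8_t c = capture_ch | 0x20;            // fold both to "lower case"
      if (c != (input_ch | 0x20)) return false;
      if (c >= 'a' && c <= 'z') return true;    // ASCII letters
      return c >= 224 && c <= 254 && c != 247;  // Latin-1 letters, minus 0xF7
    }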
-
-
-void RegExpMacroAssemblerARM::CheckNotBackReference(
- int start_reg,
- Label* on_no_match) {
- Label fallthrough;
- Label success;
-
- // Find length of back-referenced capture.
- __ ldr(r0, register_location(start_reg));
- __ ldr(r1, register_location(start_reg + 1));
- __ sub(r1, r1, r0, SetCC); // Length to check.
- // Succeed on empty capture (including no capture).
- __ b(eq, &fallthrough);
-
- // Check that there are enough characters left in the input.
- __ cmn(r1, Operand(current_input_offset()));
- BranchOrBacktrack(gt, on_no_match);
-
- // Compute pointers to match string and capture string
- __ add(r0, r0, Operand(end_of_input_address()));
- __ add(r2, end_of_input_address(), Operand(current_input_offset()));
- __ add(r1, r1, Operand(r0));
-
- Label loop;
- __ bind(&loop);
- if (mode_ == LATIN1) {
- __ ldrb(r3, MemOperand(r0, char_size(), PostIndex));
- __ ldrb(r4, MemOperand(r2, char_size(), PostIndex));
- } else {
- DCHECK(mode_ == UC16);
- __ ldrh(r3, MemOperand(r0, char_size(), PostIndex));
- __ ldrh(r4, MemOperand(r2, char_size(), PostIndex));
- }
- __ cmp(r3, r4);
- BranchOrBacktrack(ne, on_no_match);
- __ cmp(r0, r1);
- __ b(lt, &loop);
-
- // Move current character position to position after match.
- __ sub(current_input_offset(), r2, end_of_input_address());
- __ bind(&fallthrough);
-}
-
-
-void RegExpMacroAssemblerARM::CheckNotCharacter(unsigned c,
- Label* on_not_equal) {
- __ cmp(current_character(), Operand(c));
- BranchOrBacktrack(ne, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerARM::CheckCharacterAfterAnd(uint32_t c,
- uint32_t mask,
- Label* on_equal) {
- if (c == 0) {
- __ tst(current_character(), Operand(mask));
- } else {
- __ and_(r0, current_character(), Operand(mask));
- __ cmp(r0, Operand(c));
- }
- BranchOrBacktrack(eq, on_equal);
-}
-
-
-void RegExpMacroAssemblerARM::CheckNotCharacterAfterAnd(unsigned c,
- unsigned mask,
- Label* on_not_equal) {
- if (c == 0) {
- __ tst(current_character(), Operand(mask));
- } else {
- __ and_(r0, current_character(), Operand(mask));
- __ cmp(r0, Operand(c));
- }
- BranchOrBacktrack(ne, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerARM::CheckNotCharacterAfterMinusAnd(
- uc16 c,
- uc16 minus,
- uc16 mask,
- Label* on_not_equal) {
- DCHECK(minus < String::kMaxUtf16CodeUnit);
- __ sub(r0, current_character(), Operand(minus));
- __ and_(r0, r0, Operand(mask));
- __ cmp(r0, Operand(c));
- BranchOrBacktrack(ne, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerARM::CheckCharacterInRange(
- uc16 from,
- uc16 to,
- Label* on_in_range) {
- __ sub(r0, current_character(), Operand(from));
- __ cmp(r0, Operand(to - from));
- BranchOrBacktrack(ls, on_in_range); // Unsigned lower-or-same condition.
-}
-
-
-void RegExpMacroAssemblerARM::CheckCharacterNotInRange(
- uc16 from,
- uc16 to,
- Label* on_not_in_range) {
- __ sub(r0, current_character(), Operand(from));
- __ cmp(r0, Operand(to - from));
- BranchOrBacktrack(hi, on_not_in_range); // Unsigned higher condition.
-}
-
-
-void RegExpMacroAssemblerARM::CheckBitInTable(
- Handle<ByteArray> table,
- Label* on_bit_set) {
- __ mov(r0, Operand(table));
- if (mode_ != LATIN1 || kTableMask != String::kMaxOneByteCharCode) {
- __ and_(r1, current_character(), Operand(kTableSize - 1));
- __ add(r1, r1, Operand(ByteArray::kHeaderSize - kHeapObjectTag));
- } else {
- __ add(r1,
- current_character(),
- Operand(ByteArray::kHeaderSize - kHeapObjectTag));
- }
- __ ldrb(r0, MemOperand(r0, r1));
- __ cmp(r0, Operand::Zero());
- BranchOrBacktrack(ne, on_bit_set);
-}
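
The table is a kTableSize-entry (256) ByteArray of flags; when the mode is Latin-1 and the mask covers the whole one-byte range, the character indexes the table directly, otherwise it is masked first. In scalar terms:

    // Sketch only: the lookup both code paths compute.
    bool bit_set = table[current_character & (kTableSize - 1)] != 0;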
-
-
-bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
- Label* on_no_match) {
- // Range checks (c in min..max) are generally implemented by an unsigned
- // (c - min) <= (max - min) check
- switch (type) {
- case 's':
- // Match space-characters
- if (mode_ == LATIN1) {
- // One byte space characters are '\t'..'\r', ' ' and \u00a0.
- Label success;
- __ cmp(current_character(), Operand(' '));
- __ b(eq, &success);
- // Check range 0x09..0x0d
- __ sub(r0, current_character(), Operand('\t'));
- __ cmp(r0, Operand('\r' - '\t'));
- __ b(ls, &success);
- // \u00a0 (NBSP).
- __ cmp(r0, Operand(0x00a0 - '\t'));
- BranchOrBacktrack(ne, on_no_match);
- __ bind(&success);
- return true;
- }
- return false;
- case 'S':
- // The emitted code for generic character classes is good enough.
- return false;
- case 'd':
- // Match ASCII digits ('0'..'9')
- __ sub(r0, current_character(), Operand('0'));
- __ cmp(r0, Operand('9' - '0'));
- BranchOrBacktrack(hi, on_no_match);
- return true;
- case 'D':
- // Match non ASCII-digits
- __ sub(r0, current_character(), Operand('0'));
- __ cmp(r0, Operand('9' - '0'));
- BranchOrBacktrack(ls, on_no_match);
- return true;
- case '.': {
- // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
- __ eor(r0, current_character(), Operand(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ sub(r0, r0, Operand(0x0b));
- __ cmp(r0, Operand(0x0c - 0x0b));
- BranchOrBacktrack(ls, on_no_match);
- if (mode_ == UC16) {
- // Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ sub(r0, r0, Operand(0x2028 - 0x0b));
- __ cmp(r0, Operand(1));
- BranchOrBacktrack(ls, on_no_match);
- }
- return true;
- }
- case 'n': {
- // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
- __ eor(r0, current_character(), Operand(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ sub(r0, r0, Operand(0x0b));
- __ cmp(r0, Operand(0x0c - 0x0b));
- if (mode_ == LATIN1) {
- BranchOrBacktrack(hi, on_no_match);
- } else {
- Label done;
- __ b(ls, &done);
- // Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ sub(r0, r0, Operand(0x2028 - 0x0b));
- __ cmp(r0, Operand(1));
- BranchOrBacktrack(hi, on_no_match);
- __ bind(&done);
- }
- return true;
- }
- case 'w': {
- if (mode_ != LATIN1) {
- // Table is 256 entries, so all Latin1 characters can be tested.
- __ cmp(current_character(), Operand('z'));
- BranchOrBacktrack(hi, on_no_match);
- }
- ExternalReference map = ExternalReference::re_word_character_map();
- __ mov(r0, Operand(map));
- __ ldrb(r0, MemOperand(r0, current_character()));
- __ cmp(r0, Operand::Zero());
- BranchOrBacktrack(eq, on_no_match);
- return true;
- }
- case 'W': {
- Label done;
- if (mode_ != LATIN1) {
- // Table is 256 entries, so all Latin1 characters can be tested.
- __ cmp(current_character(), Operand('z'));
- __ b(hi, &done);
- }
- ExternalReference map = ExternalReference::re_word_character_map();
- __ mov(r0, Operand(map));
- __ ldrb(r0, MemOperand(r0, current_character()));
- __ cmp(r0, Operand::Zero());
- BranchOrBacktrack(ne, on_no_match);
- if (mode_ != LATIN1) {
- __ bind(&done);
- }
- return true;
- }
- case '*':
- // Match any character.
- return true;
- // No custom implementation (yet): s(UC16), S(UC16).
- default:
- return false;
- }
-}
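
The '.' and 'n' classes share one arithmetic trick: XOR-ing the character with 0x01 maps '\n' (0x0a) and '\r' (0x0d) onto the adjacent pair 0x0b/0x0c, so a single unsigned range check covers both, and the same scratch value, shifted by (0x2028 - 0x0b), covers the UC16 line separators. Worked through:

    // '\n' (0x0a): 0x0a ^ 0x01 = 0x0b; 0x0b - 0x0b = 0 -> within [0, 1]
    // '\r' (0x0d): 0x0d ^ 0x01 = 0x0c; 0x0c - 0x0b = 1 -> within [0, 1]
    // Reusing r0 = (c ^ 0x01) - 0x0b for the UC16 separators:
    // U+2028: r0 = 0x201e; 0x201e - 0x201d = 1 -> within [0, 1]
    // U+2029: r0 = 0x201d; 0x201d - 0x201d = 0 -> within [0, 1]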
-
-
-void RegExpMacroAssemblerARM::Fail() {
- __ mov(r0, Operand(FAILURE));
- __ jmp(&exit_label_);
-}
-
-
-Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
- Label return_r0;
- // Finalize code - write the entry point code now we know how many
- // registers we need.
-
- // Entry code:
- __ bind(&entry_label_);
-
- // Tell the system that we have a stack frame. Because the type is MANUAL, no
- // code is generated.
- FrameScope scope(masm_, StackFrame::MANUAL);
-
- // Actually emit code to start a new stack frame.
- // Push arguments
- // Save callee-save registers.
- // Start new stack frame.
- // Store link register in existing stack-cell.
- // Order here should correspond to order of offset constants in header file.
- RegList registers_to_retain = r4.bit() | r5.bit() | r6.bit() |
- r7.bit() | r8.bit() | r9.bit() | r10.bit() | fp.bit();
- RegList argument_registers = r0.bit() | r1.bit() | r2.bit() | r3.bit();
- __ stm(db_w, sp, argument_registers | registers_to_retain | lr.bit());
- // Set frame pointer in space for it if this is not a direct call
- // from generated code.
- __ add(frame_pointer(), sp, Operand(4 * kPointerSize));
- __ mov(r0, Operand::Zero());
- __ push(r0); // Make room for success counter and initialize it to 0.
- __ push(r0); // Make room for "position - 1" constant (value is irrelevant).
- // Check if we have space on the stack for registers.
- Label stack_limit_hit;
- Label stack_ok;
-
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(isolate());
- __ mov(r0, Operand(stack_limit));
- __ ldr(r0, MemOperand(r0));
- __ sub(r0, sp, r0, SetCC);
- // Handle it if the stack pointer is already below the stack limit.
- __ b(ls, &stack_limit_hit);
- // Check if there is room for the variable number of registers above
- // the stack limit.
- __ cmp(r0, Operand(num_registers_ * kPointerSize));
- __ b(hs, &stack_ok);
- // Exit with OutOfMemory exception. There is not enough space on the stack
- // for our working registers.
- __ mov(r0, Operand(EXCEPTION));
- __ jmp(&return_r0);
-
- __ bind(&stack_limit_hit);
- CallCheckStackGuardState(r0);
- __ cmp(r0, Operand::Zero());
- // If returned value is non-zero, we exit with the returned value as result.
- __ b(ne, &return_r0);
-
- __ bind(&stack_ok);
-
- // Allocate space on stack for registers.
- __ sub(sp, sp, Operand(num_registers_ * kPointerSize));
- // Load string end.
- __ ldr(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
- // Load input start.
- __ ldr(r0, MemOperand(frame_pointer(), kInputStart));
- // Find negative length (offset of start relative to end).
- __ sub(current_input_offset(), r0, end_of_input_address());
- // Set r0 to address of char before start of the input string
- // (effectively string position -1).
- __ ldr(r1, MemOperand(frame_pointer(), kStartIndex));
- __ sub(r0, current_input_offset(), Operand(char_size()));
- __ sub(r0, r0, Operand(r1, LSL, (mode_ == UC16) ? 1 : 0));
- // Store this value in a local variable, for use when clearing
- // position registers.
- __ str(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
-
- // Initialize code pointer register
- __ mov(code_pointer(), Operand(masm_->CodeObject()));
-
- Label load_char_start_regexp, start_regexp;
- // Load newline if index is at start, previous character otherwise.
- __ cmp(r1, Operand::Zero());
- __ b(ne, &load_char_start_regexp);
- __ mov(current_character(), Operand('\n'), LeaveCC, eq);
- __ jmp(&start_regexp);
-
- // Global regexp restarts matching here.
- __ bind(&load_char_start_regexp);
- // Load previous char as initial value of current character register.
- LoadCurrentCharacterUnchecked(-1, 1);
- __ bind(&start_regexp);
-
- // Initialize on-stack registers.
- if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
- // Fill saved registers with initial value = start offset - 1
- if (num_saved_registers_ > 8) {
- // Address of register 0.
- __ add(r1, frame_pointer(), Operand(kRegisterZero));
- __ mov(r2, Operand(num_saved_registers_));
- Label init_loop;
- __ bind(&init_loop);
- __ str(r0, MemOperand(r1, kPointerSize, NegPostIndex));
- __ sub(r2, r2, Operand(1), SetCC);
- __ b(ne, &init_loop);
- } else {
- for (int i = 0; i < num_saved_registers_; i++) {
- __ str(r0, register_location(i));
- }
- }
- }
-
- // Initialize backtrack stack pointer.
- __ ldr(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd));
-
- __ jmp(&start_label_);
-
- // Exit code:
- if (success_label_.is_linked()) {
- // Save captures when successful.
- __ bind(&success_label_);
- if (num_saved_registers_ > 0) {
- // copy captures to output
- __ ldr(r1, MemOperand(frame_pointer(), kInputStart));
- __ ldr(r0, MemOperand(frame_pointer(), kRegisterOutput));
- __ ldr(r2, MemOperand(frame_pointer(), kStartIndex));
- __ sub(r1, end_of_input_address(), r1);
- // r1 is length of input in bytes.
- if (mode_ == UC16) {
- __ mov(r1, Operand(r1, LSR, 1));
- }
- // r1 is length of input in characters.
- __ add(r1, r1, Operand(r2));
- // r1 is length of string in characters.
-
- DCHECK_EQ(0, num_saved_registers_ % 2);
- // Always an even number of capture registers. This allows us to
- // unroll the loop once to add an operation between a load of a register
- // and the following use of that register.
- for (int i = 0; i < num_saved_registers_; i += 2) {
- __ ldr(r2, register_location(i));
- __ ldr(r3, register_location(i + 1));
- if (i == 0 && global_with_zero_length_check()) {
- // Keep capture start in r4 for the zero-length check later.
- __ mov(r4, r2);
- }
- if (mode_ == UC16) {
- __ add(r2, r1, Operand(r2, ASR, 1));
- __ add(r3, r1, Operand(r3, ASR, 1));
- } else {
- __ add(r2, r1, Operand(r2));
- __ add(r3, r1, Operand(r3));
- }
- __ str(r2, MemOperand(r0, kPointerSize, PostIndex));
- __ str(r3, MemOperand(r0, kPointerSize, PostIndex));
- }
- }
-
- if (global()) {
- // Restart matching if the regular expression is flagged as global.
- __ ldr(r0, MemOperand(frame_pointer(), kSuccessfulCaptures));
- __ ldr(r1, MemOperand(frame_pointer(), kNumOutputRegisters));
- __ ldr(r2, MemOperand(frame_pointer(), kRegisterOutput));
- // Increment success counter.
- __ add(r0, r0, Operand(1));
- __ str(r0, MemOperand(frame_pointer(), kSuccessfulCaptures));
- // Capture results have been stored, so the number of remaining global
- // output registers is reduced by the number of stored captures.
- __ sub(r1, r1, Operand(num_saved_registers_));
- // Check whether we have enough room for another set of capture results.
- __ cmp(r1, Operand(num_saved_registers_));
- __ b(lt, &return_r0);
-
- __ str(r1, MemOperand(frame_pointer(), kNumOutputRegisters));
- // Advance the location for output.
- __ add(r2, r2, Operand(num_saved_registers_ * kPointerSize));
- __ str(r2, MemOperand(frame_pointer(), kRegisterOutput));
-
- // Prepare r0 to initialize registers with its value in the next run.
- __ ldr(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
-
- if (global_with_zero_length_check()) {
- // Special case for zero-length matches.
- // r4: capture start index
- __ cmp(current_input_offset(), r4);
- // Not a zero-length match, restart.
- __ b(ne, &load_char_start_regexp);
- // Offset from the end is zero if we already reached the end.
- __ cmp(current_input_offset(), Operand::Zero());
- __ b(eq, &exit_label_);
- // Advance current position after a zero-length match.
- __ add(current_input_offset(),
- current_input_offset(),
- Operand((mode_ == UC16) ? 2 : 1));
- }
-
- __ b(&load_char_start_regexp);
- } else {
- __ mov(r0, Operand(SUCCESS));
- }
- }
-
- // Exit and return r0
- __ bind(&exit_label_);
- if (global()) {
- __ ldr(r0, MemOperand(frame_pointer(), kSuccessfulCaptures));
- }
-
- __ bind(&return_r0);
- // Skip sp past regexp registers and local variables.
- __ mov(sp, frame_pointer());
- // Restore registers r4..r11 and return (restoring lr to pc).
- __ ldm(ia_w, sp, registers_to_retain | pc.bit());
-
- // Backtrack code (branch target for conditional backtracks).
- if (backtrack_label_.is_linked()) {
- __ bind(&backtrack_label_);
- Backtrack();
- }
-
- Label exit_with_exception;
-
- // Preempt-code
- if (check_preempt_label_.is_linked()) {
- SafeCallTarget(&check_preempt_label_);
-
- CallCheckStackGuardState(r0);
- __ cmp(r0, Operand::Zero());
- // If returning non-zero, we should end execution with the given
- // result as return value.
- __ b(ne, &return_r0);
-
- // String might have moved: Reload end of string from frame.
- __ ldr(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
- SafeReturn();
- }
-
- // Backtrack stack overflow code.
- if (stack_overflow_label_.is_linked()) {
- SafeCallTarget(&stack_overflow_label_);
- // Reached if the backtrack-stack limit has been hit.
- Label grow_failed;
-
- // Call GrowStack(backtrack_stackpointer(), &stack_base)
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments, r0);
- __ mov(r0, backtrack_stackpointer());
- __ add(r1, frame_pointer(), Operand(kStackHighEnd));
- __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
- ExternalReference grow_stack =
- ExternalReference::re_grow_stack(isolate());
- __ CallCFunction(grow_stack, num_arguments);
- // If it returns NULL, we have failed to grow the stack and
- // must exit with a stack-overflow exception.
- __ cmp(r0, Operand::Zero());
- __ b(eq, &exit_with_exception);
- // Otherwise use return value as new stack pointer.
- __ mov(backtrack_stackpointer(), r0);
- // Restore saved registers and continue.
- SafeReturn();
- }
-
- if (exit_with_exception.is_linked()) {
- // If any of the code above needed to exit with an exception.
- __ bind(&exit_with_exception);
- // Exit with Result EXCEPTION(-1) to signal thrown exception.
- __ mov(r0, Operand(EXCEPTION));
- __ jmp(&return_r0);
- }
-
- CodeDesc code_desc;
- masm_->GetCode(&code_desc);
- Handle<Code> code = isolate()->factory()->NewCode(
- code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
- PROFILE(masm_->isolate(), RegExpCodeCreateEvent(*code, *source));
- return Handle<HeapObject>::cast(code);
-}
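
In the success path above, each capture register holds a negative byte offset from the end of input; the copy loop converts it to a character index by adding r1 (input length in characters plus the start index), halving the offset first in UC16 mode. A worked example, assuming a 10-character two-byte string matched from start index 0:

    // Input is 20 bytes, so r1 = (20 >> 1) + 0 = 10 characters.
    // A capture starting at character 4 is stored as -(10 - 4) * 2 = -12.
    // Output index = r1 + (-12 >> 1) = 10 + (-6) = 4  (ASR preserves the sign).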
-
-
-void RegExpMacroAssemblerARM::GoTo(Label* to) {
- BranchOrBacktrack(al, to);
-}
-
-
-void RegExpMacroAssemblerARM::IfRegisterGE(int reg,
- int comparand,
- Label* if_ge) {
- __ ldr(r0, register_location(reg));
- __ cmp(r0, Operand(comparand));
- BranchOrBacktrack(ge, if_ge);
-}
-
-
-void RegExpMacroAssemblerARM::IfRegisterLT(int reg,
- int comparand,
- Label* if_lt) {
- __ ldr(r0, register_location(reg));
- __ cmp(r0, Operand(comparand));
- BranchOrBacktrack(lt, if_lt);
-}
-
-
-void RegExpMacroAssemblerARM::IfRegisterEqPos(int reg,
- Label* if_eq) {
- __ ldr(r0, register_location(reg));
- __ cmp(r0, Operand(current_input_offset()));
- BranchOrBacktrack(eq, if_eq);
-}
-
-
-RegExpMacroAssembler::IrregexpImplementation
- RegExpMacroAssemblerARM::Implementation() {
- return kARMImplementation;
-}
-
-
-void RegExpMacroAssemblerARM::LoadCurrentCharacter(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds,
- int characters) {
- DCHECK(cp_offset >= -1); // ^ and \b can look behind one character.
- DCHECK(cp_offset < (1<<30)); // Be sane! (And ensure negation works)
- if (check_bounds) {
- CheckPosition(cp_offset + characters - 1, on_end_of_input);
- }
- LoadCurrentCharacterUnchecked(cp_offset, characters);
-}
-
-
-void RegExpMacroAssemblerARM::PopCurrentPosition() {
- Pop(current_input_offset());
-}
-
-
-void RegExpMacroAssemblerARM::PopRegister(int register_index) {
- Pop(r0);
- __ str(r0, register_location(register_index));
-}
-
-
-void RegExpMacroAssemblerARM::PushBacktrack(Label* label) {
- __ mov_label_offset(r0, label);
- Push(r0);
- CheckStackLimit();
-}
-
-
-void RegExpMacroAssemblerARM::PushCurrentPosition() {
- Push(current_input_offset());
-}
-
-
-void RegExpMacroAssemblerARM::PushRegister(int register_index,
- StackCheckFlag check_stack_limit) {
- __ ldr(r0, register_location(register_index));
- Push(r0);
- if (check_stack_limit) CheckStackLimit();
-}
-
-
-void RegExpMacroAssemblerARM::ReadCurrentPositionFromRegister(int reg) {
- __ ldr(current_input_offset(), register_location(reg));
-}
-
-
-void RegExpMacroAssemblerARM::ReadStackPointerFromRegister(int reg) {
- __ ldr(backtrack_stackpointer(), register_location(reg));
- __ ldr(r0, MemOperand(frame_pointer(), kStackHighEnd));
- __ add(backtrack_stackpointer(), backtrack_stackpointer(), Operand(r0));
-}
-
-
-void RegExpMacroAssemblerARM::SetCurrentPositionFromEnd(int by) {
- Label after_position;
- __ cmp(current_input_offset(), Operand(-by * char_size()));
- __ b(ge, &after_position);
- __ mov(current_input_offset(), Operand(-by * char_size()));
- // On RegExp code entry (where this operation is used), the character before
- // the current position is expected to be already loaded.
- // We have advanced the position, so it's safe to read backwards.
- LoadCurrentCharacterUnchecked(-1, 1);
- __ bind(&after_position);
-}
-
-
-void RegExpMacroAssemblerARM::SetRegister(int register_index, int to) {
- DCHECK(register_index >= num_saved_registers_); // Reserved for positions!
- __ mov(r0, Operand(to));
- __ str(r0, register_location(register_index));
-}
-
-
-bool RegExpMacroAssemblerARM::Succeed() {
- __ jmp(&success_label_);
- return global();
-}
-
-
-void RegExpMacroAssemblerARM::WriteCurrentPositionToRegister(int reg,
- int cp_offset) {
- if (cp_offset == 0) {
- __ str(current_input_offset(), register_location(reg));
- } else {
- __ add(r0, current_input_offset(), Operand(cp_offset * char_size()));
- __ str(r0, register_location(reg));
- }
-}
-
-
-void RegExpMacroAssemblerARM::ClearRegisters(int reg_from, int reg_to) {
- DCHECK(reg_from <= reg_to);
- __ ldr(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
- for (int reg = reg_from; reg <= reg_to; reg++) {
- __ str(r0, register_location(reg));
- }
-}
-
-
-void RegExpMacroAssemblerARM::WriteStackPointerToRegister(int reg) {
- __ ldr(r1, MemOperand(frame_pointer(), kStackHighEnd));
- __ sub(r0, backtrack_stackpointer(), r1);
- __ str(r0, register_location(reg));
-}
-
-
-// Private methods:
-
-void RegExpMacroAssemblerARM::CallCheckStackGuardState(Register scratch) {
- __ PrepareCallCFunction(3, scratch);
-
- // RegExp code frame pointer.
- __ mov(r2, frame_pointer());
- // Code* of self.
- __ mov(r1, Operand(masm_->CodeObject()));
-
- // We need to make room for the return address on the stack.
- int stack_alignment = base::OS::ActivationFrameAlignment();
- DCHECK(IsAligned(stack_alignment, kPointerSize));
- __ sub(sp, sp, Operand(stack_alignment));
-
- // r0 will point to the return address, placed by DirectCEntry.
- __ mov(r0, sp);
-
- ExternalReference stack_guard_check =
- ExternalReference::re_check_stack_guard_state(isolate());
- __ mov(ip, Operand(stack_guard_check));
- DirectCEntryStub stub(isolate());
- stub.GenerateCall(masm_, ip);
-
- // Drop the return address from the stack.
- __ add(sp, sp, Operand(stack_alignment));
-
- DCHECK(stack_alignment != 0);
- __ ldr(sp, MemOperand(sp, 0));
-
- __ mov(code_pointer(), Operand(masm_->CodeObject()));
-}
-
-
-// Helper function for reading a value out of a stack frame.
-template <typename T>
-static T& frame_entry(Address re_frame, int frame_offset) {
- return reinterpret_cast<T&>(Memory::int32_at(re_frame + frame_offset));
-}
-
-
-template <typename T>
-static T* frame_entry_address(Address re_frame, int frame_offset) {
- return reinterpret_cast<T*>(re_frame + frame_offset);
-}
-
-
-int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
- Code* re_code,
- Address re_frame) {
- return NativeRegExpMacroAssembler::CheckStackGuardState(
- frame_entry<Isolate*>(re_frame, kIsolate),
- frame_entry<int>(re_frame, kStartIndex),
- frame_entry<int>(re_frame, kDirectCall) == 1, return_address, re_code,
- frame_entry_address<String*>(re_frame, kInputString),
- frame_entry_address<const byte*>(re_frame, kInputStart),
- frame_entry_address<const byte*>(re_frame, kInputEnd));
-}
-
-
-MemOperand RegExpMacroAssemblerARM::register_location(int register_index) {
- DCHECK(register_index < (1<<30));
- if (num_registers_ <= register_index) {
- num_registers_ = register_index + 1;
- }
- return MemOperand(frame_pointer(),
- kRegisterZero - register_index * kPointerSize);
-}
-
-
-void RegExpMacroAssemblerARM::CheckPosition(int cp_offset,
- Label* on_outside_input) {
- __ cmp(current_input_offset(), Operand(-cp_offset * char_size()));
- BranchOrBacktrack(ge, on_outside_input);
-}
-
-
-void RegExpMacroAssemblerARM::BranchOrBacktrack(Condition condition,
- Label* to) {
- if (condition == al) { // Unconditional.
- if (to == NULL) {
- Backtrack();
- return;
- }
- __ jmp(to);
- return;
- }
- if (to == NULL) {
- __ b(condition, &backtrack_label_);
- return;
- }
- __ b(condition, to);
-}
-
-
-void RegExpMacroAssemblerARM::SafeCall(Label* to, Condition cond) {
- __ bl(to, cond);
-}
-
-
-void RegExpMacroAssemblerARM::SafeReturn() {
- __ pop(lr);
- __ add(pc, lr, Operand(masm_->CodeObject()));
-}
-
-
-void RegExpMacroAssemblerARM::SafeCallTarget(Label* name) {
- __ bind(name);
- __ sub(lr, lr, Operand(masm_->CodeObject()));
- __ push(lr);
-}
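
SafeCallTarget rebases lr into an offset relative to the code object before pushing it, and SafeReturn adds the base back, so the backtrack path never leaves an absolute code address on the stack that a moving GC could invalidate. In effect:

    // at the call target:  push(lr - code_object)   // store relocatable offset
    // at the return site:  pc = pop() + code_object // rebase after any code move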
-
-
-void RegExpMacroAssemblerARM::Push(Register source) {
- DCHECK(!source.is(backtrack_stackpointer()));
- __ str(source,
- MemOperand(backtrack_stackpointer(), kPointerSize, NegPreIndex));
-}
-
-
-void RegExpMacroAssemblerARM::Pop(Register target) {
- DCHECK(!target.is(backtrack_stackpointer()));
- __ ldr(target,
- MemOperand(backtrack_stackpointer(), kPointerSize, PostIndex));
-}
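
The backtrack stack grows downward from kStackHighEnd, so Push pre-decrements the pointer and Pop post-increments it. In C terms, with bsp standing in for backtrack_stackpointer():

    *--bsp = source;   // Push: NegPreIndex, decrement then store
    target = *bsp++;   // Pop:  PostIndex, load then increment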
-
-
-void RegExpMacroAssemblerARM::CheckPreemption() {
- // Check for preemption.
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(isolate());
- __ mov(r0, Operand(stack_limit));
- __ ldr(r0, MemOperand(r0));
- __ cmp(sp, r0);
- SafeCall(&check_preempt_label_, ls);
-}
-
-
-void RegExpMacroAssemblerARM::CheckStackLimit() {
- ExternalReference stack_limit =
- ExternalReference::address_of_regexp_stack_limit(isolate());
- __ mov(r0, Operand(stack_limit));
- __ ldr(r0, MemOperand(r0));
- __ cmp(backtrack_stackpointer(), Operand(r0));
- SafeCall(&stack_overflow_label_, ls);
-}
-
-
-bool RegExpMacroAssemblerARM::CanReadUnaligned() {
- return CpuFeatures::IsSupported(UNALIGNED_ACCESSES) && !slow_safe();
-}
-
-
-void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset,
- int characters) {
- Register offset = current_input_offset();
- if (cp_offset != 0) {
- // r4 is not being used to store the capture start index at this point.
- __ add(r4, current_input_offset(), Operand(cp_offset * char_size()));
- offset = r4;
- }
- // The ldr, str, ldrh, strh instructions can do unaligned accesses, if the CPU
- // and the operating system running on the target allow it.
- // If unaligned load/stores are not supported then this function must only
- // be used to load a single character at a time.
- if (!CanReadUnaligned()) {
- DCHECK(characters == 1);
- }
-
- if (mode_ == LATIN1) {
- if (characters == 4) {
- __ ldr(current_character(), MemOperand(end_of_input_address(), offset));
- } else if (characters == 2) {
- __ ldrh(current_character(), MemOperand(end_of_input_address(), offset));
- } else {
- DCHECK(characters == 1);
- __ ldrb(current_character(), MemOperand(end_of_input_address(), offset));
- }
- } else {
- DCHECK(mode_ == UC16);
- if (characters == 2) {
- __ ldr(current_character(), MemOperand(end_of_input_address(), offset));
- } else {
- DCHECK(characters == 1);
- __ ldrh(current_character(), MemOperand(end_of_input_address(), offset));
- }
- }
-}
-
-
-#undef __
-
-#endif // V8_INTERPRETED_REGEXP
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.h b/deps/v8/src/arm/regexp-macro-assembler-arm.h
deleted file mode 100644
index 078d0dfa62..0000000000
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.h
+++ /dev/null
@@ -1,219 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
-#define V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
-
-#include "src/arm/assembler-arm.h"
-#include "src/arm/assembler-arm-inl.h"
-#include "src/macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-
-#ifndef V8_INTERPRETED_REGEXP
-class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
- public:
- RegExpMacroAssemblerARM(Isolate* isolate, Zone* zone, Mode mode,
- int registers_to_save);
- virtual ~RegExpMacroAssemblerARM();
- virtual int stack_limit_slack();
- virtual void AdvanceCurrentPosition(int by);
- virtual void AdvanceRegister(int reg, int by);
- virtual void Backtrack();
- virtual void Bind(Label* label);
- virtual void CheckAtStart(Label* on_at_start);
- virtual void CheckCharacter(unsigned c, Label* on_equal);
- virtual void CheckCharacterAfterAnd(unsigned c,
- unsigned mask,
- Label* on_equal);
- virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
- virtual void CheckCharacterLT(uc16 limit, Label* on_less);
- // A "greedy loop" is a loop that is both greedy and with a simple
- // body. It has a particularly simple implementation.
- virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
- virtual void CheckNotAtStart(Label* on_not_at_start);
- virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
- virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- Label* on_no_match);
- virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
- virtual void CheckNotCharacterAfterAnd(unsigned c,
- unsigned mask,
- Label* on_not_equal);
- virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
- uc16 minus,
- uc16 mask,
- Label* on_not_equal);
- virtual void CheckCharacterInRange(uc16 from,
- uc16 to,
- Label* on_in_range);
- virtual void CheckCharacterNotInRange(uc16 from,
- uc16 to,
- Label* on_not_in_range);
- virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
-
- // Checks whether the given offset from the current position is before
- // the end of the string.
- virtual void CheckPosition(int cp_offset, Label* on_outside_input);
- virtual bool CheckSpecialCharacterClass(uc16 type,
- Label* on_no_match);
- virtual void Fail();
- virtual Handle<HeapObject> GetCode(Handle<String> source);
- virtual void GoTo(Label* label);
- virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
- virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
- virtual void IfRegisterEqPos(int reg, Label* if_eq);
- virtual IrregexpImplementation Implementation();
- virtual void LoadCurrentCharacter(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds = true,
- int characters = 1);
- virtual void PopCurrentPosition();
- virtual void PopRegister(int register_index);
- virtual void PushBacktrack(Label* label);
- virtual void PushCurrentPosition();
- virtual void PushRegister(int register_index,
- StackCheckFlag check_stack_limit);
- virtual void ReadCurrentPositionFromRegister(int reg);
- virtual void ReadStackPointerFromRegister(int reg);
- virtual void SetCurrentPositionFromEnd(int by);
- virtual void SetRegister(int register_index, int to);
- virtual bool Succeed();
- virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
- virtual void ClearRegisters(int reg_from, int reg_to);
- virtual void WriteStackPointerToRegister(int reg);
- virtual bool CanReadUnaligned();
-
- // Called from RegExp if the stack-guard is triggered.
- // If the code object is relocated, the return address is fixed before
- // returning.
- static int CheckStackGuardState(Address* return_address,
- Code* re_code,
- Address re_frame);
-
- private:
- // Offsets from frame_pointer() of function parameters and stored registers.
- static const int kFramePointer = 0;
-
- // Above the frame pointer - Stored registers and stack passed parameters.
- // Register 4..11.
- static const int kStoredRegisters = kFramePointer;
- // Return address (stored from link register, read into pc on return).
- static const int kReturnAddress = kStoredRegisters + 8 * kPointerSize;
- static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
- // Stack parameters placed by caller.
- static const int kRegisterOutput = kSecondaryReturnAddress + kPointerSize;
- static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
- static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
- static const int kDirectCall = kStackHighEnd + kPointerSize;
- static const int kIsolate = kDirectCall + kPointerSize;
-
- // Below the frame pointer.
- // Register parameters stored by setup code.
- static const int kInputEnd = kFramePointer - kPointerSize;
- static const int kInputStart = kInputEnd - kPointerSize;
- static const int kStartIndex = kInputStart - kPointerSize;
- static const int kInputString = kStartIndex - kPointerSize;
- // When adding local variables remember to push space for them in
- // the frame in GetCode.
- static const int kSuccessfulCaptures = kInputString - kPointerSize;
- static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
- // First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
-
- // Initial size of code buffer.
- static const size_t kRegExpCodeSize = 1024;
-
- static const int kBacktrackConstantPoolSize = 4;
-
- // Load a number of characters at the given offset from the
- // current position, into the current-character register.
- void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
-
- // Check whether preemption has been requested.
- void CheckPreemption();
-
- // Check whether we are exceeding the stack limit on the backtrack stack.
- void CheckStackLimit();
-
-
- // Generate a call to CheckStackGuardState.
- void CallCheckStackGuardState(Register scratch);
-
- // The ebp-relative location of a regexp register.
- MemOperand register_location(int register_index);
-
- // Register holding the current input position as negative offset from
- // the end of the string.
- inline Register current_input_offset() { return r6; }
-
- // The register containing the current character after LoadCurrentCharacter.
- inline Register current_character() { return r7; }
-
- // Register holding address of the end of the input string.
- inline Register end_of_input_address() { return r10; }
-
- // Register holding the frame address. Local variables, parameters and
- // regexp registers are addressed relative to this.
- inline Register frame_pointer() { return fp; }
-
- // The register containing the backtrack stack top. Provides a meaningful
- // name to the register.
- inline Register backtrack_stackpointer() { return r8; }
-
- // Register holding pointer to the current code object.
- inline Register code_pointer() { return r5; }
-
- // Byte size of chars in the string to match (decided by the Mode argument)
- inline int char_size() { return static_cast<int>(mode_); }
-
- // Equivalent to a conditional branch to the label, unless the label
- // is NULL, in which case it is a conditional Backtrack.
- void BranchOrBacktrack(Condition condition, Label* to);
-
- // Call and return internally in the generated code in a way that
- // is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
- inline void SafeCall(Label* to, Condition cond = al);
- inline void SafeReturn();
- inline void SafeCallTarget(Label* name);
-
- // Pushes the value of a register on the backtrack stack. Decrements the
- // stack pointer by a word size and stores the register's value there.
- inline void Push(Register source);
-
- // Pops a value from the backtrack stack. Reads the word at the stack pointer
- // and increments it by a word size.
- inline void Pop(Register target);
-
- Isolate* isolate() const { return masm_->isolate(); }
-
- MacroAssembler* masm_;
-
- // Which mode to generate code for (Latin1 or UC16).
- Mode mode_;
-
- // One greater than maximal register index actually used.
- int num_registers_;
-
- // Number of registers to output at the end (the saved registers
- // are always 0..num_saved_registers_-1)
- int num_saved_registers_;
-
- // Labels used internally.
- Label entry_label_;
- Label start_label_;
- Label success_label_;
- Label backtrack_label_;
- Label exit_label_;
- Label check_preempt_label_;
- Label stack_overflow_label_;
-};
-
-#endif // V8_INTERPRETED_REGEXP
-
-
-}} // namespace v8::internal
-
-#endif // V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index 3a02ee0094..5da6204050 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -6,8 +6,6 @@
#include <stdlib.h>
#include <cmath>
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM
#include "src/arm/constants-arm.h"
@@ -1229,9 +1227,15 @@ void Simulator::WriteDW(int32_t addr, int32_t value1, int32_t value2) {
// Returns the limit of the stack area to enable checking for stack overflows.
-uintptr_t Simulator::StackLimit() const {
- // Leave a safety margin of 1024 bytes to prevent overrunning the stack when
- // pushing values.
+uintptr_t Simulator::StackLimit(uintptr_t c_limit) const {
+ // The simulator uses a separate JS stack. If we have exhausted the C stack,
+ // we also drop down the JS limit to reflect the exhaustion on the JS stack.
+ if (GetCurrentStackPosition() < c_limit) {
+ return reinterpret_cast<uintptr_t>(get_sp());
+ }
+
+ // Otherwise the limit is the JS stack. Leave a safety margin of 1024 bytes
+ // to prevent overrunning the stack when pushing values.
return reinterpret_cast<uintptr_t>(stack_) + 1024;
}
@@ -4011,6 +4015,9 @@ void Simulator::Execute() {
void Simulator::CallInternal(byte* entry) {
+ // Adjust JS-based stack limit to C-based stack limit.
+ isolate_->stack_guard()->AdjustStackLimitForSimulator();
+
// Prepare to execute the code at entry
set_register(pc, reinterpret_cast<int32_t>(entry));
// Put down marker for end of simulation. The simulator will stop simulation
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h
index eea43efc53..a972a77d41 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/arm/simulator-arm.h
@@ -181,12 +181,12 @@ class Simulator {
void set_pc(int32_t value);
int32_t get_pc() const;
- Address get_sp() {
+ Address get_sp() const {
return reinterpret_cast<Address>(static_cast<intptr_t>(get_register(sp)));
}
// Accessor to the internal simulator stack area.
- uintptr_t StackLimit() const;
+ uintptr_t StackLimit(uintptr_t c_limit) const;
// Executes ARM instructions until the PC reaches end_sim_pc.
void Execute();
@@ -439,15 +439,14 @@ class Simulator {
// The simulator has its own stack. Thus it has a different stack limit from
-// the C-based native code. Setting the c_limit to indicate a very small
-// stack cause stack overflow errors, since the simulator ignores the input.
-// This is unlikely to be an issue in practice, though it might cause testing
-// trouble down the line.
+// the C-based native code. The JS-based limit normally points near the end of
+// the simulator stack. When the C-based limit is exhausted we reflect that by
+// lowering the JS-based limit as well, to make stack checks trigger.
class SimulatorStack : public v8::internal::AllStatic {
public:
static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
uintptr_t c_limit) {
- return Simulator::current(isolate)->StackLimit();
+ return Simulator::current(isolate)->StackLimit(c_limit);
}
static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {