author     Ryan Dahl <ry@tinyclouds.org>  2011-12-05 16:29:01 -0800
committer  Ryan Dahl <ry@tinyclouds.org>  2011-12-05 16:29:01 -0800
commit     21d081fd7f83aa168ea0bef0870c7f1fed410a63 (patch)
tree       23a04eef49364b1cbee204a87fd0904be2430c2e /deps/v8/src/arm
parent     e90623edc2befb06602ff8c3e01809ba0a21d593 (diff)
Upgrade V8 to 3.7.12
Diffstat (limited to 'deps/v8/src/arm')
-rw-r--r--  deps/v8/src/arm/assembler-arm-inl.h              41
-rw-r--r--  deps/v8/src/arm/assembler-arm.cc                 12
-rw-r--r--  deps/v8/src/arm/assembler-arm.h                  16
-rw-r--r--  deps/v8/src/arm/builtins-arm.cc                1206
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.cc              1148
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.h                321
-rw-r--r--  deps/v8/src/arm/codegen-arm.cc                  349
-rw-r--r--  deps/v8/src/arm/codegen-arm.h                    26
-rw-r--r--  deps/v8/src/arm/constants-arm.h                  47
-rw-r--r--  deps/v8/src/arm/debug-arm.cc                     88
-rw-r--r--  deps/v8/src/arm/deoptimizer-arm.cc              116
-rw-r--r--  deps/v8/src/arm/frames-arm.h                     43
-rw-r--r--  deps/v8/src/arm/full-codegen-arm.cc             781
-rw-r--r--  deps/v8/src/arm/ic-arm.cc                       259
-rw-r--r--  deps/v8/src/arm/lithium-arm.cc                  177
-rw-r--r--  deps/v8/src/arm/lithium-arm.h                   119
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.cc          835
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.h            64
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc          865
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.h           337
-rw-r--r--  deps/v8/src/arm/regexp-macro-assembler-arm.cc    20
-rw-r--r--  deps/v8/src/arm/simulator-arm.cc                 29
-rw-r--r--  deps/v8/src/arm/simulator-arm.h                   7
-rw-r--r--  deps/v8/src/arm/stub-cache-arm.cc              1655
24 files changed, 5360 insertions, 3201 deletions
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index 3e19a45385..79f9c7bd2b 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -64,7 +64,9 @@ Address RelocInfo::target_address() {
Address RelocInfo::target_address_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
+ || rmode_ == EMBEDDED_OBJECT
+ || rmode_ == EXTERNAL_REFERENCE);
return reinterpret_cast<Address>(Assembler::target_address_address_at(pc_));
}
@@ -74,9 +76,14 @@ int RelocInfo::target_address_size() {
}
-void RelocInfo::set_target_address(Address target) {
+void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
Assembler::set_target_address_at(pc_, target);
+ if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
+ Object* target_code = Code::GetCodeFromTargetAddress(target);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target_code));
+ }
}
@@ -98,9 +105,15 @@ Object** RelocInfo::target_object_address() {
}
-void RelocInfo::set_target_object(Object* target) {
+void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
+ if (mode == UPDATE_WRITE_BARRIER &&
+ host() != NULL &&
+ target->IsHeapObject()) {
+ host()->GetHeap()->incremental_marking()->RecordWrite(
+ host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+ }
}
@@ -127,10 +140,17 @@ JSGlobalPropertyCell* RelocInfo::target_cell() {
}
-void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
+void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
+ WriteBarrierMode mode) {
ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
Memory::Address_at(pc_) = address;
+ if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
+ // TODO(1550) We are passing NULL as a slot because cell can never be on
+ // evacuation candidate.
+ host()->GetHeap()->incremental_marking()->RecordWrite(
+ host(), NULL, cell);
+ }
}
@@ -147,6 +167,11 @@ void RelocInfo::set_call_address(Address target) {
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
+ if (host() != NULL) {
+ Object* target_code = Code::GetCodeFromTargetAddress(target);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target_code));
+ }
}
@@ -195,13 +220,13 @@ bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- visitor->VisitPointer(target_object_address());
+ visitor->VisitEmbeddedPointer(this);
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
visitor->VisitGlobalPropertyCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- visitor->VisitExternalReference(target_reference_address());
+ visitor->VisitExternalReference(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
// TODO(isolates): Get a cached isolate below.
} else if (((RelocInfo::IsJSReturn(mode) &&
@@ -221,13 +246,13 @@ template<typename StaticVisitor>
void RelocInfo::Visit(Heap* heap) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- StaticVisitor::VisitPointer(heap, target_object_address());
+ StaticVisitor::VisitEmbeddedPointer(heap, this);
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(heap, this);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
StaticVisitor::VisitGlobalPropertyCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- StaticVisitor::VisitExternalReference(target_reference_address());
+ StaticVisitor::VisitExternalReference(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
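
The recurring change in assembler-arm-inl.h is that the RelocInfo setters gain a WriteBarrierMode parameter and, when the barrier is requested and a host() Code object is known, notify the incremental marker after the raw store. Below is a minimal standalone C++ sketch of that store-then-record pattern; the types are simplified stand-ins, not V8's real classes.

// Standalone sketch of the store-then-record-write pattern (stand-in types,
// not V8's real classes).
#include <cstdio>

struct HeapObject {};
struct Code;

struct IncrementalMarking {
  // Tell the marker that host now embeds a reference to value, so host is
  // re-visited even if it was already scanned.
  void RecordWriteIntoCode(Code* host, HeapObject* value) {
    std::printf("record write: host=%p value=%p\n",
                static_cast<void*>(host), static_cast<void*>(value));
  }
};

enum WriteBarrierMode { SKIP_WRITE_BARRIER, UPDATE_WRITE_BARRIER };

struct Code {
  IncrementalMarking* marking;
  HeapObject* slot;  // stand-in for the patched location inside the code

  // Analogous to RelocInfo::set_target_object: do the raw store first, then
  // run the write barrier unless the caller explicitly skips it.
  void set_target(HeapObject* value, WriteBarrierMode mode) {
    slot = value;
    if (mode == UPDATE_WRITE_BARRIER && marking != NULL) {
      marking->RecordWriteIntoCode(this, value);
    }
  }
};

int main() {
  IncrementalMarking marking;
  HeapObject target;
  Code code = { &marking, NULL };
  code.set_target(&target, UPDATE_WRITE_BARRIER);  // barrier runs
  code.set_target(&target, SKIP_WRITE_BARRIER);    // raw store only
  return 0;
}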
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 0ec36921ab..329493a340 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -78,7 +78,9 @@ static uint64_t CpuFeaturesImpliedByCompiler() {
void CpuFeatures::Probe() {
- ASSERT(!initialized_);
+ unsigned standard_features = (OS::CpuFeaturesImpliedByPlatform() |
+ CpuFeaturesImpliedByCompiler());
+ ASSERT(supported_ == 0 || supported_ == standard_features);
#ifdef DEBUG
initialized_ = true;
#endif
@@ -86,8 +88,7 @@ void CpuFeatures::Probe() {
// Get the features implied by the OS and the compiler settings. This is the
 // minimal set of features which is also allowed for generated code in the
// snapshot.
- supported_ |= OS::CpuFeaturesImpliedByPlatform();
- supported_ |= CpuFeaturesImpliedByCompiler();
+ supported_ |= standard_features;
if (Serializer::enabled()) {
// No probing for features if we might serialize (generate snapshot).
@@ -2505,7 +2506,8 @@ void Assembler::dd(uint32_t data) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
- RelocInfo rinfo(pc_, rmode, data); // we do not try to reuse pool constants
+ // We do not try to reuse pool constants.
+ RelocInfo rinfo(pc_, rmode, data, NULL);
if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
// Adjust code for new modes.
ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
@@ -2537,7 +2539,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
}
ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
- RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId());
+ RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId(), NULL);
ClearRecordedAstId();
reloc_info_writer.Write(&reloc_info_with_ast_id);
} else {
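
The Probe() change above relaxes the old !initialized_ assertion: probing may now run more than once, provided every run starts from the same OS- and compiler-implied baseline. A hedged sketch of that invariant in plain C++ (the feature functions are stand-ins, not V8's):

#include <cassert>

static unsigned supported_ = 0;

// Stand-ins for OS::CpuFeaturesImpliedByPlatform() and
// CpuFeaturesImpliedByCompiler().
unsigned PlatformImpliedFeatures() { return 1u << 0; }
unsigned CompilerImpliedFeatures() { return 1u << 1; }

void Probe(bool may_serialize) {
  unsigned standard_features =
      PlatformImpliedFeatures() | CompilerImpliedFeatures();
  // Re-probing is allowed only if it cannot change the outcome: either this
  // is the first call, or the previous call stopped at the baseline.
  assert(supported_ == 0 || supported_ == standard_features);
  supported_ |= standard_features;
  if (may_serialize) {
    return;  // snapshot code must stick to the minimal feature set
  }
  // ... runtime feature detection would OR further bits into supported_ ...
}

int main() {
  Probe(true);
  Probe(true);  // a second baseline-only probe passes the assertion
  return 0;
}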
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index 9a586936fe..247479d730 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -304,9 +304,9 @@ const DwVfpRegister d14 = { 14 };
const DwVfpRegister d15 = { 15 };
// Aliases for double registers.
-const DwVfpRegister kFirstCalleeSavedDoubleReg = d8;
-const DwVfpRegister kLastCalleeSavedDoubleReg = d15;
-const DwVfpRegister kDoubleRegZero = d14;
+static const DwVfpRegister& kFirstCalleeSavedDoubleReg = d8;
+static const DwVfpRegister& kLastCalleeSavedDoubleReg = d15;
+static const DwVfpRegister& kDoubleRegZero = d14;
// Coprocessor register
@@ -1209,6 +1209,10 @@ class Assembler : public AssemblerBase {
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
// Read/patch instructions
+ Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
+ void instr_at_put(int pos, Instr instr) {
+ *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
+ }
static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
static void instr_at_put(byte* pc, Instr instr) {
*reinterpret_cast<Instr*>(pc) = instr;
@@ -1263,12 +1267,6 @@ class Assembler : public AssemblerBase {
int buffer_space() const { return reloc_info_writer.pos() - pc_; }
- // Read/patch instructions
- Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
- void instr_at_put(int pos, Instr instr) {
- *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
- }
-
// Decode branch instruction at pos and return branch target pos
int target_at(int pos);
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index 60d2081c29..d0136f550d 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -86,12 +86,6 @@ static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
}
-// This constant has the same value as JSArray::kPreallocatedArrayElements and
-// if JSArray::kPreallocatedArrayElements is changed handling of loop unfolding
-// below should be reconsidered.
-static const int kLoopUnfoldLimit = 4;
-
-
// Allocate an empty JSArray. The allocated array is put into the result
// register. An elements backing store is allocated with size initial_capacity
// and filled with the hole values.
@@ -101,16 +95,19 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
Register scratch1,
Register scratch2,
Register scratch3,
- int initial_capacity,
Label* gc_required) {
- ASSERT(initial_capacity > 0);
+ const int initial_capacity = JSArray::kPreallocatedArrayElements;
+ STATIC_ASSERT(initial_capacity >= 0);
// Load the initial map from the array function.
__ ldr(scratch1, FieldMemOperand(array_function,
JSFunction::kPrototypeOrInitialMapOffset));
// Allocate the JSArray object together with space for a fixed array with the
// requested elements.
- int size = JSArray::kSize + FixedArray::SizeFor(initial_capacity);
+ int size = JSArray::kSize;
+ if (initial_capacity > 0) {
+ size += FixedArray::SizeFor(initial_capacity);
+ }
__ AllocateInNewSpace(size,
result,
scratch2,
@@ -130,6 +127,11 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
__ mov(scratch3, Operand(0, RelocInfo::NONE));
__ str(scratch3, FieldMemOperand(result, JSArray::kLengthOffset));
+ if (initial_capacity == 0) {
+ __ str(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
+ return;
+ }
+
// Calculate the location of the elements array and set elements array member
// of the JSArray.
// result: JSObject
@@ -138,7 +140,6 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
__ str(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
// Clear the heap tag on the elements array.
- STATIC_ASSERT(kSmiTag == 0);
__ sub(scratch1, scratch1, Operand(kHeapObjectTag));
// Initialize the FixedArray and fill it with holes. FixedArray length is
@@ -147,18 +148,29 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
// scratch1: elements array (untagged)
// scratch2: start of next object
__ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex);
- ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
+ STATIC_ASSERT(0 * kPointerSize == FixedArray::kMapOffset);
__ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
__ mov(scratch3, Operand(Smi::FromInt(initial_capacity)));
- ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+ STATIC_ASSERT(1 * kPointerSize == FixedArray::kLengthOffset);
__ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
- // Fill the FixedArray with the hole value.
- ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
- ASSERT(initial_capacity <= kLoopUnfoldLimit);
+ // Fill the FixedArray with the hole value. Inline the code if short.
+ STATIC_ASSERT(2 * kPointerSize == FixedArray::kHeaderSize);
__ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
- for (int i = 0; i < initial_capacity; i++) {
+ static const int kLoopUnfoldLimit = 4;
+ if (initial_capacity <= kLoopUnfoldLimit) {
+ for (int i = 0; i < initial_capacity; i++) {
+ __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
+ }
+ } else {
+ Label loop, entry;
+ __ add(scratch2, scratch1, Operand(initial_capacity * kPointerSize));
+ __ b(&entry);
+ __ bind(&loop);
__ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
+ __ bind(&entry);
+ __ cmp(scratch1, scratch2);
+ __ b(lt, &loop);
}
}
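
The rewritten fill code above chooses between straight-line stores and an emitted compare-and-branch loop based on kLoopUnfoldLimit, now that initial_capacity is no longer capped at the unfold limit. The same decision, restated in plain C++ rather than generated ARM code:

#include <cstddef>

static const int kLoopUnfoldLimit = 4;  // same threshold as the stub above

// Fill count slots with a sentinel. For small compile-time counts the stub
// emits count individual stores; larger counts get a loop, mirrored here.
void FillWithHole(void** slots, int count, void* hole) {
  if (count <= kLoopUnfoldLimit) {
    for (int i = 0; i < count; i++) {
      slots[i] = hole;  // corresponds to the unrolled str instructions
    }
  } else {
    void** end = slots + count;  // scratch2 = scratch1 + capacity * size
    while (slots < end) {
      *slots++ = hole;  // corresponds to the str / cmp / b(lt) loop
    }
  }
}

int main() {
  void* buf[8] = { 0 };
  void* hole = &buf;  // any sentinel value
  FillWithHole(buf, 8, hole);
  return 0;
}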
@@ -173,7 +185,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
// register elements_array_storage is scratched.
static void AllocateJSArray(MacroAssembler* masm,
Register array_function, // Array function.
- Register array_size, // As a smi.
+ Register array_size, // As a smi, cannot be 0.
Register result,
Register elements_array_storage,
Register elements_array_end,
@@ -181,32 +193,18 @@ static void AllocateJSArray(MacroAssembler* masm,
Register scratch2,
bool fill_with_hole,
Label* gc_required) {
- Label not_empty, allocated;
-
// Load the initial map from the array function.
__ ldr(elements_array_storage,
FieldMemOperand(array_function,
JSFunction::kPrototypeOrInitialMapOffset));
- // Check whether an empty sized array is requested.
- __ tst(array_size, array_size);
- __ b(ne, &not_empty);
-
- // If an empty array is requested allocate a small elements array anyway. This
- // keeps the code below free of special casing for the empty array.
- int size = JSArray::kSize +
- FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ AllocateInNewSpace(size,
- result,
- elements_array_end,
- scratch1,
- gc_required,
- TAG_OBJECT);
- __ jmp(&allocated);
+ if (FLAG_debug_code) { // Assert that array size is not zero.
+ __ tst(array_size, array_size);
+ __ Assert(ne, "array size is unexpectedly 0");
+ }
// Allocate the JSArray object together with space for a FixedArray with the
// requested number of elements.
- __ bind(&not_empty);
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
__ mov(elements_array_end,
Operand((JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize));
@@ -226,7 +224,6 @@ static void AllocateJSArray(MacroAssembler* masm,
// result: JSObject
// elements_array_storage: initial map
// array_size: size of array (smi)
- __ bind(&allocated);
__ str(elements_array_storage, FieldMemOperand(result, JSObject::kMapOffset));
__ LoadRoot(elements_array_storage, Heap::kEmptyFixedArrayRootIndex);
__ str(elements_array_storage,
@@ -256,14 +253,6 @@ static void AllocateJSArray(MacroAssembler* masm,
ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
__ str(scratch1, MemOperand(elements_array_storage, kPointerSize, PostIndex));
STATIC_ASSERT(kSmiTag == 0);
- __ tst(array_size, array_size);
- // Length of the FixedArray is the number of pre-allocated elements if
- // the actual JSArray has length 0 and the size of the JSArray for non-empty
- // JSArrays. The length of a FixedArray is stored as a smi.
- __ mov(array_size,
- Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)),
- LeaveCC,
- eq);
ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
__ str(array_size,
MemOperand(elements_array_storage, kPointerSize, PostIndex));
@@ -311,20 +300,20 @@ static void AllocateJSArray(MacroAssembler* masm,
static void ArrayNativeCode(MacroAssembler* masm,
Label* call_generic_code) {
Counters* counters = masm->isolate()->counters();
- Label argc_one_or_more, argc_two_or_more;
+ Label argc_one_or_more, argc_two_or_more, not_empty_array, empty_array;
// Check for array construction with zero arguments or one.
__ cmp(r0, Operand(0, RelocInfo::NONE));
__ b(ne, &argc_one_or_more);
// Handle construction of an empty array.
+ __ bind(&empty_array);
AllocateEmptyJSArray(masm,
r1,
r2,
r3,
r4,
r5,
- JSArray::kPreallocatedArrayElements,
call_generic_code);
__ IncrementCounter(counters->array_function_native(), 1, r3, r4);
// Setup return value, remove receiver from stack and return.
@@ -339,6 +328,13 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ b(ne, &argc_two_or_more);
STATIC_ASSERT(kSmiTag == 0);
__ ldr(r2, MemOperand(sp)); // Get the argument from the stack.
+ __ tst(r2, r2);
+ __ b(ne, &not_empty_array);
+ __ Drop(1); // Adjust stack.
+ __ mov(r0, Operand(0)); // Treat this as a call with argc of zero.
+ __ b(&empty_array);
+
+ __ bind(&not_empty_array);
__ and_(r3, r2, Operand(kIntptrSignBit | kSmiTagMask), SetCC);
__ b(ne, call_generic_code);
@@ -582,10 +578,11 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ bind(&convert_argument);
__ push(function); // Preserve the function.
__ IncrementCounter(counters->string_ctor_conversions(), 1, r3, r4);
- __ EnterInternalFrame();
- __ push(r0);
- __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(r0);
+ __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+ }
__ pop(function);
__ mov(argument, r0);
__ b(&argument_is_string);
@@ -601,10 +598,11 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// create a string wrapper.
__ bind(&gc_required);
__ IncrementCounter(counters->string_ctor_gc_required(), 1, r3, r4);
- __ EnterInternalFrame();
- __ push(argument);
- __ CallRuntime(Runtime::kNewStringWrapper, 1);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(argument);
+ __ CallRuntime(Runtime::kNewStringWrapper, 1);
+ }
__ Ret();
}
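
This hunk shows the pattern applied throughout the rest of the file: paired EnterInternalFrame()/LeaveInternalFrame() calls become a block-scoped FrameScope, so frame teardown is tied to scope exit rather than to a matching call that can be forgotten. A simplified RAII sketch of the idea (hypothetical types, not V8's actual FrameScope):

#include <cstdio>

struct MacroAssembler {
  void EnterInternalFrame() { std::printf("enter internal frame\n"); }
  void LeaveInternalFrame() { std::printf("leave internal frame\n"); }
};

class FrameScope {
 public:
  explicit FrameScope(MacroAssembler* masm) : masm_(masm) {
    masm_->EnterInternalFrame();
  }
  ~FrameScope() { masm_->LeaveInternalFrame(); }  // emitted at scope exit

 private:
  MacroAssembler* masm_;
};

void GenerateStub(MacroAssembler* masm) {
  {
    FrameScope scope(masm);  // replaces __ EnterInternalFrame()
    // ... emit pushes and runtime calls that require a frame ...
  }                          // replaces __ LeaveInternalFrame()
  // ... frameless epilogue such as __ Ret() ...
}

int main() {
  MacroAssembler masm;
  GenerateStub(&masm);
  return 0;
}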
@@ -617,12 +615,12 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// -- sp[...]: constructor arguments
// -----------------------------------
- Label non_function_call;
+ Label slow, non_function_call;
// Check that the function is not a smi.
__ JumpIfSmi(r1, &non_function_call);
// Check that the function is a JSFunction.
__ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
- __ b(ne, &non_function_call);
+ __ b(ne, &slow);
// Jump to the function-specific construct stub.
__ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
@@ -631,10 +629,19 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// r0: number of arguments
// r1: called object
+ // r2: object type
+ Label do_call;
+ __ bind(&slow);
+ __ cmp(r2, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ b(ne, &non_function_call);
+ __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
+ __ jmp(&do_call);
+
__ bind(&non_function_call);
+ __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+ __ bind(&do_call);
// Set expected number of arguments to zero (not changing r0).
__ mov(r2, Operand(0, RelocInfo::NONE));
- __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
__ SetCallKind(r5, CALL_AS_METHOD);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
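
The Generate_JSConstructCall change above splits the non-JSFunction path: a JS function proxy is routed to CALL_FUNCTION_PROXY_AS_CONSTRUCTOR, anything else to CALL_NON_FUNCTION_AS_CONSTRUCTOR, and both share the do_call tail. The control flow, sketched in plain C++ with stand-in enums:

// Stand-in types; the real code dispatches on the map's instance type.
enum InstanceType { JS_FUNCTION_TYPE, JS_FUNCTION_PROXY_TYPE, OTHER_TYPE };
enum Builtin {
  CALL_FUNCTION_PROXY_AS_CONSTRUCTOR,
  CALL_NON_FUNCTION_AS_CONSTRUCTOR
};

struct Dispatch {
  bool use_construct_stub;  // true: jump to the function's construct stub
  Builtin builtin;          // otherwise: builtin invoked via the do_call tail
};

Dispatch DispatchNewCall(InstanceType type) {
  Dispatch d = { false, CALL_NON_FUNCTION_AS_CONSTRUCTOR };
  if (type == JS_FUNCTION_TYPE) {
    d.use_construct_stub = true;  // fast path, no builtin needed
  } else if (type == JS_FUNCTION_PROXY_TYPE) {
    d.builtin = CALL_FUNCTION_PROXY_AS_CONSTRUCTOR;  // the new slow label
  }
  return d;  // OTHER_TYPE keeps CALL_NON_FUNCTION_AS_CONSTRUCTOR
}

int main() {
  Dispatch d = DispatchNewCall(JS_FUNCTION_PROXY_TYPE);
  return d.use_construct_stub ? 1 : 0;
}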
@@ -650,321 +657,329 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
Isolate* isolate = masm->isolate();
// Enter a construct frame.
- __ EnterConstructFrame();
-
- // Preserve the two incoming parameters on the stack.
- __ mov(r0, Operand(r0, LSL, kSmiTagSize));
- __ push(r0); // Smi-tagged arguments count.
- __ push(r1); // Constructor function.
-
- // Try to allocate the object without transitioning into C code. If any of the
- // preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- Label undo_allocation;
+ {
+ FrameScope scope(masm, StackFrame::CONSTRUCT);
+
+ // Preserve the two incoming parameters on the stack.
+ __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+ __ push(r0); // Smi-tagged arguments count.
+ __ push(r1); // Constructor function.
+
+ // Try to allocate the object without transitioning into C code. If any of
+ // the preconditions is not met, the code bails out to the runtime call.
+ Label rt_call, allocated;
+ if (FLAG_inline_new) {
+ Label undo_allocation;
#ifdef ENABLE_DEBUGGER_SUPPORT
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(isolate);
- __ mov(r2, Operand(debug_step_in_fp));
- __ ldr(r2, MemOperand(r2));
- __ tst(r2, r2);
- __ b(ne, &rt_call);
+ ExternalReference debug_step_in_fp =
+ ExternalReference::debug_step_in_fp_address(isolate);
+ __ mov(r2, Operand(debug_step_in_fp));
+ __ ldr(r2, MemOperand(r2));
+ __ tst(r2, r2);
+ __ b(ne, &rt_call);
#endif
- // Load the initial map and verify that it is in fact a map.
- // r1: constructor function
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(r2, &rt_call);
- __ CompareObjectType(r2, r3, r4, MAP_TYPE);
- __ b(ne, &rt_call);
-
- // Check that the constructor is not constructing a JSFunction (see comments
- // in Runtime_NewObject in runtime.cc). In which case the initial map's
- // instance type would be JS_FUNCTION_TYPE.
- // r1: constructor function
- // r2: initial map
- __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
- __ b(eq, &rt_call);
-
- if (count_constructions) {
- Label allocate;
- // Decrease generous allocation count.
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- MemOperand constructor_count =
- FieldMemOperand(r3, SharedFunctionInfo::kConstructionCountOffset);
- __ ldrb(r4, constructor_count);
- __ sub(r4, r4, Operand(1), SetCC);
- __ strb(r4, constructor_count);
- __ b(ne, &allocate);
-
- __ Push(r1, r2);
-
- __ push(r1); // constructor
- // The call will replace the stub, so the countdown is only done once.
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
- __ pop(r2);
- __ pop(r1);
-
- __ bind(&allocate);
- }
+ // Load the initial map and verify that it is in fact a map.
+ // r1: constructor function
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ JumpIfSmi(r2, &rt_call);
+ __ CompareObjectType(r2, r3, r4, MAP_TYPE);
+ __ b(ne, &rt_call);
+
+ // Check that the constructor is not constructing a JSFunction (see
+ // comments in Runtime_NewObject in runtime.cc). In which case the
+ // initial map's instance type would be JS_FUNCTION_TYPE.
+ // r1: constructor function
+ // r2: initial map
+ __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
+ __ b(eq, &rt_call);
- // Now allocate the JSObject on the heap.
- // r1: constructor function
- // r2: initial map
- __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
- __ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS);
+ if (count_constructions) {
+ Label allocate;
+ // Decrease generous allocation count.
+ __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ MemOperand constructor_count =
+ FieldMemOperand(r3, SharedFunctionInfo::kConstructionCountOffset);
+ __ ldrb(r4, constructor_count);
+ __ sub(r4, r4, Operand(1), SetCC);
+ __ strb(r4, constructor_count);
+ __ b(ne, &allocate);
+
+ __ Push(r1, r2);
+
+ __ push(r1); // constructor
+ // The call will replace the stub, so the countdown is only done once.
+ __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+
+ __ pop(r2);
+ __ pop(r1);
+
+ __ bind(&allocate);
+ }
- // Allocated the JSObject, now initialize the fields. Map is set to initial
- // map and properties and elements are set to empty fixed array.
- // r1: constructor function
- // r2: initial map
- // r3: object size
- // r4: JSObject (not tagged)
- __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
- __ mov(r5, r4);
- ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
- __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
- ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
- __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
- ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
- __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
-
- // Fill all the in-object properties with the appropriate filler.
- // r1: constructor function
- // r2: initial map
- // r3: object size (in words)
- // r4: JSObject (not tagged)
- // r5: First in-object property of JSObject (not tagged)
- __ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
- ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
- { Label loop, entry;
+ // Now allocate the JSObject on the heap.
+ // r1: constructor function
+ // r2: initial map
+ __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
+ __ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS);
+
+ // Allocated the JSObject, now initialize the fields. Map is set to
+ // initial map and properties and elements are set to empty fixed array.
+ // r1: constructor function
+ // r2: initial map
+ // r3: object size
+ // r4: JSObject (not tagged)
+ __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
+ __ mov(r5, r4);
+ ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+ __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
+ ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
+ __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
+ ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
+ __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
+
+ // Fill all the in-object properties with the appropriate filler.
+ // r1: constructor function
+ // r2: initial map
+ // r3: object size (in words)
+ // r4: JSObject (not tagged)
+ // r5: First in-object property of JSObject (not tagged)
+ __ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
+ ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
+ __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
if (count_constructions) {
+ __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
+ __ Ubfx(r0, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
+ kBitsPerByte);
+ __ add(r0, r5, Operand(r0, LSL, kPointerSizeLog2));
+ // r0: offset of first field after pre-allocated fields
+ if (FLAG_debug_code) {
+ __ cmp(r0, r6);
+ __ Assert(le, "Unexpected number of pre-allocated property fields.");
+ }
+ __ InitializeFieldsWithFiller(r5, r0, r7);
// To allow for truncation.
__ LoadRoot(r7, Heap::kOnePointerFillerMapRootIndex);
- } else {
- __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
}
- __ b(&entry);
- __ bind(&loop);
- __ str(r7, MemOperand(r5, kPointerSize, PostIndex));
- __ bind(&entry);
- __ cmp(r5, r6);
- __ b(lt, &loop);
- }
+ __ InitializeFieldsWithFiller(r5, r6, r7);
+
+ // Add the object tag to make the JSObject real, so that we can continue
+ // and jump into the continuation code at any time from now on. Any
+ // failures need to undo the allocation, so that the heap is in a
+ // consistent state and verifiable.
+ __ add(r4, r4, Operand(kHeapObjectTag));
+
+ // Check if a non-empty properties array is needed. Continue with
+ // allocated object if not fall through to runtime call if it is.
+ // r1: constructor function
+ // r4: JSObject
+ // r5: start of next object (not tagged)
+ __ ldrb(r3, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset));
+ // The field instance sizes contains both pre-allocated property fields
+ // and in-object properties.
+ __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
+ __ Ubfx(r6, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
+ kBitsPerByte);
+ __ add(r3, r3, Operand(r6));
+ __ Ubfx(r6, r0, Map::kInObjectPropertiesByte * kBitsPerByte,
+ kBitsPerByte);
+ __ sub(r3, r3, Operand(r6), SetCC);
+
+ // Done if no extra properties are to be allocated.
+ __ b(eq, &allocated);
+ __ Assert(pl, "Property allocation count failed.");
+
+ // Scale the number of elements by pointer size and add the header for
+ // FixedArrays to the start of the next object calculation from above.
+ // r1: constructor
+ // r3: number of elements in properties array
+ // r4: JSObject
+ // r5: start of next object
+ __ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize));
+ __ AllocateInNewSpace(
+ r0,
+ r5,
+ r6,
+ r2,
+ &undo_allocation,
+ static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
+
+ // Initialize the FixedArray.
+ // r1: constructor
+ // r3: number of elements in properties array
+ // r4: JSObject
+ // r5: FixedArray (not tagged)
+ __ LoadRoot(r6, Heap::kFixedArrayMapRootIndex);
+ __ mov(r2, r5);
+ ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+ __ str(r6, MemOperand(r2, kPointerSize, PostIndex));
+ ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+ __ mov(r0, Operand(r3, LSL, kSmiTagSize));
+ __ str(r0, MemOperand(r2, kPointerSize, PostIndex));
+
+ // Initialize the fields to undefined.
+ // r1: constructor function
+ // r2: First element of FixedArray (not tagged)
+ // r3: number of elements in properties array
+ // r4: JSObject
+ // r5: FixedArray (not tagged)
+ __ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
+ ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
+ { Label loop, entry;
+ if (count_constructions) {
+ __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
+ } else if (FLAG_debug_code) {
+ __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
+ __ cmp(r7, r8);
+ __ Assert(eq, "Undefined value not loaded.");
+ }
+ __ b(&entry);
+ __ bind(&loop);
+ __ str(r7, MemOperand(r2, kPointerSize, PostIndex));
+ __ bind(&entry);
+ __ cmp(r2, r6);
+ __ b(lt, &loop);
+ }
- // Add the object tag to make the JSObject real, so that we can continue and
- // jump into the continuation code at any time from now on. Any failures
- // need to undo the allocation, so that the heap is in a consistent state
- // and verifiable.
- __ add(r4, r4, Operand(kHeapObjectTag));
+ // Store the initialized FixedArray into the properties field of
+ // the JSObject
+ // r1: constructor function
+ // r4: JSObject
+ // r5: FixedArray (not tagged)
+ __ add(r5, r5, Operand(kHeapObjectTag)); // Add the heap tag.
+ __ str(r5, FieldMemOperand(r4, JSObject::kPropertiesOffset));
+
+ // Continue with JSObject being successfully allocated
+ // r1: constructor function
+ // r4: JSObject
+ __ jmp(&allocated);
+
+ // Undo the setting of the new top so that the heap is verifiable. For
+ // example, the map's unused properties potentially do not match the
+ // allocated objects unused properties.
+ // r4: JSObject (previous new top)
+ __ bind(&undo_allocation);
+ __ UndoAllocationInNewSpace(r4, r5);
+ }
- // Check if a non-empty properties array is needed. Continue with allocated
- // object if not fall through to runtime call if it is.
+ // Allocate the new receiver object using the runtime call.
// r1: constructor function
+ __ bind(&rt_call);
+ __ push(r1); // argument for Runtime_NewObject
+ __ CallRuntime(Runtime::kNewObject, 1);
+ __ mov(r4, r0);
+
+ // Receiver for constructor call allocated.
// r4: JSObject
- // r5: start of next object (not tagged)
- __ ldrb(r3, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset));
- // The field instance sizes contains both pre-allocated property fields and
- // in-object properties.
- __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
- __ Ubfx(r6, r0, Map::kPreAllocatedPropertyFieldsByte * 8, 8);
- __ add(r3, r3, Operand(r6));
- __ Ubfx(r6, r0, Map::kInObjectPropertiesByte * 8, 8);
- __ sub(r3, r3, Operand(r6), SetCC);
-
- // Done if no extra properties are to be allocated.
- __ b(eq, &allocated);
- __ Assert(pl, "Property allocation count failed.");
-
- // Scale the number of elements by pointer size and add the header for
- // FixedArrays to the start of the next object calculation from above.
- // r1: constructor
- // r3: number of elements in properties array
- // r4: JSObject
- // r5: start of next object
- __ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize));
- __ AllocateInNewSpace(
- r0,
- r5,
- r6,
- r2,
- &undo_allocation,
- static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
-
- // Initialize the FixedArray.
- // r1: constructor
- // r3: number of elements in properties array
- // r4: JSObject
- // r5: FixedArray (not tagged)
- __ LoadRoot(r6, Heap::kFixedArrayMapRootIndex);
- __ mov(r2, r5);
- ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
- __ str(r6, MemOperand(r2, kPointerSize, PostIndex));
- ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
- __ mov(r0, Operand(r3, LSL, kSmiTagSize));
- __ str(r0, MemOperand(r2, kPointerSize, PostIndex));
-
- // Initialize the fields to undefined.
+ __ bind(&allocated);
+ __ push(r4);
+
+ // Push the function and the allocated receiver from the stack.
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: constructor function
+ // sp[2]: number of arguments (smi-tagged)
+ __ ldr(r1, MemOperand(sp, kPointerSize));
+ __ push(r1); // Constructor function.
+ __ push(r4); // Receiver.
+
+ // Reload the number of arguments from the stack.
// r1: constructor function
- // r2: First element of FixedArray (not tagged)
- // r3: number of elements in properties array
- // r4: JSObject
- // r5: FixedArray (not tagged)
- __ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
- ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
- { Label loop, entry;
- if (count_constructions) {
- __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
- } else if (FLAG_debug_code) {
- __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
- __ cmp(r7, r8);
- __ Assert(eq, "Undefined value not loaded.");
- }
- __ b(&entry);
- __ bind(&loop);
- __ str(r7, MemOperand(r2, kPointerSize, PostIndex));
- __ bind(&entry);
- __ cmp(r2, r6);
- __ b(lt, &loop);
- }
-
- // Store the initialized FixedArray into the properties field of
- // the JSObject
+ // sp[0]: receiver
+ // sp[1]: constructor function
+ // sp[2]: receiver
+ // sp[3]: constructor function
+ // sp[4]: number of arguments (smi-tagged)
+ __ ldr(r3, MemOperand(sp, 4 * kPointerSize));
+
+ // Setup pointer to last argument.
+ __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ // Setup number of arguments for function call below
+ __ mov(r0, Operand(r3, LSR, kSmiTagSize));
+
+ // Copy arguments and receiver to the expression stack.
+ // r0: number of arguments
+ // r2: address of last argument (caller sp)
// r1: constructor function
- // r4: JSObject
- // r5: FixedArray (not tagged)
- __ add(r5, r5, Operand(kHeapObjectTag)); // Add the heap tag.
- __ str(r5, FieldMemOperand(r4, JSObject::kPropertiesOffset));
+ // r3: number of arguments (smi-tagged)
+ // sp[0]: receiver
+ // sp[1]: constructor function
+ // sp[2]: receiver
+ // sp[3]: constructor function
+ // sp[4]: number of arguments (smi-tagged)
+ Label loop, entry;
+ __ b(&entry);
+ __ bind(&loop);
+ __ ldr(ip, MemOperand(r2, r3, LSL, kPointerSizeLog2 - 1));
+ __ push(ip);
+ __ bind(&entry);
+ __ sub(r3, r3, Operand(2), SetCC);
+ __ b(ge, &loop);
- // Continue with JSObject being successfully allocated
+ // Call the function.
+ // r0: number of arguments
// r1: constructor function
- // r4: JSObject
- __ jmp(&allocated);
-
- // Undo the setting of the new top so that the heap is verifiable. For
- // example, the map's unused properties potentially do not match the
- // allocated objects unused properties.
- // r4: JSObject (previous new top)
- __ bind(&undo_allocation);
- __ UndoAllocationInNewSpace(r4, r5);
- }
-
- // Allocate the new receiver object using the runtime call.
- // r1: constructor function
- __ bind(&rt_call);
- __ push(r1); // argument for Runtime_NewObject
- __ CallRuntime(Runtime::kNewObject, 1);
- __ mov(r4, r0);
-
- // Receiver for constructor call allocated.
- // r4: JSObject
- __ bind(&allocated);
- __ push(r4);
-
- // Push the function and the allocated receiver from the stack.
- // sp[0]: receiver (newly allocated object)
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
- __ ldr(r1, MemOperand(sp, kPointerSize));
- __ push(r1); // Constructor function.
- __ push(r4); // Receiver.
-
- // Reload the number of arguments from the stack.
- // r1: constructor function
- // sp[0]: receiver
- // sp[1]: constructor function
- // sp[2]: receiver
- // sp[3]: constructor function
- // sp[4]: number of arguments (smi-tagged)
- __ ldr(r3, MemOperand(sp, 4 * kPointerSize));
-
- // Setup pointer to last argument.
- __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // Setup number of arguments for function call below
- __ mov(r0, Operand(r3, LSR, kSmiTagSize));
+ if (is_api_function) {
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+ Handle<Code> code =
+ masm->isolate()->builtins()->HandleApiCallConstruct();
+ ParameterCount expected(0);
+ __ InvokeCode(code, expected, expected,
+ RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
+ } else {
+ ParameterCount actual(r0);
+ __ InvokeFunction(r1, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
+ }
- // Copy arguments and receiver to the expression stack.
- // r0: number of arguments
- // r2: address of last argument (caller sp)
- // r1: constructor function
- // r3: number of arguments (smi-tagged)
- // sp[0]: receiver
- // sp[1]: constructor function
- // sp[2]: receiver
- // sp[3]: constructor function
- // sp[4]: number of arguments (smi-tagged)
- Label loop, entry;
- __ b(&entry);
- __ bind(&loop);
- __ ldr(ip, MemOperand(r2, r3, LSL, kPointerSizeLog2 - 1));
- __ push(ip);
- __ bind(&entry);
- __ sub(r3, r3, Operand(2), SetCC);
- __ b(ge, &loop);
+ // Pop the function from the stack.
+ // sp[0]: constructor function
+ // sp[2]: receiver
+ // sp[3]: constructor function
+ // sp[4]: number of arguments (smi-tagged)
+ __ pop();
- // Call the function.
- // r0: number of arguments
- // r1: constructor function
- if (is_api_function) {
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
- Handle<Code> code =
- masm->isolate()->builtins()->HandleApiCallConstruct();
- ParameterCount expected(0);
- __ InvokeCode(code, expected, expected,
- RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
- } else {
- ParameterCount actual(r0);
- __ InvokeFunction(r1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ // Restore context from the frame.
+ // r0: result
+ // sp[0]: receiver
+ // sp[1]: constructor function
+ // sp[2]: number of arguments (smi-tagged)
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, exit;
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ // r0: result
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: constructor function
+ // sp[2]: number of arguments (smi-tagged)
+ __ JumpIfSmi(r0, &use_receiver);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+ __ CompareObjectType(r0, r3, r3, FIRST_SPEC_OBJECT_TYPE);
+ __ b(ge, &exit);
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ ldr(r0, MemOperand(sp));
+
+ // Remove receiver from the stack, remove caller arguments, and
+ // return.
+ __ bind(&exit);
+ // r0: result
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: constructor function
+ // sp[2]: number of arguments (smi-tagged)
+ __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
+
+ // Leave construct frame.
}
- // Pop the function from the stack.
- // sp[0]: constructor function
- // sp[2]: receiver
- // sp[3]: constructor function
- // sp[4]: number of arguments (smi-tagged)
- __ pop();
-
- // Restore context from the frame.
- // r0: result
- // sp[0]: receiver
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
-
- // If the result is a smi, it is *not* an object in the ECMA sense.
- // r0: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
- __ JumpIfSmi(r0, &use_receiver);
-
- // If the type of the result (stored in its map) is less than
- // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ CompareObjectType(r0, r3, r3, FIRST_SPEC_OBJECT_TYPE);
- __ b(ge, &exit);
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ ldr(r0, MemOperand(sp));
-
- // Remove receiver from the stack, remove caller arguments, and
- // return.
- __ bind(&exit);
- // r0: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
- __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
- __ LeaveConstructFrame();
__ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1));
__ add(sp, sp, Operand(kPointerSize));
__ IncrementCounter(isolate->counters()->constructed_objects(), 1, r1, r2);
@@ -997,63 +1012,64 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r4: argv
// r5-r7, cp may be clobbered
- // Clear the context before we push it when entering the JS frame.
+ // Clear the context before we push it when entering the internal frame.
__ mov(cp, Operand(0, RelocInfo::NONE));
// Enter an internal frame.
- __ EnterInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Set up the context from the function argument.
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+ // Set up the context from the function argument.
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
- // Set up the roots register.
- ExternalReference roots_address =
- ExternalReference::roots_address(masm->isolate());
- __ mov(r10, Operand(roots_address));
+ // Set up the roots register.
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(masm->isolate());
+ __ mov(r10, Operand(roots_array_start));
- // Push the function and the receiver onto the stack.
- __ push(r1);
- __ push(r2);
+ // Push the function and the receiver onto the stack.
+ __ push(r1);
+ __ push(r2);
- // Copy arguments to the stack in a loop.
- // r1: function
- // r3: argc
- // r4: argv, i.e. points to first arg
- Label loop, entry;
- __ add(r2, r4, Operand(r3, LSL, kPointerSizeLog2));
- // r2 points past last arg.
- __ b(&entry);
- __ bind(&loop);
- __ ldr(r0, MemOperand(r4, kPointerSize, PostIndex)); // read next parameter
- __ ldr(r0, MemOperand(r0)); // dereference handle
- __ push(r0); // push parameter
- __ bind(&entry);
- __ cmp(r4, r2);
- __ b(ne, &loop);
-
- // Initialize all JavaScript callee-saved registers, since they will be seen
- // by the garbage collector as part of handlers.
- __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
- __ mov(r5, Operand(r4));
- __ mov(r6, Operand(r4));
- __ mov(r7, Operand(r4));
- if (kR9Available == 1) {
- __ mov(r9, Operand(r4));
- }
+ // Copy arguments to the stack in a loop.
+ // r1: function
+ // r3: argc
+ // r4: argv, i.e. points to first arg
+ Label loop, entry;
+ __ add(r2, r4, Operand(r3, LSL, kPointerSizeLog2));
+ // r2 points past last arg.
+ __ b(&entry);
+ __ bind(&loop);
+ __ ldr(r0, MemOperand(r4, kPointerSize, PostIndex)); // read next parameter
+ __ ldr(r0, MemOperand(r0)); // dereference handle
+ __ push(r0); // push parameter
+ __ bind(&entry);
+ __ cmp(r4, r2);
+ __ b(ne, &loop);
- // Invoke the code and pass argc as r0.
- __ mov(r0, Operand(r3));
- if (is_construct) {
- __ Call(masm->isolate()->builtins()->JSConstructCall());
- } else {
- ParameterCount actual(r0);
- __ InvokeFunction(r1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
- }
+ // Initialize all JavaScript callee-saved registers, since they will be seen
+ // by the garbage collector as part of handlers.
+ __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+ __ mov(r5, Operand(r4));
+ __ mov(r6, Operand(r4));
+ __ mov(r7, Operand(r4));
+ if (kR9Available == 1) {
+ __ mov(r9, Operand(r4));
+ }
- // Exit the JS frame and remove the parameters (except function), and return.
- // Respect ABI stack constraint.
- __ LeaveInternalFrame();
+ // Invoke the code and pass argc as r0.
+ __ mov(r0, Operand(r3));
+ if (is_construct) {
+ __ Call(masm->isolate()->builtins()->JSConstructCall());
+ } else {
+ ParameterCount actual(r0);
+ __ InvokeFunction(r1, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
+ }
+ // Exit the JS frame and remove the parameters (except function), and
+ // return.
+ // Respect ABI stack constraint.
+ }
__ Jump(lr);
// r0: result
@@ -1072,26 +1088,27 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
// Enter an internal frame.
- __ EnterInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Preserve the function.
- __ push(r1);
- // Push call kind information.
- __ push(r5);
+ // Preserve the function.
+ __ push(r1);
+ // Push call kind information.
+ __ push(r5);
- // Push the function on the stack as the argument to the runtime function.
- __ push(r1);
- __ CallRuntime(Runtime::kLazyCompile, 1);
- // Calculate the entry point.
- __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // Push the function on the stack as the argument to the runtime function.
+ __ push(r1);
+ __ CallRuntime(Runtime::kLazyCompile, 1);
+ // Calculate the entry point.
+ __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
- // Restore call kind information.
- __ pop(r5);
- // Restore saved function.
- __ pop(r1);
+ // Restore call kind information.
+ __ pop(r5);
+ // Restore saved function.
+ __ pop(r1);
- // Tear down temporary frame.
- __ LeaveInternalFrame();
+ // Tear down internal frame.
+ }
// Do a tail-call of the compiled function.
__ Jump(r2);
@@ -1100,26 +1117,27 @@ void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
// Enter an internal frame.
- __ EnterInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Preserve the function.
- __ push(r1);
- // Push call kind information.
- __ push(r5);
+ // Preserve the function.
+ __ push(r1);
+ // Push call kind information.
+ __ push(r5);
- // Push the function on the stack as the argument to the runtime function.
- __ push(r1);
- __ CallRuntime(Runtime::kLazyRecompile, 1);
- // Calculate the entry point.
- __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // Push the function on the stack as the argument to the runtime function.
+ __ push(r1);
+ __ CallRuntime(Runtime::kLazyRecompile, 1);
+ // Calculate the entry point.
+ __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
- // Restore call kind information.
- __ pop(r5);
- // Restore saved function.
- __ pop(r1);
+ // Restore call kind information.
+ __ pop(r5);
+ // Restore saved function.
+ __ pop(r1);
- // Tear down temporary frame.
- __ LeaveInternalFrame();
+ // Tear down internal frame.
+ }
// Do a tail-call of the compiled function.
__ Jump(r2);
@@ -1128,12 +1146,13 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
- __ EnterInternalFrame();
- // Pass the function and deoptimization type to the runtime system.
- __ mov(r0, Operand(Smi::FromInt(static_cast<int>(type))));
- __ push(r0);
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Pass the function and deoptimization type to the runtime system.
+ __ mov(r0, Operand(Smi::FromInt(static_cast<int>(type))));
+ __ push(r0);
+ __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ }
// Get the full codegen state from the stack and untag it -> r6.
__ ldr(r6, MemOperand(sp, 0 * kPointerSize));
@@ -1173,9 +1192,10 @@ void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
// the registers without worrying about which of them contain
// pointers. This seems a bit fragile.
__ stm(db_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit());
- __ EnterInternalFrame();
- __ CallRuntime(Runtime::kNotifyOSR, 0);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kNotifyOSR, 0);
+ }
__ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit());
__ Ret();
}
@@ -1191,10 +1211,11 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame and push it as an
// argument to the on-stack replacement function.
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ EnterInternalFrame();
- __ push(r0);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(r0);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ }
// If the result was -1 it means that we couldn't optimize the
// function. Just return and continue in the unoptimized version.
@@ -1276,17 +1297,23 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ b(ge, &shift_arguments);
__ bind(&convert_to_object);
- __ EnterInternalFrame(); // In order to preserve argument count.
- __ mov(r0, Operand(r0, LSL, kSmiTagSize)); // Smi-tagged.
- __ push(r0);
- __ push(r2);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ mov(r2, r0);
+ {
+ // Enter an internal frame in order to preserve argument count.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ mov(r0, Operand(r0, LSL, kSmiTagSize)); // Smi-tagged.
+ __ push(r0);
+
+ __ push(r2);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ mov(r2, r0);
+
+ __ pop(r0);
+ __ mov(r0, Operand(r0, ASR, kSmiTagSize));
+
+ // Exit the internal frame.
+ }
- __ pop(r0);
- __ mov(r0, Operand(r0, ASR, kSmiTagSize));
- __ LeaveInternalFrame();
// Restore the function to r1, and the flag to r4.
__ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
__ mov(r4, Operand(0, RelocInfo::NONE));
@@ -1406,156 +1433,157 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
const int kRecvOffset = 3 * kPointerSize;
const int kFunctionOffset = 4 * kPointerSize;
- __ EnterInternalFrame();
+ {
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
- __ ldr(r0, MemOperand(fp, kFunctionOffset)); // get the function
- __ push(r0);
- __ ldr(r0, MemOperand(fp, kArgsOffset)); // get the args array
- __ push(r0);
- __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
-
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- Label okay;
- __ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
- // Make r2 the space we have left. The stack might already be overflowed
- // here which will cause r2 to become negative.
- __ sub(r2, sp, r2);
- // Check if the arguments will overflow the stack.
- __ cmp(r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ b(gt, &okay); // Signed comparison.
-
- // Out of stack space.
- __ ldr(r1, MemOperand(fp, kFunctionOffset));
- __ push(r1);
- __ push(r0);
- __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
- // End of stack check.
+ __ ldr(r0, MemOperand(fp, kFunctionOffset)); // get the function
+ __ push(r0);
+ __ ldr(r0, MemOperand(fp, kArgsOffset)); // get the args array
+ __ push(r0);
+ __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ Label okay;
+ __ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
+ // Make r2 the space we have left. The stack might already be overflowed
+ // here which will cause r2 to become negative.
+ __ sub(r2, sp, r2);
+ // Check if the arguments will overflow the stack.
+ __ cmp(r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ b(gt, &okay); // Signed comparison.
+
+ // Out of stack space.
+ __ ldr(r1, MemOperand(fp, kFunctionOffset));
+ __ push(r1);
+ __ push(r0);
+ __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+ // End of stack check.
- // Push current limit and index.
- __ bind(&okay);
- __ push(r0); // limit
- __ mov(r1, Operand(0, RelocInfo::NONE)); // initial index
- __ push(r1);
+ // Push current limit and index.
+ __ bind(&okay);
+ __ push(r0); // limit
+ __ mov(r1, Operand(0, RelocInfo::NONE)); // initial index
+ __ push(r1);
- // Get the receiver.
- __ ldr(r0, MemOperand(fp, kRecvOffset));
+ // Get the receiver.
+ __ ldr(r0, MemOperand(fp, kRecvOffset));
- // Check that the function is a JS function (otherwise it must be a proxy).
- Label push_receiver;
- __ ldr(r1, MemOperand(fp, kFunctionOffset));
- __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
- __ b(ne, &push_receiver);
+ // Check that the function is a JS function (otherwise it must be a proxy).
+ Label push_receiver;
+ __ ldr(r1, MemOperand(fp, kFunctionOffset));
+ __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
+ __ b(ne, &push_receiver);
- // Change context eagerly to get the right global object if necessary.
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
- // Load the shared function info while the function is still in r1.
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ // Change context eagerly to get the right global object if necessary.
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+ // Load the shared function info while the function is still in r1.
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- // Compute the receiver.
- // Do not transform the receiver for strict mode functions.
- Label call_to_object, use_global_receiver;
- __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
- __ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
- kSmiTagSize)));
- __ b(ne, &push_receiver);
-
- // Do not transform the receiver for strict mode functions.
- __ tst(r2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
- __ b(ne, &push_receiver);
-
- // Compute the receiver in non-strict mode.
- __ JumpIfSmi(r0, &call_to_object);
- __ LoadRoot(r1, Heap::kNullValueRootIndex);
- __ cmp(r0, r1);
- __ b(eq, &use_global_receiver);
- __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, r1);
- __ b(eq, &use_global_receiver);
-
- // Check if the receiver is already a JavaScript object.
- // r0: receiver
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
- __ b(ge, &push_receiver);
-
- // Convert the receiver to a regular object.
- // r0: receiver
- __ bind(&call_to_object);
- __ push(r0);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ b(&push_receiver);
-
- // Use the current global receiver object as the receiver.
- __ bind(&use_global_receiver);
- const int kGlobalOffset =
- Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
- __ ldr(r0, FieldMemOperand(cp, kGlobalOffset));
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
- __ ldr(r0, FieldMemOperand(r0, kGlobalOffset));
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
-
- // Push the receiver.
- // r0: receiver
- __ bind(&push_receiver);
- __ push(r0);
+ // Compute the receiver.
+ // Do not transform the receiver for strict mode functions.
+ Label call_to_object, use_global_receiver;
+ __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+ kSmiTagSize)));
+ __ b(ne, &push_receiver);
- // Copy all arguments from the array to the stack.
- Label entry, loop;
- __ ldr(r0, MemOperand(fp, kIndexOffset));
- __ b(&entry);
+ // Do not transform the receiver for strict mode functions.
+ __ tst(r2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+ __ b(ne, &push_receiver);
- // Load the current argument from the arguments array and push it to the
- // stack.
- // r0: current argument index
- __ bind(&loop);
- __ ldr(r1, MemOperand(fp, kArgsOffset));
- __ push(r1);
- __ push(r0);
+ // Compute the receiver in non-strict mode.
+ __ JumpIfSmi(r0, &call_to_object);
+ __ LoadRoot(r1, Heap::kNullValueRootIndex);
+ __ cmp(r0, r1);
+ __ b(eq, &use_global_receiver);
+ __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
+ __ cmp(r0, r1);
+ __ b(eq, &use_global_receiver);
- // Call the runtime to access the property in the arguments array.
- __ CallRuntime(Runtime::kGetProperty, 2);
- __ push(r0);
+ // Check if the receiver is already a JavaScript object.
+ // r0: receiver
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
+ __ b(ge, &push_receiver);
- // Advance the index to the next argument.
- __ ldr(r0, MemOperand(fp, kIndexOffset));
- __ add(r0, r0, Operand(1 << kSmiTagSize));
- __ str(r0, MemOperand(fp, kIndexOffset));
+ // Convert the receiver to a regular object.
+ // r0: receiver
+ __ bind(&call_to_object);
+ __ push(r0);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ b(&push_receiver);
- // Test if the copy loop has finished copying all the elements from the
- // arguments object.
- __ bind(&entry);
- __ ldr(r1, MemOperand(fp, kLimitOffset));
- __ cmp(r0, r1);
- __ b(ne, &loop);
-
- // Invoke the function.
- Label call_proxy;
- ParameterCount actual(r0);
- __ mov(r0, Operand(r0, ASR, kSmiTagSize));
- __ ldr(r1, MemOperand(fp, kFunctionOffset));
- __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
- __ b(ne, &call_proxy);
- __ InvokeFunction(r1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ // Use the current global receiver object as the receiver.
+ __ bind(&use_global_receiver);
+ const int kGlobalOffset =
+ Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ __ ldr(r0, FieldMemOperand(cp, kGlobalOffset));
+ __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
+ __ ldr(r0, FieldMemOperand(r0, kGlobalOffset));
+ __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
+
+ // Push the receiver.
+ // r0: receiver
+ __ bind(&push_receiver);
+ __ push(r0);
- // Tear down the internal frame and remove function, receiver and args.
- __ LeaveInternalFrame();
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Jump(lr);
+ // Copy all arguments from the array to the stack.
+ Label entry, loop;
+ __ ldr(r0, MemOperand(fp, kIndexOffset));
+ __ b(&entry);
- // Invoke the function proxy.
- __ bind(&call_proxy);
- __ push(r1); // add function proxy as last argument
- __ add(r0, r0, Operand(1));
- __ mov(r2, Operand(0, RelocInfo::NONE));
- __ SetCallKind(r5, CALL_AS_METHOD);
- __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
- __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
+ // Load the current argument from the arguments array and push it to the
+ // stack.
+ // r0: current argument index
+ __ bind(&loop);
+ __ ldr(r1, MemOperand(fp, kArgsOffset));
+ __ push(r1);
+ __ push(r0);
+
+ // Call the runtime to access the property in the arguments array.
+ __ CallRuntime(Runtime::kGetProperty, 2);
+ __ push(r0);
+
+ // Advance the index to the next argument.
+ __ ldr(r0, MemOperand(fp, kIndexOffset));
+ __ add(r0, r0, Operand(1 << kSmiTagSize));
+ __ str(r0, MemOperand(fp, kIndexOffset));
+
+ // Test if the copy loop has finished copying all the elements from the
+ // arguments object.
+ __ bind(&entry);
+ __ ldr(r1, MemOperand(fp, kLimitOffset));
+ __ cmp(r0, r1);
+ __ b(ne, &loop);
+
+ // Invoke the function.
+ Label call_proxy;
+ ParameterCount actual(r0);
+ __ mov(r0, Operand(r0, ASR, kSmiTagSize));
+ __ ldr(r1, MemOperand(fp, kFunctionOffset));
+ __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
+ __ b(ne, &call_proxy);
+ __ InvokeFunction(r1, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
+
+ frame_scope.GenerateLeaveFrame();
+ __ add(sp, sp, Operand(3 * kPointerSize));
+ __ Jump(lr);
+
+ // Invoke the function proxy.
+ __ bind(&call_proxy);
+ __ push(r1); // add function proxy as last argument
+ __ add(r0, r0, Operand(1));
+ __ mov(r2, Operand(0, RelocInfo::NONE));
+ __ SetCallKind(r5, CALL_AS_METHOD);
+ __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
+ __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
- __ LeaveInternalFrame();
+ // Tear down the internal frame and remove function, receiver and args.
+ }
__ add(sp, sp, Operand(3 * kPointerSize));
__ Jump(lr);
}
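
The copy loop in the builtin above keeps its index as a smi so it can be pushed directly as the second argument to Runtime::kGetProperty. A minimal sketch of the tagging arithmetic it relies on, assuming the 32-bit smi encoding (kSmiTag == 0, kSmiTagSize == 1); the function names are illustrative, not V8's:

    #include <cstdint>

    // Smi encoding sketch: the payload sits in the upper 31 bits; tag bit 0 is 0.
    inline int32_t SmiFromInt(int32_t value) { return value << 1; }
    inline int32_t IntFromSmi(int32_t smi) { return smi >> 1; }  // ASR, as in the mov above

    // Adding (1 << kSmiTagSize) == 2 to a smi therefore advances the untagged
    // index by exactly one: SmiFromInt(n) + 2 == SmiFromInt(n + 1).
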
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index e65f6d9b69..8b1d0c4b37 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -98,9 +98,9 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
&gc,
TAG_OBJECT);
- int map_index = strict_mode_ == kStrictMode
- ? Context::STRICT_MODE_FUNCTION_MAP_INDEX
- : Context::FUNCTION_MAP_INDEX;
+ int map_index = (language_mode_ == CLASSIC_MODE)
+ ? Context::FUNCTION_MAP_INDEX
+ : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
// Compute the function map in the current global context and set that
// as the map of the allocated object.
@@ -189,6 +189,121 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
}
+void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
+ // Stack layout on entry:
+ //
+ // [sp]: function.
+ // [sp + kPointerSize]: serialized scope info
+
+ // Try to allocate the context in new space.
+ Label gc;
+ int length = slots_ + Context::MIN_CONTEXT_SLOTS;
+ __ AllocateInNewSpace(FixedArray::SizeFor(length),
+ r0, r1, r2, &gc, TAG_OBJECT);
+
+ // Load the function from the stack.
+ __ ldr(r3, MemOperand(sp, 0));
+
+ // Load the serialized scope info from the stack.
+ __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
+
+ // Set up the object header.
+ __ LoadRoot(r2, Heap::kBlockContextMapRootIndex);
+ __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ mov(r2, Operand(Smi::FromInt(length)));
+ __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
+
+ // If this block context is nested in the global context, we get a smi
+ // sentinel instead of a function. The block context should get the
+ // canonical empty function of the global context as its closure, which
+ // we still have to look up.
+ Label after_sentinel;
+ __ JumpIfNotSmi(r3, &after_sentinel);
+ if (FLAG_debug_code) {
+ const char* message = "Expected 0 as a Smi sentinel";
+ __ cmp(r3, Operand::Zero());
+ __ Assert(eq, message);
+ }
+ __ ldr(r3, GlobalObjectOperand());
+ __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalContextOffset));
+ __ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX));
+ __ bind(&after_sentinel);
+
+ // Set up the fixed slots.
+ __ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX));
+ __ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX));
+ __ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX));
+
+ // Copy the global object from the previous context.
+ __ ldr(r1, ContextOperand(cp, Context::GLOBAL_INDEX));
+ __ str(r1, ContextOperand(r0, Context::GLOBAL_INDEX));
+
+ // Initialize the rest of the slots to the hole value.
+ __ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
+ for (int i = 0; i < slots_; i++) {
+ __ str(r1, ContextOperand(r0, i + Context::MIN_CONTEXT_SLOTS));
+ }
+
+ // Remove the on-stack arguments and return.
+ __ mov(cp, r0);
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ // Need to collect. Call into runtime system.
+ __ bind(&gc);
+ __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
+}
+
+
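
As a rough guide to the allocation size computed above: a FixedArray on a 32-bit target is a map word and a length word followed by pointer-size slots, and the block context reserves four fixed slots (closure, previous, extension, global) ahead of the user-visible ones. A hedged sketch, with all constants assumed rather than taken from V8's headers:

    const int kPointerSize = 4;                          // 32-bit ARM
    const int kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length
    const int kMinContextSlots = 4;  // closure, previous, extension, global

    int FixedArraySizeFor(int length) {
      return kFixedArrayHeaderSize + length * kPointerSize;
    }

    int BlockContextSize(int user_slots) {
      return FixedArraySizeFor(user_slots + kMinContextSlots);
    }
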
+static void GenerateFastCloneShallowArrayCommon(
+ MacroAssembler* masm,
+ int length,
+ FastCloneShallowArrayStub::Mode mode,
+ Label* fail) {
+ // Registers on entry:
+ //
+ // r3: boilerplate literal array.
+ ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
+
+ // All sizes here are multiples of kPointerSize.
+ int elements_size = 0;
+ if (length > 0) {
+ elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+ ? FixedDoubleArray::SizeFor(length)
+ : FixedArray::SizeFor(length);
+ }
+ int size = JSArray::kSize + elements_size;
+
+ // Allocate both the JS array and the elements array in one big
+ // allocation. This avoids multiple limit checks.
+ __ AllocateInNewSpace(size,
+ r0,
+ r1,
+ r2,
+ fail,
+ TAG_OBJECT);
+
+ // Copy the JS array part.
+ for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
+ if ((i != JSArray::kElementsOffset) || (length == 0)) {
+ __ ldr(r1, FieldMemOperand(r3, i));
+ __ str(r1, FieldMemOperand(r0, i));
+ }
+ }
+
+ if (length > 0) {
+ // Get hold of the elements array of the boilerplate and set up the
+ // elements pointer in the resulting object.
+ __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
+ __ add(r2, r0, Operand(JSArray::kSize));
+ __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset));
+
+ // Copy the elements array.
+ ASSERT((elements_size % kPointerSize) == 0);
+ __ CopyFields(r2, r3, r1.bit(), elements_size / kPointerSize);
+ }
+}
+
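
The single AllocateInNewSpace call above covers the JSArray header and its elements store together. A sketch of why that saves work, under the usual bump-pointer model of new space (the types here are illustrative, not V8's):

    #include <cstddef>
    #include <cstdint>

    struct NewSpace {
      uintptr_t top;
      uintptr_t limit;

      // One bump allocation means one limit check, no matter how many objects
      // are carved out of the returned block afterwards.
      void* Allocate(size_t size, bool* gc_needed) {
        if (limit - top < size) { *gc_needed = true; return nullptr; }
        void* result = reinterpret_cast<void*>(top);
        top += size;
        return result;
      }
    };
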
void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
// Stack layout on entry:
//
@@ -196,10 +311,6 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
// [sp + kPointerSize]: literal index.
// [sp + (2 * kPointerSize)]: literals array.
- // All sizes here are multiples of kPointerSize.
- int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
- int size = JSArray::kSize + elements_size;
-
// Load boilerplate object into r3 and check if we need to create a
// boilerplate.
Label slow_case;
@@ -207,64 +318,111 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
__ ldr(r0, MemOperand(sp, 1 * kPointerSize));
__ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r3, ip);
+ __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
__ b(eq, &slow_case);
+ FastCloneShallowArrayStub::Mode mode = mode_;
+ if (mode == CLONE_ANY_ELEMENTS) {
+ Label double_elements, check_fast_elements;
+ __ ldr(r0, FieldMemOperand(r3, JSArray::kElementsOffset));
+ __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
+ __ cmp(r0, ip);
+ __ b(ne, &check_fast_elements);
+ GenerateFastCloneShallowArrayCommon(masm, 0,
+ COPY_ON_WRITE_ELEMENTS, &slow_case);
+ // Return and remove the on-stack parameters.
+ __ add(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ __ bind(&check_fast_elements);
+ __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+ __ cmp(r0, ip);
+ __ b(ne, &double_elements);
+ GenerateFastCloneShallowArrayCommon(masm, length_,
+ CLONE_ELEMENTS, &slow_case);
+ // Return and remove the on-stack parameters.
+ __ add(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ __ bind(&double_elements);
+ mode = CLONE_DOUBLE_ELEMENTS;
+ // Fall through to generate the code to handle double elements.
+ }
+
if (FLAG_debug_code) {
const char* message;
Heap::RootListIndex expected_map_index;
- if (mode_ == CLONE_ELEMENTS) {
+ if (mode == CLONE_ELEMENTS) {
message = "Expected (writable) fixed array";
expected_map_index = Heap::kFixedArrayMapRootIndex;
+ } else if (mode == CLONE_DOUBLE_ELEMENTS) {
+ message = "Expected (writable) fixed double array";
+ expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
} else {
- ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
+ ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
message = "Expected copy-on-write fixed array";
expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
}
__ push(r3);
__ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
__ ldr(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ LoadRoot(ip, expected_map_index);
- __ cmp(r3, ip);
+ __ CompareRoot(r3, expected_map_index);
__ Assert(eq, message);
__ pop(r3);
}
- // Allocate both the JS array and the elements array in one big
- // allocation. This avoids multiple limit checks.
- __ AllocateInNewSpace(size,
- r0,
- r1,
- r2,
- &slow_case,
- TAG_OBJECT);
+ GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
- // Copy the JS array part.
- for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
- if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
- __ ldr(r1, FieldMemOperand(r3, i));
- __ str(r1, FieldMemOperand(r0, i));
- }
- }
+ // Return and remove the on-stack parameters.
+ __ add(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
- if (length_ > 0) {
- // Get hold of the elements array of the boilerplate and set up the
- // elements pointer in the resulting object.
- __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
- __ add(r2, r0, Operand(JSArray::kSize));
- __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset));
+ __ bind(&slow_case);
+ __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
+}
- // Copy the elements array.
- __ CopyFields(r2, r3, r1.bit(), elements_size / kPointerSize);
+
+void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
+ // Stack layout on entry:
+ //
+ // [sp]: object literal flags.
+ // [sp + kPointerSize]: constant properties.
+ // [sp + (2 * kPointerSize)]: literal index.
+ // [sp + (3 * kPointerSize)]: literals array.
+
+ // Load boilerplate object into r3 and check if we need to create a
+ // boilerplate.
+ Label slow_case;
+ __ ldr(r3, MemOperand(sp, 3 * kPointerSize));
+ __ ldr(r0, MemOperand(sp, 2 * kPointerSize));
+ __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
+ __ b(eq, &slow_case);
+
+ // Check that the boilerplate contains only fast properties and we can
+ // statically determine the instance size.
+ int size = JSObject::kHeaderSize + length_ * kPointerSize;
+ __ ldr(r0, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceSizeOffset));
+ __ cmp(r0, Operand(size >> kPointerSizeLog2));
+ __ b(ne, &slow_case);
+
+ // Allocate the JS object and copy the header together with all in-object
+ // properties from the boilerplate.
+ __ AllocateInNewSpace(size, r0, r1, r2, &slow_case, TAG_OBJECT);
+ for (int i = 0; i < size; i += kPointerSize) {
+ __ ldr(r1, FieldMemOperand(r3, i));
+ __ str(r1, FieldMemOperand(r0, i));
}
// Return and remove the on-stack parameters.
- __ add(sp, sp, Operand(3 * kPointerSize));
+ __ add(sp, sp, Operand(4 * kPointerSize));
__ Ret();
__ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
+ __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
}
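
The guard above compares a statically computed object size against the instance size recorded in the boilerplate's map, which is stored in words (hence the shift by kPointerSizeLog2). A sketch of the same check, assuming a 32-bit layout:

    const int kPointerSizeLog2 = 2;  // log2(kPointerSize) on 32-bit ARM

    // Clone inline only when the map reports exactly the size we are about to
    // copy word by word; anything else goes to the runtime.
    bool CanCloneInline(int map_instance_size_in_words,
                        int header_size_in_bytes,
                        int in_object_property_count) {
      int size =
          header_size_in_bytes + in_object_property_count * (1 << kPointerSizeLog2);
      return map_instance_size_in_words == (size >> kPointerSizeLog2);
    }
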
@@ -838,9 +996,11 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
__ vmov(d0, r0, r1);
__ vmov(d1, r2, r3);
}
- // Call C routine that may not cause GC or other trouble.
- __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()),
- 0, 2);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(
+ ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
+ }
// Store answer in the overwritable heap number. Double returned in
// registers r0 and r1 or in d0.
if (masm->use_eabi_hardfloat()) {
@@ -857,6 +1017,29 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
}
+bool WriteInt32ToHeapNumberStub::IsPregenerated() {
+ // These variants are compiled ahead of time. See next method.
+ if (the_int_.is(r1) && the_heap_number_.is(r0) && scratch_.is(r2)) {
+ return true;
+ }
+ if (the_int_.is(r2) && the_heap_number_.is(r0) && scratch_.is(r3)) {
+ return true;
+ }
+ // Other register combinations are generated as and when they are needed,
+ // so it is unsafe to call them from stubs (we can't generate a stub while
+ // we are generating a stub).
+ return false;
+}
+
+
+void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() {
+ WriteInt32ToHeapNumberStub stub1(r1, r0, r2);
+ WriteInt32ToHeapNumberStub stub2(r2, r0, r3);
+ stub1.GetCode()->set_is_pregenerated(true);
+ stub2.GetCode()->set_is_pregenerated(true);
+}
+
+
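
IsPregenerated above whitelists exactly the two register combinations built ahead of time. A sketch of that lookup with plain register codes standing in for the Register objects (illustrative only, not V8's types):

    struct RegCombo { int the_int, the_heap_number, scratch; };

    // r1/r0/r2 and r2/r0/r3, matching GenerateFixedRegStubsAheadOfTime above.
    const RegCombo kPregeneratedCombos[] = { {1, 0, 2}, {2, 0, 3} };

    bool IsPregeneratedCombo(const RegCombo& c) {
      for (const RegCombo& p : kPregeneratedCombos) {
        if (p.the_int == c.the_int && p.the_heap_number == c.the_heap_number &&
            p.scratch == c.scratch) return true;
      }
      return false;
    }
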
// See comment for class.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
Label max_negative_int;
@@ -1197,6 +1380,8 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm,
__ vmov(d0, r0, r1);
__ vmov(d1, r2, r3);
}
+
+ AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
0, 2);
__ pop(pc); // Return.
@@ -1214,7 +1399,7 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
// If either operand is a JS object or an oddball value, then they are
// not equal since their pointers are different.
// There is no test for undetectability in strict equality.
- STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
Label first_non_object;
// Get the type of the first operand into r2 and compare it with
// FIRST_SPEC_OBJECT_TYPE.
@@ -1606,6 +1791,8 @@ void CompareStub::Generate(MacroAssembler* masm) {
// The stub expects its argument in the tos_ register and returns its result in
// it, too: zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
+ // This stub overrides SometimesSetsUpAFrame() to return false. That means
+ // we cannot call anything that could cause a GC from this stub.
// This stub uses VFP3 instructions.
CpuFeatures::Scope scope(VFP3);
@@ -1713,6 +1900,41 @@ void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
}
+void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
+ // We don't allow a GC during a store buffer overflow, so there is no need to
+ // store the registers in any particular way, but we do have to store and
+ // restore them.
+ __ stm(db_w, sp, kCallerSaved | lr.bit());
+ if (save_doubles_ == kSaveFPRegs) {
+ CpuFeatures::Scope scope(VFP3);
+ __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
+ for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
+ DwVfpRegister reg = DwVfpRegister::from_code(i);
+ __ vstr(reg, MemOperand(sp, i * kDoubleSize));
+ }
+ }
+ const int argument_count = 1;
+ const int fp_argument_count = 0;
+ const Register scratch = r1;
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
+ __ mov(r0, Operand(ExternalReference::isolate_address()));
+ __ CallCFunction(
+ ExternalReference::store_buffer_overflow_function(masm->isolate()),
+ argument_count);
+ if (save_doubles_ == kSaveFPRegs) {
+ CpuFeatures::Scope scope(VFP3);
+ for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
+ DwVfpRegister reg = DwVfpRegister::from_code(i);
+ __ vldr(reg, MemOperand(sp, i * kDoubleSize));
+ }
+ __ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
+ }
+ __ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0).
+}
+
+
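
For the kSaveFPRegs path above, the stack arithmetic is straightforward; a sketch assuming ARM's sixteen 8-byte VFP double registers (d0..d15):

    const int kDoubleSize = 8;
    const int kNumVfpDoubleRegisters = 16;  // d0..d15, assumed
    const int kFpSaveAreaSize = kDoubleSize * kNumVfpDoubleRegisters;  // 128 bytes

    // Each register is spilled at sp + i * kDoubleSize inside the reserved
    // area, and reloaded from the same offset after the C call returns.
    int SpillOffset(int reg_code) { return reg_code * kDoubleSize; }
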
void UnaryOpStub::PrintName(StringStream* stream) {
const char* op_name = Token::Name(op_);
const char* overwrite_name = NULL; // Make g++ happy.
@@ -1866,12 +2088,13 @@ void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
- __ EnterInternalFrame();
- __ push(r0);
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(r1, Operand(r0));
- __ pop(r0);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(r0);
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ __ mov(r1, Operand(r0));
+ __ pop(r0);
+ }
__ bind(&heapnumber_allocated);
__ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
@@ -1912,13 +2135,14 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
- __ EnterInternalFrame();
- __ push(r0); // Push the heap number, not the untagged int32.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(r2, r0); // Move the new heap number into r2.
- // Get the heap number into r0, now that the new heap number is in r2.
- __ pop(r0);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(r0); // Push the heap number, not the untagged int32.
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ __ mov(r2, r0); // Move the new heap number into r2.
+ // Get the heap number into r0, now that the new heap number is in r2.
+ __ pop(r0);
+ }
// Convert the heap number in r0 to an untagged integer in r1.
// This can't go slow-case because it's the same number we already
@@ -2028,6 +2252,10 @@ void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
void BinaryOpStub::Generate(MacroAssembler* masm) {
+ // Explicitly allow generation of nested stubs. It is safe here because
+ // generation code does not use any raw pointers.
+ AllowStubCallsScope allow_stub_calls(masm, true);
+
switch (operands_type_) {
case BinaryOpIC::UNINITIALIZED:
GenerateTypeTransition(masm);
@@ -3086,6 +3314,9 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ cmp(r3, r5);
__ b(ne, &calculate);
 // Cache hit. Load result, clean up and return.
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(
+ counters->transcendental_cache_hit(), 1, scratch0, scratch1);
if (tagged) {
// Pop input value from stack and load result into r0.
__ pop();
@@ -3098,6 +3329,9 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
} // if (CpuFeatures::IsSupported(VFP3))
__ bind(&calculate);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(
+ counters->transcendental_cache_miss(), 1, scratch0, scratch1);
if (tagged) {
__ bind(&invalid_cache);
ExternalReference runtime_function =
@@ -3133,10 +3367,11 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache);
__ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
- __ EnterInternalFrame();
- __ push(r0);
- __ CallRuntime(RuntimeFunction(), 1);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(r0);
+ __ CallRuntime(RuntimeFunction(), 1);
+ }
__ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
__ Ret();
@@ -3149,14 +3384,15 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// We return the value in d2 without adding it to the cache, but
// we cause a scavenging GC so that future allocations will succeed.
- __ EnterInternalFrame();
-
- // Allocate an aligned object larger than a HeapNumber.
- ASSERT(4 * kPointerSize >= HeapNumber::kSize);
- __ mov(scratch0, Operand(4 * kPointerSize));
- __ push(scratch0);
- __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Allocate an aligned object larger than a HeapNumber.
+ ASSERT(4 * kPointerSize >= HeapNumber::kSize);
+ __ mov(scratch0, Operand(4 * kPointerSize));
+ __ push(scratch0);
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+ }
__ Ret();
}
}
@@ -3173,6 +3409,7 @@ void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
} else {
__ vmov(r0, r1, d2);
}
+ AllowExternalCallThatCantCauseGC scope(masm);
switch (type_) {
case TranscendentalCache::SIN:
__ CallCFunction(ExternalReference::math_sin_double_function(isolate),
@@ -3182,6 +3419,10 @@ void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
__ CallCFunction(ExternalReference::math_cos_double_function(isolate),
0, 1);
break;
+ case TranscendentalCache::TAN:
+ __ CallCFunction(ExternalReference::math_tan_double_function(isolate),
+ 0, 1);
+ break;
case TranscendentalCache::LOG:
__ CallCFunction(ExternalReference::math_log_double_function(isolate),
0, 1);
@@ -3199,6 +3440,7 @@ Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
// Add more cases when necessary.
case TranscendentalCache::SIN: return Runtime::kMath_sin;
case TranscendentalCache::COS: return Runtime::kMath_cos;
+ case TranscendentalCache::TAN: return Runtime::kMath_tan;
case TranscendentalCache::LOG: return Runtime::kMath_log;
default:
UNIMPLEMENTED();
@@ -3268,11 +3510,14 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ push(lr);
__ PrepareCallCFunction(1, 1, scratch);
__ SetCallCDoubleArguments(double_base, exponent);
- __ CallCFunction(
- ExternalReference::power_double_int_function(masm->isolate()),
- 1, 1);
- __ pop(lr);
- __ GetCFunctionDoubleResult(double_result);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(
+ ExternalReference::power_double_int_function(masm->isolate()),
+ 1, 1);
+ __ pop(lr);
+ __ GetCFunctionDoubleResult(double_result);
+ }
__ vstr(double_result,
FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
__ mov(r0, heapnumber);
@@ -3298,11 +3543,14 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ push(lr);
__ PrepareCallCFunction(0, 2, scratch);
__ SetCallCDoubleArguments(double_base, double_exponent);
- __ CallCFunction(
- ExternalReference::power_double_double_function(masm->isolate()),
- 0, 2);
- __ pop(lr);
- __ GetCFunctionDoubleResult(double_result);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(masm->isolate()),
+ 0, 2);
+ __ pop(lr);
+ __ GetCFunctionDoubleResult(double_result);
+ }
__ vstr(double_result,
FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
__ mov(r0, heapnumber);
@@ -3319,6 +3567,37 @@ bool CEntryStub::NeedsImmovableCode() {
}
+bool CEntryStub::IsPregenerated() {
+ return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
+ result_size_ == 1;
+}
+
+
+void CodeStub::GenerateStubsAheadOfTime() {
+ CEntryStub::GenerateAheadOfTime();
+ WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime();
+ StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
+ RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
+}
+
+
+void CodeStub::GenerateFPStubs() {
+ CEntryStub save_doubles(1, kSaveFPRegs);
+ Handle<Code> code = save_doubles.GetCode();
+ code->set_is_pregenerated(true);
+ StoreBufferOverflowStub stub(kSaveFPRegs);
+ stub.GetCode()->set_is_pregenerated(true);
+ code->GetIsolate()->set_fp_stubs_generated(true);
+}
+
+
+void CEntryStub::GenerateAheadOfTime() {
+ CEntryStub stub(1, kDontSaveFPRegs);
+ Handle<Code> code = stub.GetCode();
+ code->set_is_pregenerated(true);
+}
+
+
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
__ Throw(r0);
}
@@ -3430,8 +3709,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ b(eq, throw_out_of_memory_exception);
// Retrieve the pending exception and clear the variable.
- __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate)));
- __ ldr(r3, MemOperand(ip));
+ __ mov(r3, Operand(isolate->factory()->the_hole_value()));
__ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ ldr(r0, MemOperand(ip));
@@ -3469,6 +3747,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ sub(r6, r6, Operand(kPointerSize));
// Enter the exit frame that transitions from JavaScript to C++.
+ FrameScope scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(save_doubles_);
 // Set up argc and the builtin function in callee-saved registers.
@@ -3527,7 +3806,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// r3: argc
// [sp+0]: argv
- Label invoke, exit;
+ Label invoke, handler_entry, exit;
// Called from C, so do not pop argc and args on exit (preserve sp)
// No need to save register-passed args
@@ -3590,31 +3869,33 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ bind(&cont);
__ push(ip);
- // Call a faked try-block that does the invoke.
- __ bl(&invoke);
-
- // Caught exception: Store result (exception) in the pending
- // exception field in the JSEnv and return a failure sentinel.
- // Coming in here the fp will be invalid because the PushTryHandler below
- // sets it to 0 to signal the existence of the JSEntry frame.
+ // Jump to a faked try block that does the invoke, with a faked catch
+ // block that sets the pending exception.
+ __ jmp(&invoke);
+ __ bind(&handler_entry);
+ handler_offset_ = handler_entry.pos();
+ // Caught exception: Store result (exception) in the pending exception
+ // field in the JSEnv and return a failure sentinel. Coming in here the
+ // fp will be invalid because the PushTryHandler below sets it to 0 to
+ // signal the existence of the JSEntry frame.
__ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ str(r0, MemOperand(ip));
__ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
__ b(&exit);
- // Invoke: Link this frame into the handler chain.
+ // Invoke: Link this frame into the handler chain. There's only one
+ // handler block in this code object, so its index is 0.
__ bind(&invoke);
// Must preserve r0-r4, r5-r7 are available.
- __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+ __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER, 0);
// If an exception not caught by another handler occurs, this handler
// returns control to the code after the bl(&invoke) above, which
// restores all kCalleeSaved registers (including cp and fp) to their
// saved values before returning a failure to C.
// Clear any pending exceptions.
- __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate)));
- __ ldr(r5, MemOperand(ip));
+ __ mov(r5, Operand(isolate->factory()->the_hole_value()));
__ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ str(r5, MemOperand(ip));
@@ -3738,7 +4019,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
}
// Get the prototype of the function.
- __ TryGetFunctionPrototype(function, prototype, scratch, &slow);
+ __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
// Check that the function prototype is a JS object.
__ JumpIfSmi(prototype, &slow);
@@ -3851,10 +4132,11 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
}
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
} else {
- __ EnterInternalFrame();
- __ Push(r0, r1);
- __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r0, r1);
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+ }
__ cmp(r0, Operand::Zero());
__ LoadRoot(r0, Heap::kTrueValueRootIndex, eq);
__ LoadRoot(r0, Heap::kFalseValueRootIndex, ne);
@@ -4250,10 +4532,6 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
#ifdef V8_INTERPRETED_REGEXP
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#else // V8_INTERPRETED_REGEXP
- if (!FLAG_regexp_entry_native) {
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
- return;
- }
// Stack frame on entry.
// sp[0]: last_match_info (expected JSArray)
@@ -4375,25 +4653,39 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
Label seq_string;
__ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
- // First check for flat string.
- __ and_(r1, r0, Operand(kIsNotStringMask | kStringRepresentationMask), SetCC);
+ // First check for flat string. None of the following string type tests will
+ // succeed if subject is not a string or a short external string.
+ __ and_(r1,
+ r0,
+ Operand(kIsNotStringMask |
+ kStringRepresentationMask |
+ kShortExternalStringMask),
+ SetCC);
STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
__ b(eq, &seq_string);
// subject: Subject string
// regexp_data: RegExp data (FixedArray)
+ // r1: whether subject is a string and if yes, its string representation
// Check for flat cons string or sliced string.
// A flat cons string is a cons string where the second part is the empty
// string. In that case the subject string is just the first part of the cons
// string. Also in this case the first part of the cons string is known to be
// a sequential string or an external string.
// In the case of a sliced string its offset has to be taken into account.
- Label cons_string, check_encoding;
+ Label cons_string, external_string, check_encoding;
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+ STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
+ STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
__ cmp(r1, Operand(kExternalStringTag));
__ b(lt, &cons_string);
- __ b(eq, &runtime);
+ __ b(eq, &external_string);
+
+ // Catch non-string subject or short external string.
+ STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
+ __ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask));
+ __ b(ne, &runtime);
// String is sliced.
__ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset));
@@ -4404,8 +4696,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// String is a cons string, check whether it is flat.
__ bind(&cons_string);
__ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
- __ LoadRoot(r1, Heap::kEmptyStringRootIndex);
- __ cmp(r0, r1);
+ __ CompareRoot(r0, Heap::kEmptyStringRootIndex);
__ b(ne, &runtime);
__ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
// Is first part of cons or parent of slice a flat string?
@@ -4414,7 +4705,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
STATIC_ASSERT(kSeqStringTag == 0);
__ tst(r0, Operand(kStringRepresentationMask));
- __ b(ne, &runtime);
+ __ b(ne, &external_string);
+
__ bind(&seq_string);
// subject: Subject string
// regexp_data: RegExp data (FixedArray)
@@ -4480,8 +4772,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// For arguments 4 and 3 get string length, calculate start of string data and
// calculate the shift of the index (0 for ASCII and 1 for two byte).
- STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ add(r8, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(r8, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
__ eor(r3, r3, Operand(1));
// Load the length from the original subject string from the previous stack
// frame. Therefore we have to use fp, which points exactly to two pointer
@@ -4532,8 +4823,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// stack overflow (on the backtrack stack) was detected in RegExp code but
// haven't created the exception yet. Handle that in the runtime system.
// TODO(592): Rerunning the RegExp to get the stack overflow exception.
- __ mov(r1, Operand(ExternalReference::the_hole_value_location(isolate)));
- __ ldr(r1, MemOperand(r1, 0));
+ __ mov(r1, Operand(isolate->factory()->the_hole_value()));
__ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ ldr(r0, MemOperand(r2, 0));
@@ -4575,16 +4865,25 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ str(r2, FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastCaptureCountOffset));
// Store last subject and last input.
- __ mov(r3, last_match_info_elements); // Moved up to reduce latency.
__ str(subject,
FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastSubjectOffset));
- __ RecordWrite(r3, Operand(RegExpImpl::kLastSubjectOffset), r2, r7);
+ __ mov(r2, subject);
+ __ RecordWriteField(last_match_info_elements,
+ RegExpImpl::kLastSubjectOffset,
+ r2,
+ r7,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
__ str(subject,
FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastInputOffset));
- __ mov(r3, last_match_info_elements);
- __ RecordWrite(r3, Operand(RegExpImpl::kLastInputOffset), r2, r7);
+ __ RecordWriteField(last_match_info_elements,
+ RegExpImpl::kLastInputOffset,
+ subject,
+ r7,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
// Get the static offsets vector filled by the native regexp code.
ExternalReference address_of_static_offsets_vector =
@@ -4615,6 +4914,26 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ add(sp, sp, Operand(4 * kPointerSize));
__ Ret();
+ // External string. Short external strings have already been ruled out.
+ // r0: scratch
+ __ bind(&external_string);
+ __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
+ __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
+ if (FLAG_debug_code) {
+ // Assert that we do not have a cons or slice (indirect strings) here.
+ // Sequential strings have already been ruled out.
+ __ tst(r0, Operand(kIsIndirectStringMask));
+ __ Assert(eq, "external string expected, but not found");
+ }
+ __ ldr(subject,
+ FieldMemOperand(subject, ExternalString::kResourceDataOffset));
+ // Move the pointer so that offset-wise, it looks like a sequential string.
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ __ sub(subject,
+ subject,
+ Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ __ jmp(&seq_string);
+
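
The subtraction above makes an external string's resource data addressable with the same base + header + index formula used for sequential strings. A sketch of the aliasing, with the header size and heap-object tag passed in as assumptions rather than taken from V8's headers:

    #include <cstdint>

    // After this adjustment, fake_base + seq_header_size - heap_object_tag + index
    // lands on resource_data[index], exactly as it would for the character data
    // of a sequential string.
    const uint8_t* FakeSequentialBase(const uint8_t* resource_data,
                                      int seq_header_size,    // SeqString::kHeaderSize
                                      int heap_object_tag) {  // kHeapObjectTag, 1 in V8
      return resource_data - (seq_header_size - heap_object_tag);
    }
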
// Do the runtime call to execute the regexp.
__ bind(&runtime);
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
@@ -4712,7 +5031,24 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
}
+void CallFunctionStub::FinishCode(Handle<Code> code) {
+ code->set_has_function_cache(false);
+}
+
+
+void CallFunctionStub::Clear(Heap* heap, Address address) {
+ UNREACHABLE();
+}
+
+
+Object* CallFunctionStub::GetCachedValue(Address address) {
+ UNREACHABLE();
+ return NULL;
+}
+
+
void CallFunctionStub::Generate(MacroAssembler* masm) {
+ // r1 : the function to call
Label slow, non_function;
// The receiver might implicitly be the global object. This is
@@ -4727,16 +5063,12 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
__ b(ne, &call);
// Patch the receiver on the stack with the global receiver object.
- __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
- __ str(r1, MemOperand(sp, argc_ * kPointerSize));
+ __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
+ __ str(r2, MemOperand(sp, argc_ * kPointerSize));
__ bind(&call);
}
- // Get the function to call from the stack.
- // function, receiver [, arguments]
- __ ldr(r1, MemOperand(sp, (argc_ + 1) * kPointerSize));
-
// Check that the function is really a JavaScript function.
// r1: pushed function (to be verified)
__ JumpIfSmi(r1, &non_function);
@@ -4774,7 +5106,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ mov(r0, Operand(argc_ + 1, RelocInfo::NONE));
__ mov(r2, Operand(0, RelocInfo::NONE));
__ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
- __ SetCallKind(r5, CALL_AS_FUNCTION);
+ __ SetCallKind(r5, CALL_AS_METHOD);
{
Handle<Code> adaptor =
masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
@@ -4855,100 +5187,41 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
// If the index is non-smi trigger the non-smi case.
__ JumpIfNotSmi(index_, &index_not_smi_);
-
- // Put smi-tagged index into scratch register.
- __ mov(scratch_, index_);
__ bind(&got_smi_index_);
// Check for index out of range.
__ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
- __ cmp(ip, Operand(scratch_));
+ __ cmp(ip, Operand(index_));
__ b(ls, index_out_of_range_);
- // We need special handling for non-flat strings.
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(result_, Operand(kStringRepresentationMask));
- __ b(eq, &flat_string);
+ __ mov(index_, Operand(index_, ASR, kSmiTagSize));
+
+ StringCharLoadGenerator::Generate(masm,
+ object_,
+ index_,
+ result_,
+ &call_runtime_);
- // Handle non-flat strings.
- __ and_(result_, result_, Operand(kStringRepresentationMask));
- STATIC_ASSERT(kConsStringTag < kExternalStringTag);
- STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
- __ cmp(result_, Operand(kExternalStringTag));
- __ b(gt, &sliced_string);
- __ b(eq, &call_runtime_);
-
- // ConsString.
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we would rather go to the runtime system now to flatten
- // the string.
- Label assure_seq_string;
- __ ldr(result_, FieldMemOperand(object_, ConsString::kSecondOffset));
- __ LoadRoot(ip, Heap::kEmptyStringRootIndex);
- __ cmp(result_, Operand(ip));
- __ b(ne, &call_runtime_);
- // Get the first of the two strings and load its instance type.
- __ ldr(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
- __ jmp(&assure_seq_string);
-
- // SlicedString, unpack and add offset.
- __ bind(&sliced_string);
- __ ldr(result_, FieldMemOperand(object_, SlicedString::kOffsetOffset));
- __ add(scratch_, scratch_, result_);
- __ ldr(object_, FieldMemOperand(object_, SlicedString::kParentOffset));
-
- // Assure that we are dealing with a sequential string. Go to runtime if not.
- __ bind(&assure_seq_string);
- __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
- __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
- // Check that parent is not an external string. Go to runtime otherwise.
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(result_, Operand(kStringRepresentationMask));
- __ b(ne, &call_runtime_);
-
- // Check for 1-byte or 2-byte string.
- __ bind(&flat_string);
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ tst(result_, Operand(kStringEncodingMask));
- __ b(ne, &ascii_string);
-
- // 2-byte string.
- // Load the 2-byte character code into the result register. We can
- // add without shifting since the smi tag size is the log2 of the
- // number of bytes in a two-byte character.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
- __ add(scratch_, object_, Operand(scratch_));
- __ ldrh(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize));
- __ jmp(&got_char_code);
-
- // ASCII string.
- // Load the byte into the result register.
- __ bind(&ascii_string);
- __ add(scratch_, object_, Operand(scratch_, LSR, kSmiTagSize));
- __ ldrb(result_, FieldMemOperand(scratch_, SeqAsciiString::kHeaderSize));
-
- __ bind(&got_char_code);
__ mov(result_, Operand(result_, LSL, kSmiTagSize));
__ bind(&exit_);
}
void StringCharCodeAtGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
__ Abort("Unexpected fallthrough to CharCodeAt slow case");
// Index is not a smi.
__ bind(&index_not_smi_);
// If index is a heap number, try converting it to an integer.
__ CheckMap(index_,
- scratch_,
+ result_,
Heap::kHeapNumberMapRootIndex,
index_not_number_,
DONT_DO_SMI_CHECK);
call_helper.BeforeCall(masm);
- __ Push(object_, index_);
+ __ push(object_);
__ push(index_); // Consumed by runtime conversion function.
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
__ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
@@ -4959,15 +5232,14 @@ void StringCharCodeAtGenerator::GenerateSlow(
}
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
- __ Move(scratch_, r0);
- __ pop(index_);
+ __ Move(index_, r0);
__ pop(object_);
// Reload the instance type.
__ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
__ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
call_helper.AfterCall(masm);
// If index is still not a smi, it must be out of range.
- __ JumpIfNotSmi(scratch_, index_out_of_range_);
+ __ JumpIfNotSmi(index_, index_out_of_range_);
// Otherwise, return to the fast path.
__ jmp(&got_smi_index_);
@@ -4976,6 +5248,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
// is too complex (e.g., when the string needs to be flattened).
__ bind(&call_runtime_);
call_helper.BeforeCall(masm);
+ __ mov(index_, Operand(index_, LSL, kSmiTagSize));
__ Push(object_, index_);
__ CallRuntime(Runtime::kStringCharCodeAt, 2);
__ Move(result_, r0);
@@ -5012,7 +5285,8 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharFromCodeGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
__ Abort("Unexpected fallthrough to CharFromCode slow case");
__ bind(&slow_case_);
@@ -5037,76 +5311,13 @@ void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharAtGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
char_code_at_generator_.GenerateSlow(masm, call_helper);
char_from_code_generator_.GenerateSlow(masm, call_helper);
}
-class StringHelper : public AllStatic {
- public:
- // Generate code for copying characters using a simple loop. This should only
- // be used in places where the number of characters is small and the
- // additional setup and checking in GenerateCopyCharactersLong adds too much
- // overhead. Copying of overlapping regions is not supported.
- // Dest register ends at the position after the last character written.
- static void GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii);
-
- // Generate code for copying a large number of characters. This function
- // is allowed to spend extra time setting up conditions to make copying
- // faster. Copying of overlapping regions is not supported.
- // Dest register ends at the position after the last character written.
- static void GenerateCopyCharactersLong(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- int flags);
-
-
- // Probe the symbol table for a two character string. If the string is
- // not found by probing a jump to the label not_found is performed. This jump
- // does not guarantee that the string is not in the symbol table. If the
- // string is found the code falls through with the string in register r0.
- // Contents of both c1 and c2 registers are modified. At the exit c1 is
- // guaranteed to contain halfword with low and high bytes equal to
- // initial contents of c1 and c2 respectively.
- static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- Label* not_found);
-
- // Generate string hash.
- static void GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character);
-
- static void GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character);
-
- static void GenerateHashGetHash(MacroAssembler* masm,
- Register hash);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
Register dest,
Register src,
@@ -5359,9 +5570,8 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
static const int kProbes = 4;
Label found_in_symbol_table;
Label next_probe[kProbes];
+ Register candidate = scratch5; // Scratch register contains candidate.
for (int i = 0; i < kProbes; i++) {
- Register candidate = scratch5; // Scratch register contains candidate.
-
// Calculate entry in symbol table.
if (i > 0) {
__ add(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
@@ -5386,11 +5596,11 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
__ cmp(undefined, candidate);
__ b(eq, not_found);
- // Must be null (deleted entry).
+ // Must be the hole (deleted entry).
if (FLAG_debug_code) {
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(ip, candidate);
- __ Assert(eq, "oddball in symbol table is not undefined or null");
+ __ Assert(eq, "oddball in symbol table is not undefined or the hole");
}
__ jmp(&next_probe[i]);
@@ -5418,7 +5628,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
__ jmp(not_found);
// Scratch register contains result when we fall through to here.
- Register result = scratch;
+ Register result = candidate;
__ bind(&found_in_symbol_table);
__ Move(r0, result);
}
@@ -5430,7 +5640,7 @@ void StringHelper::GenerateHashInit(MacroAssembler* masm,
// hash = character + (character << 10);
__ add(hash, character, Operand(character, LSL, 10));
// hash ^= hash >> 6;
- __ eor(hash, hash, Operand(hash, ASR, 6));
+ __ eor(hash, hash, Operand(hash, LSR, 6));
}
@@ -5442,7 +5652,7 @@ void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
// hash += hash << 10;
__ add(hash, hash, Operand(hash, LSL, 10));
// hash ^= hash >> 6;
- __ eor(hash, hash, Operand(hash, ASR, 6));
+ __ eor(hash, hash, Operand(hash, LSR, 6));
}
@@ -5451,12 +5661,15 @@ void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
// hash += hash << 3;
__ add(hash, hash, Operand(hash, LSL, 3));
// hash ^= hash >> 11;
- __ eor(hash, hash, Operand(hash, ASR, 11));
+ __ eor(hash, hash, Operand(hash, LSR, 11));
// hash += hash << 15;
__ add(hash, hash, Operand(hash, LSL, 15), SetCC);
+ uint32_t kHashShiftCutOffMask = (1 << (32 - String::kHashShift)) - 1;
+ __ and_(hash, hash, Operand(kHashShiftCutOffMask));
+
// if (hash == 0) hash = 27;
- __ mov(hash, Operand(27), LeaveCC, ne);
+ __ mov(hash, Operand(27), LeaveCC, eq);
}
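
The ASR-to-LSR changes in this hunk matter because the hash may have its high bit set, and an arithmetic shift would smear the sign bit into the xor. A sketch of the full computation in C++, where >> on uint32_t is already a logical shift; String::kHashShift == 2 is treated as an assumption here:

    #include <cstdint>

    uint32_t HashInit(uint32_t character) {
      uint32_t hash = character + (character << 10);
      hash ^= hash >> 6;  // logical shift, matching the LSR fix above
      return hash;
    }

    uint32_t HashAddCharacter(uint32_t hash, uint32_t character) {
      hash += character;
      hash += hash << 10;
      hash ^= hash >> 6;
      return hash;
    }

    uint32_t HashGetHash(uint32_t hash) {
      hash += hash << 3;
      hash ^= hash >> 11;
      hash += hash << 15;
      hash &= (1u << (32 - 2)) - 1;  // kHashShiftCutOffMask for kHashShift == 2
      return hash == 0 ? 27 : hash;  // the eq-conditional mov fixed above
    }
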
@@ -5671,15 +5884,12 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// r3: from index (untagged smi)
// r6 (a.k.a. to): to (smi)
// r7 (a.k.a. from): from offset (smi)
- Label allocate_slice, sliced_string, seq_string;
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(r1, Operand(kStringRepresentationMask));
- __ b(eq, &seq_string);
+ Label allocate_slice, sliced_string, seq_or_external_string;
+ // If the string is not indirect, it can only be sequential or external.
STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
STATIC_ASSERT(kIsIndirectStringMask != 0);
__ tst(r1, Operand(kIsIndirectStringMask));
- // External string. Jump to runtime.
- __ b(eq, &runtime);
+ __ b(eq, &seq_or_external_string);
__ tst(r1, Operand(kSlicedNotConsMask));
__ b(ne, &sliced_string);
@@ -5698,8 +5908,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
__ jmp(&allocate_slice);
- __ bind(&seq_string);
- // Sequential string. Just move string to the right register.
+ __ bind(&seq_or_external_string);
+ // Sequential or external string. Just move string to the correct register.
__ mov(r5, r0);
__ bind(&allocate_slice);
@@ -6425,12 +6635,13 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
// Call the runtime system in a fresh internal frame.
ExternalReference miss =
ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
- __ EnterInternalFrame();
- __ Push(r1, r0);
- __ mov(ip, Operand(Smi::FromInt(op_)));
- __ push(ip);
- __ CallExternalReference(miss, 3);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r1, r0);
+ __ mov(ip, Operand(Smi::FromInt(op_)));
+ __ push(ip);
+ __ CallExternalReference(miss, 3);
+ }
// Compute the entry point of the rewritten stub.
__ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore registers.
@@ -6469,14 +6680,13 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
}
-MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
- MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register receiver,
- Register properties,
- String* name,
- Register scratch0) {
+void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register receiver,
+ Register properties,
+ Handle<String> name,
+ Register scratch0) {
// If names of slots in range from 1 to kProbes - 1 for the hash value are
// not equal to the name and kProbes-th slot is not used (its name is the
// undefined value), it guarantees the hash table doesn't contain the
@@ -6534,14 +6744,12 @@ MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
__ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
__ mov(r1, Operand(Handle<String>(name)));
StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
- MaybeObject* result = masm->TryCallStub(&stub);
- if (result->IsFailure()) return result;
+ __ CallStub(&stub);
__ tst(r0, Operand(r0));
__ ldm(ia_w, sp, spill_mask);
__ b(eq, done);
__ b(ne, miss);
- return result;
}
@@ -6556,6 +6764,11 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
Register name,
Register scratch1,
Register scratch2) {
+ ASSERT(!elements.is(scratch1));
+ ASSERT(!elements.is(scratch2));
+ ASSERT(!name.is(scratch1));
+ ASSERT(!name.is(scratch2));
+
// Assert that name contains a string.
if (FLAG_debug_code) __ AbortIfNotString(name);
@@ -6599,8 +6812,14 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
~(scratch1.bit() | scratch2.bit());
__ stm(db_w, sp, spill_mask);
- __ Move(r0, elements);
- __ Move(r1, name);
+ if (name.is(r0)) {
+ ASSERT(!elements.is(r1));
+ __ Move(r1, name);
+ __ Move(r0, elements);
+ } else {
+ __ Move(r0, elements);
+ __ Move(r1, name);
+ }
StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
__ CallStub(&stub);
__ tst(r0, Operand(r0));
@@ -6613,6 +6832,8 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
+ // This stub overrides SometimesSetsUpAFrame() to return false. That means
+ // we cannot call anything that could cause a GC from this stub.
// Registers:
// result: StringDictionary to probe
// r1: key
@@ -6702,6 +6923,333 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
}
+struct AheadOfTimeWriteBarrierStubList {
+ Register object, value, address;
+ RememberedSetAction action;
+};
+
+
+struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
+ // Used in RegExpExecStub.
+ { r6, r4, r7, EMIT_REMEMBERED_SET },
+ { r6, r2, r7, EMIT_REMEMBERED_SET },
+ // Used in CompileArrayPushCall.
+ // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
+ // Also used in KeyedStoreIC::GenerateGeneric.
+ { r3, r4, r5, EMIT_REMEMBERED_SET },
+ // Used in CompileStoreGlobal.
+ { r4, r1, r2, OMIT_REMEMBERED_SET },
+ // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
+ { r1, r2, r3, EMIT_REMEMBERED_SET },
+ { r3, r2, r1, EMIT_REMEMBERED_SET },
+ // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
+ { r2, r1, r3, EMIT_REMEMBERED_SET },
+ { r3, r1, r2, EMIT_REMEMBERED_SET },
+ // KeyedStoreStubCompiler::GenerateStoreFastElement.
+ { r4, r2, r3, EMIT_REMEMBERED_SET },
+ // ElementsTransitionGenerator::GenerateSmiOnlyToObject
+ // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
+ // and ElementsTransitionGenerator::GenerateDoubleToObject
+ { r2, r3, r9, EMIT_REMEMBERED_SET },
+ // ElementsTransitionGenerator::GenerateDoubleToObject
+ { r6, r2, r0, EMIT_REMEMBERED_SET },
+ { r2, r6, r9, EMIT_REMEMBERED_SET },
+ // StoreArrayLiteralElementStub::Generate
+ { r5, r0, r6, EMIT_REMEMBERED_SET },
+ // Null termination.
+ { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET}
+};
+
+
+bool RecordWriteStub::IsPregenerated() {
+ for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+ !entry->object.is(no_reg);
+ entry++) {
+ if (object_.is(entry->object) &&
+ value_.is(entry->value) &&
+ address_.is(entry->address) &&
+ remembered_set_action_ == entry->action &&
+ save_fp_regs_mode_ == kDontSaveFPRegs) {
+ return true;
+ }
+ }
+ return false;
+}
+
+
+bool StoreBufferOverflowStub::IsPregenerated() {
+ return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated();
+}
+
+
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
+ StoreBufferOverflowStub stub1(kDontSaveFPRegs);
+ stub1.GetCode()->set_is_pregenerated(true);
+}
+
+
+void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
+ for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+ !entry->object.is(no_reg);
+ entry++) {
+ RecordWriteStub stub(entry->object,
+ entry->value,
+ entry->address,
+ entry->action,
+ kDontSaveFPRegs);
+ stub.GetCode()->set_is_pregenerated(true);
+ }
+}
+
+
+// Takes the input in 3 registers: address_, value_, and object_. A pointer to
+// the value has just been written into the object; now this stub makes sure
+// we keep the GC informed. The word in the object where the value has been
+// written is in the address register.
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+ Label skip_to_incremental_noncompacting;
+ Label skip_to_incremental_compacting;
+
+ // The first two instructions are generated with labels so as to get the
+ // offset fixed up correctly by the bind(Label*) call. We patch it back and
+ // forth between a compare instruction (a nop in this position) and the
+ // real branch when we start and stop incremental heap marking.
+ // See RecordWriteStub::Patch for details.
+ __ b(&skip_to_incremental_noncompacting);
+ __ b(&skip_to_incremental_compacting);
+
+ if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+ }
+ __ Ret();
+
+ __ bind(&skip_to_incremental_noncompacting);
+ GenerateIncremental(masm, INCREMENTAL);
+
+ __ bind(&skip_to_incremental_compacting);
+ GenerateIncremental(masm, INCREMENTAL_COMPACTION);
+
+ // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
+ // Will be checked in IncrementalMarking::ActivateGeneratedStub.
+ ASSERT(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
+ ASSERT(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
+ PatchBranchIntoNop(masm, 0);
+ PatchBranchIntoNop(masm, Assembler::kInstrSize);
+}
+
+
+void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
+ regs_.Save(masm);
+
+ if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+ Label dont_need_remembered_set;
+
+ __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
+ __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
+ regs_.scratch0(),
+ &dont_need_remembered_set);
+
+ __ CheckPageFlag(regs_.object(),
+ regs_.scratch0(),
+ 1 << MemoryChunk::SCAN_ON_SCAVENGE,
+ ne,
+ &dont_need_remembered_set);
+
+ // First notify the incremental marker if necessary, then update the
+ // remembered set.
+ CheckNeedsToInformIncrementalMarker(
+ masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
+ InformIncrementalMarker(masm, mode);
+ regs_.Restore(masm);
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+
+ __ bind(&dont_need_remembered_set);
+ }
+
+ CheckNeedsToInformIncrementalMarker(
+ masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
+ InformIncrementalMarker(masm, mode);
+ regs_.Restore(masm);
+ __ Ret();
+}
+
+
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
+ regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+ int argument_count = 3;
+ __ PrepareCallCFunction(argument_count, regs_.scratch0());
+ Register address =
+ r0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
+ ASSERT(!address.is(regs_.object()));
+ ASSERT(!address.is(r0));
+ __ Move(address, regs_.address());
+ __ Move(r0, regs_.object());
+ if (mode == INCREMENTAL_COMPACTION) {
+ __ Move(r1, address);
+ } else {
+ ASSERT(mode == INCREMENTAL);
+ __ ldr(r1, MemOperand(address, 0));
+ }
+ __ mov(r2, Operand(ExternalReference::isolate_address()));
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ if (mode == INCREMENTAL_COMPACTION) {
+ __ CallCFunction(
+ ExternalReference::incremental_evacuation_record_write_function(
+ masm->isolate()),
+ argument_count);
+ } else {
+ ASSERT(mode == INCREMENTAL);
+ __ CallCFunction(
+ ExternalReference::incremental_marking_record_write_function(
+ masm->isolate()),
+ argument_count);
+ }
+ regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+}
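The mode-dependent second argument above is the crux of this helper. A minimal
sketch of the selection, in plain C++ rather than the macro assembler (the
record-write entry points' real C signatures live elsewhere in V8; the reading
that compaction wants the slot itself while plain marking wants the value is
an inference from the two branches above):

    #include <cstdint>
    #include <cstdio>

    // Sketch only, not V8 code: compaction passes the slot address (so the
    // slot can be recorded and later updated); incremental marking passes
    // the value loaded from the slot (so it can be marked).
    enum Mode { INCREMENTAL, INCREMENTAL_COMPACTION };

    uintptr_t SecondArgument(Mode mode, uintptr_t* slot) {
      return mode == INCREMENTAL_COMPACTION
                 ? reinterpret_cast<uintptr_t>(slot)  // r1 = address
                 : *slot;                             // r1 = ldr [address]
    }

    int main() {
      uintptr_t v = 42;
      printf("%p vs %zu\n",
             (void*)SecondArgument(INCREMENTAL_COMPACTION, &v),
             (size_t)SecondArgument(INCREMENTAL, &v));
      return 0;
    }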
+
+
+void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
+ MacroAssembler* masm,
+ OnNoNeedToInformIncrementalMarker on_no_need,
+ Mode mode) {
+ Label on_black;
+ Label need_incremental;
+ Label need_incremental_pop_scratch;
+
+ // Let's look at the color of the object: if it is not black, we don't have
+ // to inform the incremental marker.
+ __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
+
+ regs_.Restore(masm);
+ if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+ } else {
+ __ Ret();
+ }
+
+ __ bind(&on_black);
+
+ // Get the value from the slot.
+ __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
+
+ if (mode == INCREMENTAL_COMPACTION) {
+ Label ensure_not_white;
+
+ __ CheckPageFlag(regs_.scratch0(), // Contains value.
+ regs_.scratch1(), // Scratch.
+ MemoryChunk::kEvacuationCandidateMask,
+ eq,
+ &ensure_not_white);
+
+ __ CheckPageFlag(regs_.object(),
+ regs_.scratch1(), // Scratch.
+ MemoryChunk::kSkipEvacuationSlotsRecordingMask,
+ eq,
+ &need_incremental);
+
+ __ bind(&ensure_not_white);
+ }
+
+ // We need extra registers for this, so we push the object and the address
+ // register temporarily.
+ __ Push(regs_.object(), regs_.address());
+ __ EnsureNotWhite(regs_.scratch0(), // The value.
+ regs_.scratch1(), // Scratch.
+ regs_.object(), // Scratch.
+ regs_.address(), // Scratch.
+ &need_incremental_pop_scratch);
+ __ Pop(regs_.object(), regs_.address());
+
+ regs_.Restore(masm);
+ if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+ } else {
+ __ Ret();
+ }
+
+ __ bind(&need_incremental_pop_scratch);
+ __ Pop(regs_.object(), regs_.address());
+
+ __ bind(&need_incremental);
+
+ // Fall through when we need to inform the incremental marker.
+}
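For orientation, the branch structure above condenses to the decision below.
This is an informal reading, not V8 code: the booleans stand in for the
page-flag and mark-bit tests, and the last check is slightly simplified
(EnsureNotWhite handles white data objects such as strings itself and only
bails out for white non-data values):

    #include <cstdio>

    // Condensation of CheckNeedsToInformIncrementalMarker (sketch).
    enum Outcome { kNoNeedToInform, kNeedIncremental };

    Outcome CheckNeeds(bool object_is_black,
                       bool compacting,  // mode == INCREMENTAL_COMPACTION
                       bool value_on_evacuation_candidate,
                       bool object_skips_slot_recording,
                       bool value_is_white) {
      if (!object_is_black) return kNoNeedToInform;       // not &on_black
      if (compacting &&
          value_on_evacuation_candidate &&
          !object_skips_slot_recording) {
        return kNeedIncremental;                          // &need_incremental
      }
      if (value_is_white) return kNeedIncremental;        // EnsureNotWhite bails
      return kNoNeedToInform;                             // value already marked
    }

    int main() {
      printf("%d\n", CheckNeeds(true, true, true, false, false));
      return 0;
    }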
+
+
+void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : element value to store
+ // -- r1 : array literal
+ // -- r2 : map of array literal
+ // -- r3 : element index as smi
+ // -- r4 : array literal index in function as smi
+ // -----------------------------------
+
+ Label element_done;
+ Label double_elements;
+ Label smi_element;
+ Label slow_elements;
+ Label fast_elements;
+
+ __ CheckFastElements(r2, r5, &double_elements);
+ // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
+ __ JumpIfSmi(r0, &smi_element);
+ __ CheckFastSmiOnlyElements(r2, r5, &fast_elements);
+
+ // Storing into the array literal requires an elements transition. Call into
+ // the runtime.
+ __ bind(&slow_elements);
+ __ Push(r1, r3, r0);
+ __ ldr(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ ldr(r5, FieldMemOperand(r5, JSFunction::kLiteralsOffset));
+ __ Push(r5, r4);
+ __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
+
+ // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
+ __ bind(&fast_elements);
+ __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
+ __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ str(r0, MemOperand(r6, 0));
+ // Update the write barrier for the array store.
+ __ RecordWrite(r5, r6, r0, kLRHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ Ret();
+
+ // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
+ // FAST_ELEMENTS, and value is Smi.
+ __ bind(&smi_element);
+ __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
+ __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ str(r0, FieldMemOperand(r6, FixedArray::kHeaderSize));
+ __ Ret();
+
+ // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
+ __ bind(&double_elements);
+ __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
+ __ StoreNumberToDoubleElements(r0, r3, r1, r5, r6, r7, r9, r10,
+ &slow_elements);
+ __ Ret();
+}
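Restated as ordinary C++, the stub's dispatch picks a store path from the
literal's elements kind and from whether the value is a smi. The enum and
helper below are illustrative stand-ins, not V8's declarations; the label
names in the comments map back to the code above:

    #include <cstdio>

    // Sketch of StoreArrayLiteralElementStub's dispatch (not V8 API).
    enum Kind { FAST_SMI_ONLY, FAST, FAST_DOUBLE };

    const char* StorePath(Kind kind, bool value_is_smi) {
      if (kind == FAST_DOUBLE) return "double_elements"; // StoreNumberToDoubleElements
      if (value_is_smi)        return "smi_element";     // plain store, no barrier
      if (kind == FAST_SMI_ONLY) return "slow_elements"; // runtime: needs transition
      return "fast_elements";                            // store + RecordWrite barrier
    }

    int main() {
      printf("%s\n", StorePath(FAST_SMI_ONLY, false));  // -> slow_elements
      return 0;
    }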
+
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h
index 557f7e6d41..38ed476cc1 100644
--- a/deps/v8/src/arm/code-stubs-arm.h
+++ b/deps/v8/src/arm/code-stubs-arm.h
@@ -58,6 +58,25 @@ class TranscendentalCacheStub: public CodeStub {
};
+class StoreBufferOverflowStub: public CodeStub {
+ public:
+ explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
+ : save_doubles_(save_fp) { }
+
+ void Generate(MacroAssembler* masm);
+
+ virtual bool IsPregenerated();
+ static void GenerateFixedRegStubsAheadOfTime();
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
+ private:
+ SaveFPRegsMode save_doubles_;
+
+ Major MajorKey() { return StoreBufferOverflow; }
+ int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
+};
+
+
class UnaryOpStub: public CodeStub {
public:
UnaryOpStub(Token::Value op,
@@ -117,7 +136,7 @@ class UnaryOpStub: public CodeStub {
return UnaryOpIC::ToState(operand_type_);
}
- virtual void FinishCode(Code* code) {
+ virtual void FinishCode(Handle<Code> code) {
code->set_unary_op_type(operand_type_);
}
};
@@ -216,7 +235,7 @@ class BinaryOpStub: public CodeStub {
return BinaryOpIC::ToState(operands_type_);
}
- virtual void FinishCode(Code* code) {
+ virtual void FinishCode(Handle<Code> code) {
code->set_binary_op_type(operands_type_);
code->set_binary_op_result_type(result_type_);
}
@@ -225,6 +244,70 @@ class BinaryOpStub: public CodeStub {
};
+class StringHelper : public AllStatic {
+ public:
+ // Generate code for copying characters using a simple loop. This should only
+ // be used in places where the number of characters is small and the
+ // additional setup and checking in GenerateCopyCharactersLong adds too much
+ // overhead. Copying of overlapping regions is not supported.
+ // Dest register ends at the position after the last character written.
+ static void GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch,
+ bool ascii);
+
+ // Generate code for copying a large number of characters. This function
+ // is allowed to spend extra time setting up conditions to make copying
+ // faster. Copying of overlapping regions is not supported.
+ // Dest register ends at the position after the last character written.
+ static void GenerateCopyCharactersLong(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Register scratch5,
+ int flags);
+
+
+ // Probe the symbol table for a two-character string. If the string is
+ // not found by probing, a jump to the label not_found is performed. This jump
+ // does not guarantee that the string is not in the symbol table. If the
+ // string is found the code falls through with the string in register r0.
+ // Contents of both c1 and c2 registers are modified. At the exit c1 is
+ // guaranteed to contain a halfword with low and high bytes equal to the
+ // initial contents of c1 and c2, respectively.
+ static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+ Register c1,
+ Register c2,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Register scratch5,
+ Label* not_found);
+
+ // Generate string hash.
+ static void GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character);
+
+ static void GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character);
+
+ static void GenerateHashGetHash(MacroAssembler* masm,
+ Register hash);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
+};
+
+
// Flag that indicates how to generate code for the stub StringAddStub.
enum StringAddFlags {
NO_STRING_ADD_FLAGS = 0,
@@ -323,6 +406,9 @@ class WriteInt32ToHeapNumberStub : public CodeStub {
the_heap_number_(the_heap_number),
scratch_(scratch) { }
+ bool IsPregenerated();
+ static void GenerateFixedRegStubsAheadOfTime();
+
private:
Register the_int_;
Register the_heap_number_;
@@ -371,6 +457,218 @@ class NumberToStringStub: public CodeStub {
};
+class RecordWriteStub: public CodeStub {
+ public:
+ RecordWriteStub(Register object,
+ Register value,
+ Register address,
+ RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode)
+ : object_(object),
+ value_(value),
+ address_(address),
+ remembered_set_action_(remembered_set_action),
+ save_fp_regs_mode_(fp_mode),
+ regs_(object, // An input reg.
+ address, // An input reg.
+ value) { // One scratch reg.
+ }
+
+ enum Mode {
+ STORE_BUFFER_ONLY,
+ INCREMENTAL,
+ INCREMENTAL_COMPACTION
+ };
+
+ virtual bool IsPregenerated();
+ static void GenerateFixedRegStubsAheadOfTime();
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
+ static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
+ masm->instr_at_put(pos, (masm->instr_at(pos) & ~B27) | (B24 | B20));
+ ASSERT(Assembler::IsTstImmediate(masm->instr_at(pos)));
+ }
+
+ static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
+ masm->instr_at_put(pos, (masm->instr_at(pos) & ~(B24 | B20)) | B27);
+ ASSERT(Assembler::IsBranch(masm->instr_at(pos)));
+ }
+
+ static Mode GetMode(Code* stub) {
+ Instr first_instruction = Assembler::instr_at(stub->instruction_start());
+ Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
+ Assembler::kInstrSize);
+
+ if (Assembler::IsBranch(first_instruction)) {
+ return INCREMENTAL;
+ }
+
+ ASSERT(Assembler::IsTstImmediate(first_instruction));
+
+ if (Assembler::IsBranch(second_instruction)) {
+ return INCREMENTAL_COMPACTION;
+ }
+
+ ASSERT(Assembler::IsTstImmediate(second_instruction));
+
+ return STORE_BUFFER_ONLY;
+ }
+
+ static void Patch(Code* stub, Mode mode) {
+ MacroAssembler masm(NULL,
+ stub->instruction_start(),
+ stub->instruction_size());
+ switch (mode) {
+ case STORE_BUFFER_ONLY:
+ ASSERT(GetMode(stub) == INCREMENTAL ||
+ GetMode(stub) == INCREMENTAL_COMPACTION);
+ PatchBranchIntoNop(&masm, 0);
+ PatchBranchIntoNop(&masm, Assembler::kInstrSize);
+ break;
+ case INCREMENTAL:
+ ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+ PatchNopIntoBranch(&masm, 0);
+ break;
+ case INCREMENTAL_COMPACTION:
+ ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+ PatchNopIntoBranch(&masm, Assembler::kInstrSize);
+ break;
+ }
+ ASSERT(GetMode(stub) == mode);
+ CPU::FlushICache(stub->instruction_start(), 2 * Assembler::kInstrSize);
+ }
+
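The nop/branch flipping above is plain bit surgery on the first two stub
instructions. A self-contained illustration, assuming AL-conditioned
encodings (B20/B24/B27 mirror the assembler's bit constants; this is a
sketch, not V8 code):

    #include <cstdint>
    #include <cstdio>

    // On ARM, an AL-conditioned "tst rX, #imm" (0xE31xxxxx) and a
    // "b <offset>" (0xEAxxxxxx) differ only in bits 20, 24 and 27, so the
    // stub header can be flipped between a flag-setting nop and a branch
    // while the low bits (the branch offset) are preserved.
    const uint32_t B20 = 1u << 20, B24 = 1u << 24, B27 = 1u << 27;

    uint32_t BranchIntoNop(uint32_t i) { return (i & ~B27) | (B24 | B20); }
    uint32_t NopIntoBranch(uint32_t i) { return (i & ~(B24 | B20)) | B27; }

    int main() {
      uint32_t branch = 0xEA000010;          // b <offset>
      uint32_t nop = BranchIntoNop(branch);  // 0xE3100010: tst r0, #16
      printf("%08X -> %08X -> %08X\n", branch, nop, NopIntoBranch(nop));
      return 0;
    }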
+ private:
+ // This is a helper class for freeing up 3 scratch registers. The input is
+ // two registers that must be preserved and one scratch register provided by
+ // the caller.
+ class RegisterAllocation {
+ public:
+ RegisterAllocation(Register object,
+ Register address,
+ Register scratch0)
+ : object_(object),
+ address_(address),
+ scratch0_(scratch0) {
+ ASSERT(!AreAliased(scratch0, object, address, no_reg));
+ scratch1_ = GetRegThatIsNotOneOf(object_, address_, scratch0_);
+ }
+
+ void Save(MacroAssembler* masm) {
+ ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
+ // We don't have to save scratch0_ because it was given to us as
+ // a scratch register.
+ masm->push(scratch1_);
+ }
+
+ void Restore(MacroAssembler* masm) {
+ masm->pop(scratch1_);
+ }
+
+ // If we have to call into C then we need to save and restore all caller-
+ // saved registers that were not already preserved. The scratch registers
+ // will be restored by other means so we don't bother pushing them here.
+ void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
+ masm->stm(db_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
+ if (mode == kSaveFPRegs) {
+ CpuFeatures::Scope scope(VFP3);
+ masm->sub(sp,
+ sp,
+ Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1)));
+ // Save all VFP registers except d0.
+ for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) {
+ DwVfpRegister reg = DwVfpRegister::from_code(i);
+ masm->vstr(reg, MemOperand(sp, (i - 1) * kDoubleSize));
+ }
+ }
+ }
+
+ inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
+ SaveFPRegsMode mode) {
+ if (mode == kSaveFPRegs) {
+ CpuFeatures::Scope scope(VFP3);
+ // Restore all VFP registers except d0.
+ for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) {
+ DwVfpRegister reg = DwVfpRegister::from_code(i);
+ masm->vldr(reg, MemOperand(sp, (i - 1) * kDoubleSize));
+ }
+ masm->add(sp,
+ sp,
+ Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1)));
+ }
+ masm->ldm(ia_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
+ }
+
+ inline Register object() { return object_; }
+ inline Register address() { return address_; }
+ inline Register scratch0() { return scratch0_; }
+ inline Register scratch1() { return scratch1_; }
+
+ private:
+ Register object_;
+ Register address_;
+ Register scratch0_;
+ Register scratch1_;
+
+ Register GetRegThatIsNotOneOf(Register r1,
+ Register r2,
+ Register r3) {
+ for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
+ Register candidate = Register::FromAllocationIndex(i);
+ if (candidate.is(r1)) continue;
+ if (candidate.is(r2)) continue;
+ if (candidate.is(r3)) continue;
+ return candidate;
+ }
+ UNREACHABLE();
+ return no_reg;
+ }
+ friend class RecordWriteStub;
+ };
+
+ enum OnNoNeedToInformIncrementalMarker {
+ kReturnOnNoNeedToInformIncrementalMarker,
+ kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
+ };
+
+ void Generate(MacroAssembler* masm);
+ void GenerateIncremental(MacroAssembler* masm, Mode mode);
+ void CheckNeedsToInformIncrementalMarker(
+ MacroAssembler* masm,
+ OnNoNeedToInformIncrementalMarker on_no_need,
+ Mode mode);
+ void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
+
+ Major MajorKey() { return RecordWrite; }
+
+ int MinorKey() {
+ return ObjectBits::encode(object_.code()) |
+ ValueBits::encode(value_.code()) |
+ AddressBits::encode(address_.code()) |
+ RememberedSetActionBits::encode(remembered_set_action_) |
+ SaveFPRegsModeBits::encode(save_fp_regs_mode_);
+ }
+
+ void Activate(Code* code) {
+ code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+ }
+
+ class ObjectBits: public BitField<int, 0, 4> {};
+ class ValueBits: public BitField<int, 4, 4> {};
+ class AddressBits: public BitField<int, 8, 4> {};
+ class RememberedSetActionBits: public BitField<RememberedSetAction, 12, 1> {};
+ class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 13, 1> {};
+
+ Register object_;
+ Register value_;
+ Register address_;
+ RememberedSetAction remembered_set_action_;
+ SaveFPRegsMode save_fp_regs_mode_;
+ Label slow_;
+ RegisterAllocation regs_;
+};
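As a worked example of the MinorKey encoding above, here is the same packing
done by hand. It is a sketch resting on two assumptions: the register code of
rN is N, and EMIT_REMEMBERED_SET and kDontSaveFPRegs both encode as 0:

    #include <cassert>
    #include <cstdint>

    // Hand-rolled version of the BitField layout in MinorKey (illustration
    // only): object/value/address take 4 bits each, the remembered-set
    // action and FP mode one bit each.
    uint32_t MinorKeyFor(uint32_t object, uint32_t value, uint32_t address,
                         uint32_t action, uint32_t fp_mode) {
      assert(object < 16 && value < 16 && address < 16);
      return object | (value << 4) | (address << 8) |
             (action << 12) | (fp_mode << 13);
    }

    int main() {
      // The pregenerated RegExpExecStub entry { r6, r4, r7,
      // EMIT_REMEMBERED_SET } with kDontSaveFPRegs (both assumed 0):
      assert(MinorKeyFor(6, 4, 7, 0, 0) == 0x746);
      return 0;
    }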
+
+
// Enter C code from generated RegExp code in a way that allows
// the C code to fix the return address in case of a GC.
// Currently only needed on ARM.
@@ -558,14 +856,13 @@ class StringDictionaryLookupStub: public CodeStub {
void Generate(MacroAssembler* masm);
- MUST_USE_RESULT static MaybeObject* GenerateNegativeLookup(
- MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register receiver,
- Register properties,
- String* name,
- Register scratch0);
+ static void GenerateNegativeLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register receiver,
+ Register properties,
+ Handle<String> name,
+ Register scratch0);
static void GeneratePositiveLookup(MacroAssembler* masm,
Label* miss,
@@ -575,6 +872,8 @@ class StringDictionaryLookupStub: public CodeStub {
Register r0,
Register r1);
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
private:
static const int kInlinedProbes = 4;
static const int kTotalProbes = 20;
@@ -587,7 +886,7 @@ class StringDictionaryLookupStub: public CodeStub {
StringDictionary::kHeaderSize +
StringDictionary::kElementsStartIndex * kPointerSize;
- Major MajorKey() { return StringDictionaryNegativeLookup; }
+ Major MajorKey() { return StringDictionaryLookup; }
int MinorKey() {
return LookupModeBits::encode(mode_);
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index bf748a9b6a..3371e8a6b1 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -30,22 +30,367 @@
#if defined(V8_TARGET_ARCH_ARM)
#include "codegen.h"
+#include "macro-assembler.h"
namespace v8 {
namespace internal {
+#define __ ACCESS_MASM(masm)
+
// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
- masm->EnterInternalFrame();
+ masm->EnterFrame(StackFrame::INTERNAL);
+ ASSERT(!masm->has_frame());
+ masm->set_has_frame(true);
}
void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
- masm->LeaveInternalFrame();
+ masm->LeaveFrame(StackFrame::INTERNAL);
+ ASSERT(masm->has_frame());
+ masm->set_has_frame(false);
+}
+
+
+// -------------------------------------------------------------------------
+// Code generators
+
+void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
+ MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : value
+ // -- r1 : key
+ // -- r2 : receiver
+ // -- lr : return address
+ // -- r3 : target map, scratch for subsequent call
+ // -- r4 : scratch (elements)
+ // -----------------------------------
+ // Set transitioned map.
+ __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ RecordWriteField(r2,
+ HeapObject::kMapOffset,
+ r3,
+ r9,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+}
+
+
+void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
+ MacroAssembler* masm, Label* fail) {
+ // ----------- S t a t e -------------
+ // -- r0 : value
+ // -- r1 : key
+ // -- r2 : receiver
+ // -- lr : return address
+ // -- r3 : target map, scratch for subsequent call
+ // -- r4 : scratch (elements)
+ // -----------------------------------
+ Label loop, entry, convert_hole, gc_required;
+ bool vfp3_supported = CpuFeatures::IsSupported(VFP3);
+ __ push(lr);
+
+ __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
+ __ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
+ // r4: source FixedArray
+ // r5: number of elements (smi-tagged)
+
+ // Allocate new FixedDoubleArray.
+ __ mov(lr, Operand(FixedDoubleArray::kHeaderSize));
+ __ add(lr, lr, Operand(r5, LSL, 2));
+ __ AllocateInNewSpace(lr, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
+ // r6: destination FixedDoubleArray, not tagged as heap object
+ __ LoadRoot(r9, Heap::kFixedDoubleArrayMapRootIndex);
+ __ str(r9, MemOperand(r6, HeapObject::kMapOffset));
+ // Set destination FixedDoubleArray's length.
+ __ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
+ // Update receiver's map.
+
+ __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ RecordWriteField(r2,
+ HeapObject::kMapOffset,
+ r3,
+ r9,
+ kLRHasBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ // Replace receiver's backing store with newly created FixedDoubleArray.
+ __ add(r3, r6, Operand(kHeapObjectTag));
+ __ str(r3, FieldMemOperand(r2, JSObject::kElementsOffset));
+ __ RecordWriteField(r2,
+ JSObject::kElementsOffset,
+ r3,
+ r9,
+ kLRHasBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ // Prepare for conversion loop.
+ __ add(r3, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ add(r7, r6, Operand(FixedDoubleArray::kHeaderSize));
+ __ add(r6, r7, Operand(r5, LSL, 2));
+ __ mov(r4, Operand(kHoleNanLower32));
+ __ mov(r5, Operand(kHoleNanUpper32));
+ // r3: begin of source FixedArray element fields, not tagged
+ // r4: kHoleNanLower32
+ // r5: kHoleNanUpper32
+ // r6: end of destination FixedDoubleArray, not tagged
+ // r7: begin of FixedDoubleArray element fields, not tagged
+ if (!vfp3_supported) __ Push(r1, r0);
+
+ __ b(&entry);
+
+ // Call into runtime if GC is required.
+ __ bind(&gc_required);
+ __ pop(lr);
+ __ b(fail);
+
+ // Convert and copy elements.
+ __ bind(&loop);
+ __ ldr(r9, MemOperand(r3, 4, PostIndex));
+ // r9: current element
+ __ JumpIfNotSmi(r9, &convert_hole);
+
+ // Normal smi, convert to double and store.
+ __ SmiUntag(r9);
+ if (vfp3_supported) {
+ CpuFeatures::Scope scope(VFP3);
+ __ vmov(s0, r9);
+ __ vcvt_f64_s32(d0, s0);
+ __ vstr(d0, r7, 0);
+ __ add(r7, r7, Operand(8));
+ } else {
+ FloatingPointHelper::ConvertIntToDouble(masm,
+ r9,
+ FloatingPointHelper::kCoreRegisters,
+ d0,
+ r0,
+ r1,
+ lr,
+ s0);
+ __ Strd(r0, r1, MemOperand(r7, 8, PostIndex));
+ }
+ __ b(&entry);
+
+ // Hole found, store the-hole NaN.
+ __ bind(&convert_hole);
+ if (FLAG_debug_code) {
+ __ CompareRoot(r9, Heap::kTheHoleValueRootIndex);
+ __ Assert(eq, "object found in smi-only array");
+ }
+ __ Strd(r4, r5, MemOperand(r7, 8, PostIndex));
+
+ __ bind(&entry);
+ __ cmp(r7, r6);
+ __ b(lt, &loop);
+
+ if (!vfp3_supported) __ Pop(r1, r0);
+ __ pop(lr);
+}
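Per element, the loop above performs the conversion below. This is a sketch
with stand-in hole-NaN constants; the real kHoleNanUpper32/kHoleNanLower32
are defined elsewhere in V8 and may differ from the placeholder values used
here:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Placeholders for kHoleNanUpper32/kHoleNanLower32 (illustrative only).
    const uint32_t kHoleUpper = 0x7fffffff;
    const uint32_t kHoleLower = 0xffffffff;

    // One iteration of the conversion loop: a smi (low tag bit clear)
    // becomes its double value; anything else must be the hole and is
    // stored as the hole-NaN bit pattern.
    uint64_t ConvertElement(uint32_t tagged) {
      if ((tagged & 1) == 0) {                         // JumpIfNotSmi
        double d = static_cast<int32_t>(tagged) >> 1;  // SmiUntag, int->double
        uint64_t bits;
        memcpy(&bits, &d, sizeof(bits));
        return bits;
      }
      return (static_cast<uint64_t>(kHoleUpper) << 32) | kHoleLower;
    }

    int main() {
      printf("%016llx\n", (unsigned long long)ConvertElement(42u << 1));
      return 0;
    }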
+
+
+void ElementsTransitionGenerator::GenerateDoubleToObject(
+ MacroAssembler* masm, Label* fail) {
+ // ----------- S t a t e -------------
+ // -- r0 : value
+ // -- r1 : key
+ // -- r2 : receiver
+ // -- lr : return address
+ // -- r3 : target map, scratch for subsequent call
+ // -- r4 : scratch (elements)
+ // -----------------------------------
+ Label entry, loop, convert_hole, gc_required;
+
+ __ push(lr);
+ __ Push(r3, r2, r1, r0);
+
+ __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
+ __ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
+ // r4: source FixedDoubleArray
+ // r5: number of elements (smi-tagged)
+
+ // Allocate new FixedArray.
+ __ mov(r0, Operand(FixedDoubleArray::kHeaderSize));
+ __ add(r0, r0, Operand(r5, LSL, 1));
+ __ AllocateInNewSpace(r0, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
+ // r6: destination FixedArray, not tagged as heap object
+ __ LoadRoot(r9, Heap::kFixedArrayMapRootIndex);
+ __ str(r9, MemOperand(r6, HeapObject::kMapOffset));
+ // Set destination FixedArray's length.
+ __ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
+
+ // Prepare for conversion loop.
+ __ add(r4, r4, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
+ __ add(r3, r6, Operand(FixedArray::kHeaderSize));
+ __ add(r6, r6, Operand(kHeapObjectTag));
+ __ add(r5, r3, Operand(r5, LSL, 1));
+ __ LoadRoot(r7, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(r9, Heap::kHeapNumberMapRootIndex);
+ // Using offset addresses in r4 to take full advantage of post-indexing.
+ // r3: begin of destination FixedArray element fields, not tagged
+ // r4: begin of source FixedDoubleArray element fields, not tagged, +4
+ // r5: end of destination FixedArray, not tagged
+ // r6: destination FixedArray
+ // r7: the-hole pointer
+ // r9: heap number map
+ __ b(&entry);
+
+ // Call into runtime if GC is required.
+ __ bind(&gc_required);
+ __ Pop(r3, r2, r1, r0);
+ __ pop(lr);
+ __ b(fail);
+
+ __ bind(&loop);
+ __ ldr(r1, MemOperand(r4, 8, PostIndex));
+ // r1: current element's upper 32 bits
+ // r4: address of next element's upper 32 bits
+ __ cmp(r1, Operand(kHoleNanUpper32));
+ __ b(eq, &convert_hole);
+
+ // Non-hole double, copy value into a heap number.
+ __ AllocateHeapNumber(r2, r0, lr, r9, &gc_required);
+ // r2: new heap number
+ __ ldr(r0, MemOperand(r4, 12, NegOffset));
+ __ Strd(r0, r1, FieldMemOperand(r2, HeapNumber::kValueOffset));
+ __ mov(r0, r3);
+ __ str(r2, MemOperand(r3, 4, PostIndex));
+ __ RecordWrite(r6,
+ r0,
+ r2,
+ kLRHasBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ b(&entry);
+
+ // Replace the-hole NaN with the-hole pointer.
+ __ bind(&convert_hole);
+ __ str(r7, MemOperand(r3, 4, PostIndex));
+
+ __ bind(&entry);
+ __ cmp(r3, r5);
+ __ b(lt, &loop);
+
+ __ Pop(r3, r2, r1, r0);
+ // Update receiver's map.
+ __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ RecordWriteField(r2,
+ HeapObject::kMapOffset,
+ r3,
+ r9,
+ kLRHasBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ // Replace receiver's backing store with newly created and filled FixedArray.
+ __ str(r6, FieldMemOperand(r2, JSObject::kElementsOffset));
+ __ RecordWriteField(r2,
+ JSObject::kElementsOffset,
+ r6,
+ r9,
+ kLRHasBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ pop(lr);
+}
+
+
+void StringCharLoadGenerator::Generate(MacroAssembler* masm,
+ Register string,
+ Register index,
+ Register result,
+ Label* call_runtime) {
+ // Fetch the instance type of the receiver into result register.
+ __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+ // We need special handling for indirect strings.
+ Label check_sequential;
+ __ tst(result, Operand(kIsIndirectStringMask));
+ __ b(eq, &check_sequential);
+
+ // Dispatch on the indirect string shape: slice or cons.
+ Label cons_string;
+ __ tst(result, Operand(kSlicedNotConsMask));
+ __ b(eq, &cons_string);
+
+ // Handle slices.
+ Label indirect_string_loaded;
+ __ ldr(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
+ __ add(index, index, Operand(result, ASR, kSmiTagSize));
+ __ ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
+ __ jmp(&indirect_string_loaded);
+
+ // Handle cons strings.
+ // Check whether the right hand side is the empty string (i.e. if
+ // this is really a flat string in a cons string). If that is not
+ // the case we would rather go to the runtime system now to flatten
+ // the string.
+ __ bind(&cons_string);
+ __ ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
+ __ LoadRoot(ip, Heap::kEmptyStringRootIndex);
+ __ cmp(result, ip);
+ __ b(ne, call_runtime);
+ // Get the first of the two strings and load its instance type.
+ __ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
+
+ __ bind(&indirect_string_loaded);
+ __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+ // Distinguish sequential and external strings. Only these two string
+ // representations can reach here (slices and flat cons strings have been
+ // reduced to the underlying sequential or external string).
+ Label external_string, check_encoding;
+ __ bind(&check_sequential);
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ tst(result, Operand(kStringRepresentationMask));
+ __ b(ne, &external_string);
+
+ // Prepare sequential strings.
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ __ add(string,
+ string,
+ Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ __ jmp(&check_encoding);
+
+ // Handle external strings.
+ __ bind(&external_string);
+ if (FLAG_debug_code) {
+ // Assert that we do not have a cons or slice (indirect strings) here.
+ // Sequential strings have already been ruled out.
+ __ tst(result, Operand(kIsIndirectStringMask));
+ __ Assert(eq, "external string expected, but not found");
+ }
+ // Rule out short external strings.
+ STATIC_CHECK(kShortExternalStringTag != 0);
+ __ tst(result, Operand(kShortExternalStringMask));
+ __ b(ne, call_runtime);
+ __ ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
+
+ Label ascii, done;
+ __ bind(&check_encoding);
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ __ tst(result, Operand(kStringEncodingMask));
+ __ b(ne, &ascii);
+ // Two-byte string.
+ __ ldrh(result, MemOperand(string, index, LSL, 1));
+ __ jmp(&done);
+ __ bind(&ascii);
+ // Ascii string.
+ __ ldrb(result, MemOperand(string, index));
+ __ bind(&done);
}
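Once the generator has reduced the receiver to a sequential or external
backing store, the final load is a plain indexed read. A minimal model of
those last two instructions (the pointer below stands in for the
post-adjustment string register; sketch only, not V8 code):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Two-byte strings read a halfword at index * 2 (ldrh with LSL #1);
    // ASCII strings read a byte (ldrb).
    uint16_t LoadChar(const void* chars, uint32_t index, bool two_byte) {
      if (two_byte) {
        uint16_t c;
        memcpy(&c, static_cast<const uint8_t*>(chars) + (index << 1), sizeof(c));
        return c;
      }
      return static_cast<const uint8_t*>(chars)[index];
    }

    int main() {
      const char ascii[] = "abc";
      printf("%c\n", static_cast<char>(LoadChar(ascii, 1, false)));  // b
      return 0;
    }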
+#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h
index d27982abac..c340e6b108 100644
--- a/deps/v8/src/arm/codegen-arm.h
+++ b/deps/v8/src/arm/codegen-arm.h
@@ -29,7 +29,6 @@
#define V8_ARM_CODEGEN_ARM_H_
#include "ast.h"
-#include "code-stubs-arm.h"
#include "ic-inl.h"
namespace v8 {
@@ -69,21 +68,26 @@ class CodeGenerator: public AstVisitor {
int pos,
bool right_here = false);
- // Constants related to patching of inlined load/store.
- static int GetInlinedKeyedLoadInstructionsAfterPatch() {
- return FLAG_debug_code ? 32 : 13;
- }
- static const int kInlinedKeyedStoreInstructionsAfterPatch = 8;
- static int GetInlinedNamedStoreInstructionsAfterPatch() {
- ASSERT(Isolate::Current()->inlined_write_barrier_size() != -1);
- return Isolate::Current()->inlined_write_barrier_size() + 4;
- }
-
private:
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
+class StringCharLoadGenerator : public AllStatic {
+ public:
+ // Generates the code for handling different string types and loading the
+ // indexed character into |result|. We expect |index| as untagged input and
+ // |result| as untagged output.
+ static void Generate(MacroAssembler* masm,
+ Register string,
+ Register index,
+ Register result,
+ Label* call_runtime);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
+};
+
} } // namespace v8::internal
#endif // V8_ARM_CODEGEN_ARM_H_
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index 823c6ff7e1..49b8db79f8 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -87,22 +87,21 @@ namespace v8 {
namespace internal {
// Constant pool marker.
-static const int kConstantPoolMarkerMask = 0xffe00000;
-static const int kConstantPoolMarker = 0x0c000000;
-static const int kConstantPoolLengthMask = 0x001ffff;
+const int kConstantPoolMarkerMask = 0xffe00000;
+const int kConstantPoolMarker = 0x0c000000;
+const int kConstantPoolLengthMask = 0x001ffff;
// Number of registers in normal ARM mode.
-static const int kNumRegisters = 16;
+const int kNumRegisters = 16;
// VFP support.
-static const int kNumVFPSingleRegisters = 32;
-static const int kNumVFPDoubleRegisters = 16;
-static const int kNumVFPRegisters =
- kNumVFPSingleRegisters + kNumVFPDoubleRegisters;
+const int kNumVFPSingleRegisters = 32;
+const int kNumVFPDoubleRegisters = 16;
+const int kNumVFPRegisters = kNumVFPSingleRegisters + kNumVFPDoubleRegisters;
// PC is register 15.
-static const int kPCRegister = 15;
-static const int kNoRegister = -1;
+const int kPCRegister = 15;
+const int kNoRegister = -1;
// -----------------------------------------------------------------------------
// Conditions.
@@ -371,9 +370,9 @@ enum SoftwareInterruptCodes {
// stop
kStopCode = 1 << 23
};
-static const uint32_t kStopCodeMask = kStopCode - 1;
-static const uint32_t kMaxStopCode = kStopCode - 1;
-static const int32_t kDefaultStopCode = -1;
+const uint32_t kStopCodeMask = kStopCode - 1;
+const uint32_t kMaxStopCode = kStopCode - 1;
+const int32_t kDefaultStopCode = -1;
// Type of VFP register. Determines register encoding.
@@ -391,17 +390,17 @@ enum VFPConversionMode {
// This mask does not include the "inexact" or "input denormal" cumulative
// exceptions flags, because we usually don't want to check for it.
-static const uint32_t kVFPExceptionMask = 0xf;
-static const uint32_t kVFPInvalidOpExceptionBit = 1 << 0;
-static const uint32_t kVFPOverflowExceptionBit = 1 << 2;
-static const uint32_t kVFPUnderflowExceptionBit = 1 << 3;
-static const uint32_t kVFPInexactExceptionBit = 1 << 4;
-static const uint32_t kVFPFlushToZeroMask = 1 << 24;
+const uint32_t kVFPExceptionMask = 0xf;
+const uint32_t kVFPInvalidOpExceptionBit = 1 << 0;
+const uint32_t kVFPOverflowExceptionBit = 1 << 2;
+const uint32_t kVFPUnderflowExceptionBit = 1 << 3;
+const uint32_t kVFPInexactExceptionBit = 1 << 4;
+const uint32_t kVFPFlushToZeroMask = 1 << 24;
-static const uint32_t kVFPNConditionFlagBit = 1 << 31;
-static const uint32_t kVFPZConditionFlagBit = 1 << 30;
-static const uint32_t kVFPCConditionFlagBit = 1 << 29;
-static const uint32_t kVFPVConditionFlagBit = 1 << 28;
+const uint32_t kVFPNConditionFlagBit = 1 << 31;
+const uint32_t kVFPZConditionFlagBit = 1 << 30;
+const uint32_t kVFPCConditionFlagBit = 1 << 29;
+const uint32_t kVFPVConditionFlagBit = 1 << 28;
// VFP rounding modes. See ARM DDI 0406B Page A2-29.
@@ -418,7 +417,7 @@ enum VFPRoundingMode {
kRoundToZero = RZ
};
-static const uint32_t kVFPRoundingModeMask = 3 << 22;
+const uint32_t kVFPRoundingModeMask = 3 << 22;
enum CheckForInexactConversion {
kCheckForInexactConversion,
diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc
index 07a22722c8..837410302a 100644
--- a/deps/v8/src/arm/debug-arm.cc
+++ b/deps/v8/src/arm/debug-arm.cc
@@ -132,55 +132,57 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
RegList object_regs,
RegList non_object_regs) {
- __ EnterInternalFrame();
-
- // Store the registers containing live values on the expression stack to
- // make sure that these are correctly updated during GC. Non object values
- // are stored as a smi causing it to be untouched by GC.
- ASSERT((object_regs & ~kJSCallerSaved) == 0);
- ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
- ASSERT((object_regs & non_object_regs) == 0);
- if ((object_regs | non_object_regs) != 0) {
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if ((non_object_regs & (1 << r)) != 0) {
- if (FLAG_debug_code) {
- __ tst(reg, Operand(0xc0000000));
- __ Assert(eq, "Unable to encode value as smi");
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Store the registers containing live values on the expression stack to
+ // make sure that these are correctly updated during GC. Non-object values
+ // are stored as smis, causing them to be left untouched by GC.
+ ASSERT((object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((object_regs & non_object_regs) == 0);
+ if ((object_regs | non_object_regs) != 0) {
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ if ((non_object_regs & (1 << r)) != 0) {
+ if (FLAG_debug_code) {
+ __ tst(reg, Operand(0xc0000000));
+ __ Assert(eq, "Unable to encode value as smi");
+ }
+ __ mov(reg, Operand(reg, LSL, kSmiTagSize));
}
- __ mov(reg, Operand(reg, LSL, kSmiTagSize));
}
+ __ stm(db_w, sp, object_regs | non_object_regs);
}
- __ stm(db_w, sp, object_regs | non_object_regs);
- }
#ifdef DEBUG
- __ RecordComment("// Calling from debug break to runtime - come in - over");
+ __ RecordComment("// Calling from debug break to runtime - come in - over");
#endif
- __ mov(r0, Operand(0, RelocInfo::NONE)); // no arguments
- __ mov(r1, Operand(ExternalReference::debug_break(masm->isolate())));
-
- CEntryStub ceb(1);
- __ CallStub(&ceb);
-
- // Restore the register values from the expression stack.
- if ((object_regs | non_object_regs) != 0) {
- __ ldm(ia_w, sp, object_regs | non_object_regs);
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if ((non_object_regs & (1 << r)) != 0) {
- __ mov(reg, Operand(reg, LSR, kSmiTagSize));
- }
- if (FLAG_debug_code &&
- (((object_regs |non_object_regs) & (1 << r)) == 0)) {
- __ mov(reg, Operand(kDebugZapValue));
+ __ mov(r0, Operand(0, RelocInfo::NONE)); // no arguments
+ __ mov(r1, Operand(ExternalReference::debug_break(masm->isolate())));
+
+ CEntryStub ceb(1);
+ __ CallStub(&ceb);
+
+ // Restore the register values from the expression stack.
+ if ((object_regs | non_object_regs) != 0) {
+ __ ldm(ia_w, sp, object_regs | non_object_regs);
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ if ((non_object_regs & (1 << r)) != 0) {
+ __ mov(reg, Operand(reg, LSR, kSmiTagSize));
+ }
+ if (FLAG_debug_code &&
+ (((object_regs | non_object_regs) & (1 << r)) == 0)) {
+ __ mov(reg, Operand(kDebugZapValue));
+ }
}
}
- }
- __ LeaveInternalFrame();
+ // Leave the internal frame.
+ }
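The smi trick used for the non-object registers above can be shown in
isolation. A sketch in plain C++ rather than the macro assembler, assuming a
32-bit word with a one-bit smi tag of zero:

    #include <cassert>
    #include <cstdint>

    // Shifting left by the smi tag size gives the value a zero tag bit, so
    // the GC reads it as a smi and never relocates it; shifting right
    // restores the value. The assert mirrors the tst against 0xc0000000.
    uint32_t TagAsSmi(uint32_t v) {
      assert((v & 0xc0000000u) == 0);  // "Unable to encode value as smi"
      return v << 1;                   // mov reg, reg LSL #kSmiTagSize
    }
    uint32_t Untag(uint32_t s) { return s >> 1; }  // LSR #kSmiTagSize

    int main() {
      assert(Untag(TagAsSmi(0x1234u)) == 0x1234u);
      return 0;
    }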
// Now that the break point has been handled, resume normal execution by
// jumping to the target address intended by the caller and that was
@@ -265,11 +267,11 @@ void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
+void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // No registers used on entry.
+ // -- r1 : function
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, 0, 0);
+ Generate_DebugBreakCallHelper(masm, r1.bit(), 0);
}
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index 00357f76db..4b54b6dbc2 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -44,12 +44,6 @@ int Deoptimizer::patch_size() {
}
-void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
- // Nothing to do. No new relocation information is written for lazy
- // deoptimization on ARM.
-}
-
-
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
HandleScope scope;
AssertNoAllocation no_allocation;
@@ -58,66 +52,51 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
// Get the optimized code.
Code* code = function->code();
+ Address code_start_address = code->instruction_start();
// Invalidate the relocation information, as it will become invalid by the
// code patching below, and is not needed any more.
code->InvalidateRelocation();
- // For each return after a safepoint insert an absolute call to the
- // corresponding deoptimization entry.
- unsigned last_pc_offset = 0;
- SafepointTable table(function->code());
- for (unsigned i = 0; i < table.length(); i++) {
- unsigned pc_offset = table.GetPcOffset(i);
- SafepointEntry safepoint_entry = table.GetEntry(i);
- int deoptimization_index = safepoint_entry.deoptimization_index();
- int gap_code_size = safepoint_entry.gap_code_size();
- // Check that we did not shoot past next safepoint.
- CHECK(pc_offset >= last_pc_offset);
+ // For each LLazyBailout instruction insert a call to the corresponding
+ // deoptimization entry.
+ DeoptimizationInputData* deopt_data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
#ifdef DEBUG
- // Destroy the code which is not supposed to be run again.
- int instructions = (pc_offset - last_pc_offset) / Assembler::kInstrSize;
- CodePatcher destroyer(code->instruction_start() + last_pc_offset,
- instructions);
- for (int x = 0; x < instructions; x++) {
- destroyer.masm()->bkpt(0);
- }
+ Address prev_call_address = NULL;
#endif
- last_pc_offset = pc_offset;
- if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
- Address deoptimization_entry = Deoptimizer::GetDeoptimizationEntry(
- deoptimization_index, Deoptimizer::LAZY);
- last_pc_offset += gap_code_size;
- int call_size_in_bytes = MacroAssembler::CallSize(deoptimization_entry,
- RelocInfo::NONE);
- int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
- ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
- ASSERT(call_size_in_bytes <= patch_size());
- CodePatcher patcher(code->instruction_start() + last_pc_offset,
- call_size_in_words);
- patcher.masm()->Call(deoptimization_entry, RelocInfo::NONE);
- last_pc_offset += call_size_in_bytes;
- }
- }
-
-
+ for (int i = 0; i < deopt_data->DeoptCount(); i++) {
+ if (deopt_data->Pc(i)->value() == -1) continue;
+ Address call_address = code_start_address + deopt_data->Pc(i)->value();
+ Address deopt_entry = GetDeoptimizationEntry(i, LAZY);
+ int call_size_in_bytes = MacroAssembler::CallSize(deopt_entry,
+ RelocInfo::NONE);
+ int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
+ ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
+ ASSERT(call_size_in_bytes <= patch_size());
+ CodePatcher patcher(call_address, call_size_in_words);
+ patcher.masm()->Call(deopt_entry, RelocInfo::NONE);
+ ASSERT(prev_call_address == NULL ||
+ call_address >= prev_call_address + patch_size());
+ ASSERT(call_address + patch_size() <= code->instruction_end());
#ifdef DEBUG
- // Destroy the code which is not supposed to be run again.
- int instructions =
- (code->safepoint_table_offset() - last_pc_offset) / Assembler::kInstrSize;
- CodePatcher destroyer(code->instruction_start() + last_pc_offset,
- instructions);
- for (int x = 0; x < instructions; x++) {
- destroyer.masm()->bkpt(0);
- }
+ prev_call_address = call_address;
#endif
+ }
+
+ Isolate* isolate = code->GetIsolate();
// Add the deoptimizing code to the list.
DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
- DeoptimizerData* data = code->GetIsolate()->deoptimizer_data();
+ DeoptimizerData* data = isolate->deoptimizer_data();
node->set_next(data->deoptimizing_code_list_);
data->deoptimizing_code_list_ = node;
+ // We might be in the middle of incremental marking with compaction.
+ // Tell collector to treat this code object in a special way and
+ // ignore all slots that might have been recorded on it.
+ isolate->heap()->mark_compact_collector()->InvalidateCode(code);
+
// Set the code for the function to non-optimized version.
function->ReplaceCode(function->shared()->code());
@@ -125,16 +104,12 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
PrintF("[forced deoptimization: ");
function->PrintName();
PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
-#ifdef DEBUG
- if (FLAG_print_code) {
- code->PrintLn();
- }
-#endif
}
}
-void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
+void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
+ Address pc_after,
Code* check_code,
Code* replacement_code) {
const int kInstrSize = Assembler::kInstrSize;
@@ -169,10 +144,14 @@ void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
reinterpret_cast<uint32_t>(check_code->entry()));
Memory::uint32_at(stack_check_address_pointer) =
reinterpret_cast<uint32_t>(replacement_code->entry());
+
+ unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, pc_after - 2 * kInstrSize, replacement_code);
}
-void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
+void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
+ Address pc_after,
Code* check_code,
Code* replacement_code) {
const int kInstrSize = Assembler::kInstrSize;
@@ -193,6 +172,9 @@ void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
reinterpret_cast<uint32_t>(replacement_code->entry()));
Memory::uint32_at(stack_check_address_pointer) =
reinterpret_cast<uint32_t>(check_code->entry());
+
+ check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, pc_after - 2 * kInstrSize, check_code);
}
@@ -632,7 +614,10 @@ void Deoptimizer::EntryGenerator::Generate() {
__ mov(r5, Operand(ExternalReference::isolate_address()));
__ str(r5, MemOperand(sp, 1 * kPointerSize)); // Isolate.
// Call Deoptimizer::New().
- __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm());
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
+ }
// Preserve "deoptimizer" object in register r0 and get the input
// frame descriptor pointer to r1 (deoptimizer->input_);
@@ -686,8 +671,11 @@ void Deoptimizer::EntryGenerator::Generate() {
// r0: deoptimizer object; r1: scratch.
__ PrepareCallCFunction(1, r1);
// Call Deoptimizer::ComputeOutputFrames().
- __ CallCFunction(
- ExternalReference::compute_output_frames_function(isolate), 1);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm());
+ __ CallCFunction(
+ ExternalReference::compute_output_frames_function(isolate), 1);
+ }
__ pop(r0); // Restore deoptimizer object (class Deoptimizer).
// Replace the current (input) frame with the output frames.
@@ -703,7 +691,6 @@ void Deoptimizer::EntryGenerator::Generate() {
__ ldr(r3, MemOperand(r2, FrameDescription::frame_size_offset()));
__ bind(&inner_push_loop);
__ sub(r3, r3, Operand(sizeof(uint32_t)));
- // __ add(r6, r2, Operand(r3, LSL, 1));
__ add(r6, r2, Operand(r3));
__ ldr(r7, MemOperand(r6, FrameDescription::frame_content_offset()));
__ push(r7);
@@ -737,8 +724,9 @@ void Deoptimizer::EntryGenerator::Generate() {
__ pop(ip); // remove lr
// Set up the roots register.
- ExternalReference roots_address = ExternalReference::roots_address(isolate);
- __ mov(r10, Operand(roots_address));
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(isolate);
+ __ mov(r10, Operand(roots_array_start));
__ pop(ip); // remove pc
__ pop(r7); // get continuation, leave pc on stack
diff --git a/deps/v8/src/arm/frames-arm.h b/deps/v8/src/arm/frames-arm.h
index 26bbd82d00..1844149861 100644
--- a/deps/v8/src/arm/frames-arm.h
+++ b/deps/v8/src/arm/frames-arm.h
@@ -35,22 +35,22 @@ namespace internal {
// The ARM ABI does not specify the usage of register r9, which may be reserved
// as the static base or thread register on some platforms, in which case we
// leave it alone. Adjust the value of kR9Available accordingly:
-static const int kR9Available = 1; // 1 if available to us, 0 if reserved
+const int kR9Available = 1; // 1 if available to us, 0 if reserved
// Register list in load/store instructions
// Note that the bit values must match those used in actual instruction encoding
-static const int kNumRegs = 16;
+const int kNumRegs = 16;
// Caller-saved/arguments registers
-static const RegList kJSCallerSaved =
+const RegList kJSCallerSaved =
1 << 0 | // r0 a1
1 << 1 | // r1 a2
1 << 2 | // r2 a3
1 << 3; // r3 a4
-static const int kNumJSCallerSaved = 4;
+const int kNumJSCallerSaved = 4;
typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
@@ -60,7 +60,7 @@ int JSCallerSavedCode(int n);
// Callee-saved registers preserved when switching from C to JavaScript
-static const RegList kCalleeSaved =
+const RegList kCalleeSaved =
1 << 4 | // r4 v1
1 << 5 | // r5 v2
1 << 6 | // r6 v3
@@ -70,36 +70,45 @@ static const RegList kCalleeSaved =
1 << 10 | // r10 v7
1 << 11; // r11 v8 (fp in JavaScript code)
-static const int kNumCalleeSaved = 7 + kR9Available;
+// When calling into C++ (only for C++ calls that can't cause a GC).
+// The call code will take care of lr, fp, etc.
+const RegList kCallerSaved =
+ 1 << 0 | // r0
+ 1 << 1 | // r1
+ 1 << 2 | // r2
+ 1 << 3 | // r3
+ 1 << 9; // r9
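A RegList such as the one just defined is simply a bitmask over register
codes, which is why it can be handed directly to stm/ldm. A small sketch
(the typedef is a stand-in for V8's own):

    #include <cassert>
    #include <cstdint>

    typedef uint32_t RegList;
    const RegList kCallerSavedSketch =
        1 << 0 | 1 << 1 | 1 << 2 | 1 << 3 | 1 << 9;

    int main() {
      assert(kCallerSavedSketch == 0x20f);           // r0-r3 plus r9
      assert((kCallerSavedSketch & (1 << 9)) != 0);  // r9 is in the list
      return 0;
    }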
+
+
+const int kNumCalleeSaved = 7 + kR9Available;
// Double registers d8 to d15 are callee-saved.
-static const int kNumDoubleCalleeSaved = 8;
+const int kNumDoubleCalleeSaved = 8;
// Number of registers for which space is reserved in safepoints. Must be a
// multiple of 8.
// TODO(regis): Only 8 registers may actually be sufficient. Revisit.
-static const int kNumSafepointRegisters = 16;
+const int kNumSafepointRegisters = 16;
// Define the list of registers actually saved at safepoints.
// Note that the number of saved registers may be smaller than the reserved
// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
-static const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
-static const int kNumSafepointSavedRegisters =
- kNumJSCallerSaved + kNumCalleeSaved;
+const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
+const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
// ----------------------------------------------------
class StackHandlerConstants : public AllStatic {
public:
- static const int kNextOffset = 0 * kPointerSize;
- static const int kStateOffset = 1 * kPointerSize;
- static const int kContextOffset = 2 * kPointerSize;
- static const int kFPOffset = 3 * kPointerSize;
- static const int kPCOffset = 4 * kPointerSize;
+ static const int kNextOffset = 0 * kPointerSize;
+ static const int kCodeOffset = 1 * kPointerSize;
+ static const int kStateOffset = 2 * kPointerSize;
+ static const int kContextOffset = 3 * kPointerSize;
+ static const int kFPOffset = 4 * kPointerSize;
- static const int kSize = kPCOffset + kPointerSize;
+ static const int kSize = kFPOffset + kPointerSize;
};
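The revised offsets imply the handler layout below: the pc slot is gone and a
code slot is new. This is a layout sketch only, with void* standing in for
the actual V8 field types:

    #include <cassert>
    #include <cstddef>

    // Layout implied by the new StackHandlerConstants (illustration only).
    struct StackHandlerLayout {
      void* next;     // kNextOffset    = 0 * kPointerSize
      void* code;     // kCodeOffset    = 1 * kPointerSize (new field)
      void* state;    // kStateOffset   = 2 * kPointerSize
      void* context;  // kContextOffset = 3 * kPointerSize
      void* fp;       // kFPOffset      = 4 * kPointerSize (kPCOffset is gone)
    };

    int main() {
      assert(offsetof(StackHandlerLayout, context) == 3 * sizeof(void*));
      assert(sizeof(StackHandlerLayout) == 5 * sizeof(void*));  // kSize
      return 0;
    }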
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index 50ed8b1da7..fdd326618f 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -39,6 +39,7 @@
#include "stub-cache.h"
#include "arm/code-stubs-arm.h"
+#include "arm/macro-assembler-arm.h"
namespace v8 {
namespace internal {
@@ -46,11 +47,6 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
-static unsigned GetPropertyId(Property* property) {
- return property->id();
-}
-
-
// A patch site is a location in the code which it is possible to patch. This
// class has a number of methods to emit the code which is patchable and the
// method EmitPatchInfo to record a marker back to the patchable code. This
@@ -131,6 +127,8 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
ASSERT(info_ == NULL);
info_ = info;
scope_ = info->scope();
+ handler_table_ =
+ isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@@ -145,7 +143,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// with undefined when called as functions (without an explicit
// receiver object). r5 is zero for method calls and non-zero for
// function calls.
- if (info->is_strict_mode() || info->is_native()) {
+ if (!info->is_classic_mode() || info->is_native()) {
Label ok;
__ cmp(r5, Operand(0));
__ b(eq, &ok);
@@ -155,6 +153,11 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
__ bind(&ok);
}
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done below).
+ FrameScope frame_scope(masm_, StackFrame::MANUAL);
+
int locals_count = info->scope()->num_stack_slots();
__ Push(lr, fp, cp, r1);
@@ -200,13 +203,12 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// Load parameter from stack.
__ ldr(r0, MemOperand(fp, parameter_offset));
// Store it in the context.
- __ mov(r1, Operand(Context::SlotOffset(var->index())));
- __ str(r0, MemOperand(cp, r1));
- // Update the write barrier. This clobbers all involved
- // registers, so we have to use two more registers to avoid
- // clobbering cp.
- __ mov(r2, Operand(cp));
- __ RecordWrite(r2, Operand(r1), r3, r0);
+ MemOperand target = ContextOperand(cp, var->index());
+ __ str(r0, target);
+
+ // Update the write barrier.
+ __ RecordWriteContextSlot(
+ cp, target.offset(), r0, r3, kLRHasBeenSaved, kDontSaveFPRegs);
}
}
}
@@ -234,7 +236,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
ArgumentsAccessStub::Type type;
- if (is_strict_mode()) {
+ if (!is_classic_mode()) {
type = ArgumentsAccessStub::NEW_STRICT;
} else if (function()->has_duplicate_parameters()) {
type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
@@ -264,7 +266,10 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// constant.
if (scope()->is_function_scope() && scope()->function() != NULL) {
int ignored = 0;
- EmitDeclaration(scope()->function(), Variable::CONST, NULL, &ignored);
+ VariableProxy* proxy = scope()->function();
+ ASSERT(proxy->var()->mode() == CONST ||
+ proxy->var()->mode() == CONST_HARMONY);
+ EmitDeclaration(proxy, proxy->var()->mode(), NULL, &ignored);
}
VisitDeclarations(scope()->declarations());
}
@@ -391,7 +396,7 @@ void FullCodeGenerator::TestContext::Plug(Variable* var) const {
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
// For simplicity we always test the accumulator register.
codegen()->GetVar(result_register(), var);
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+ codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
codegen()->DoTest(this);
}
@@ -414,7 +419,7 @@ void FullCodeGenerator::StackValueContext::Plug(
void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+ codegen()->PrepareForBailoutBeforeSplit(condition(),
true,
true_label_,
false_label_);
@@ -449,7 +454,7 @@ void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+ codegen()->PrepareForBailoutBeforeSplit(condition(),
true,
true_label_,
false_label_);
@@ -508,7 +513,7 @@ void FullCodeGenerator::TestContext::DropAndPlug(int count,
// For simplicity we always test the accumulator register.
__ Drop(count);
__ Move(result_register(), reg);
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+ codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
codegen()->DoTest(this);
}
@@ -575,7 +580,7 @@ void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
void FullCodeGenerator::TestContext::Plug(bool flag) const {
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+ codegen()->PrepareForBailoutBeforeSplit(condition(),
true,
true_label_,
false_label_);
@@ -665,17 +670,20 @@ void FullCodeGenerator::SetVar(Variable* var,
ASSERT(!scratch1.is(src));
MemOperand location = VarOperand(var, scratch0);
__ str(src, location);
+
// Emit the write barrier code if the location is in the heap.
if (var->IsContextSlot()) {
- __ RecordWrite(scratch0,
- Operand(Context::SlotOffset(var->index())),
- scratch1,
- src);
+ __ RecordWriteContextSlot(scratch0,
+ location.offset(),
+ src,
+ scratch1,
+ kLRHasBeenSaved,
+ kDontSaveFPRegs);
}
}
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
bool should_normalize,
Label* if_true,
Label* if_false) {
@@ -686,13 +694,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
Label skip;
if (should_normalize) __ b(&skip);
-
- ForwardBailoutStack* current = forward_bailout_stack_;
- while (current != NULL) {
- PrepareForBailout(current->expr(), state);
- current = current->parent();
- }
-
+ PrepareForBailout(expr, TOS_REG);
if (should_normalize) {
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
__ cmp(r0, ip);
@@ -703,13 +705,15 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
- Variable::Mode mode,
+ VariableMode mode,
FunctionLiteral* function,
int* global_count) {
// If it was not possible to allocate the variable at compile time, we
// need to "declare" it at runtime to make sure it actually exists in the
// local context.
Variable* variable = proxy->var();
+ bool binding_needs_init = (function == NULL) &&
+ (mode == CONST || mode == CONST_HARMONY || mode == LET);
switch (variable->location()) {
case Variable::UNALLOCATED:
++(*global_count);
@@ -721,7 +725,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
Comment cmnt(masm_, "[ Declaration");
VisitForAccumulatorValue(function);
__ str(result_register(), StackOperand(variable));
- } else if (mode == Variable::CONST || mode == Variable::LET) {
+ } else if (binding_needs_init) {
Comment cmnt(masm_, "[ Declaration");
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ str(ip, StackOperand(variable));
@@ -746,10 +750,16 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
__ str(result_register(), ContextOperand(cp, variable->index()));
int offset = Context::SlotOffset(variable->index());
// We know that we have written a function, which is not a smi.
- __ mov(r1, Operand(cp));
- __ RecordWrite(r1, Operand(offset), r2, result_register());
+ __ RecordWriteContextSlot(cp,
+ offset,
+ result_register(),
+ r2,
+ kLRHasBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
- } else if (mode == Variable::CONST || mode == Variable::LET) {
+ } else if (binding_needs_init) {
Comment cmnt(masm_, "[ Declaration");
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ str(ip, ContextOperand(cp, variable->index()));
@@ -761,11 +771,13 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
case Variable::LOOKUP: {
Comment cmnt(masm_, "[ Declaration");
__ mov(r2, Operand(variable->name()));
- // Declaration nodes are always introduced in one of three modes.
- ASSERT(mode == Variable::VAR ||
- mode == Variable::CONST ||
- mode == Variable::LET);
- PropertyAttributes attr = (mode == Variable::CONST) ? READ_ONLY : NONE;
+ // Declaration nodes are always introduced in one of four modes.
+ ASSERT(mode == VAR ||
+ mode == CONST ||
+ mode == CONST_HARMONY ||
+ mode == LET);
+ PropertyAttributes attr = (mode == CONST || mode == CONST_HARMONY)
+ ? READ_ONLY : NONE;
__ mov(r1, Operand(Smi::FromInt(attr)));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
@@ -775,7 +787,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
__ Push(cp, r2, r1);
// Push initial value for function declaration.
VisitForStackValue(function);
- } else if (mode == Variable::CONST || mode == Variable::LET) {
+ } else if (binding_needs_init) {
__ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
__ Push(cp, r2, r1, r0);
} else {
@@ -917,11 +929,17 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&done_convert);
__ push(r0);
+ // Check for proxies.
+ Label call_runtime;
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
+ __ b(le, &call_runtime);
+
// Check cache validity in generated code. This is a fast case for
// the JSObject::IsSimpleEnum cache validity checks. If we cannot
// guarantee cache validity, call the runtime system to check cache
// validity or get the property names in a fixed array.
- Label next, call_runtime;
+ Label next;
// Preload a couple of values used in the loop.
Register empty_fixed_array_value = r6;
__ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
@@ -1000,9 +1018,16 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ jmp(&loop);
// We got a fixed array in register r0. Iterate through that.
+ Label non_proxy;
__ bind(&fixed_array);
- __ mov(r1, Operand(Smi::FromInt(0))); // Map (0) - force slow check.
- __ Push(r1, r0);
+ __ mov(r1, Operand(Smi::FromInt(1))); // Smi indicates slow check
+ __ ldr(r2, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(r2, r3, r3, LAST_JS_PROXY_TYPE);
+ __ b(gt, &non_proxy);
+ __ mov(r1, Operand(Smi::FromInt(0))); // Zero indicates proxy
+ __ bind(&non_proxy);
+ __ Push(r1, r0); // Smi and array
__ ldr(r1, FieldMemOperand(r0, FixedArray::kLengthOffset));
__ mov(r0, Operand(Smi::FromInt(0)));
__ Push(r1, r0); // Fixed array length (as smi) and initial index.
@@ -1019,18 +1044,23 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- // Get the expected map from the stack or a zero map in the
+ // Get the expected map from the stack or a smi in the
// permanent slow case into register r2.
__ ldr(r2, MemOperand(sp, 3 * kPointerSize));
// Check if the expected map still matches that of the enumerable.
- // If not, we have to filter the key.
+ // If not, we may have to filter the key.
Label update_each;
__ ldr(r1, MemOperand(sp, 4 * kPointerSize));
__ ldr(r4, FieldMemOperand(r1, HeapObject::kMapOffset));
__ cmp(r4, Operand(r2));
__ b(eq, &update_each);
+ // For proxies, no filtering is done.
+ // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
+ __ cmp(r2, Operand(Smi::FromInt(0)));
+ __ b(eq, &update_each);
+
// Convert the entry to a string or (smi) 0 if it isn't a property
// any more. If the property has been removed while iterating, we
// just skip it.
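The filtering above can be pictured with a small sketch: once the receiver's map no longer matches the cached one, every cached key is re-checked, and a key deleted mid-loop comes back as (smi) 0 and is skipped.

    // var o = { a: 1, b: 2, c: 3 };
    // for (var k in o) {
    //   delete o.c;   // map changes; when the loop reaches 'c',
    // }               // the filter returns 0 and 'c' is skipped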
@@ -1085,7 +1115,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(info->strict_mode() ? kStrictMode : kNonStrictMode);
+ FastNewClosureStub stub(info->language_mode());
__ mov(r0, Operand(info));
__ push(r0);
__ CallStub(&stub);
@@ -1116,7 +1146,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
Scope* s = scope();
while (s != NULL) {
if (s->num_heap_slots() > 0) {
- if (s->calls_eval()) {
+ if (s->calls_non_strict_eval()) {
// Check that extension is NULL.
__ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX));
__ tst(temp, temp);
@@ -1129,7 +1159,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
}
// If no outer scope calls eval, we do not need to check more
// context extensions.
- if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
+ if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
s = s->outer_scope();
}
@@ -1173,7 +1203,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
- if (s->calls_eval()) {
+ if (s->calls_non_strict_eval()) {
// Check that extension is NULL.
__ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX));
__ tst(temp, temp);
@@ -1205,15 +1235,24 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
// introducing variables. In those cases, we do not want to
// perform a runtime call for all variables in the scope
// containing the eval.
- if (var->mode() == Variable::DYNAMIC_GLOBAL) {
+ if (var->mode() == DYNAMIC_GLOBAL) {
EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
__ jmp(done);
- } else if (var->mode() == Variable::DYNAMIC_LOCAL) {
+ } else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ ldr(r0, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == Variable::CONST) {
+ if (local->mode() == CONST ||
+ local->mode() == CONST_HARMONY ||
+ local->mode() == LET) {
__ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
+ if (local->mode() == CONST) {
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
+ } else { // LET || CONST_HARMONY
+ __ b(ne, done);
+ __ mov(r0, Operand(var->name()));
+ __ push(r0);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ }
}
__ jmp(done);
}
@@ -1246,24 +1285,64 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
Comment cmnt(masm_, var->IsContextSlot()
? "Context variable"
: "Stack variable");
- if (var->mode() != Variable::LET && var->mode() != Variable::CONST) {
- context()->Plug(var);
- } else {
- // Let and const need a read barrier.
- GetVar(r0, var);
- __ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
- if (var->mode() == Variable::LET) {
- Label done;
- __ b(ne, &done);
- __ mov(r0, Operand(var->name()));
- __ push(r0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- __ bind(&done);
+ if (var->binding_needs_init()) {
+ // var->scope() may be NULL when the proxy is located in eval code and
+ // refers to a potential outside binding. Currently those bindings are
+ // always looked up dynamically, i.e. in that case
+ // var->location() == LOOKUP always holds.
+ ASSERT(var->scope() != NULL);
+
+ // Check if the binding really needs an initialization check. The check
+ // can be skipped in the following situation: we have a LET or CONST
+ // binding in harmony mode, both the Variable and the VariableProxy have
+ // the same declaration scope (i.e. they are both in global code, in the
+ // same function or in the same eval code) and the VariableProxy is in
+ // the source physically located after the initializer of the variable.
+ //
+ // We cannot skip any initialization checks for CONST in non-harmony
+ // mode because const variables may be declared but never initialized:
+ // if (false) { const x; }; var y = x;
+ //
+ // The condition on the declaration scopes is a conservative check for
+ // nested functions that access a binding and are called before the
+ // binding is initialized:
+ // function() { f(); let x = 1; function f() { x = 2; } }
+ //
+ bool skip_init_check;
+ if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
+ skip_init_check = false;
} else {
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
+ // Check that we always have valid source positions.
+ ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
+ ASSERT(proxy->position() != RelocInfo::kNoPosition);
+ skip_init_check = var->mode() != CONST &&
+ var->initializer_position() < proxy->position();
+ }
+
+ if (!skip_init_check) {
+ // Let and const need a read barrier.
+ GetVar(r0, var);
+ __ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
+ if (var->mode() == LET || var->mode() == CONST_HARMONY) {
+ // Throw a reference error when using an uninitialized let/const
+ // binding in harmony mode.
+ Label done;
+ __ b(ne, &done);
+ __ mov(r0, Operand(var->name()));
+ __ push(r0);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ bind(&done);
+ } else {
+ // Uninitialized const bindings outside of harmony mode are unholed.
+ ASSERT(var->mode() == CONST);
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
+ }
+ context()->Plug(r0);
+ break;
}
- context()->Plug(r0);
}
+ context()->Plug(var);
break;
}
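A sketch of the two cases skip_init_check separates, assuming harmony block scoping (use() is an illustrative placeholder):

    // let x = 1;
    // use(x);                      // proxy after the initializer, same
    //                              // declaration scope: hole check elided
    //
    // f();                         // may observe 'y' before its initializer
    // let y = 2;                   // has run, so loads of 'y' inside f
    // function f() { return y; }   // keep the hole check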
@@ -1337,10 +1416,11 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
+ Handle<FixedArray> constant_properties = expr->constant_properties();
__ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
__ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
- __ mov(r1, Operand(expr->constant_properties()));
+ __ mov(r1, Operand(constant_properties));
int flags = expr->fast_elements()
? ObjectLiteral::kFastElements
: ObjectLiteral::kNoFlags;
@@ -1349,10 +1429,15 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
: ObjectLiteral::kNoFlags;
__ mov(r0, Operand(Smi::FromInt(flags)));
__ Push(r3, r2, r1, r0);
+ int properties_count = constant_properties->length() / 2;
if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else {
+ } else if (flags != ObjectLiteral::kFastElements ||
+ properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
+ } else {
+ FastCloneShallowObjectStub stub(properties_count);
+ __ CallStub(&stub);
}
// If result_saved is true the result is on top of the stack. If
@@ -1386,9 +1471,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForAccumulatorValue(value);
__ mov(r2, Operand(key->handle()));
__ ldr(r1, MemOperand(sp));
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->StoreIC_Initialize()
+ : isolate()->builtins()->StoreIC_Initialize_Strict();
__ Call(ic, RelocInfo::CODE_TARGET, key->id());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
@@ -1447,13 +1532,20 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
ZoneList<Expression*>* subexprs = expr->values();
int length = subexprs->length();
+ Handle<FixedArray> constant_elements = expr->constant_elements();
+ ASSERT_EQ(2, constant_elements->length());
+ ElementsKind constant_elements_kind =
+ static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+ bool has_fast_elements = constant_elements_kind == FAST_ELEMENTS;
+ Handle<FixedArrayBase> constant_elements_values(
+ FixedArrayBase::cast(constant_elements->get(1)));
__ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
__ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
- __ mov(r1, Operand(expr->constant_elements()));
+ __ mov(r1, Operand(constant_elements));
__ Push(r3, r2, r1);
- if (expr->constant_elements()->map() ==
+ if (has_fast_elements && constant_elements_values->map() ==
isolate()->heap()->fixed_cow_array_map()) {
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
@@ -1465,8 +1557,13 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
+ ASSERT(constant_elements_kind == FAST_ELEMENTS ||
+ constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+ FLAG_smi_only_arrays);
+ FastCloneShallowArrayStub::Mode mode = has_fast_elements
+ ? FastCloneShallowArrayStub::CLONE_ELEMENTS
+ : FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
+ FastCloneShallowArrayStub stub(mode, length);
__ CallStub(&stub);
}
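The stub/runtime choice above depends only on the boilerplate's elements kind and length. A hedged sketch of the mode selection (exact kinds depend on FLAG_smi_only_arrays):

    // mode = has_fast_elements ? CLONE_ELEMENTS : CLONE_ANY_ELEMENTS;
    // e.g. a smi-only boilerplate such as [1, 2, 3] would take
    // CLONE_ANY_ELEMENTS, while a FAST_ELEMENTS boilerplate such as
    // [1, "x"] takes CLONE_ELEMENTS; over-long literals fall back to
    // Runtime::kCreateArrayLiteralShallow.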
@@ -1489,15 +1586,23 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
VisitForAccumulatorValue(subexpr);
- // Store the subexpression value in the array's elements.
- __ ldr(r1, MemOperand(sp)); // Copy of array literal.
- __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
- int offset = FixedArray::kHeaderSize + (i * kPointerSize);
- __ str(result_register(), FieldMemOperand(r1, offset));
-
- // Update the write barrier for the array store with r0 as the scratch
- // register.
- __ RecordWrite(r1, Operand(offset), r2, result_register());
+ if (constant_elements_kind == FAST_ELEMENTS) {
+ int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+ __ ldr(r6, MemOperand(sp)); // Copy of array literal.
+ __ ldr(r1, FieldMemOperand(r6, JSObject::kElementsOffset));
+ __ str(result_register(), FieldMemOperand(r1, offset));
+ // Update the write barrier for the array store.
+ __ RecordWriteField(r1, offset, result_register(), r2,
+ kLRHasBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
+ } else {
+ __ ldr(r1, MemOperand(sp)); // Copy of array literal.
+ __ ldr(r2, FieldMemOperand(r1, JSObject::kMapOffset));
+ __ mov(r3, Operand(Smi::FromInt(i)));
+ __ mov(r4, Operand(Smi::FromInt(expr->literal_index())));
+ StoreArrayLiteralElementStub stub;
+ __ CallStub(&stub);
+ }
PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
}
@@ -1629,7 +1734,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
__ mov(r2, Operand(key->handle()));
// Call load IC. It has arguments receiver and property name r0 and r2.
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ Call(ic, RelocInfo::CODE_TARGET, prop->id());
}
@@ -1637,7 +1742,7 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
// Call keyed load IC. It has arguments key and receiver in r0 and r1.
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ Call(ic, RelocInfo::CODE_TARGET, prop->id());
}
@@ -1785,9 +1890,9 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
__ mov(r1, r0);
__ pop(r0); // Restore value.
__ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->StoreIC_Initialize()
+ : isolate()->builtins()->StoreIC_Initialize_Strict();
__ Call(ic);
break;
}
@@ -1798,9 +1903,9 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
__ mov(r1, r0);
__ pop(r2);
__ pop(r0); // Restore value.
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize()
+ : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
__ Call(ic);
break;
}
@@ -1816,9 +1921,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// Global var, const, or let.
__ mov(r2, Operand(var->name()));
__ ldr(r1, GlobalObjectOperand());
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->StoreIC_Initialize()
+ : isolate()->builtins()->StoreIC_Initialize_Strict();
__ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
} else if (op == Token::INIT_CONST) {
@@ -1844,12 +1949,12 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
}
- } else if (var->mode() == Variable::LET && op != Token::INIT_LET) {
+ } else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
if (var->IsLookupSlot()) {
__ push(r0); // Value.
__ mov(r1, Operand(var->name()));
- __ mov(r0, Operand(Smi::FromInt(strict_mode_flag())));
+ __ mov(r0, Operand(Smi::FromInt(language_mode())));
__ Push(cp, r1, r0);  // Context, name, language mode.
__ CallRuntime(Runtime::kStoreContextSlot, 4);
} else {
@@ -1869,12 +1974,14 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// RecordWrite may destroy all its register arguments.
__ mov(r3, result_register());
int offset = Context::SlotOffset(var->index());
- __ RecordWrite(r1, Operand(offset), r2, r3);
+ __ RecordWriteContextSlot(
+ r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
}
}
- } else if (var->mode() != Variable::CONST) {
- // Assignment to var or initializing assignment to let.
+ } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
+ // Assignment to var or initializing assignment to let/const
+ // in harmony mode.
if (var->IsStackAllocated() || var->IsContextSlot()) {
MemOperand location = VarOperand(var, r1);
if (FLAG_debug_code && op == Token::INIT_LET) {
@@ -1887,13 +1994,15 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ str(r0, location);
if (var->IsContextSlot()) {
__ mov(r3, r0);
- __ RecordWrite(r1, Operand(Context::SlotOffset(var->index())), r2, r3);
+ int offset = Context::SlotOffset(var->index());
+ __ RecordWriteContextSlot(
+ r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
}
} else {
ASSERT(var->IsLookupSlot());
__ push(r0); // Value.
__ mov(r1, Operand(var->name()));
- __ mov(r0, Operand(Smi::FromInt(strict_mode_flag())));
+ __ mov(r0, Operand(Smi::FromInt(language_mode())));
__ Push(cp, r1, r0);  // Context, name, language mode.
__ CallRuntime(Runtime::kStoreContextSlot, 4);
}
@@ -1930,9 +2039,9 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ pop(r1);
}
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->StoreIC_Initialize()
+ : isolate()->builtins()->StoreIC_Initialize_Strict();
__ Call(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
@@ -1976,9 +2085,9 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
__ pop(r2);
}
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize()
+ : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
__ Call(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
@@ -2083,6 +2192,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
// Record source position for debugger.
SetSourcePosition(expr->position());
CallFunctionStub stub(arg_count, flags);
+ __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
@@ -2091,8 +2201,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
}
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
- int arg_count) {
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
// Push copy of the first argument or undefined if it doesn't exist.
if (arg_count > 0) {
__ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
@@ -2101,22 +2210,20 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
}
__ push(r1);
- // Push the receiver of the enclosing function and do runtime call.
+ // Push the receiver of the enclosing function.
int receiver_offset = 2 + info_->scope()->num_parameters();
__ ldr(r1, MemOperand(fp, receiver_offset * kPointerSize));
__ push(r1);
- // Push the strict mode flag. In harmony mode every eval call
- // is a strict mode eval call.
- StrictModeFlag strict_mode = strict_mode_flag();
- if (FLAG_harmony_block_scoping) {
- strict_mode = kStrictMode;
- }
- __ mov(r1, Operand(Smi::FromInt(strict_mode)));
+ // Push the language mode.
+ __ mov(r1, Operand(Smi::FromInt(language_mode())));
+ __ push(r1);
+
+ // Push the start position of the scope the call resides in.
+ __ mov(r1, Operand(Smi::FromInt(scope()->start_position())));
__ push(r1);
- __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
- ? Runtime::kResolvePossiblyDirectEvalNoLookup
- : Runtime::kResolvePossiblyDirectEval, 4);
+ // Do the runtime call.
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
}
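After this change Runtime::kResolvePossiblyDirectEval always receives the same five values; the stack as built above, bottom first:

    //   callee          (copy of the function, pushed by the caller)
    //   argument 0      (or undefined when the call has no arguments)
    //   receiver        (of the enclosing function)
    //   language mode   (as a smi)
    //   scope start position (as a smi)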
@@ -2150,28 +2257,11 @@ void FullCodeGenerator::VisitCall(Call* expr) {
VisitForStackValue(args->at(i));
}
- // If we know that eval can only be shadowed by eval-introduced
- // variables we attempt to load the global eval function directly
- // in generated code. If we succeed, there is no need to perform a
- // context lookup in the runtime system.
- Label done;
- Variable* var = proxy->var();
- if (!var->IsUnallocated() && var->mode() == Variable::DYNAMIC_GLOBAL) {
- Label slow;
- EmitLoadGlobalCheckExtensions(var, NOT_INSIDE_TYPEOF, &slow);
- // Push the function and resolve eval.
- __ push(r0);
- EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count);
- __ jmp(&done);
- __ bind(&slow);
- }
-
// Push a copy of the function (found below the arguments) and
// resolve eval.
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ push(r1);
- EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count);
- __ bind(&done);
+ EmitResolvePossiblyDirectEval(arg_count);
// The runtime call returns a pair of values in r0 (function) and
// r1 (receiver). Touch up the stack with the right values.
@@ -2182,6 +2272,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// Record source position for debugger.
SetSourcePosition(expr->position());
CallFunctionStub stub(arg_count, RECEIVER_MIGHT_BE_IMPLICIT);
+ __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
@@ -2295,7 +2386,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
}
-void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2307,7 +2399,7 @@ void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ tst(r0, Operand(kSmiTagMask));
Split(eq, if_true, if_false, fall_through);
@@ -2315,7 +2407,8 @@ void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2327,7 +2420,7 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ tst(r0, Operand(kSmiTagMask | 0x80000000));
Split(eq, if_true, if_false, fall_through);
@@ -2335,7 +2428,8 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2360,14 +2454,15 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
__ cmp(r1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ b(lt, if_false);
__ cmp(r1, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(le, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2381,14 +2476,15 @@ void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
__ JumpIfSmi(r0, if_false);
__ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(ge, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2404,7 +2500,7 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
__ tst(r1, Operand(1 << Map::kIsUndetectable));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(ne, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2412,8 +2508,8 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
- ZoneList<Expression*>* args) {
-
+ CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2492,12 +2588,13 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ strb(r2, FieldMemOperand(r1, Map::kBitField2Offset));
__ jmp(if_true);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2511,14 +2608,15 @@ void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
__ JumpIfSmi(r0, if_false);
__ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2532,14 +2630,15 @@ void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
__ JumpIfSmi(r0, if_false);
__ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -2553,7 +2652,7 @@ void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
__ JumpIfSmi(r0, if_false);
__ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2561,8 +2660,8 @@ void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
-void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
+void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
Label materialize_true, materialize_false;
Label* if_true = NULL;
@@ -2585,14 +2684,15 @@ void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
__ bind(&check_frame_marker);
__ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
__ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
// Load the two objects into registers and perform the comparison.
@@ -2608,14 +2708,15 @@ void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
__ pop(r1);
__ cmp(r0, r1);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
// ArgumentsAccessStub expects the key in r1 and the formal
@@ -2629,9 +2730,8 @@ void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
-
+void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
Label exit;
// Get the number of formal parameters.
__ mov(r0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
@@ -2651,7 +2751,8 @@ void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
Label done, null, function, non_function_constructor;
@@ -2662,20 +2763,24 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
// Check that the object is a JS object but take special care of JS
// functions to make sure they have 'Function' as their class.
+ // Assume that there are only two callable types, and one of them is at
+ // either end of the type range for JS object types. Saves extra comparisons.
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ CompareObjectType(r0, r0, r1, FIRST_SPEC_OBJECT_TYPE);
// Map is now in r0.
__ b(lt, &null);
-
- // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
- // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
- // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
- STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
- STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
- LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
- __ cmp(r1, Operand(FIRST_CALLABLE_SPEC_OBJECT_TYPE));
- __ b(ge, &function);
-
- // Check if the constructor in the map is a function.
+ STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ FIRST_SPEC_OBJECT_TYPE + 1);
+ __ b(eq, &function);
+
+ __ cmp(r1, Operand(LAST_SPEC_OBJECT_TYPE));
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_SPEC_OBJECT_TYPE - 1);
+ __ b(eq, &function);
+ // Assume that there is no larger type.
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
+
+ // Check if the constructor in the map is a JS function.
__ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset));
__ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
__ b(ne, &non_function_constructor);
@@ -2707,7 +2812,7 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitLog(CallRuntime* expr) {
// Conditionally generate a log call.
// Args:
// 0 (literal string): The type of logging (corresponds to the flags).
@@ -2715,6 +2820,7 @@ void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
// 1 (string): Format string. Access the string at argument index 2
// with '%2s' (see Logger::LogRuntime for all the formats).
// 2 (array): Arguments to the format string.
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 3);
if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
VisitForStackValue(args->at(1));
@@ -2728,9 +2834,8 @@ void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
-
+void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
Label slow_allocate_heapnumber;
Label heapnumber_allocated;
@@ -2750,7 +2855,8 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
// ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
if (CpuFeatures::IsSupported(VFP3)) {
__ PrepareCallCFunction(1, r0);
- __ mov(r0, Operand(ExternalReference::isolate_address()));
+ __ ldr(r0, ContextOperand(context_register(), Context::GLOBAL_INDEX));
+ __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
__ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
CpuFeatures::Scope scope(VFP3);
@@ -2770,8 +2876,9 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
__ mov(r0, r4);
} else {
__ PrepareCallCFunction(2, r0);
+ __ ldr(r1, ContextOperand(context_register(), Context::GLOBAL_INDEX));
__ mov(r0, Operand(r4));
- __ mov(r1, Operand(ExternalReference::isolate_address()));
+ __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalContextOffset));
__ CallCFunction(
ExternalReference::fill_heap_number_with_random_function(isolate()), 2);
}
@@ -2780,9 +2887,10 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
SubStringStub stub;
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -2792,9 +2900,10 @@ void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
RegExpExecStub stub;
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 4);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -2805,9 +2914,9 @@ void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
-
VisitForAccumulatorValue(args->at(0)); // Load the object.
Label done;
@@ -2823,8 +2932,9 @@ void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
// Load the arguments on the stack and call the runtime function.
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -2834,9 +2944,9 @@ void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
-
VisitForStackValue(args->at(0)); // Load the object.
VisitForAccumulatorValue(args->at(1)); // Load the value.
__ pop(r1); // r0 = value. r1 = object.
@@ -2853,16 +2963,18 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
__ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
// Update the write barrier. Save the value as it will be
// overwritten by the write barrier code and is needed afterward.
- __ RecordWrite(r1, Operand(JSValue::kValueOffset - kHeapObjectTag), r2, r3);
+ __ mov(r2, r0);
+ __ RecordWriteField(
+ r1, JSValue::kValueOffset, r2, r3, kLRHasBeenSaved, kDontSaveFPRegs);
__ bind(&done);
context()->Plug(r0);
}
-void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 1);
-
// Load the argument on the stack and call the stub.
VisitForStackValue(args->at(0));
@@ -2872,9 +2984,9 @@ void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
-
VisitForAccumulatorValue(args->at(0));
Label done;
@@ -2890,15 +3002,14 @@ void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
-
VisitForStackValue(args->at(0));
VisitForAccumulatorValue(args->at(1));
Register object = r1;
Register index = r0;
- Register scratch = r2;
Register result = r3;
__ pop(object);
@@ -2908,7 +3019,6 @@ void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
Label done;
StringCharCodeAtGenerator generator(object,
index,
- scratch,
result,
&need_conversion,
&need_conversion,
@@ -2937,16 +3047,15 @@ void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
-
VisitForStackValue(args->at(0));
VisitForAccumulatorValue(args->at(1));
Register object = r1;
Register index = r0;
- Register scratch1 = r2;
- Register scratch2 = r3;
+ Register scratch = r3;
Register result = r0;
__ pop(object);
@@ -2956,8 +3065,7 @@ void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
Label done;
StringCharAtGenerator generator(object,
index,
- scratch1,
- scratch2,
+ scratch,
result,
&need_conversion,
&need_conversion,
@@ -2986,9 +3094,9 @@ void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
-
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -2998,9 +3106,9 @@ void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
-
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -3010,10 +3118,11 @@ void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSin(CallRuntime* expr) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::TAGGED);
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallStub(&stub);
@@ -3021,10 +3130,23 @@ void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathCos(CallRuntime* expr) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::TAGGED);
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+ VisitForStackValue(args->at(0));
+ __ CallStub(&stub);
+ context()->Plug(r0);
+}
+
+
+void FullCodeGenerator::EmitMathTan(CallRuntime* expr) {
+ // Load the argument on the stack and call the stub.
+ TranscendentalCacheStub stub(TranscendentalCache::TAN,
+ TranscendentalCacheStub::TAGGED);
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallStub(&stub);
@@ -3032,10 +3154,11 @@ void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::LOG,
TranscendentalCacheStub::TAGGED);
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallStub(&stub);
@@ -3043,8 +3166,9 @@ void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
// Load the argument on the stack and call the runtime function.
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallRuntime(Runtime::kMath_sqrt, 1);
@@ -3052,7 +3176,8 @@ void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() >= 2);
int arg_count = args->length() - 2; // 2 ~ receiver and function.
@@ -3061,18 +3186,31 @@ void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
}
VisitForAccumulatorValue(args->last()); // Function.
+ // Check for proxy.
+ Label proxy, done;
+ __ CompareObjectType(r0, r1, r1, JS_FUNCTION_PROXY_TYPE);
+ __ b(eq, &proxy);
+
// InvokeFunction requires the function in r1. Move it in there.
__ mov(r1, result_register());
ParameterCount count(arg_count);
__ InvokeFunction(r1, count, CALL_FUNCTION,
NullCallWrapper(), CALL_AS_METHOD);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ jmp(&done);
+
+ __ bind(&proxy);
+ __ push(r0);
+ __ CallRuntime(Runtime::kCall, args->length());
+ __ bind(&done);
+
context()->Plug(r0);
}
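%_CallFunction now tolerates function proxies: a JS_FUNCTION_PROXY_TYPE callee skips InvokeFunction and is dispatched through Runtime::kCall instead. A sketch of the two shapes (Proxy.createFunction is the harmony-era API assumed here):

    // %_CallFunction(recv, arg, f)       // plain JSFunction:
    //                                    //   InvokeFunction fast path
    // %_CallFunction(recv, arg, fproxy)  // function proxy:
    //                                    //   Runtime::kCall fallback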
-void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
RegExpConstructResultStub stub;
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -3082,7 +3220,8 @@ void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSwapElements(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
@@ -3141,16 +3280,31 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
__ str(scratch1, MemOperand(index2, 0));
__ str(scratch2, MemOperand(index1, 0));
- Label new_space;
- __ InNewSpace(elements, scratch1, eq, &new_space);
+ Label no_remembered_set;
+ __ CheckPageFlag(elements,
+ scratch1,
+ 1 << MemoryChunk::SCAN_ON_SCAVENGE,
+ ne,
+ &no_remembered_set);
// Possible optimization: do a check that both values are Smis
// (OR them together and test against the Smi mask).
- __ mov(scratch1, elements);
- __ RecordWriteHelper(elements, index1, scratch2);
- __ RecordWriteHelper(scratch1, index2, scratch2); // scratch1 holds elements.
+ // We are swapping two objects in an array and the incremental marker never
+ // pauses in the middle of scanning a single object. Therefore the
+ // incremental marker is not disturbed, so we don't need to call the
+ // RecordWrite stub that notifies the incremental marker.
+ __ RememberedSetHelper(elements,
+ index1,
+ scratch2,
+ kDontSaveFPRegs,
+ MacroAssembler::kFallThroughAtEnd);
+ __ RememberedSetHelper(elements,
+ index2,
+ scratch2,
+ kDontSaveFPRegs,
+ MacroAssembler::kFallThroughAtEnd);
- __ bind(&new_space);
+ __ bind(&no_remembered_set);
// We are done. Drop elements from the stack, and return undefined.
__ Drop(3);
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
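A paraphrase of the barrier logic above, under the diff's own assumption that the incremental marker never pauses inside a single object (page() and remembered_set_insert() are illustrative names):

    // if (!(page(elements)->flags & (1 << SCAN_ON_SCAVENGE))) {
    //   remembered_set_insert(&elements[index1]);  // RememberedSetHelper
    //   remembered_set_insert(&elements[index2]);  // RememberedSetHelper
    // }
    // No RecordWrite stub call is needed: the swap only moves values
    // that were already reachable from 'elements'.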
@@ -3164,9 +3318,9 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
-
ASSERT_NE(NULL, args->at(0)->AsLiteral());
int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
@@ -3215,7 +3369,8 @@ void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsRegExpEquivalent(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
Register right = r0;
@@ -3255,7 +3410,8 @@ void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
VisitForAccumulatorValue(args->at(0));
Label materialize_true, materialize_false;
@@ -3267,14 +3423,15 @@ void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
__ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset));
__ tst(r0, Operand(String::kContainsCachedArrayIndexMask));
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
@@ -3289,12 +3446,12 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
Label bailout, done, one_char_separator, long_separator,
non_trivial_array, not_size_one_array, loop,
empty_separator_loop, one_char_separator_loop,
one_char_separator_loop_entry, long_separator_loop;
-
+ ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
VisitForStackValue(args->at(1));
VisitForAccumulatorValue(args->at(0));
@@ -3571,7 +3728,9 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- __ mov(r1, Operand(Smi::FromInt(strict_mode_flag())));
+ StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
+ ? kNonStrictMode : kStrictMode;
+ __ mov(r1, Operand(Smi::FromInt(strict_mode_flag)));
__ push(r1);
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(r0);
@@ -3579,7 +3738,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode
// but "delete this" is allowed.
- ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this());
+ ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
if (var->IsUnallocated()) {
__ ldr(r2, GlobalObjectOperand());
__ mov(r1, Operand(var->name()));
@@ -3622,18 +3781,35 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// Unary NOT has no side effects so it's only necessary to visit the
// subexpression. Match the optimizing compiler by not branching.
VisitForEffect(expr->expression());
+ } else if (context()->IsTest()) {
+ const TestContext* test = TestContext::cast(context());
+ // The labels are swapped for the recursive call.
+ VisitForControl(expr->expression(),
+ test->false_label(),
+ test->true_label(),
+ test->fall_through());
+ context()->Plug(test->true_label(), test->false_label());
} else {
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
-
- // Notice that the labels are swapped.
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_false, &if_true, &fall_through);
- if (context()->IsTest()) ForwardBailoutToChild(expr);
- VisitForControl(expr->expression(), if_true, if_false, fall_through);
- context()->Plug(if_false, if_true); // Labels swapped.
+ // We handle value contexts explicitly rather than simply visiting
+ // for control and plugging the control flow into the context,
+ // because we need to prepare a pair of extra administrative AST ids
+ // for the optimizing compiler.
+ ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
+ Label materialize_true, materialize_false, done;
+ VisitForControl(expr->expression(),
+ &materialize_false,
+ &materialize_true,
+ &materialize_true);
+ __ bind(&materialize_true);
+ PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+ __ LoadRoot(r0, Heap::kTrueValueRootIndex);
+ if (context()->IsStackValue()) __ push(r0);
+ __ jmp(&done);
+ __ bind(&materialize_false);
+ PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+ __ LoadRoot(r0, Heap::kFalseValueRootIndex);
+ if (context()->IsStackValue()) __ push(r0);
+ __ bind(&done);
}
break;
}
@@ -3826,9 +4002,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
__ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
__ pop(r1);
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->StoreIC_Initialize()
+ : isolate()->builtins()->StoreIC_Initialize_Strict();
__ Call(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
@@ -3843,9 +4019,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case KEYED_PROPERTY: {
__ pop(r1); // Key.
__ pop(r2); // Receiver.
- Handle<Code> ic = is_strict_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
+ Handle<Code> ic = is_classic_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize()
+ : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
__ Call(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
@@ -3892,20 +4068,25 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
context()->Plug(r0);
} else {
// This expression cannot throw a reference error at the top level.
- VisitInCurrentContext(expr);
+ VisitInDuplicateContext(expr);
}
}
void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
- Handle<String> check,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
+ Expression* sub_expr,
+ Handle<String> check) {
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
{ AccumulatorValueContext context(this);
- VisitForTypeofValue(expr);
+ VisitForTypeofValue(sub_expr);
}
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
if (check->Equals(isolate()->heap()->number_symbol())) {
__ JumpIfSmi(r0, if_true);
@@ -3942,9 +4123,11 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
} else if (check->Equals(isolate()->heap()->function_symbol())) {
__ JumpIfSmi(r0, if_false);
- __ CompareObjectType(r0, r1, r0, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
- Split(ge, if_true, if_false, fall_through);
-
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ __ CompareObjectType(r0, r0, r1, JS_FUNCTION_TYPE);
+ __ b(eq, if_true);
+ __ cmp(r1, Operand(JS_FUNCTION_PROXY_TYPE));
+ Split(eq, if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->object_symbol())) {
__ JumpIfSmi(r0, if_false);
if (!FLAG_harmony_typeof) {
@@ -3963,18 +4146,7 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
} else {
if (if_false != fall_through) __ jmp(if_false);
}
-}
-
-
-void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- VisitForAccumulatorValue(expr);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-
- __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
- Split(eq, if_true, if_false, fall_through);
+ context()->Plug(if_true, if_false);
}
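
The STATIC_ASSERT in the hunk above documents why two compares suffice here: typeof must answer "function" for exactly two instance types, plain functions and callable function proxies. A minimal sketch of the predicate being emitted, using hypothetical enum values rather than V8's real instance-type numbering:

    #include <cassert>

    // Hypothetical stand-ins for V8 instance types; only identity matters here.
    enum InstanceType {
      JS_FUNCTION_TYPE,
      JS_FUNCTION_PROXY_TYPE,
      JS_OBJECT_TYPE
    };

    // Mirrors the emitted sequence: branch to if_true on JS_FUNCTION_TYPE,
    // otherwise compare once more against JS_FUNCTION_PROXY_TYPE.
    bool TypeofIsFunction(InstanceType type) {
      return type == JS_FUNCTION_TYPE || type == JS_FUNCTION_PROXY_TYPE;
    }

    int main() {
      assert(TypeofIsFunction(JS_FUNCTION_TYPE));
      assert(TypeofIsFunction(JS_FUNCTION_PROXY_TYPE));
      assert(!TypeofIsFunction(JS_OBJECT_TYPE));
      return 0;
    }
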
@@ -3982,9 +4154,12 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
SetSourcePosition(expr->position());
+ // First we try a fast inlined version of the compare when one of
+ // the operands is a literal.
+ if (TryLiteralCompare(expr)) return;
+
// Always perform the comparison for its control flow. Pack the result
// into the expression's context after the comparison is performed.
-
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
@@ -3992,20 +4167,13 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- // First we try a fast inlined version of the compare when one of
- // the operands is a literal.
- if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
- context()->Plug(if_true, if_false);
- return;
- }
-
Token::Value op = expr->op();
VisitForStackValue(expr->left());
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
__ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
- PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+ PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
__ cmp(r0, ip);
Split(eq, if_true, if_false, fall_through);
@@ -4015,7 +4183,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
VisitForStackValue(expr->right());
InstanceofStub stub(InstanceofStub::kNoFlags);
__ CallStub(&stub);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
// The stub returns 0 for true.
__ tst(r0, r0);
Split(eq, if_true, if_false, fall_through);
@@ -4029,33 +4197,25 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::EQ_STRICT:
case Token::EQ:
cond = eq;
- __ pop(r1);
break;
case Token::LT:
cond = lt;
- __ pop(r1);
break;
case Token::GT:
- // Reverse left and right sides to obtain ECMA-262 conversion order.
- cond = lt;
- __ mov(r1, result_register());
- __ pop(r0);
+ cond = gt;
break;
case Token::LTE:
- // Reverse left and right sides to obtain ECMA-262 conversion order.
- cond = ge;
- __ mov(r1, result_register());
- __ pop(r0);
+ cond = le;
break;
case Token::GTE:
cond = ge;
- __ pop(r1);
break;
case Token::IN:
case Token::INSTANCEOF:
default:
UNREACHABLE();
}
+ __ pop(r1);
bool inline_smi_code = ShouldInlineSmiCase(op);
JumpPatchSite patch_site(masm_);
@@ -4073,7 +4233,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Handle<Code> ic = CompareIC::GetUninitialized(op);
__ Call(ic, RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ cmp(r0, Operand(0));
Split(cond, if_true, if_false, fall_through);
}
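
The switch above no longer swaps operands for GT and LTE. The generic CompareIC leaves a strcmp-style integer in r0 (negative, zero, or positive), so after `cmp r0, #0` each token can use its natural condition directly. A sketch of that mapping, assuming such a flag value:

    #include <cassert>

    enum Token { EQ, LT, GT, LTE, GTE };

    // flags models the value the CompareIC stub returns in r0:
    // negative if left < right, zero if equal, positive if left > right.
    bool Split(Token op, int flags) {
      switch (op) {
        case EQ:  return flags == 0;
        case LT:  return flags < 0;
        case GT:  return flags > 0;   // previously emitted as a reversed lt
        case LTE: return flags <= 0;  // previously emitted as a reversed ge
        case GTE: return flags >= 0;
      }
      return false;
    }

    int main() {
      assert(Split(GT, 1) && !Split(GT, -1));
      assert(Split(LTE, -1) && Split(LTE, 0) && !Split(LTE, 1));
      return 0;
    }
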
@@ -4085,8 +4245,9 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
-void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
- Comment cmnt(masm_, "[ CompareToNull");
+void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
+ Expression* sub_expr,
+ NilValue nil) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
@@ -4094,15 +4255,21 @@ void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- VisitForAccumulatorValue(expr->expression());
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- __ LoadRoot(r1, Heap::kNullValueRootIndex);
+ VisitForAccumulatorValue(sub_expr);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Heap::RootListIndex nil_value = nil == kNullValue ?
+ Heap::kNullValueRootIndex :
+ Heap::kUndefinedValueRootIndex;
+ __ LoadRoot(r1, nil_value);
__ cmp(r0, r1);
- if (expr->is_strict()) {
+ if (expr->op() == Token::EQ_STRICT) {
Split(eq, if_true, if_false, fall_through);
} else {
+ Heap::RootListIndex other_nil_value = nil == kNullValue ?
+ Heap::kUndefinedValueRootIndex :
+ Heap::kNullValueRootIndex;
__ b(eq, if_true);
- __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(r1, other_nil_value);
__ cmp(r0, r1);
__ b(eq, if_true);
__ JumpIfSmi(r0, if_false);
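
EmitLiteralCompareNil generalizes the old null-only comparison: under === only the named nil value matches, while under == null and undefined are interchangeable and undetectable objects also compare true (the path the code falls through to above). A sketch of those semantics as a plain C++ model, not V8 code:

    #include <cassert>

    enum NilValue { kNullValue, kUndefinedValue };

    struct Value {
      bool is_null;
      bool is_undefined;
      bool is_undetectable;  // e.g. document.all-style host objects
    };

    bool CompareNil(const Value& v, NilValue nil, bool strict) {
      bool matches_nil = (nil == kNullValue) ? v.is_null : v.is_undefined;
      if (strict) return matches_nil;  // === checks a single root value
      // == accepts either nil value, plus undetectable objects.
      return v.is_null || v.is_undefined || v.is_undetectable;
    }

    int main() {
      Value undef = {false, true, false};
      assert(CompareNil(undef, kNullValue, false));   // undefined == null
      assert(!CompareNil(undef, kNullValue, true));   // undefined !== null
      return 0;
    }
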
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index 2e49cae928..f8e4bbb6ba 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -208,7 +208,8 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
// Update the write barrier. Make sure not to clobber the value.
__ mov(scratch1, value);
- __ RecordWrite(elements, scratch2, scratch1);
+ __ RecordWrite(
+ elements, scratch2, scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs);
}
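
The new RecordWrite arguments (kLRHasNotBeenSaved, kDontSaveFPRegs) accompany 3.7's incremental marking: every pointer store into the heap must tell the collector which slot changed so it can be revisited. A minimal model of such a barrier, assuming a simple remembered set rather than V8's real machinery:

    #include <cassert>
    #include <set>

    struct HeapObject;
    static std::set<HeapObject**> remembered_set;  // slots to revisit later

    // Store value into *slot and record the slot, which is the guarantee
    // the RecordWrite calls in this diff provide to the incremental marker.
    void WriteWithBarrier(HeapObject** slot, HeapObject* value) {
      *slot = value;
      remembered_set.insert(slot);
    }

    int main() {
      HeapObject* field = 0;
      WriteWithBarrier(&field, 0);
      assert(remembered_set.count(&field) == 1);
      return 0;
    }
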
@@ -381,10 +382,10 @@ Object* CallIC_Miss(Arguments args);
// The generated code does not accept smi keys.
// The generated code falls through if both probes miss.
-static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
- int argc,
- Code::Kind kind,
- Code::ExtraICState extra_ic_state) {
+void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
+ int argc,
+ Code::Kind kind,
+ Code::ExtraICState extra_state) {
// ----------- S t a t e -------------
// -- r1 : receiver
// -- r2 : name
@@ -394,7 +395,7 @@ static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
// Probe the stub cache.
Code::Flags flags = Code::ComputeFlags(kind,
MONOMORPHIC,
- extra_ic_state,
+ extra_state,
NORMAL,
argc);
Isolate::Current()->stub_cache()->GenerateProbe(
@@ -463,7 +464,7 @@ static void GenerateFunctionTailCall(MacroAssembler* masm,
}
-static void GenerateCallNormal(MacroAssembler* masm, int argc) {
+void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
@@ -485,10 +486,10 @@ static void GenerateCallNormal(MacroAssembler* masm, int argc) {
}
-static void GenerateCallMiss(MacroAssembler* masm,
- int argc,
- IC::UtilityId id,
- Code::ExtraICState extra_ic_state) {
+void CallICBase::GenerateMiss(MacroAssembler* masm,
+ int argc,
+ IC::UtilityId id,
+ Code::ExtraICState extra_state) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
@@ -504,21 +505,22 @@ static void GenerateCallMiss(MacroAssembler* masm,
// Get the receiver of the function from the stack.
__ ldr(r3, MemOperand(sp, argc * kPointerSize));
- __ EnterInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Push the receiver and the name of the function.
- __ Push(r3, r2);
+ // Push the receiver and the name of the function.
+ __ Push(r3, r2);
- // Call the entry.
- __ mov(r0, Operand(2));
- __ mov(r1, Operand(ExternalReference(IC_Utility(id), isolate)));
+ // Call the entry.
+ __ mov(r0, Operand(2));
+ __ mov(r1, Operand(ExternalReference(IC_Utility(id), isolate)));
- CEntryStub stub(1);
- __ CallStub(&stub);
+ CEntryStub stub(1);
+ __ CallStub(&stub);
- // Move result to r1 and leave the internal frame.
- __ mov(r1, Operand(r0));
- __ LeaveInternalFrame();
+ // Move result to r1 and leave the internal frame.
+ __ mov(r1, Operand(r0));
+ }
// Check if the receiver is a global object of some sort.
// This can happen only for regular CallIC but not KeyedCallIC.
@@ -539,7 +541,7 @@ static void GenerateCallMiss(MacroAssembler* masm,
}
// Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
+ CallKind call_kind = CallICBase::Contextual::decode(extra_state)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
ParameterCount actual(argc);
@@ -551,18 +553,6 @@ static void GenerateCallMiss(MacroAssembler* masm,
}
-void CallIC::GenerateMiss(MacroAssembler* masm,
- int argc,
- Code::ExtraICState extra_ic_state) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
-
- GenerateCallMiss(masm, argc, IC::kCallIC_Miss, extra_ic_state);
-}
-
-
void CallIC::GenerateMegamorphic(MacroAssembler* masm,
int argc,
Code::ExtraICState extra_ic_state) {
@@ -578,27 +568,6 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm,
}
-void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
-
- GenerateCallNormal(masm, argc);
- GenerateMiss(masm, argc, Code::kNoExtraICState);
-}
-
-
-void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
-
- GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss, Code::kNoExtraICState);
-}
-
-
void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// -- r2 : name
@@ -650,12 +619,13 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// This branch is taken when calling KeyedCallIC_Miss is neither required
// nor beneficial.
__ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, r0, r3);
- __ EnterInternalFrame();
- __ push(r2); // save the key
- __ Push(r1, r2); // pass the receiver and the key
- __ CallRuntime(Runtime::kKeyedGetProperty, 2);
- __ pop(r2); // restore the key
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(r2); // save the key
+ __ Push(r1, r2); // pass the receiver and the key
+ __ CallRuntime(Runtime::kKeyedGetProperty, 2);
+ __ pop(r2); // restore the key
+ }
__ mov(r1, r0);
__ jmp(&do_call);
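
These conversions from EnterInternalFrame/LeaveInternalFrame pairs to a block-scoped FrameScope are plain RAII: the destructor closes the frame on every path out of the block, so an early exit can no longer leave the frame open. A sketch of the pattern, with a hypothetical assembler type:

    #include <cstdio>

    struct MacroAssembler {
      void EnterInternalFrame() { std::puts("enter frame"); }
      void LeaveInternalFrame() { std::puts("leave frame"); }
    };

    // Constructor opens the frame; the destructor closes it when the
    // enclosing { ... } block ends, mirroring the FrameScope blocks above.
    class FrameScope {
     public:
      explicit FrameScope(MacroAssembler* masm) : masm_(masm) {
        masm_->EnterInternalFrame();
      }
      ~FrameScope() { masm_->LeaveInternalFrame(); }
     private:
      MacroAssembler* masm_;
    };

    int main() {
      MacroAssembler masm;
      {
        FrameScope scope(&masm);
        // ... emit code that needs an internal frame ...
      }  // frame left here automatically
      return 0;
    }
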
@@ -715,7 +685,7 @@ void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
__ JumpIfSmi(r2, &miss);
__ IsObjectJSStringType(r2, r0, &miss);
- GenerateCallNormal(masm, argc);
+ CallICBase::GenerateNormal(masm, argc);
__ bind(&miss);
GenerateMiss(masm, argc);
}
@@ -908,7 +878,8 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
GenerateMappedArgumentsLookup(masm, r2, r1, r3, r4, r5, &notin, &slow);
__ str(r0, mapped_location);
__ add(r6, r3, r5);
- __ RecordWrite(r3, r6, r9);
+ __ mov(r9, r0);
+ __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
__ Ret();
__ bind(&notin);
// The unmapped lookup expects that the parameter map is in r3.
@@ -916,7 +887,8 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
GenerateUnmappedArgumentsLookup(masm, r1, r3, r4, &slow);
__ str(r0, unmapped_location);
__ add(r6, r3, r4);
- __ RecordWrite(r3, r6, r9);
+ __ mov(r9, r0);
+ __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
__ Ret();
__ bind(&slow);
GenerateMiss(masm, false);
@@ -1137,14 +1109,12 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
Register receiver = r1;
Register index = r0;
- Register scratch1 = r2;
- Register scratch2 = r3;
+ Register scratch = r3;
Register result = r0;
StringCharAtGenerator char_at_generator(receiver,
index,
- scratch1,
- scratch2,
+ scratch,
result,
&miss, // When not a string.
&miss, // When not a number.
@@ -1239,6 +1209,47 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
}
+void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- r2 : receiver
+ // -- r3 : target map
+ // -- lr : return address
+ // -----------------------------------
+ // Must return the modified receiver in r0.
+ if (!FLAG_trace_elements_transitions) {
+ Label fail;
+ ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
+ __ mov(r0, r2);
+ __ Ret();
+ __ bind(&fail);
+ }
+
+ __ push(r2);
+ __ TailCallRuntime(Runtime::kTransitionElementsSmiToDouble, 1, 1);
+}
+
+
+void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
+ MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- r2 : receiver
+ // -- r3 : target map
+ // -- lr : return address
+ // -----------------------------------
+ // Must return the modified receiver in r0.
+ if (!FLAG_trace_elements_transitions) {
+ Label fail;
+ ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
+ __ mov(r0, r2);
+ __ Ret();
+ __ bind(&fail);
+ }
+
+ __ push(r2);
+ __ TailCallRuntime(Runtime::kTransitionElementsDoubleToObject, 1, 1);
+}
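
These two stubs implement one-way moves along the elements-kind lattice that smi-only arrays introduce: FAST_SMI_ONLY_ELEMENTS can widen to FAST_DOUBLE_ELEMENTS or FAST_ELEMENTS, and FAST_DOUBLE_ELEMENTS can widen to FAST_ELEMENTS, but nothing narrows back. A sketch of the legal transitions, using simplified enum values:

    #include <cassert>

    // Ordered so that larger values are strictly more general kinds.
    enum ElementsKind {
      FAST_SMI_ONLY_ELEMENTS,
      FAST_DOUBLE_ELEMENTS,
      FAST_ELEMENTS
    };

    // Transitions may only widen the kind; there is no narrowing path.
    bool CanTransition(ElementsKind from, ElementsKind to) {
      return from < to;
    }

    int main() {
      assert(CanTransition(FAST_SMI_ONLY_ELEMENTS, FAST_DOUBLE_ELEMENTS));
      assert(CanTransition(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS));
      assert(!CanTransition(FAST_ELEMENTS, FAST_SMI_ONLY_ELEMENTS));
      return 0;
    }
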
+
+
void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
StrictModeFlag strict_mode) {
// ---------- S t a t e --------------
@@ -1267,13 +1278,17 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// -- r2 : receiver
// -- lr : return address
// -----------------------------------
- Label slow, fast, array, extra;
+ Label slow, array, extra, check_if_double_array;
+ Label fast_object_with_map_check, fast_object_without_map_check;
+ Label fast_double_with_map_check, fast_double_without_map_check;
// Register usage.
Register value = r0;
Register key = r1;
Register receiver = r2;
Register elements = r3; // Elements array of the receiver.
+ Register elements_map = r6;
+ Register receiver_map = r7;
// r4 and r5 are used as general scratch registers.
// Check that the key is a smi.
@@ -1281,35 +1296,26 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// Check that the object isn't a smi.
__ JumpIfSmi(receiver, &slow);
// Get the map of the object.
- __ ldr(r4, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
- __ ldrb(ip, FieldMemOperand(r4, Map::kBitFieldOffset));
+ __ ldrb(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
__ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
__ b(ne, &slow);
// Check if the object is a JS array or not.
- __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
+ __ ldrb(r4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
__ cmp(r4, Operand(JS_ARRAY_TYPE));
__ b(eq, &array);
// Check that the object is some kind of JSObject.
- __ cmp(r4, Operand(FIRST_JS_RECEIVER_TYPE));
+ __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
__ b(lt, &slow);
- __ cmp(r4, Operand(JS_PROXY_TYPE));
- __ b(eq, &slow);
- __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
- __ b(eq, &slow);
// Object case: Check key against length in the elements array.
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- // Check that the object is in fast mode and writable.
- __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(r4, ip);
- __ b(ne, &slow);
// Check array bounds. Both the key and the length of FixedArray are smis.
__ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ cmp(key, Operand(ip));
- __ b(lo, &fast);
+ __ b(lo, &fast_object_with_map_check);
// Slow case, handle jump to runtime.
__ bind(&slow);
@@ -1330,21 +1336,31 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ cmp(key, Operand(ip));
__ b(hs, &slow);
+ __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ cmp(elements_map,
+ Operand(masm->isolate()->factory()->fixed_array_map()));
+ __ b(ne, &check_if_double_array);
// Calculate key + 1 as smi.
STATIC_ASSERT(kSmiTag == 0);
__ add(r4, key, Operand(Smi::FromInt(1)));
__ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ b(&fast);
+ __ b(&fast_object_without_map_check);
+
+ __ bind(&check_if_double_array);
+ __ cmp(elements_map,
+ Operand(masm->isolate()->factory()->fixed_double_array_map()));
+ __ b(ne, &slow);
+ // Add 1 to key, and go to common element store code for doubles.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ add(r4, key, Operand(Smi::FromInt(1)));
+ __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ jmp(&fast_double_without_map_check);
// Array case: Get the length and the elements array from the JS
// array. Check that the array is in fast mode (and writable); if it
// is, the length is always a smi.
__ bind(&array);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(r4, ip);
- __ b(ne, &slow);
// Check the key against the length in the array.
__ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
@@ -1352,18 +1368,57 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ b(hs, &extra);
// Fall through to fast case.
- __ bind(&fast);
- // Fast case, store the value to the elements backing store.
- __ add(r5, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(r5, r5, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ str(value, MemOperand(r5));
- // Skip write barrier if the written value is a smi.
- __ tst(value, Operand(kSmiTagMask));
- __ Ret(eq);
+ __ bind(&fast_object_with_map_check);
+ Register scratch_value = r4;
+ Register address = r5;
+ __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ cmp(elements_map,
+ Operand(masm->isolate()->factory()->fixed_array_map()));
+ __ b(ne, &fast_double_with_map_check);
+ __ bind(&fast_object_without_map_check);
+ // Smi stores don't require further checks.
+ Label non_smi_value;
+ __ JumpIfNotSmi(value, &non_smi_value);
+ // It's irrelevant whether the array is smi-only or not when writing a smi.
+ __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ str(value, MemOperand(address));
+ __ Ret();
+
+ __ bind(&non_smi_value);
+ // Escape to slow case when writing non-smi into smi-only array.
+ __ CheckFastObjectElements(receiver_map, scratch_value, &slow);
+ // Fast elements array, store the value to the elements backing store.
+ __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ str(value, MemOperand(address));
// Update write barrier for the elements array address.
- __ sub(r4, r5, Operand(elements));
- __ RecordWrite(elements, Operand(r4), r5, r6);
+ __ mov(scratch_value, value); // Preserve the value which is returned.
+ __ RecordWrite(elements,
+ address,
+ scratch_value,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ Ret();
+ __ bind(&fast_double_with_map_check);
+ // Check for fast double array case. If this fails, call through to the
+ // runtime.
+ __ cmp(elements_map,
+ Operand(masm->isolate()->factory()->fixed_double_array_map()));
+ __ b(ne, &slow);
+ __ bind(&fast_double_without_map_check);
+ __ StoreNumberToDoubleElements(value,
+ key,
+ receiver,
+ elements,
+ r4,
+ r5,
+ r6,
+ r7,
+ &slow);
__ Ret();
}
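
The rewritten generic store dispatches on both the incoming value and the backing-store map: a smi can be written into any fast backing store without a barrier, a non-smi written into a smi-only array escapes to the runtime (which transitions the array), fast object arrays take the store plus write barrier, and double arrays take the number store. A control-flow sketch with simplified types, not the stub's real interface:

    #include <cassert>

    enum Kind { SMI_ONLY, OBJECT_ELEMENTS, DOUBLE_ELEMENTS };

    struct Value {
      bool is_smi;
      bool is_number;  // smis are numbers too
    };

    // Returns false when the store must fall through to the &slow label.
    bool FastKeyedStore(Kind elements, Value v) {
      if (elements == DOUBLE_ELEMENTS) return v.is_number;
      if (v.is_smi) return true;               // no write barrier needed
      if (elements == SMI_ONLY) return false;  // needs a transition first
      return true;                             // object store + write barrier
    }

    int main() {
      Value smi = {true, true};
      Value obj = {false, false};
      assert(FastKeyedStore(SMI_ONLY, smi));
      assert(!FastKeyedStore(SMI_ONLY, obj));
      assert(FastKeyedStore(DOUBLE_ELEMENTS, smi));
      return 0;
    }
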
@@ -1510,11 +1565,9 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
case Token::LT:
return lt;
case Token::GT:
- // Reverse left and right operands to obtain ECMA-262 conversion order.
- return lt;
+ return gt;
case Token::LTE:
- // Reverse left and right operands to obtain ECMA-262 conversion order.
- return ge;
+ return le;
case Token::GTE:
return ge;
default:
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc
index 30ccd05bee..2341774766 100644
--- a/deps/v8/src/arm/lithium-arm.cc
+++ b/deps/v8/src/arm/lithium-arm.cc
@@ -212,10 +212,11 @@ void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
}
-void LIsNullAndBranch::PrintDataTo(StringStream* stream) {
+void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
InputAt(0)->PrintTo(stream);
- stream->Add(is_strict() ? " === null" : " == null");
+ stream->Add(kind() == kStrictEquality ? " === " : " == ");
+ stream->Add(nil() == kNullValue ? "null" : "undefined");
stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
}
@@ -227,6 +228,13 @@ void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
}
+void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_string(");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_smi(");
InputAt(0)->PrintTo(stream);
@@ -241,6 +249,14 @@ void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
}
+void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if string_compare(");
+ InputAt(0)->PrintTo(stream);
+ InputAt(1)->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if has_instance_type(");
InputAt(0)->PrintTo(stream);
@@ -390,6 +406,12 @@ void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
}
+void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add(" %p -> %p", *original_map(), *transitioned_map());
+}
+
+
LChunk::LChunk(CompilationInfo* info, HGraph* graph)
: spill_slot_count_(0),
info_(info),
@@ -711,7 +733,9 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
- instr->set_environment(CreateEnvironment(hydrogen_env));
+ int argument_index_accumulator = 0;
+ instr->set_environment(CreateEnvironment(hydrogen_env,
+ &argument_index_accumulator));
return instr;
}
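
Threading an explicit argument_index_accumulator through CreateEnvironment fixes argument numbering for nested (inlined) environments: each recursion level now continues the count where its outer environment stopped instead of restarting at zero. The shape of the fix, sketched with a toy environment chain:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    struct Env {
      int num_arguments;
      const Env* outer;
    };

    // Walk outermost-first and hand out one global index per argument,
    // continuing across nesting levels through the shared accumulator.
    void AssignIndices(const Env* env, int* accumulator, std::vector<int>* out) {
      if (env == NULL) return;
      AssignIndices(env->outer, accumulator, out);
      for (int i = 0; i < env->num_arguments; ++i) {
        out->push_back((*accumulator)++);
      }
    }

    int main() {
      Env outer = {2, NULL};
      Env inner = {1, &outer};
      std::vector<int> indices;
      int accumulator = 0;
      AssignIndices(&inner, &accumulator, &indices);
      assert(indices.back() == 2);  // not 0: the count was not restarted
      return 0;
    }
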
@@ -741,7 +765,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
instr->MarkAsCall();
instr = AssignPointerMap(instr);
- if (hinstr->HasSideEffects()) {
+ if (hinstr->HasObservableSideEffects()) {
ASSERT(hinstr->next()->IsSimulate());
HSimulate* sim = HSimulate::cast(hinstr->next());
instr = SetInstructionPendingDeoptimizationEnvironment(
@@ -753,7 +777,8 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
// Thus we still need to attach environment to this call even if
// call sequence can not deoptimize eagerly.
bool needs_environment =
- (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) || !hinstr->HasSideEffects();
+ (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
+ !hinstr->HasObservableSideEffects();
if (needs_environment && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
@@ -811,28 +836,6 @@ LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
}
-LInstruction* LChunkBuilder::DoBit(Token::Value op,
- HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
-
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
- return DefineAsRegister(new LBitI(op, left, right));
- } else {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* left = UseFixed(instr->left(), r1);
- LOperand* right = UseFixed(instr->right(), r0);
- LArithmeticT* result = new LArithmeticT(op, left, right);
- return MarkAsCall(DefineFixed(result, r0), instr);
- }
-}
-
-
LInstruction* LChunkBuilder::DoShift(Token::Value op,
HBitwiseBinaryOperation* instr) {
if (instr->representation().IsTagged()) {
@@ -994,10 +997,13 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
}
-LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
+LEnvironment* LChunkBuilder::CreateEnvironment(
+ HEnvironment* hydrogen_env,
+ int* argument_index_accumulator) {
if (hydrogen_env == NULL) return NULL;
- LEnvironment* outer = CreateEnvironment(hydrogen_env->outer());
+ LEnvironment* outer =
+ CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
int ast_id = hydrogen_env->ast_id();
ASSERT(ast_id != AstNode::kNoNumber);
int value_count = hydrogen_env->length();
@@ -1007,7 +1013,6 @@ LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
argument_count_,
value_count,
outer);
- int argument_index = 0;
for (int i = 0; i < value_count; ++i) {
if (hydrogen_env->is_special_index(i)) continue;
@@ -1016,7 +1021,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
if (value->IsArgumentsObject()) {
op = NULL;
} else if (value->IsPushArgument()) {
- op = new LArgument(argument_index++);
+ op = new LArgument((*argument_index_accumulator)++);
} else {
op = UseAny(value);
}
@@ -1206,8 +1211,9 @@ LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+ LOperand* function = UseFixed(instr->function(), r1);
argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new LCallFunction, r0), instr);
+ return MarkAsCall(DefineFixed(new LCallFunction(function), r0), instr);
}
@@ -1232,8 +1238,24 @@ LInstruction* LChunkBuilder::DoShl(HShl* instr) {
}
-LInstruction* LChunkBuilder::DoBitAnd(HBitAnd* instr) {
- return DoBit(Token::BIT_AND, instr);
+LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
+ if (instr->representation().IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+
+ LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+ LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ return DefineAsRegister(new LBitI(left, right));
+ } else {
+ ASSERT(instr->representation().IsTagged());
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+
+ LOperand* left = UseFixed(instr->left(), r1);
+ LOperand* right = UseFixed(instr->right(), r0);
+ LArithmeticT* result = new LArithmeticT(instr->op(), left, right);
+ return MarkAsCall(DefineFixed(result, r0), instr);
+ }
}
@@ -1244,16 +1266,6 @@ LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
}
-LInstruction* LChunkBuilder::DoBitOr(HBitOr* instr) {
- return DoBit(Token::BIT_OR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoBitXor(HBitXor* instr) {
- return DoBit(Token::BIT_XOR, instr);
-}
-
-
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
@@ -1399,12 +1411,10 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
- Token::Value op = instr->token();
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
- bool reversed = (op == Token::GT || op == Token::LTE);
- LOperand* left = UseFixed(instr->left(), reversed ? r0 : r1);
- LOperand* right = UseFixed(instr->right(), reversed ? r1 : r0);
+ LOperand* left = UseFixed(instr->left(), r1);
+ LOperand* right = UseFixed(instr->right(), r0);
LCmpT* result = new LCmpT(left, right);
return MarkAsCall(DefineFixed(result, r0), instr);
}
@@ -1416,8 +1426,8 @@ LInstruction* LChunkBuilder::DoCompareIDAndBranch(
if (r.IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
+ LOperand* left = UseRegisterOrConstantAtStart(instr->left());
+ LOperand* right = UseRegisterOrConstantAtStart(instr->right());
return new LCmpIDAndBranch(left, right);
} else {
ASSERT(r.IsDouble());
@@ -1444,9 +1454,9 @@ LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
}
-LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) {
+LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- return new LIsNullAndBranch(UseRegisterAtStart(instr->value()));
+ return new LIsNilAndBranch(UseRegisterAtStart(instr->value()));
}
@@ -1457,6 +1467,13 @@ LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
}
+LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* temp = TempRegister();
+ return new LIsStringAndBranch(UseRegisterAtStart(instr->value()), temp);
+}
+
+
LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
return new LIsSmiAndBranch(Use(instr->value()));
@@ -1471,6 +1488,17 @@ LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
}
+LInstruction* LChunkBuilder::DoStringCompareAndBranch(
+ HStringCompareAndBranch* instr) {
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+ LOperand* left = UseFixed(instr->left(), r1);
+ LOperand* right = UseFixed(instr->right(), r0);
+ LStringCompareAndBranch* result = new LStringCompareAndBranch(left, right);
+ return MarkAsCall(result, instr);
+}
+
+
LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
HHasInstanceTypeAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
@@ -1734,7 +1762,7 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
LLoadGlobalCell* result = new LLoadGlobalCell;
- return instr->check_hole_value()
+ return instr->RequiresHoleCheck()
? AssignEnvironment(DefineAsRegister(result))
: DefineAsRegister(result);
}
@@ -1748,14 +1776,11 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
- if (instr->check_hole_value()) {
- LOperand* temp = TempRegister();
- LOperand* value = UseRegister(instr->value());
- return AssignEnvironment(new LStoreGlobalCell(value, temp));
- } else {
- LOperand* value = UseRegisterAtStart(instr->value());
- return new LStoreGlobalCell(value, NULL);
- }
+ LOperand* temp = TempRegister();
+ LOperand* value = UseTempRegister(instr->value());
+ LInstruction* result = new LStoreGlobalCell(value, temp);
+ if (instr->RequiresHoleCheck()) result = AssignEnvironment(result);
+ return result;
}
@@ -1968,6 +1993,26 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
}
+LInstruction* LChunkBuilder::DoTransitionElementsKind(
+ HTransitionElementsKind* instr) {
+ if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
+ instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* new_map_reg = TempRegister();
+ LTransitionElementsKind* result =
+ new LTransitionElementsKind(object, new_map_reg, NULL);
+ return DefineSameAsFirst(result);
+ } else {
+ LOperand* object = UseFixed(instr->object(), r0);
+ LOperand* fixed_object_reg = FixedTemp(r2);
+ LOperand* new_map_reg = FixedTemp(r3);
+ LTransitionElementsKind* result =
+ new LTransitionElementsKind(object, new_map_reg, fixed_object_reg);
+ return MarkAsCall(DefineFixed(result, r0), instr);
+ }
+}
+
+
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool needs_write_barrier = instr->NeedsWriteBarrier();
@@ -2025,8 +2070,14 @@ LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
}
-LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
- return MarkAsCall(DefineFixed(new LObjectLiteral, r0), instr);
+LInstruction* LChunkBuilder::DoObjectLiteralFast(HObjectLiteralFast* instr) {
+ return MarkAsCall(DefineFixed(new LObjectLiteralFast, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoObjectLiteralGeneric(
+ HObjectLiteralGeneric* instr) {
+ return MarkAsCall(DefineFixed(new LObjectLiteralGeneric, r0), instr);
}
diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h
index 8c18760fd1..6051ad9732 100644
--- a/deps/v8/src/arm/lithium-arm.h
+++ b/deps/v8/src/arm/lithium-arm.h
@@ -107,10 +107,12 @@ class LCodeGen;
V(Integer32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
- V(IsNullAndBranch) \
+ V(IsNilAndBranch) \
V(IsObjectAndBranch) \
+ V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
+ V(StringCompareAndBranch) \
V(JSArrayLength) \
V(Label) \
V(LazyBailout) \
@@ -132,7 +134,8 @@ class LCodeGen;
V(NumberTagD) \
V(NumberTagI) \
V(NumberUntagD) \
- V(ObjectLiteral) \
+ V(ObjectLiteralFast) \
+ V(ObjectLiteralGeneric) \
V(OsrEntry) \
V(OuterContext) \
V(Parameter) \
@@ -162,6 +165,7 @@ class LCodeGen;
V(ThisFunction) \
V(Throw) \
V(ToFastProperties) \
+ V(TransitionElementsKind) \
V(Typeof) \
V(TypeofIsAndBranch) \
V(UnaryMathOperation) \
@@ -627,16 +631,17 @@ class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
};
-class LIsNullAndBranch: public LControlInstruction<1, 0> {
+class LIsNilAndBranch: public LControlInstruction<1, 0> {
public:
- explicit LIsNullAndBranch(LOperand* value) {
+ explicit LIsNilAndBranch(LOperand* value) {
inputs_[0] = value;
}
- DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch)
+ DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
- bool is_strict() const { return hydrogen()->is_strict(); }
+ EqualityKind kind() const { return hydrogen()->kind(); }
+ NilValue nil() const { return hydrogen()->nil(); }
virtual void PrintDataTo(StringStream* stream);
};
@@ -656,6 +661,20 @@ class LIsObjectAndBranch: public LControlInstruction<1, 1> {
};
+class LIsStringAndBranch: public LControlInstruction<1, 1> {
+ public:
+ LIsStringAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
class LIsSmiAndBranch: public LControlInstruction<1, 0> {
public:
explicit LIsSmiAndBranch(LOperand* value) {
@@ -684,6 +703,23 @@ class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
};
+class LStringCompareAndBranch: public LControlInstruction<2, 0> {
+ public:
+ LStringCompareAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
+ "string-compare-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
+
+ Token::Value op() const { return hydrogen()->token(); }
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
public:
explicit LHasInstanceTypeAndBranch(LOperand* value) {
@@ -794,18 +830,15 @@ class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
class LBitI: public LTemplateInstruction<1, 2, 0> {
public:
- LBitI(Token::Value op, LOperand* left, LOperand* right)
- : op_(op) {
+ LBitI(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
- Token::Value op() const { return op_; }
+ Token::Value op() const { return hydrogen()->op(); }
DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
-
- private:
- Token::Value op_;
+ DECLARE_HYDROGEN_ACCESSOR(Bitwise)
};
@@ -1226,7 +1259,7 @@ class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
LOperand* global_object() { return InputAt(0); }
Handle<Object> name() const { return hydrogen()->name(); }
LOperand* value() { return InputAt(1); }
- bool strict_mode() { return hydrogen()->strict_mode(); }
+ StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
@@ -1259,7 +1292,6 @@ class LStoreContextSlot: public LTemplateInstruction<0, 2, 0> {
LOperand* context() { return InputAt(0); }
LOperand* value() { return InputAt(1); }
int slot_index() { return hydrogen()->slot_index(); }
- int needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
virtual void PrintDataTo(StringStream* stream);
};
@@ -1276,7 +1308,9 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> {
class LThisFunction: public LTemplateInstruction<1, 0, 0> {
+ public:
DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+ DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
};
@@ -1379,12 +1413,17 @@ class LCallNamed: public LTemplateInstruction<1, 0, 0> {
};
-class LCallFunction: public LTemplateInstruction<1, 0, 0> {
+class LCallFunction: public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LCallFunction(LOperand* function) {
+ inputs_[0] = function;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
DECLARE_HYDROGEN_ACCESSOR(CallFunction)
- int arity() const { return hydrogen()->argument_count() - 2; }
+ LOperand* function() { return inputs_[0]; }
+ int arity() const { return hydrogen()->argument_count() - 1; }
};
@@ -1560,7 +1599,6 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 0> {
Handle<Object> name() const { return hydrogen()->name(); }
bool is_in_object() { return hydrogen()->is_in_object(); }
int offset() { return hydrogen()->offset(); }
- bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
Handle<Map> transition() const { return hydrogen()->transition(); }
};
@@ -1580,7 +1618,7 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
LOperand* object() { return inputs_[0]; }
LOperand* value() { return inputs_[1]; }
Handle<Object> name() const { return hydrogen()->name(); }
- bool strict_mode() { return hydrogen()->strict_mode(); }
+ StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
@@ -1642,7 +1680,7 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
- bool strict_mode() { return hydrogen()->strict_mode(); }
+ StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
@@ -1668,6 +1706,30 @@ class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
};
+class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
+ public:
+ LTransitionElementsKind(LOperand* object,
+ LOperand* new_map_temp,
+ LOperand* temp_reg) {
+ inputs_[0] = object;
+ temps_[0] = new_map_temp;
+ temps_[1] = temp_reg;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
+ "transition-elements-kind")
+ DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* new_map_reg() { return temps_[0]; }
+ LOperand* temp_reg() { return temps_[1]; }
+ Handle<Map> original_map() { return hydrogen()->original_map(); }
+ Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
+};
+
+
class LStringAdd: public LTemplateInstruction<1, 2, 0> {
public:
LStringAdd(LOperand* left, LOperand* right) {
@@ -1838,10 +1900,17 @@ class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
};
-class LObjectLiteral: public LTemplateInstruction<1, 0, 0> {
+class LObjectLiteralFast: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralFast, "object-literal-fast")
+ DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralFast)
+};
+
+
+class LObjectLiteralGeneric: public LTemplateInstruction<1, 0, 0> {
public:
- DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
- DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
+ DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralGeneric, "object-literal-generic")
+ DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralGeneric)
};
@@ -2159,12 +2228,12 @@ class LChunkBuilder BASE_EMBEDDED {
LInstruction* instr, int ast_id);
void ClearInstructionPendingDeoptimizationEnvironment();
- LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
+ LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
+ int* argument_index_accumulator);
void VisitInstruction(HInstruction* current);
void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
- LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
LInstruction* DoArithmeticD(Token::Value op,
HArithmeticBinaryOperation* instr);
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index f5d7449149..22a504fc64 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -40,37 +40,22 @@ class SafepointGenerator : public CallWrapper {
public:
SafepointGenerator(LCodeGen* codegen,
LPointerMap* pointers,
- int deoptimization_index)
+ Safepoint::DeoptMode mode)
: codegen_(codegen),
pointers_(pointers),
- deoptimization_index_(deoptimization_index) { }
+ deopt_mode_(mode) { }
virtual ~SafepointGenerator() { }
- virtual void BeforeCall(int call_size) const {
- ASSERT(call_size >= 0);
- // Ensure that we have enough space after the previous safepoint position
- // for the generated code there.
- int call_end = codegen_->masm()->pc_offset() + call_size;
- int prev_jump_end =
- codegen_->LastSafepointEnd() + Deoptimizer::patch_size();
- if (call_end < prev_jump_end) {
- int padding_size = prev_jump_end - call_end;
- ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
- while (padding_size > 0) {
- codegen_->masm()->nop();
- padding_size -= Assembler::kInstrSize;
- }
- }
- }
+ virtual void BeforeCall(int call_size) const { }
virtual void AfterCall() const {
- codegen_->RecordSafepoint(pointers_, deoptimization_index_);
+ codegen_->RecordSafepoint(pointers_, deopt_mode_);
}
private:
LCodeGen* codegen_;
LPointerMap* pointers_;
- int deoptimization_index_;
+ Safepoint::DeoptMode deopt_mode_;
};
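
The manual padding in BeforeCall disappears because lazy deoptimization is now data-driven: each safepoint carries a DeoptMode, and lazy-deopt sites record their pc offset (stored via the SetPc call further down) so the deoptimizer can patch the return address after the fact. A sketch of that bookkeeping, with invented minimal types:

    #include <cassert>
    #include <vector>

    enum DeoptMode { kNoLazyDeopt, kLazyDeopt };

    struct SafepointTable {
      std::vector<int> lazy_deopt_pcs;  // sites eligible for lazy patching

      void DefineSafepoint(int pc_offset, DeoptMode mode) {
        if (mode == kLazyDeopt) lazy_deopt_pcs.push_back(pc_offset);
      }
    };

    int main() {
      SafepointTable table;
      table.DefineSafepoint(16, kNoLazyDeopt);
      table.DefineSafepoint(32, kLazyDeopt);
      assert(table.lazy_deopt_pcs.size() == 1);
      assert(table.lazy_deopt_pcs[0] == 32);
      return 0;
    }
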
@@ -82,6 +67,14 @@ bool LCodeGen::GenerateCode() {
status_ = GENERATING;
CpuFeatures::Scope scope1(VFP3);
CpuFeatures::Scope scope2(ARMv7);
+
+ CodeStub::GenerateFPStubs();
+
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // NONE indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done in GeneratePrologue).
+ FrameScope frame_scope(masm_, StackFrame::NONE);
+
return GeneratePrologue() &&
GenerateBody() &&
GenerateDeferredCode() &&
@@ -95,7 +88,6 @@ void LCodeGen::FinishCode(Handle<Code> code) {
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
PopulateDeoptimizationData(code);
- Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
}
@@ -151,7 +143,7 @@ bool LCodeGen::GeneratePrologue() {
// with undefined when called as functions (without an explicit
// receiver object). r5 is zero for method calls and non-zero for
// function calls.
- if (info_->is_strict_mode() || info_->is_native()) {
+ if (!info_->is_classic_mode() || info_->is_native()) {
Label ok;
__ cmp(r5, Operand(0));
__ b(eq, &ok);
@@ -192,7 +184,7 @@ bool LCodeGen::GeneratePrologue() {
} else {
__ CallRuntime(Runtime::kNewFunctionContext, 1);
}
- RecordSafepoint(Safepoint::kNoDeoptimizationIndex);
+ RecordSafepoint(Safepoint::kNoLazyDeopt);
// Context is returned in both r0 and cp. It replaces the context
// passed to us. It's saved in the stack and kept live in cp.
__ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -206,13 +198,11 @@ bool LCodeGen::GeneratePrologue() {
// Load parameter from stack.
__ ldr(r0, MemOperand(fp, parameter_offset));
// Store it in the context.
- __ mov(r1, Operand(Context::SlotOffset(var->index())));
- __ str(r0, MemOperand(cp, r1));
- // Update the write barrier. This clobbers all involved
- // registers, so we have to use two more registers to avoid
- // clobbering cp.
- __ mov(r2, Operand(cp));
- __ RecordWrite(r2, Operand(r1), r3, r0);
+ MemOperand target = ContextOperand(cp, var->index());
+ __ str(r0, target);
+ // Update the write barrier. This clobbers r3 and r0.
+ __ RecordWriteContextSlot(
+ cp, target.offset(), r0, r3, kLRHasBeenSaved, kSaveFPRegs);
}
}
Comment(";;; End allocate local context");
@@ -243,35 +233,23 @@ bool LCodeGen::GenerateBody() {
instr->CompileToNative(this);
}
}
+ EnsureSpaceForLazyDeopt();
return !is_aborted();
}
-LInstruction* LCodeGen::GetNextInstruction() {
- if (current_instruction_ < instructions_->length() - 1) {
- return instructions_->at(current_instruction_ + 1);
- } else {
- return NULL;
- }
-}
-
-
bool LCodeGen::GenerateDeferredCode() {
ASSERT(is_generating());
if (deferred_.length() > 0) {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
__ bind(code->entry());
+ Comment(";;; Deferred code @%d: %s.",
+ code->instruction_index(),
+ code->instr()->Mnemonic());
code->Generate();
__ jmp(code->exit());
}
-
- // Pad code to ensure that the last piece of deferred code have
- // room for lazy bailout.
- while ((masm()->pc_offset() - LastSafepointEnd())
- < Deoptimizer::patch_size()) {
- __ nop();
- }
}
// Force constant pool emission at the end of the deferred code to make
@@ -401,6 +379,12 @@ int LCodeGen::ToInteger32(LConstantOperand* op) const {
}
+double LCodeGen::ToDouble(LConstantOperand* op) const {
+ Handle<Object> value = chunk_->LookupLiteral(op);
+ return value->Number();
+}
+
+
Operand LCodeGen::ToOperand(LOperand* op) {
if (op->IsConstantOperand()) {
LConstantOperand* const_op = LConstantOperand::cast(op);
@@ -551,7 +535,7 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
__ Call(code, mode);
- RegisterLazyDeoptimization(instr, safepoint_mode);
+ RecordSafepointWithLazyDeopt(instr, safepoint_mode);
// Signal that we don't inline smi code before these stubs in the
// optimizing code generator.
@@ -571,7 +555,7 @@ void LCodeGen::CallRuntime(const Runtime::Function* function,
RecordPosition(pointers->position());
__ CallRuntime(function, num_arguments);
- RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT);
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}
@@ -580,37 +564,12 @@ void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
LInstruction* instr) {
__ CallRuntimeSaveDoubles(id);
RecordSafepointWithRegisters(
- instr->pointer_map(), argc, Safepoint::kNoDeoptimizationIndex);
+ instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}
-void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr,
- SafepointMode safepoint_mode) {
- // Create the environment to bailout to. If the call has side effects
- // execution has to continue after the call otherwise execution can continue
- // from a previous bailout point repeating the call.
- LEnvironment* deoptimization_environment;
- if (instr->HasDeoptimizationEnvironment()) {
- deoptimization_environment = instr->deoptimization_environment();
- } else {
- deoptimization_environment = instr->environment();
- }
-
- RegisterEnvironmentForDeoptimization(deoptimization_environment);
- if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
- RecordSafepoint(instr->pointer_map(),
- deoptimization_environment->deoptimization_index());
- } else {
- ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- RecordSafepointWithRegisters(
- instr->pointer_map(),
- 0,
- deoptimization_environment->deoptimization_index());
- }
-}
-
-
-void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
+void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+ Safepoint::DeoptMode mode) {
if (!environment->HasBeenRegistered()) {
// Physical stack frame layout:
// -x ............. -4 0 ..................................... y
@@ -632,14 +591,17 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
Translation translation(&translations_, frame_count);
WriteTranslation(environment, &translation);
int deoptimization_index = deoptimizations_.length();
- environment->Register(deoptimization_index, translation.index());
+ int pc_offset = masm()->pc_offset();
+ environment->Register(deoptimization_index,
+ translation.index(),
+ (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
deoptimizations_.Add(environment);
}
}
void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
- RegisterEnvironmentForDeoptimization(environment);
+ RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
ASSERT(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
@@ -701,6 +663,7 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
data->SetArgumentsStackHeight(i,
Smi::FromInt(env->arguments_stack_height()));
+ data->SetPc(i, Smi::FromInt(env->pc_offset()));
}
code->set_deoptimization_data(*data);
}
@@ -732,16 +695,28 @@ void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
}
+void LCodeGen::RecordSafepointWithLazyDeopt(
+ LInstruction* instr, SafepointMode safepoint_mode) {
+ if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+ RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
+ } else {
+ ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kLazyDeopt);
+ }
+}
+
+
void LCodeGen::RecordSafepoint(
LPointerMap* pointers,
Safepoint::Kind kind,
int arguments,
- int deoptimization_index) {
+ Safepoint::DeoptMode deopt_mode) {
ASSERT(expected_safepoint_kind_ == kind);
- const ZoneList<LOperand*>* operands = pointers->operands();
+ const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
- kind, arguments, deoptimization_index);
+ kind, arguments, deopt_mode);
for (int i = 0; i < operands->length(); i++) {
LOperand* pointer = operands->at(i);
if (pointer->IsStackSlot()) {
@@ -758,31 +733,31 @@ void LCodeGen::RecordSafepoint(
void LCodeGen::RecordSafepoint(LPointerMap* pointers,
- int deoptimization_index) {
- RecordSafepoint(pointers, Safepoint::kSimple, 0, deoptimization_index);
+ Safepoint::DeoptMode deopt_mode) {
+ RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}
-void LCodeGen::RecordSafepoint(int deoptimization_index) {
+void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
LPointerMap empty_pointers(RelocInfo::kNoPosition);
- RecordSafepoint(&empty_pointers, deoptimization_index);
+ RecordSafepoint(&empty_pointers, deopt_mode);
}
void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
- int deoptimization_index) {
- RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments,
- deoptimization_index);
+ Safepoint::DeoptMode deopt_mode) {
+ RecordSafepoint(
+ pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}
void LCodeGen::RecordSafepointWithRegistersAndDoubles(
LPointerMap* pointers,
int arguments,
- int deoptimization_index) {
- RecordSafepoint(pointers, Safepoint::kWithRegistersAndDoubles, arguments,
- deoptimization_index);
+ Safepoint::DeoptMode deopt_mode) {
+ RecordSafepoint(
+ pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
}
@@ -817,12 +792,6 @@ void LCodeGen::DoGap(LGap* gap) {
LParallelMove* move = gap->GetParallelMove(inner_pos);
if (move != NULL) DoParallelMove(move);
}
-
- LInstruction* next = GetNextInstruction();
- if (next != NULL && next->IsLazyBailout()) {
- int pc = masm()->pc_offset();
- safepoints_.SetPcAfterGap(pc);
- }
}
@@ -1032,6 +1001,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
virtual void Generate() {
codegen()->DoDeferredBinaryOpStub(instr_, Token::DIV);
}
+ virtual LInstruction* instr() { return instr_; }
private:
LDivI* instr_;
};
@@ -1129,7 +1099,7 @@ void LCodeGen::DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr,
__ CallStub(&stub);
RecordSafepointWithRegistersAndDoubles(instr->pointer_map(),
0,
- Safepoint::kNoDeoptimizationIndex);
+ Safepoint::kNoLazyDeopt);
// Overwrite the stored value of r0 with the result of the stub.
__ StoreToSafepointRegistersAndDoublesSlot(r0, r0);
}
@@ -1695,30 +1665,44 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
}
-void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
- __ cmp(ToRegister(left), ToRegister(right));
-}
-
-
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
int false_block = chunk_->LookupDestination(instr->false_block_id());
int true_block = chunk_->LookupDestination(instr->true_block_id());
-
- if (instr->is_double()) {
- // Compare left and right as doubles and load the
- // resulting flags into the normal status register.
- __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
- // If a NaN is involved, i.e. the result is unordered (V set),
- // jump to false block label.
- __ b(vs, chunk_->GetAssemblyLabel(false_block));
+ Condition cond = TokenToCondition(instr->op(), false);
+
+ if (left->IsConstantOperand() && right->IsConstantOperand()) {
+ // We can statically evaluate the comparison.
+ double left_val = ToDouble(LConstantOperand::cast(left));
+ double right_val = ToDouble(LConstantOperand::cast(right));
+ int next_block =
+ EvalComparison(instr->op(), left_val, right_val) ? true_block
+ : false_block;
+ EmitGoto(next_block);
} else {
- EmitCmpI(left, right);
+ if (instr->is_double()) {
+ // Compare left and right operands as doubles and load the
+ // resulting flags into the normal status register.
+ __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
+ // If a NaN is involved, i.e. the result is unordered (V set),
+ // jump to false block label.
+ __ b(vs, chunk_->GetAssemblyLabel(false_block));
+ } else {
+ if (right->IsConstantOperand()) {
+ __ cmp(ToRegister(left),
+ Operand(ToInteger32(LConstantOperand::cast(right))));
+ } else if (left->IsConstantOperand()) {
+ __ cmp(ToRegister(right),
+ Operand(ToInteger32(LConstantOperand::cast(left))));
+ // We transposed the operands. Reverse the condition.
+ cond = ReverseCondition(cond);
+ } else {
+ __ cmp(ToRegister(left), ToRegister(right));
+ }
+ }
+ EmitBranch(true_block, false_block, cond);
}
-
- Condition cc = TokenToCondition(instr->op(), instr->is_double());
- EmitBranch(true_block, false_block, cc);
}
@@ -1743,25 +1727,35 @@ void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
}
-void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
+void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
Register scratch = scratch0();
Register reg = ToRegister(instr->InputAt(0));
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
- // TODO(fsc): If the expression is known to be a smi, then it's
- // definitely not null. Jump to the false block.
+ // If the expression is known to be untagged or a smi, then it's definitely
+ // not null, and it can't be an undetectable object.
+ if (instr->hydrogen()->representation().IsSpecialization() ||
+ instr->hydrogen()->type().IsSmi()) {
+ EmitGoto(false_block);
+ return;
+ }
int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
+ Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
+ Heap::kNullValueRootIndex :
+ Heap::kUndefinedValueRootIndex;
+ __ LoadRoot(ip, nil_value);
__ cmp(reg, ip);
- if (instr->is_strict()) {
+ if (instr->kind() == kStrictEquality) {
EmitBranch(true_block, false_block, eq);
} else {
+ Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
+ Heap::kUndefinedValueRootIndex :
+ Heap::kNullValueRootIndex;
Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block);
__ b(eq, true_label);
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(ip, other_nil_value);
__ cmp(reg, ip);
__ b(eq, true_label);
__ JumpIfSmi(reg, false_label);
@@ -1818,6 +1812,31 @@ void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
}
+Condition LCodeGen::EmitIsString(Register input,
+ Register temp1,
+ Label* is_not_string) {
+ __ JumpIfSmi(input, is_not_string);
+ __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
+
+ return lt;
+}
+
+
+void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
+ Register reg = ToRegister(instr->InputAt(0));
+ Register temp1 = ToRegister(instr->TempAt(0));
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+ Condition true_cond =
+ EmitIsString(reg, temp1, false_label);
+
+ EmitBranch(true_block, false_block, true_cond);
+}
+
+
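
EmitIsString leans on V8's instance-type layout: all string types sort below FIRST_NONSTRING_TYPE, so after the smi check a single compare answers the question and `lt` is the "is a string" condition. A sketch with illustrative values (the real numbering differs):

    #include <cassert>

    // Illustrative only; what matters is that string types precede the marker.
    enum InstanceType {
      SEQ_STRING_TYPE,
      CONS_STRING_TYPE,
      FIRST_NONSTRING_TYPE,
      JS_OBJECT_TYPE
    };

    // Mirrors CompareObjectType(input, ..., FIRST_NONSTRING_TYPE) plus lt.
    bool IsString(InstanceType type) {
      return type < FIRST_NONSTRING_TYPE;
    }

    int main() {
      assert(IsString(CONS_STRING_TYPE));
      assert(!IsString(JS_OBJECT_TYPE));
      return 0;
    }
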
void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1843,6 +1862,41 @@ void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
}
+static Condition ComputeCompareCondition(Token::Value op) {
+ switch (op) {
+ case Token::EQ_STRICT:
+ case Token::EQ:
+ return eq;
+ case Token::LT:
+ return lt;
+ case Token::GT:
+ return gt;
+ case Token::LTE:
+ return le;
+ case Token::GTE:
+ return ge;
+ default:
+ UNREACHABLE();
+ return kNoCondition;
+ }
+}
+
+
+void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+ Token::Value op = instr->op();
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ Handle<Code> ic = CompareIC::GetUninitialized(op);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ __ cmp(r0, Operand(0)); // This instruction also signals no smi code inlined.
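+ // The compare stub returns its result strcmp-style in r0 (negative, zero,
+ // or positive), so comparing against 0 recreates the flags for the token.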
+
+ Condition condition = ComputeCompareCondition(op);
+
+ EmitBranch(true_block, false_block, condition);
+}
+
+
static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
InstanceType from = instr->from();
InstanceType to = instr->to();
@@ -1918,28 +1972,36 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
ASSERT(!input.is(temp));
ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register.
__ JumpIfSmi(input, is_false);
- __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
- __ b(lt, is_false);
- // Map is now in temp.
- // Functions have class 'Function'.
- __ CompareInstanceType(temp, temp2, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
if (class_name->IsEqualTo(CStrVector("Function"))) {
- __ b(ge, is_true);
+ // Given the assertions below, we can use the same compares to test
+ // for both being a function type and being in the object type range.
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ FIRST_SPEC_OBJECT_TYPE + 1);
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_SPEC_OBJECT_TYPE - 1);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
+ __ b(lt, is_false);
+ __ b(eq, is_true);
+ __ cmp(temp2, Operand(LAST_SPEC_OBJECT_TYPE));
+ __ b(eq, is_true);
} else {
- __ b(ge, is_false);
+ // Faster code path to avoid two compares: subtract lower bound from the
+ // actual type and do a signed compare with the width of the type range.
+ __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ ldrb(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset));
+ __ sub(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
+ FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ b(gt, is_false);
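+ // Sketch of the trick in plain C: instead of lo <= t && t <= hi, compute
+ // t - lo and compare once against hi - lo; any type above the range is
+ // greater than the width and branches to is_false.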
}
+ // The instance type is now in the FIRST..LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
// Check if the constructor in the map is a function.
__ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset));
- // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type and
- // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
- // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
- STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
- STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
- LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
-
// Objects with a non-function constructor have class 'Object'.
__ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
if (class_name->IsEqualTo(CStrVector("Object"))) {
@@ -2014,11 +2076,10 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
LInstanceOfKnownGlobal* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() {
- codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_);
+ codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
}
-
+ virtual LInstruction* instr() { return instr_; }
Label* map_check() { return &map_check_; }
-
private:
LInstanceOfKnownGlobal* instr_;
Label map_check_;
@@ -2082,8 +2143,8 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
}
-void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check) {
+void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+ Label* map_check) {
Register result = ToRegister(instr->result());
ASSERT(result.is(r0));
@@ -2115,32 +2176,15 @@ void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
RelocInfo::CODE_TARGET,
instr,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ ASSERT(instr->HasDeoptimizationEnvironment());
+ LEnvironment* env = instr->deoptimization_environment();
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
// Put the result value into the result register slot and
// restore all registers.
__ StoreToSafepointRegisterSlot(result, result);
}
-static Condition ComputeCompareCondition(Token::Value op) {
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- return eq;
- case Token::LT:
- return lt;
- case Token::GT:
- return gt;
- case Token::LTE:
- return le;
- case Token::GTE:
- return ge;
- default:
- UNREACHABLE();
- return kNoCondition;
- }
-}
-
-
void LCodeGen::DoCmpT(LCmpT* instr) {
Token::Value op = instr->op();
@@ -2149,9 +2193,6 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
__ cmp(r0, Operand(0)); // This instruction also signals no smi code inlined.
Condition condition = ComputeCompareCondition(op);
- if (op == Token::GT || op == Token::LTE) {
- condition = ReverseCondition(condition);
- }
__ LoadRoot(ToRegister(instr->result()),
Heap::kTrueValueRootIndex,
condition);
@@ -2180,7 +2221,7 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result());
__ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
__ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
- if (instr->hydrogen()->check_hole_value()) {
+ if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
DeoptimizeIf(eq, instr->environment());
@@ -2203,6 +2244,7 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
Register value = ToRegister(instr->InputAt(0));
Register scratch = scratch0();
+ Register scratch2 = ToRegister(instr->TempAt(0));
// Load the cell.
__ mov(scratch, Operand(Handle<Object>(instr->hydrogen()->cell())));
@@ -2211,8 +2253,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
// been deleted from the property dictionary. In that case, we need
// to update the property details in the property dictionary to mark
// it as no longer deleted.
- if (instr->hydrogen()->check_hole_value()) {
- Register scratch2 = ToRegister(instr->TempAt(0));
+ if (instr->hydrogen()->RequiresHoleCheck()) {
__ ldr(scratch2,
FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
@@ -2222,6 +2263,21 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
// Store the value.
__ str(value, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
+
+ // Cells are always in the remembered set.
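+ // Because of that, only the incremental-marking half of the write barrier
+ // is required, which is why OMIT_REMEMBERED_SET is passed below.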
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ HType type = instr->hydrogen()->value()->type();
+ SmiCheck check_needed =
+ type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ __ RecordWriteField(scratch,
+ JSGlobalPropertyCell::kValueOffset,
+ value,
+ scratch2,
+ kLRHasBeenSaved,
+ kSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ check_needed);
+ }
}
@@ -2230,7 +2286,7 @@ void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
ASSERT(ToRegister(instr->value()).is(r0));
__ mov(r2, Operand(instr->name()));
- Handle<Code> ic = instr->strict_mode()
+ Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
@@ -2247,10 +2303,20 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
Register context = ToRegister(instr->context());
Register value = ToRegister(instr->value());
- __ str(value, ContextOperand(context, instr->slot_index()));
- if (instr->needs_write_barrier()) {
- int offset = Context::SlotOffset(instr->slot_index());
- __ RecordWrite(context, Operand(offset), value, scratch0());
+ MemOperand target = ContextOperand(context, instr->slot_index());
+ __ str(value, target);
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ HType type = instr->hydrogen()->value()->type();
+ SmiCheck check_needed =
+ type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ __ RecordWriteContextSlot(context,
+ target.offset(),
+ value,
+ scratch0(),
+ kLRHasBeenSaved,
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
}
}
@@ -2271,7 +2337,7 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
Register object,
Handle<Map> type,
Handle<String> name) {
- LookupResult lookup;
+ LookupResult lookup(isolate());
type->LookupInDescriptors(NULL, *name, &lookup);
ASSERT(lookup.IsProperty() &&
(lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
@@ -2500,13 +2566,9 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
}
- if (instr->hydrogen()->RequiresHoleCheck()) {
- // TODO(danno): If no hole check is required, there is no need to allocate
- // elements into a temporary register, instead scratch can be used.
- __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
- __ cmp(scratch, Operand(kHoleNanUpper32));
- DeoptimizeIf(eq, instr->environment());
- }
+ __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
+ __ cmp(scratch, Operand(kHoleNanUpper32));
+ DeoptimizeIf(eq, instr->environment());
__ vldr(result, elements, 0);
}
@@ -2577,6 +2639,7 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -2712,12 +2775,9 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ bind(&invoke);
ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
LPointerMap* pointers = instr->pointer_map();
- LEnvironment* env = instr->deoptimization_environment();
RecordPosition(pointers->position());
- RegisterEnvironmentForDeoptimization(env);
- SafepointGenerator safepoint_generator(this,
- pointers,
- env->deoptimization_index());
+ SafepointGenerator safepoint_generator(
+ this, pointers, Safepoint::kLazyDeopt);
// The number of arguments is stored in receiver which is r0, as expected
// by InvokeFunction.
v8::internal::ParameterCount actual(receiver);
@@ -2740,7 +2800,7 @@ void LCodeGen::DoPushArgument(LPushArgument* instr) {
void LCodeGen::DoThisFunction(LThisFunction* instr) {
Register result = ToRegister(instr->result());
- __ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ LoadHeapObject(result, instr->hydrogen()->closure());
}
@@ -2799,7 +2859,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
__ Call(ip);
// Setup deoptimization.
- RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT);
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
// Restore context.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2906,6 +2966,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
virtual void Generate() {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
+ virtual LInstruction* instr() { return instr_; }
private:
LUnaryMathOperation* instr_;
};
@@ -3109,6 +3170,14 @@ void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
}
+void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
+ ASSERT(ToDoubleRegister(instr->result()).is(d2));
+ TranscendentalCacheStub stub(TranscendentalCache::TAN,
+ TranscendentalCacheStub::UNTAGGED);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(d2));
TranscendentalCacheStub stub(TranscendentalCache::COS,
@@ -3148,6 +3217,9 @@ void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
case kMathSin:
DoMathSin(instr);
break;
+ case kMathTan:
+ DoMathTan(instr);
+ break;
case kMathLog:
DoMathLog(instr);
break;
@@ -3163,10 +3235,8 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
ASSERT(instr->HasPointerMap());
ASSERT(instr->HasDeoptimizationEnvironment());
LPointerMap* pointers = instr->pointer_map();
- LEnvironment* env = instr->deoptimization_environment();
RecordPosition(pointers->position());
- RegisterEnvironmentForDeoptimization(env);
- SafepointGenerator generator(this, pointers, env->deoptimization_index());
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
__ InvokeFunction(r1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -3199,12 +3269,12 @@ void LCodeGen::DoCallNamed(LCallNamed* instr) {
void LCodeGen::DoCallFunction(LCallFunction* instr) {
+ ASSERT(ToRegister(instr->function()).is(r1));
ASSERT(ToRegister(instr->result()).is(r0));
int arity = instr->arity();
- CallFunctionStub stub(arity, RECEIVER_MIGHT_BE_IMPLICIT);
+ CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ Drop(1);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
@@ -3258,19 +3328,36 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
// Do the store.
+ HType type = instr->hydrogen()->value()->type();
+ SmiCheck check_needed =
+ type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
if (instr->is_in_object()) {
__ str(value, FieldMemOperand(object, offset));
- if (instr->needs_write_barrier()) {
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
// Update the write barrier for the object for in-object properties.
- __ RecordWrite(object, Operand(offset), value, scratch);
+ __ RecordWriteField(object,
+ offset,
+ value,
+ scratch,
+ kLRHasBeenSaved,
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
}
} else {
__ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
__ str(value, FieldMemOperand(scratch, offset));
- if (instr->needs_write_barrier()) {
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
// Update the write barrier for the properties array.
// object is used as a scratch register.
- __ RecordWrite(scratch, Operand(offset), value, object);
+ __ RecordWriteField(scratch,
+ offset,
+ value,
+ object,
+ kLRHasBeenSaved,
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
}
}
}
@@ -3282,7 +3369,7 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
// Name is always in r2.
__ mov(r2, Operand(instr->name()));
- Handle<Code> ic = instr->strict_mode()
+ Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
@@ -3301,6 +3388,13 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
Register scratch = scratch0();
+ // This instruction cannot handle the FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
+ // conversion, so it deopts in that case.
+ if (instr->hydrogen()->ValueNeedsSmiCheck()) {
+ __ tst(value, Operand(kSmiTagMask));
+ DeoptimizeIf(ne, instr->environment());
+ }
+
// Do the store.
if (instr->key()->IsConstantOperand()) {
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
@@ -3314,9 +3408,18 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
}
if (instr->hydrogen()->NeedsWriteBarrier()) {
+ HType type = instr->hydrogen()->value()->type();
+ SmiCheck check_needed =
+ type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
// Compute address of modified element and store it into key register.
- __ add(key, scratch, Operand(FixedArray::kHeaderSize));
- __ RecordWrite(elements, key, value);
+ __ add(key, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
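+ // Subtracting kHeapObjectTag turns the tagged pointer into the raw slot
+ // address that RecordWrite expects in the address register.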
+ __ RecordWrite(elements,
+ key,
+ value,
+ kLRHasBeenSaved,
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
}
}
@@ -3417,6 +3520,7 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -3431,13 +3535,55 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
ASSERT(ToRegister(instr->key()).is(r1));
ASSERT(ToRegister(instr->value()).is(r0));
- Handle<Code> ic = instr->strict_mode()
+ Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
+void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
+ Register object_reg = ToRegister(instr->object());
+ Register new_map_reg = ToRegister(instr->new_map_reg());
+ Register scratch = scratch0();
+
+ Handle<Map> from_map = instr->original_map();
+ Handle<Map> to_map = instr->transitioned_map();
+ ElementsKind from_kind = from_map->elements_kind();
+ ElementsKind to_kind = to_map->elements_kind();
+
+ Label not_applicable;
+ __ ldr(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+ __ cmp(scratch, Operand(from_map));
+ __ b(ne, &not_applicable);
+ __ mov(new_map_reg, Operand(to_map));
+ if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
+ __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+ // Write barrier.
+ __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
+ scratch, kLRHasBeenSaved, kDontSaveFPRegs);
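+ // No elements conversion is needed for this transition: every smi is also
+ // a valid FAST_ELEMENTS value, so swapping the map is sufficient.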
+ } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
+ to_kind == FAST_DOUBLE_ELEMENTS) {
+ Register fixed_object_reg = ToRegister(instr->temp_reg());
+ ASSERT(fixed_object_reg.is(r2));
+ ASSERT(new_map_reg.is(r3));
+ __ mov(fixed_object_reg, object_reg);
+ CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
+ RelocInfo::CODE_TARGET, instr);
+ } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
+ Register fixed_object_reg = ToRegister(instr->temp_reg());
+ ASSERT(fixed_object_reg.is(r2));
+ ASSERT(new_map_reg.is(r3));
+ __ mov(fixed_object_reg, object_reg);
+ CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
+ RelocInfo::CODE_TARGET, instr);
+ } else {
+ UNREACHABLE();
+ }
+ __ bind(&not_applicable);
+}
+
+
void LCodeGen::DoStringAdd(LStringAdd* instr) {
__ push(ToRegister(instr->left()));
__ push(ToRegister(instr->right()));
@@ -3452,87 +3598,19 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
+ virtual LInstruction* instr() { return instr_; }
private:
LStringCharCodeAt* instr_;
};
- Register string = ToRegister(instr->string());
- Register index = ToRegister(instr->index());
- Register result = ToRegister(instr->result());
-
DeferredStringCharCodeAt* deferred =
new DeferredStringCharCodeAt(this, instr);
- // Fetch the instance type of the receiver into result register.
- __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
- __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
-
- // We need special handling for indirect strings.
- Label check_sequential;
- __ tst(result, Operand(kIsIndirectStringMask));
- __ b(eq, &check_sequential);
-
- // Dispatch on the indirect string shape: slice or cons.
- Label cons_string;
- __ tst(result, Operand(kSlicedNotConsMask));
- __ b(eq, &cons_string);
-
- // Handle slices.
- Label indirect_string_loaded;
- __ ldr(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
- __ add(index, index, Operand(result, ASR, kSmiTagSize));
- __ ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
- __ jmp(&indirect_string_loaded);
-
- // Handle conses.
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we would rather go to the runtime system now to flatten
- // the string.
- __ bind(&cons_string);
- __ ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
- __ LoadRoot(ip, Heap::kEmptyStringRootIndex);
- __ cmp(result, ip);
- __ b(ne, deferred->entry());
- // Get the first of the two strings and load its instance type.
- __ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
-
- __ bind(&indirect_string_loaded);
- __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
- __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
-
- // Check whether the string is sequential. The only non-sequential
- // shapes we support have just been unwrapped above.
- __ bind(&check_sequential);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(result, Operand(kStringRepresentationMask));
- __ b(ne, deferred->entry());
-
- // Dispatch on the encoding: ASCII or two-byte.
- Label ascii_string;
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ tst(result, Operand(kStringEncodingMask));
- __ b(ne, &ascii_string);
-
- // Two-byte string.
- // Load the two-byte character code into the result register.
- Label done;
- __ add(result,
- string,
- Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- __ ldrh(result, MemOperand(result, index, LSL, 1));
- __ jmp(&done);
-
- // ASCII string.
- // Load the byte into the result register.
- __ bind(&ascii_string);
- __ add(result,
- string,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ ldrb(result, MemOperand(result, index));
-
- __ bind(&done);
+ StringCharLoadGenerator::Generate(masm(),
+ ToRegister(instr->string()),
+ ToRegister(instr->index()),
+ ToRegister(instr->result()),
+ deferred->entry());
__ bind(deferred->exit());
}
@@ -3575,6 +3653,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
+ virtual LInstruction* instr() { return instr_; }
private:
LStringCharFromCode* instr_;
};
@@ -3646,6 +3725,7 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
+ virtual LInstruction* instr() { return instr_; }
private:
LNumberTagI* instr_;
};
@@ -3711,6 +3791,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
+ virtual LInstruction* instr() { return instr_; }
private:
LNumberTagD* instr_;
};
@@ -3819,16 +3900,6 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
}
-class DeferredTaggedToI: public LDeferredCode {
- public:
- DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
- private:
- LTaggedToI* instr_;
-};
-
-
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
Register input_reg = ToRegister(instr->InputAt(0));
Register scratch1 = scratch0();
@@ -3911,6 +3982,16 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+ class DeferredTaggedToI: public LDeferredCode {
+ public:
+ DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LTaggedToI* instr_;
+ };
+
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
ASSERT(input->Equals(instr->result()));
@@ -4150,10 +4231,15 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
+ Handle<FixedArray> constant_elements = instr->hydrogen()->constant_elements();
+ ASSERT_EQ(2, constant_elements->length());
+ ElementsKind constant_elements_kind =
+ static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+
__ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
__ mov(r2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ mov(r1, Operand(instr->hydrogen()->constant_elements()));
+ __ mov(r1, Operand(constant_elements));
__ Push(r3, r2, r1);
// Pick the right runtime function or stub to call.
@@ -4170,26 +4256,106 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
} else {
FastCloneShallowArrayStub::Mode mode =
- FastCloneShallowArrayStub::CLONE_ELEMENTS;
+ constant_elements_kind == FAST_DOUBLE_ELEMENTS
+ ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+ : FastCloneShallowArrayStub::CLONE_ELEMENTS;
FastCloneShallowArrayStub stub(mode, length);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
}
-void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
+void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
+ Register result,
+ Register source,
+ int* offset) {
+ ASSERT(!source.is(r2));
+ ASSERT(!result.is(r2));
+
+ // Increase the offset so that subsequent objects end up right after
+ // this one.
+ int current_offset = *offset;
+ int size = object->map()->instance_size();
+ *offset += size;
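+ // Bumping *offset up front reserves this object's slice of the one big
+ // allocation; the recursive calls below place nested objects right after.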
+
+ // Copy object header.
+ ASSERT(object->properties()->length() == 0);
+ ASSERT(object->elements()->length() == 0 ||
+ object->elements()->map() == isolate()->heap()->fixed_cow_array_map());
+ int inobject_properties = object->map()->inobject_properties();
+ int header_size = size - inobject_properties * kPointerSize;
+ for (int i = 0; i < header_size; i += kPointerSize) {
+ __ ldr(r2, FieldMemOperand(source, i));
+ __ str(r2, FieldMemOperand(result, current_offset + i));
+ }
+
+ // Copy in-object properties.
+ for (int i = 0; i < inobject_properties; i++) {
+ int total_offset = current_offset + object->GetInObjectPropertyOffset(i);
+ Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
+ if (value->IsJSObject()) {
+ Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+ __ add(r2, result, Operand(*offset));
+ __ str(r2, FieldMemOperand(result, total_offset));
+ LoadHeapObject(source, value_object);
+ EmitDeepCopy(value_object, result, source, offset);
+ } else if (value->IsHeapObject()) {
+ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
+ __ str(r2, FieldMemOperand(result, total_offset));
+ } else {
+ __ mov(r2, Operand(value));
+ __ str(r2, FieldMemOperand(result, total_offset));
+ }
+ }
+}
+
+
+void LCodeGen::DoObjectLiteralFast(LObjectLiteralFast* instr) {
+ int size = instr->hydrogen()->total_size();
+
+ // Allocate all objects that are part of the literal in one big
+ // allocation. This avoids multiple limit checks.
+ Label allocated, runtime_allocate;
+ __ AllocateInNewSpace(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
+ __ jmp(&allocated);
+
+ __ bind(&runtime_allocate);
+ __ mov(r0, Operand(Smi::FromInt(size)));
+ __ push(r0);
+ CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+
+ __ bind(&allocated);
+ int offset = 0;
+ LoadHeapObject(r1, instr->hydrogen()->boilerplate());
+ EmitDeepCopy(instr->hydrogen()->boilerplate(), r0, r1, &offset);
+ ASSERT_EQ(size, offset);
+}
+
+
+void LCodeGen::DoObjectLiteralGeneric(LObjectLiteralGeneric* instr) {
+ Handle<FixedArray> constant_properties =
+ instr->hydrogen()->constant_properties();
+
__ ldr(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r4, FieldMemOperand(r4, JSFunction::kLiteralsOffset));
__ mov(r3, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ mov(r2, Operand(instr->hydrogen()->constant_properties()));
- __ mov(r1, Operand(Smi::FromInt(instr->hydrogen()->fast_elements() ? 1 : 0)));
+ __ mov(r2, Operand(constant_properties));
+ int flags = instr->hydrogen()->fast_elements()
+ ? ObjectLiteral::kFastElements
+ : ObjectLiteral::kNoFlags;
+ __ mov(r1, Operand(Smi::FromInt(flags)));
__ Push(r4, r3, r2, r1);
// Pick the right runtime function to call.
+ int properties_count = constant_properties->length() / 2;
if (instr->hydrogen()->depth() > 1) {
CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
- } else {
+ } else if (flags != ObjectLiteral::kFastElements ||
+ properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
+ } else {
+ FastCloneShallowObjectStub stub(properties_count);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
}
@@ -4262,8 +4428,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
Handle<SharedFunctionInfo> shared_info = instr->shared_info();
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && shared_info->num_literals() == 0) {
- FastNewClosureStub stub(
- shared_info->strict_mode() ? kStrictMode : kNonStrictMode);
+ FastNewClosureStub stub(shared_info->language_mode());
__ mov(r1, Operand(shared_info));
__ push(r1);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
@@ -4296,8 +4461,9 @@ void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
false_label,
input,
instr->type_literal());
-
- EmitBranch(true_block, false_block, final_branch_condition);
+ if (final_branch_condition != kNoCondition) {
+ EmitBranch(true_block, false_block, final_branch_condition);
+ }
}
@@ -4343,10 +4509,12 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
final_branch_condition = ne;
} else if (type_name->Equals(heap()->function_symbol())) {
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ JumpIfSmi(input, false_label);
- __ CompareObjectType(input, input, scratch,
- FIRST_CALLABLE_SPEC_OBJECT_TYPE);
- final_branch_condition = ge;
+ __ CompareObjectType(input, scratch, input, JS_FUNCTION_TYPE);
+ __ b(eq, true_label);
+ __ cmp(input, Operand(JS_FUNCTION_PROXY_TYPE));
+ final_branch_condition = eq;
} else if (type_name->Equals(heap()->object_symbol())) {
__ JumpIfSmi(input, false_label);
@@ -4365,9 +4533,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
final_branch_condition = eq;
} else {
- final_branch_condition = ne;
__ b(false_label);
- // A dead branch instruction will be generated after this point.
}
return final_branch_condition;
@@ -4403,9 +4569,29 @@ void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
}
+void LCodeGen::EnsureSpaceForLazyDeopt() {
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ int patch_size = Deoptimizer::patch_size();
+ if (current_pc < last_lazy_deopt_pc_ + patch_size) {
+ int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
+ ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
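+ // Worked example with assumed sizes: if the patchable call sequence is 3
+ // instructions (12 bytes) and only one instruction (4 bytes) has been
+ // emitted since the last lazy-deopt point, the loop emits two nops.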
+ while (padding_size > 0) {
+ __ nop();
+ padding_size -= Assembler::kInstrSize;
+ }
+ }
+ last_lazy_deopt_pc_ = masm()->pc_offset();
+}
+
+
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- // No code for lazy bailout instruction. Used to capture environment after a
- // call for populating the safepoint data with deoptimization data.
+ EnsureSpaceForLazyDeopt();
+ ASSERT(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}
@@ -4422,12 +4608,9 @@ void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
__ Push(object, key, strict);
ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
LPointerMap* pointers = instr->pointer_map();
- LEnvironment* env = instr->deoptimization_environment();
RecordPosition(pointers->position());
- RegisterEnvironmentForDeoptimization(env);
- SafepointGenerator safepoint_generator(this,
- pointers,
- env->deoptimization_index());
+ SafepointGenerator safepoint_generator(
+ this, pointers, Safepoint::kLazyDeopt);
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
}
@@ -4438,27 +4621,20 @@ void LCodeGen::DoIn(LIn* instr) {
__ Push(key, obj);
ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
LPointerMap* pointers = instr->pointer_map();
- LEnvironment* env = instr->deoptimization_environment();
RecordPosition(pointers->position());
- RegisterEnvironmentForDeoptimization(env);
- SafepointGenerator safepoint_generator(this,
- pointers,
- env->deoptimization_index());
+ SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
__ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
}
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
- {
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
- RegisterLazyDeoptimization(
- instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- }
-
- // The gap code includes the restoring of the safepoint registers.
- int pc = masm()->pc_offset();
- safepoints_.SetPcAfterGap(pc);
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+ RecordSafepointWithLazyDeopt(
+ instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ ASSERT(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}
@@ -4468,10 +4644,15 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
+ virtual LInstruction* instr() { return instr_; }
private:
LStackCheck* instr_;
};
+ ASSERT(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ // There is no LLazyBailout instruction for stack-checks. We have to
+ // prepare for lazy deoptimization explicitly here.
if (instr->hydrogen()->is_function_entry()) {
// Perform stack overflow check.
Label done;
@@ -4480,7 +4661,10 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
__ b(hs, &done);
StackCheckStub stub;
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ EnsureSpaceForLazyDeopt();
__ bind(&done);
+ RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
} else {
ASSERT(instr->hydrogen()->is_backwards_branch());
// Perform stack overflow check if this goto needs it before jumping.
@@ -4489,8 +4673,13 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(lo, deferred_stack_check->entry());
+ EnsureSpaceForLazyDeopt();
__ bind(instr->done_label());
deferred_stack_check->SetExit(instr->done_label());
+ RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+ // Don't record a deoptimization index for the safepoint here.
+ // This will be done explicitly when emitting the call and the safepoint in
+ // the deferred code.
}
}
@@ -4506,7 +4695,7 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
// If the environment were already registered, we would have no way of
// backpatching it with the spill slot operands.
ASSERT(!environment->HasBeenRegistered());
- RegisterEnvironmentForDeoptimization(environment);
+ RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
ASSERT(osr_pc_offset_ == -1);
osr_pc_offset_ = masm()->pc_offset();
}
diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h
index ead8489034..363449a509 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.h
+++ b/deps/v8/src/arm/lithium-codegen-arm.h
@@ -58,6 +58,7 @@ class LCodeGen BASE_EMBEDDED {
status_(UNUSED),
deferred_(8),
osr_pc_offset_(-1),
+ last_lazy_deopt_pc_(0),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
@@ -86,6 +87,7 @@ class LCodeGen BASE_EMBEDDED {
SwVfpRegister flt_scratch,
DoubleRegister dbl_scratch);
int ToInteger32(LConstantOperand* op) const;
+ double ToDouble(LConstantOperand* op) const;
Operand ToOperand(LOperand* op);
MemOperand ToMemOperand(LOperand* op) const;
// Returns a MemOperand pointing to the high word of a DoubleStackSlot.
@@ -111,8 +113,8 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
- void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check);
+ void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+ Label* map_check);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
@@ -139,8 +141,8 @@ class LCodeGen BASE_EMBEDDED {
bool is_done() const { return status_ == DONE; }
bool is_aborted() const { return status_ == ABORTED; }
- int strict_mode_flag() const {
- return info()->is_strict_mode() ? kStrictMode : kNonStrictMode;
+ StrictModeFlag strict_mode_flag() const {
+ return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
}
LChunk* chunk() const { return chunk_; }
@@ -206,7 +208,7 @@ class LCodeGen BASE_EMBEDDED {
LInstruction* instr);
// Generate a direct call to a known function. Expects the function
- // to be in edi.
+ // to be in r1.
void CallKnownFunction(Handle<JSFunction> function,
int arity,
LInstruction* instr,
@@ -214,10 +216,11 @@ class LCodeGen BASE_EMBEDDED {
void LoadHeapObject(Register result, Handle<HeapObject> object);
- void RegisterLazyDeoptimization(LInstruction* instr,
- SafepointMode safepoint_mode);
+ void RecordSafepointWithLazyDeopt(LInstruction* instr,
+ SafepointMode safepoint_mode);
- void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
+ void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+ Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition cc, LEnvironment* environment);
void AddToTranslation(Translation* translation,
@@ -239,6 +242,7 @@ class LCodeGen BASE_EMBEDDED {
void DoMathSqrt(LUnaryMathOperation* instr);
void DoMathPowHalf(LUnaryMathOperation* instr);
void DoMathLog(LUnaryMathOperation* instr);
+ void DoMathTan(LUnaryMathOperation* instr);
void DoMathCos(LUnaryMathOperation* instr);
void DoMathSin(LUnaryMathOperation* instr);
@@ -246,24 +250,20 @@ class LCodeGen BASE_EMBEDDED {
void RecordSafepoint(LPointerMap* pointers,
Safepoint::Kind kind,
int arguments,
- int deoptimization_index);
- void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
- void RecordSafepoint(int deoptimization_index);
+ Safepoint::DeoptMode mode);
+ void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
+ void RecordSafepoint(Safepoint::DeoptMode mode);
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
- int deoptimization_index);
+ Safepoint::DeoptMode mode);
void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
int arguments,
- int deoptimization_index);
+ Safepoint::DeoptMode mode);
void RecordPosition(int position);
- int LastSafepointEnd() {
- return static_cast<int>(safepoints_.GetPcAfterGap());
- }
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
void EmitBranch(int left_block, int right_block, Condition cc);
- void EmitCmpI(LOperand* left, LOperand* right);
void EmitNumberUntagD(Register input,
DoubleRegister result,
bool deoptimize_on_undefined,
@@ -272,8 +272,10 @@ class LCodeGen BASE_EMBEDDED {
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
- Condition EmitTypeofIs(Label* true_label, Label* false_label,
- Register input, Handle<String> type_name);
+ Condition EmitTypeofIs(Label* true_label,
+ Label* false_label,
+ Register input,
+ Handle<String> type_name);
// Emits optimized code for %_IsObject(x). Preserves input register.
// Returns the condition on which a final split to
@@ -283,6 +285,13 @@ class LCodeGen BASE_EMBEDDED {
Label* is_not_object,
Label* is_object);
+ // Emits optimized code for %_IsString(x). Preserves input register.
+ // Returns the condition on which a final split to
+ // true and false label should be made, to optimize fallthrough.
+ Condition EmitIsString(Register input,
+ Register temp1,
+ Label* is_not_string);
+
// Emits optimized code for %_IsConstructCall().
// Caller should branch on equal condition.
void EmitIsConstructCall(Register temp1, Register temp2);
@@ -292,6 +301,13 @@ class LCodeGen BASE_EMBEDDED {
Handle<Map> type,
Handle<String> name);
+ // Emits optimized code to deep-copy the contents of statically known
+ // object graphs (e.g. object literal boilerplate).
+ void EmitDeepCopy(Handle<JSObject> object,
+ Register result,
+ Register source,
+ int* offset);
+
struct JumpTableEntry {
explicit inline JumpTableEntry(Address entry)
: label(),
@@ -300,6 +316,8 @@ class LCodeGen BASE_EMBEDDED {
Address address;
};
+ void EnsureSpaceForLazyDeopt();
+
LChunk* const chunk_;
MacroAssembler* const masm_;
CompilationInfo* const info_;
@@ -316,6 +334,7 @@ class LCodeGen BASE_EMBEDDED {
TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
+ int last_lazy_deopt_pc_;
// Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code.
@@ -376,16 +395,20 @@ class LCodeGen BASE_EMBEDDED {
class LDeferredCode: public ZoneObject {
public:
explicit LDeferredCode(LCodeGen* codegen)
- : codegen_(codegen), external_exit_(NULL) {
+ : codegen_(codegen),
+ external_exit_(NULL),
+ instruction_index_(codegen->current_instruction_) {
codegen->AddDeferredCode(this);
}
virtual ~LDeferredCode() { }
virtual void Generate() = 0;
+ virtual LInstruction* instr() = 0;
void SetExit(Label *exit) { external_exit_ = exit; }
Label* entry() { return &entry_; }
Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+ int instruction_index() const { return instruction_index_; }
protected:
LCodeGen* codegen() const { return codegen_; }
@@ -396,6 +419,7 @@ class LDeferredCode: public ZoneObject {
Label entry_;
Label exit_;
Label* external_exit_;
+ int instruction_index_;
};
} } // namespace v8::internal
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index f37f310218..4fc3b03abf 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -42,7 +42,8 @@ namespace internal {
MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
- allow_stub_calls_(true) {
+ allow_stub_calls_(true),
+ has_frame_(false) {
if (isolate() != NULL) {
code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
isolate());
@@ -406,32 +407,6 @@ void MacroAssembler::StoreRoot(Register source,
}
-void MacroAssembler::RecordWriteHelper(Register object,
- Register address,
- Register scratch) {
- if (emit_debug_code()) {
- // Check that the object is not in new space.
- Label not_in_new_space;
- InNewSpace(object, scratch, ne, &not_in_new_space);
- Abort("new-space object passed to RecordWriteHelper");
- bind(&not_in_new_space);
- }
-
- // Calculate page address.
- Bfc(object, 0, kPageSizeBits);
-
- // Calculate region number.
- Ubfx(address, address, Page::kRegionSizeLog2,
- kPageSizeBits - Page::kRegionSizeLog2);
-
- // Mark region dirty.
- ldr(scratch, MemOperand(object, Page::kDirtyFlagOffset));
- mov(ip, Operand(1));
- orr(scratch, scratch, Operand(ip, LSL, address));
- str(scratch, MemOperand(object, Page::kDirtyFlagOffset));
-}
-
-
void MacroAssembler::InNewSpace(Register object,
Register scratch,
Condition cond,
@@ -443,38 +418,52 @@ void MacroAssembler::InNewSpace(Register object,
}
-// Will clobber 4 registers: object, offset, scratch, ip. The
-// register 'object' contains a heap object pointer. The heap object
-// tag is shifted away.
-void MacroAssembler::RecordWrite(Register object,
- Operand offset,
- Register scratch0,
- Register scratch1) {
- // The compiled code assumes that record write doesn't change the
- // context register, so we check that none of the clobbered
- // registers are cp.
- ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp));
-
+void MacroAssembler::RecordWriteField(
+ Register object,
+ int offset,
+ Register value,
+ Register dst,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis.
Label done;
- // First, test that the object is not in the new space. We cannot set
- // region marks for new space pages.
- InNewSpace(object, scratch0, eq, &done);
+ // Skip barrier if writing a smi.
+ if (smi_check == INLINE_SMI_CHECK) {
+ JumpIfSmi(value, &done);
+ }
- // Add offset into the object.
- add(scratch0, object, offset);
+ // Although the object register is tagged, the offset is relative to the start
+ // of the object, so the offset must be a multiple of kPointerSize.
+ ASSERT(IsAligned(offset, kPointerSize));
- // Record the actual write.
- RecordWriteHelper(object, scratch0, scratch1);
+ add(dst, object, Operand(offset - kHeapObjectTag));
+ if (emit_debug_code()) {
+ Label ok;
+ tst(dst, Operand((1 << kPointerSizeLog2) - 1));
+ b(eq, &ok);
+ stop("Unaligned cell in write barrier");
+ bind(&ok);
+ }
+
+ RecordWrite(object,
+ dst,
+ value,
+ lr_status,
+ save_fp,
+ remembered_set_action,
+ OMIT_SMI_CHECK);
bind(&done);
- // Clobber all input registers when running with the debug-code flag
+ // Zap the clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- mov(object, Operand(BitCast<int32_t>(kZapValue)));
- mov(scratch0, Operand(BitCast<int32_t>(kZapValue)));
- mov(scratch1, Operand(BitCast<int32_t>(kZapValue)));
+ mov(value, Operand(BitCast<int32_t>(kZapValue + 4)));
+ mov(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
}
}
@@ -484,29 +473,103 @@ void MacroAssembler::RecordWrite(Register object,
// tag is shifted away.
void MacroAssembler::RecordWrite(Register object,
Register address,
- Register scratch) {
+ Register value,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode fp_mode,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
// The compiled code assumes that record write doesn't change the
// context register, so we check that none of the clobbered
// registers are cp.
- ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp));
+ ASSERT(!address.is(cp) && !value.is(cp));
+
+ if (FLAG_debug_code) {
+ Label ok;
+ ldr(ip, MemOperand(address));
+ cmp(ip, value);
+ b(eq, &ok);
+ stop("Wrong address or value passed to RecordWrite");
+ bind(&ok);
+ }
Label done;
- // First, test that the object is not in the new space. We cannot set
- // region marks for new space pages.
- InNewSpace(object, scratch, eq, &done);
+ if (smi_check == INLINE_SMI_CHECK) {
+ ASSERT_EQ(0, kSmiTag);
+ tst(value, Operand(kSmiTagMask));
+ b(eq, &done);
+ }
+
+ CheckPageFlag(value,
+ value, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ eq,
+ &done);
+ CheckPageFlag(object,
+ value, // Used as scratch.
+ MemoryChunk::kPointersFromHereAreInterestingMask,
+ eq,
+ &done);
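+ // Neither page-flag check lets us skip the barrier: the value's page is
+ // interesting to the collector and the object's page records outgoing
+ // pointers, so the stub below must run.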
// Record the actual write.
- RecordWriteHelper(object, address, scratch);
+ if (lr_status == kLRHasNotBeenSaved) {
+ push(lr);
+ }
+ RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
+ CallStub(&stub);
+ if (lr_status == kLRHasNotBeenSaved) {
+ pop(lr);
+ }
bind(&done);
- // Clobber all input registers when running with the debug-code flag
+ // Zap the clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- mov(object, Operand(BitCast<int32_t>(kZapValue)));
- mov(address, Operand(BitCast<int32_t>(kZapValue)));
- mov(scratch, Operand(BitCast<int32_t>(kZapValue)));
+ mov(address, Operand(BitCast<int32_t>(kZapValue + 12)));
+ mov(value, Operand(BitCast<int32_t>(kZapValue + 16)));
+ }
+}
+
+
+void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
+ Register address,
+ Register scratch,
+ SaveFPRegsMode fp_mode,
+ RememberedSetFinalAction and_then) {
+ Label done;
+ if (FLAG_debug_code) {
+ Label ok;
+ JumpIfNotInNewSpace(object, scratch, &ok);
+ stop("Remembered set pointer is in new space");
+ bind(&ok);
+ }
+ // Load store buffer top.
+ ExternalReference store_buffer =
+ ExternalReference::store_buffer_top(isolate());
+ mov(ip, Operand(store_buffer));
+ ldr(scratch, MemOperand(ip));
+ // Store pointer to buffer and increment buffer top.
+ str(address, MemOperand(scratch, kPointerSize, PostIndex));
+ // Write back new top of buffer.
+ str(scratch, MemOperand(ip));
+ // Check whether the buffer has filled up (the overflow bit is then set).
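+ // This assumes the store buffer is aligned so that passing its end sets
+ // the overflow bit in the top pointer, letting a single tst detect a full
+ // buffer without a separate limit check.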
+ tst(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
+ if (and_then == kFallThroughAtEnd) {
+ b(eq, &done);
+ } else {
+ ASSERT(and_then == kReturnAtEnd);
+ Ret(eq);
+ }
+ push(lr);
+ StoreBufferOverflowStub store_buffer_overflow =
+ StoreBufferOverflowStub(fp_mode);
+ CallStub(&store_buffer_overflow);
+ pop(lr);
+ bind(&done);
+ if (and_then == kReturnAtEnd) {
+ Ret();
}
}
@@ -961,6 +1024,9 @@ void MacroAssembler::InvokeCode(Register code,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
Label done;
InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
@@ -988,6 +1054,9 @@ void MacroAssembler::InvokeCode(Handle<Code> code,
RelocInfo::Mode rmode,
InvokeFlag flag,
CallKind call_kind) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
Label done;
InvokePrologue(expected, actual, code, no_reg, &done, flag,
@@ -1011,6 +1080,9 @@ void MacroAssembler::InvokeFunction(Register fun,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
// Contract with called JS functions requires that function is passed in r1.
ASSERT(fun.is(r1));
@@ -1031,28 +1103,23 @@ void MacroAssembler::InvokeFunction(Register fun,
}
-void MacroAssembler::InvokeFunction(JSFunction* function,
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
const ParameterCount& actual,
InvokeFlag flag,
CallKind call_kind) {
- ASSERT(function->is_compiled());
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
// Get the function and setup the context.
- mov(r1, Operand(Handle<JSFunction>(function)));
+ mov(r1, Operand(function));
ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
- // Invoke the cached code.
- Handle<Code> code(function->code());
ParameterCount expected(function->shared()->formal_parameter_count());
- if (V8::UseCrankshaft()) {
- // TODO(kasperl): For now, we always call indirectly through the
- // code field in the function to allow recompilation to take effect
- // without changing any of the call sites.
- ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
- InvokeCode(r3, expected, actual, flag, NullCallWrapper(), call_kind);
- } else {
- InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag, call_kind);
- }
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+ InvokeCode(r3, expected, actual, flag, NullCallWrapper(), call_kind);
}
@@ -1090,56 +1157,58 @@ void MacroAssembler::IsObjectJSStringType(Register object,
#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
- ASSERT(allow_stub_calls());
mov(r0, Operand(0, RelocInfo::NONE));
mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
CEntryStub ces(1);
+ ASSERT(AllowThisStubCall(&ces));
Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
#endif
void MacroAssembler::PushTryHandler(CodeLocation try_location,
- HandlerType type) {
+ HandlerType type,
+ int handler_index) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
-
- // The pc (return address) is passed in register lr.
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+ // For the JSEntry handler, we must preserve r0-r4; r5-r7 are available.
+ // We will build up the handler from the bottom by pushing on the stack.
+ // First compute the state.
+ unsigned state = StackHandler::OffsetField::encode(handler_index);
if (try_location == IN_JAVASCRIPT) {
- if (type == TRY_CATCH_HANDLER) {
- mov(r3, Operand(StackHandler::TRY_CATCH));
- } else {
- mov(r3, Operand(StackHandler::TRY_FINALLY));
- }
- stm(db_w, sp, r3.bit() | cp.bit() | fp.bit() | lr.bit());
- // Save the current handler as the next handler.
- mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
- ldr(r1, MemOperand(r3));
- push(r1);
- // Link this handler as the new current one.
- str(sp, MemOperand(r3));
+ state |= (type == TRY_CATCH_HANDLER)
+ ? StackHandler::KindField::encode(StackHandler::TRY_CATCH)
+ : StackHandler::KindField::encode(StackHandler::TRY_FINALLY);
} else {
- // Must preserve r0-r4, r5-r7 are available.
ASSERT(try_location == IN_JS_ENTRY);
- // The frame pointer does not point to a JS frame so we save NULL
- // for fp. We expect the code throwing an exception to check fp
- // before dereferencing it to restore the context.
- mov(r5, Operand(StackHandler::ENTRY)); // State.
- mov(r6, Operand(Smi::FromInt(0))); // Indicates no context.
- mov(r7, Operand(0, RelocInfo::NONE)); // NULL frame pointer.
- stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | lr.bit());
- // Save the current handler as the next handler.
- mov(r7, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
- ldr(r6, MemOperand(r7));
- push(r6);
- // Link this handler as the new current one.
- str(sp, MemOperand(r7));
+ state |= StackHandler::KindField::encode(StackHandler::ENTRY);
}
+
+ // Set up the code object (r5) and the state (r6) for pushing.
+ mov(r5, Operand(CodeObject()));
+ mov(r6, Operand(state));
+
+ // Push the frame pointer, context, state, and code object.
+ if (try_location == IN_JAVASCRIPT) {
+ stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | fp.bit());
+ } else {
+ mov(r7, Operand(Smi::FromInt(0))); // Indicates no context.
+ mov(ip, Operand(0, RelocInfo::NONE)); // NULL frame pointer.
+ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | ip.bit());
+ }
+
+ // Link the current handler as the next handler.
+ mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+ ldr(r5, MemOperand(r6));
+ push(r5);
+ // Set this new handler as the current one.
+ str(sp, MemOperand(r6));
}
@@ -1152,42 +1221,50 @@ void MacroAssembler::PopTryHandler() {
}
+void MacroAssembler::JumpToHandlerEntry() {
+ // Compute the handler entry address and jump to it. The handler table is
+ // a fixed array of (smi-tagged) code offsets.
+ // r0 = exception, r1 = code object, r2 = state.
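+ // The sequence: load the handler table, shift the kind bits out of the
+ // state to recover the index, fetch the smi-tagged code offset, untag it
+ // with an arithmetic shift, and branch into the code object.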
+ ldr(r3, FieldMemOperand(r1, Code::kHandlerTableOffset)); // Handler table.
+ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ mov(r2, Operand(r2, LSR, StackHandler::kKindWidth)); // Handler index.
+ ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2)); // Smi-tagged offset.
+ add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start.
+ add(pc, r1, Operand(r2, ASR, kSmiTagSize)); // Jump.
+}
+
+
void MacroAssembler::Throw(Register value) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
- // r0 is expected to hold the exception.
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+ // The exception is expected in r0.
if (!value.is(r0)) {
mov(r0, value);
}
-
- // Drop the sp to the top of the handler.
+ // Drop the stack pointer to the top of the top handler.
mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
ldr(sp, MemOperand(r3));
-
// Restore the next handler.
pop(r2);
str(r2, MemOperand(r3));
- // Restore context and frame pointer, discard state (r3).
- ldm(ia_w, sp, r3.bit() | cp.bit() | fp.bit());
+ // Get the code object (r1) and state (r2). Restore the context and frame
+ // pointer.
+ ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit());
// If the handler is a JS frame, restore the context to the frame.
- // (r3 == ENTRY) == (fp == 0) == (cp == 0), so we could test any
- // of them.
- cmp(r3, Operand(StackHandler::ENTRY));
+ // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
+ // or cp.
+ tst(cp, cp);
str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
-#ifdef DEBUG
- if (emit_debug_code()) {
- mov(lr, Operand(pc));
- }
-#endif
- pop(pc);
+ JumpToHandlerEntry();
}
@@ -1196,41 +1273,16 @@ void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
- // r0 is expected to hold the exception.
- if (!value.is(r0)) {
- mov(r0, value);
- }
-
- // Drop sp to the top stack handler.
- mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
- ldr(sp, MemOperand(r3));
-
- // Unwind the handlers until the ENTRY handler is found.
- Label loop, done;
- bind(&loop);
- // Load the type of the current stack handler.
- const int kStateOffset = StackHandlerConstants::kStateOffset;
- ldr(r2, MemOperand(sp, kStateOffset));
- cmp(r2, Operand(StackHandler::ENTRY));
- b(eq, &done);
- // Fetch the next handler in the list.
- const int kNextOffset = StackHandlerConstants::kNextOffset;
- ldr(sp, MemOperand(sp, kNextOffset));
- jmp(&loop);
- bind(&done);
-
- // Set the top handler address to next handler past the current ENTRY handler.
- pop(r2);
- str(r2, MemOperand(r3));
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+ // The exception is expected in r0.
if (type == OUT_OF_MEMORY) {
// Set external caught exception to false.
- ExternalReference external_caught(
- Isolate::kExternalCaughtExceptionAddress, isolate());
+ ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
+ isolate());
mov(r0, Operand(false, RelocInfo::NONE));
mov(r2, Operand(external_caught));
str(r0, MemOperand(r2));
@@ -1241,22 +1293,34 @@ void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate())));
str(r0, MemOperand(r2));
+ } else if (!value.is(r0)) {
+ mov(r0, value);
}
- // Stack layout at this point. See also StackHandlerConstants.
- // sp -> state (ENTRY)
- // cp
- // fp
- // lr
+ // Drop the stack pointer to the top of the top stack handler.
+ mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+ ldr(sp, MemOperand(r3));
+
+ // Unwind the handlers until the ENTRY handler is found.
+ Label fetch_next, check_kind;
+ jmp(&check_kind);
+ bind(&fetch_next);
+ ldr(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
+
+ bind(&check_kind);
+ STATIC_ASSERT(StackHandler::ENTRY == 0);
+ ldr(r2, MemOperand(sp, StackHandlerConstants::kStateOffset));
+ tst(r2, Operand(StackHandler::KindField::kMask));
+ b(ne, &fetch_next);
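+  // ENTRY == 0 (asserted above), so the tst sets eq only for a JS entry
+  // handler; any nonzero kind keeps unwinding.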
+
+ // Set the top handler address to next handler past the top ENTRY handler.
+ pop(r2);
+ str(r2, MemOperand(r3));
+ // Get the code object (r1) and state (r2). Clear the context and frame
+ // pointer (0 was saved in the handler).
+ ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit());
- // Restore context and frame pointer, discard state (r2).
- ldm(ia_w, sp, r2.bit() | cp.bit() | fp.bit());
-#ifdef DEBUG
- if (emit_debug_code()) {
- mov(lr, Operand(pc));
- }
-#endif
- pop(pc);
+ JumpToHandlerEntry();
}
@@ -1536,6 +1600,7 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
ASSERT(!result.is(scratch1));
ASSERT(!result.is(scratch2));
ASSERT(!scratch1.is(scratch2));
+ ASSERT(!object_size.is(ip));
ASSERT(!result.is(ip));
ASSERT(!scratch1.is(ip));
ASSERT(!scratch2.is(ip));
@@ -1793,13 +1858,127 @@ void MacroAssembler::CompareRoot(Register obj,
void MacroAssembler::CheckFastElements(Register map,
Register scratch,
Label* fail) {
- STATIC_ASSERT(FAST_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_ELEMENTS == 1);
ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue));
b(hi, fail);
}
+void MacroAssembler::CheckFastObjectElements(Register map,
+ Register scratch,
+ Label* fail) {
+ STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_ELEMENTS == 1);
+ ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+ cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
+ b(ls, fail);
+ cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue));
+ b(hi, fail);
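+  // Falls through only when the bit_field2 value lies in
+  // (kMaximumBitField2FastSmiOnlyElementValue,
+  //  kMaximumBitField2FastElementValue], i.e. fast elements that may hold
+  // heap objects as well as smis.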
+}
+
+
+void MacroAssembler::CheckFastSmiOnlyElements(Register map,
+ Register scratch,
+ Label* fail) {
+ STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+ ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+ cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
+ b(hi, fail);
+}
+
+
+void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
+ Register key_reg,
+ Register receiver_reg,
+ Register elements_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Label* fail) {
+ Label smi_value, maybe_nan, have_double_value, is_nan, done;
+ Register mantissa_reg = scratch2;
+ Register exponent_reg = scratch3;
+
+ // Handle smi values specially.
+ JumpIfSmi(value_reg, &smi_value);
+
+  // Ensure that the object is a heap number.
+ CheckMap(value_reg,
+ scratch1,
+ isolate()->factory()->heap_number_map(),
+ fail,
+ DONT_DO_SMI_CHECK);
+
+  // Check for NaN: all NaN values have a value greater (signed) than
+  // 0x7ff00000 in the exponent word.
+ mov(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
+ ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
+ cmp(exponent_reg, scratch1);
+ b(ge, &maybe_nan);
+
+ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+
+ bind(&have_double_value);
+ add(scratch1, elements_reg,
+ Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
+ str(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize));
+ uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+ str(exponent_reg, FieldMemOperand(scratch1, offset));
+ jmp(&done);
+
+ bind(&maybe_nan);
+ // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
+ // it's an Infinity, and the non-NaN code path applies.
+ b(gt, &is_nan);
+ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+ cmp(mantissa_reg, Operand(0));
+ b(eq, &have_double_value);
+ bind(&is_nan);
+ // Load canonical NaN for storing into the double array.
+ uint64_t nan_int64 = BitCast<uint64_t>(
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+ mov(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
+ mov(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
+ jmp(&have_double_value);
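+  // (One canonical NaN bit pattern is stored so that the hole NaN stays
+  // distinguishable from any NaN produced by JS code.)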
+
+ bind(&smi_value);
+ add(scratch1, elements_reg,
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ add(scratch1, scratch1,
+ Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
+  // scratch1 now holds the effective address of the double element.
+
+ FloatingPointHelper::Destination destination;
+ if (CpuFeatures::IsSupported(VFP3)) {
+ destination = FloatingPointHelper::kVFPRegisters;
+ } else {
+ destination = FloatingPointHelper::kCoreRegisters;
+ }
+
+ Register untagged_value = receiver_reg;
+ SmiUntag(untagged_value, value_reg);
+ FloatingPointHelper::ConvertIntToDouble(this,
+ untagged_value,
+ destination,
+ d0,
+ mantissa_reg,
+ exponent_reg,
+ scratch4,
+ s2);
+ if (destination == FloatingPointHelper::kVFPRegisters) {
+ CpuFeatures::Scope scope(VFP3);
+ vstr(d0, scratch1, 0);
+ } else {
+ str(mantissa_reg, MemOperand(scratch1, 0));
+ str(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes));
+ }
+ bind(&done);
+}
+
+
void MacroAssembler::CheckMap(Register obj,
Register scratch,
Handle<Map> map,
@@ -1850,7 +2029,8 @@ void MacroAssembler::DispatchMap(Register obj,
void MacroAssembler::TryGetFunctionPrototype(Register function,
Register result,
Register scratch,
- Label* miss) {
+ Label* miss,
+ bool miss_on_bound_function) {
// Check that the receiver isn't a smi.
JumpIfSmi(function, miss);
@@ -1858,6 +2038,16 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
b(ne, miss);
+ if (miss_on_bound_function) {
+ ldr(scratch,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ ldr(scratch,
+ FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
+ tst(scratch,
+ Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
+ b(ne, miss);
+ }
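+  // (The kBoundFunction bit sits in the shared info's smi-encoded compiler
+  // hints word, hence the Smi::FromInt mask above.)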
+
// Make sure that the function has an instance prototype.
Label non_instance;
ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
@@ -1895,47 +2085,24 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
- ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
+ ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond);
}
-MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond) {
- ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
- Object* result;
- { MaybeObject* maybe_result = stub->TryGetCode();
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- Handle<Code> code(Code::cast(result));
- Call(code, RelocInfo::CODE_TARGET, kNoASTId, cond);
- return result;
-}
-
-
void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
- ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
+ ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
}
-MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub, Condition cond) {
- ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
- Object* result;
- { MaybeObject* maybe_result = stub->TryGetCode();
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- Jump(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond);
- return result;
-}
-
-
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
return ref0.address() - ref1.address();
}
-MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
- ExternalReference function, int stack_space) {
+void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
+ int stack_space) {
ExternalReference next_address =
ExternalReference::handle_scope_next_address();
const int kNextOffset = 0;
@@ -1998,14 +2165,10 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
mov(pc, lr);
bind(&promote_scheduled_exception);
- MaybeObject* result
- = TryTailCallExternalReference(
- ExternalReference(Runtime::kPromoteScheduledException, isolate()),
- 0,
- 1);
- if (result->IsFailure()) {
- return result;
- }
+ TailCallExternalReference(
+ ExternalReference(Runtime::kPromoteScheduledException, isolate()),
+ 0,
+ 1);
// HandleScope limit has changed. Delete allocated extensions.
bind(&delete_allocated_handles);
@@ -2017,8 +2180,12 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
ExternalReference::delete_handle_scope_extensions(isolate()), 1);
mov(r0, r4);
jmp(&leave_exit_frame);
+}
+
- return result;
+bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
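+  // A stub that may set up its own frame cannot be called safely when we
+  // have no frame of our own.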
+ if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
+ return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
}
@@ -2417,8 +2584,7 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
const Runtime::Function* function = Runtime::FunctionForId(id);
mov(r0, Operand(function->nargs));
mov(r1, Operand(ExternalReference(function, isolate())));
- CEntryStub stub(1);
- stub.SaveDoubles();
+ CEntryStub stub(1, kSaveFPRegs);
CallStub(&stub);
}
@@ -2445,17 +2611,6 @@ void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
}
-MaybeObject* MacroAssembler::TryTailCallExternalReference(
- const ExternalReference& ext, int num_arguments, int result_size) {
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- mov(r0, Operand(num_arguments));
- return TryJumpToExternalReference(ext);
-}
-
-
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size) {
@@ -2476,21 +2631,12 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
}
-MaybeObject* MacroAssembler::TryJumpToExternalReference(
- const ExternalReference& builtin) {
-#if defined(__thumb__)
- // Thumb mode builtin.
- ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
-#endif
- mov(r1, Operand(builtin));
- CEntryStub stub(1);
- return TryTailCallStub(&stub);
-}
-
-
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
+ // You can't call a builtin without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
GetBuiltinEntry(r2, id);
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(r2));
@@ -2622,14 +2768,20 @@ void MacroAssembler::Abort(const char* msg) {
RecordComment(msg);
}
#endif
- // Disable stub call restrictions to always allow calls to abort.
- AllowStubCallsScope allow_scope(this, true);
mov(r0, Operand(p0));
push(r0);
mov(r0, Operand(Smi::FromInt(p1 - p0)));
push(r0);
- CallRuntime(Runtime::kAbort, 2);
+ // Disable stub call restrictions to always allow calls to abort.
+ if (!has_frame_) {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(this, StackFrame::NONE);
+ CallRuntime(Runtime::kAbort, 2);
+ } else {
+ CallRuntime(Runtime::kAbort, 2);
+ }
// will not return here
if (is_const_pool_blocked()) {
// If the calling code cares about the exact number of
@@ -2930,6 +3082,19 @@ void MacroAssembler::CopyBytes(Register src,
}
+void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
+ Register end_offset,
+ Register filler) {
+ Label loop, entry;
+ b(&entry);
+ bind(&loop);
+ str(filler, MemOperand(start_offset, kPointerSize, PostIndex));
+ bind(&entry);
+ cmp(start_offset, end_offset);
+ b(lt, &loop);
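+  // A C-like sketch of this loop (pointer-sized, post-indexed stores):
+  //   while (start_offset < end_offset) *start_offset++ = filler;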
+}
+
+
void MacroAssembler::CountLeadingZeros(Register zeros, // Answer.
Register source, // Input.
Register scratch) {
@@ -2941,8 +3106,10 @@ void MacroAssembler::CountLeadingZeros(Register zeros, // Answer.
#ifdef CAN_USE_ARMV5_INSTRUCTIONS
clz(zeros, source); // This instruction is only supported after ARM5.
#else
- mov(zeros, Operand(0, RelocInfo::NONE));
+  // Order of the next two lines is important: the zeros register
+  // can be the same as the source register.
Move(scratch, source);
+ mov(zeros, Operand(0, RelocInfo::NONE));
// Top 16.
tst(scratch, Operand(0xffff0000));
add(zeros, zeros, Operand(16), LeaveCC, eq);
@@ -3089,23 +3256,15 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
void MacroAssembler::CallCFunction(ExternalReference function,
int num_reg_arguments,
int num_double_arguments) {
- CallCFunctionHelper(no_reg,
- function,
- ip,
- num_reg_arguments,
- num_double_arguments);
+ mov(ip, Operand(function));
+ CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
}
void MacroAssembler::CallCFunction(Register function,
- Register scratch,
- int num_reg_arguments,
- int num_double_arguments) {
- CallCFunctionHelper(function,
- ExternalReference::the_hole_value_location(isolate()),
- scratch,
- num_reg_arguments,
- num_double_arguments);
+ int num_reg_arguments,
+ int num_double_arguments) {
+ CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
}
@@ -3116,17 +3275,15 @@ void MacroAssembler::CallCFunction(ExternalReference function,
void MacroAssembler::CallCFunction(Register function,
- Register scratch,
int num_arguments) {
- CallCFunction(function, scratch, num_arguments, 0);
+ CallCFunction(function, num_arguments, 0);
}
void MacroAssembler::CallCFunctionHelper(Register function,
- ExternalReference function_reference,
- Register scratch,
int num_reg_arguments,
int num_double_arguments) {
+ ASSERT(has_frame());
// Make sure that the stack is aligned before calling a C function unless
// running in the simulator. The simulator has its own alignment check which
// provides more information.
@@ -3150,10 +3307,6 @@ void MacroAssembler::CallCFunctionHelper(Register function,
// Just call directly. The function called cannot cause a GC, or
// allow preemption, so the return address in the link register
// stays correct.
- if (function.is(no_reg)) {
- mov(scratch, Operand(function_reference));
- function = scratch;
- }
Call(function);
int stack_passed_arguments = CalculateStackPassedWords(
num_reg_arguments, num_double_arguments);
@@ -3185,6 +3338,185 @@ void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
}
+void MacroAssembler::CheckPageFlag(
+ Register object,
+ Register scratch,
+ int mask,
+ Condition cc,
+ Label* condition_met) {
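+  // Masking off the low page-alignment bits rounds the address down to the
+  // start of its page, where the MemoryChunk flags word lives.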
+ and_(scratch, object, Operand(~Page::kPageAlignmentMask));
+ ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+ tst(scratch, Operand(mask));
+ b(cc, condition_met);
+}
+
+
+void MacroAssembler::JumpIfBlack(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* on_black) {
+ HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
+ ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+}
+
+
+void MacroAssembler::HasColor(Register object,
+ Register bitmap_scratch,
+ Register mask_scratch,
+ Label* has_color,
+ int first_bit,
+ int second_bit) {
+ ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
+
+ GetMarkBits(object, bitmap_scratch, mask_scratch);
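+  // The bitmap has one bit per heap word; an object's color is read from its
+  // own bit plus the following bit, which can land in the next cell, hence
+  // the word_boundary case below.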
+
+ Label other_color, word_boundary;
+ ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ tst(ip, Operand(mask_scratch));
+ b(first_bit == 1 ? eq : ne, &other_color);
+ // Shift left 1 by adding.
+ add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC);
+ b(eq, &word_boundary);
+ tst(ip, Operand(mask_scratch));
+ b(second_bit == 1 ? ne : eq, has_color);
+ jmp(&other_color);
+
+ bind(&word_boundary);
+ ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
+ tst(ip, Operand(1));
+ b(second_bit == 1 ? ne : eq, has_color);
+ bind(&other_color);
+}
+
+
+// Detect some, but not all, common pointer-free objects. This is used by the
+// incremental write barrier which doesn't care about oddballs (they are always
+// marked black immediately so this code is not hit).
+void MacroAssembler::JumpIfDataObject(Register value,
+ Register scratch,
+ Label* not_data_object) {
+ Label is_data_object;
+ ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
+ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
+ b(eq, &is_data_object);
+ ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+ ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+ // If it's a string and it's not a cons string then it's an object containing
+ // no GC pointers.
+ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ tst(scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
+ b(ne, not_data_object);
+ bind(&is_data_object);
+}
+
+
+void MacroAssembler::GetMarkBits(Register addr_reg,
+ Register bitmap_reg,
+ Register mask_reg) {
+ ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
+ and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
+ Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
+ const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
+ Ubfx(ip, addr_reg, kLowBits, kPageSizeBits - kLowBits);
+ add(bitmap_reg, bitmap_reg, Operand(ip, LSL, kPointerSizeLog2));
+ mov(ip, Operand(1));
+ mov(mask_reg, Operand(ip, LSL, mask_reg));
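+  // In effect: bitmap_reg = page_start + cell_index * kPointerSize, with the
+  // cell index taken from the higher address bits, and mask_reg = 1 << (bit
+  // index within the cell). Callers add MemoryChunk::kHeaderSize when they
+  // load the cell.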
+}
+
+
+void MacroAssembler::EnsureNotWhite(
+ Register value,
+ Register bitmap_scratch,
+ Register mask_scratch,
+ Register load_scratch,
+ Label* value_is_white_and_not_data) {
+ ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
+ GetMarkBits(value, bitmap_scratch, mask_scratch);
+
+ // If the value is black or grey we don't need to do anything.
+ ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+ ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+ Label done;
+
+ // Since both black and grey have a 1 in the first position and white does
+  // not have a 1 there, we only need to check one bit.
+ ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ tst(mask_scratch, load_scratch);
+ b(ne, &done);
+
+ if (FLAG_debug_code) {
+ // Check for impossible bit pattern.
+ Label ok;
+ // LSL may overflow, making the check conservative.
+ tst(load_scratch, Operand(mask_scratch, LSL, 1));
+ b(eq, &ok);
+ stop("Impossible marking bit pattern");
+ bind(&ok);
+ }
+
+ // Value is white. We check whether it is data that doesn't need scanning.
+ // Currently only checks for HeapNumber and non-cons strings.
+ Register map = load_scratch; // Holds map while checking type.
+ Register length = load_scratch; // Holds length of object after testing type.
+ Label is_data_object;
+
+  // Check for heap numbers.
+ ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
+ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+ mov(length, Operand(HeapNumber::kSize), LeaveCC, eq);
+ b(eq, &is_data_object);
+
+ // Check for strings.
+ ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+ ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+ // If it's a string and it's not a cons string then it's an object containing
+ // no GC pointers.
+ Register instance_type = load_scratch;
+ ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ tst(instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
+ b(ne, value_is_white_and_not_data);
+ // It's a non-indirect (non-cons and non-slice) string.
+ // If it's external, the length is just ExternalString::kSize.
+  // Otherwise it's SeqString::kHeaderSize + string->length() * (1 or 2).
+ // External strings are the only ones with the kExternalStringTag bit
+ // set.
+ ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
+ ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+ tst(instance_type, Operand(kExternalStringTag));
+ mov(length, Operand(ExternalString::kSize), LeaveCC, ne);
+ b(ne, &is_data_object);
+
+ // Sequential string, either ASCII or UC16.
+ // For ASCII (char-size of 1) we shift the smi tag away to get the length.
+ // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
+ // getting the length multiplied by 2.
+ ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4);
+ ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ ldr(ip, FieldMemOperand(value, String::kLengthOffset));
+ tst(instance_type, Operand(kStringEncodingMask));
+ mov(ip, Operand(ip, LSR, 1), LeaveCC, ne);
+ add(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
+ and_(length, length, Operand(~kObjectAlignmentMask));
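+  // i.e. length = align(SeqString::kHeaderSize + char_count * char_size),
+  // with the smi tag doing double duty as the *2 for two-byte strings.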
+
+ bind(&is_data_object);
+ // Value is a data object, and it is white. Mark it black. Since we know
+ // that the object is white we can make it black by flipping one bit.
+ ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ orr(ip, ip, Operand(mask_scratch));
+ str(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+
+ and_(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
+ ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+ add(ip, ip, Operand(length));
+ str(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+
+ bind(&done);
+}
+
+
void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
Usat(output_reg, 8, Operand(input_reg));
}
@@ -3234,6 +3566,17 @@ void MacroAssembler::LoadInstanceDescriptors(Register map,
}
+bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
+ if (r1.is(r2)) return true;
+ if (r1.is(r3)) return true;
+ if (r1.is(r4)) return true;
+ if (r2.is(r3)) return true;
+ if (r2.is(r4)) return true;
+ if (r3.is(r4)) return true;
+ return false;
+}
+
+
CodePatcher::CodePatcher(byte* address, int instructions)
: address_(address),
instructions_(instructions),
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 6084fde2d3..2725883ee1 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -29,6 +29,7 @@
#define V8_ARM_MACRO_ASSEMBLER_ARM_H_
#include "assembler.h"
+#include "frames.h"
#include "v8globals.h"
namespace v8 {
@@ -38,12 +39,12 @@ namespace internal {
// Static helper functions
// Generate a MemOperand for loading a field from an object.
-static inline MemOperand FieldMemOperand(Register object, int offset) {
+inline MemOperand FieldMemOperand(Register object, int offset) {
return MemOperand(object, offset - kHeapObjectTag);
}
-static inline Operand SmiUntagOperand(Register object) {
+inline Operand SmiUntagOperand(Register object) {
return Operand(object, ASR, kSmiTagSize);
}
@@ -79,6 +80,14 @@ enum ObjectToDoubleFlags {
};
+enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
+enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
+
+
+bool AreAliased(Register r1, Register r2, Register r3, Register r4);
+
+
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
@@ -157,40 +166,126 @@ class MacroAssembler: public Assembler {
Heap::RootListIndex index,
Condition cond = al);
+ // ---------------------------------------------------------------------------
+ // GC Support
+
+ void IncrementalMarkingRecordWriteHelper(Register object,
+ Register value,
+ Register address);
+
+ enum RememberedSetFinalAction {
+ kReturnAtEnd,
+ kFallThroughAtEnd
+ };
+
+ // Record in the remembered set the fact that we have a pointer to new space
+ // at the address pointed to by the addr register. Only works if addr is not
+ // in new space.
+ void RememberedSetHelper(Register object, // Used for debug code.
+ Register addr,
+ Register scratch,
+ SaveFPRegsMode save_fp,
+ RememberedSetFinalAction and_then);
+
+ void CheckPageFlag(Register object,
+ Register scratch,
+ int mask,
+ Condition cc,
+ Label* condition_met);
+
+ // Check if object is in new space. Jumps if the object is not in new space.
+ // The register scratch can be object itself, but scratch will be clobbered.
+ void JumpIfNotInNewSpace(Register object,
+ Register scratch,
+ Label* branch) {
+ InNewSpace(object, scratch, ne, branch);
+ }
- // Check if object is in new space.
- // scratch can be object itself, but it will be clobbered.
- void InNewSpace(Register object,
- Register scratch,
- Condition cond, // eq for new space, ne otherwise
- Label* branch);
-
+ // Check if object is in new space. Jumps if the object is in new space.
+ // The register scratch can be object itself, but it will be clobbered.
+ void JumpIfInNewSpace(Register object,
+ Register scratch,
+ Label* branch) {
+ InNewSpace(object, scratch, eq, branch);
+ }
- // For the page containing |object| mark the region covering [address]
- // dirty. The object address must be in the first 8K of an allocated page.
- void RecordWriteHelper(Register object,
- Register address,
- Register scratch);
+ // Check if an object has a given incremental marking color.
+ void HasColor(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* has_color,
+ int first_bit,
+ int second_bit);
- // For the page containing |object| mark the region covering
- // [object+offset] dirty. The object address must be in the first 8K
- // of an allocated page. The 'scratch' registers are used in the
- // implementation and all 3 registers are clobbered by the
- // operation, as well as the ip register. RecordWrite updates the
- // write barrier even when storing smis.
- void RecordWrite(Register object,
- Operand offset,
+ void JumpIfBlack(Register object,
Register scratch0,
- Register scratch1);
+ Register scratch1,
+ Label* on_black);
+
+ // Checks the color of an object. If the object is already grey or black
+ // then we just fall through, since it is already live. If it is white and
+ // we can determine that it doesn't need to be scanned, then we just mark it
+ // black and fall through. For the rest we jump to the label so the
+ // incremental marker can fix its assumptions.
+ void EnsureNotWhite(Register object,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* object_is_white_and_not_data);
- // For the page containing |object| mark the region covering
- // [address] dirty. The object address must be in the first 8K of an
- // allocated page. All 3 registers are clobbered by the operation,
- // as well as the ip register. RecordWrite updates the write barrier
- // even when storing smis.
- void RecordWrite(Register object,
- Register address,
- Register scratch);
+  // Detects conservatively whether an object is data-only, i.e. whether it
+  // does not need to be scanned by the garbage collector.
+ void JumpIfDataObject(Register value,
+ Register scratch,
+ Label* not_data_object);
+
+ // Notify the garbage collector that we wrote a pointer into an object.
+ // |object| is the object being stored into, |value| is the object being
+ // stored. value and scratch registers are clobbered by the operation.
+ // The offset is the offset from the start of the object, not the offset from
+  // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
+ void RecordWriteField(
+ Register object,
+ int offset,
+ Register value,
+ Register scratch,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK);
+
+ // As above, but the offset has the tag presubtracted. For use with
+ // MemOperand(reg, off).
+ inline void RecordWriteContextSlot(
+ Register context,
+ int offset,
+ Register value,
+ Register scratch,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK) {
+ RecordWriteField(context,
+ offset + kHeapObjectTag,
+ value,
+ scratch,
+ lr_status,
+ save_fp,
+ remembered_set_action,
+ smi_check);
+ }
+
+ // For a given |object| notify the garbage collector that the slot |address|
+ // has been written. |value| is the object being stored. The value and
+ // address registers are clobbered by the operation.
+ void RecordWrite(
+ Register object,
+ Register address,
+ Register value,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK);
// Push a handle.
void Push(Handle<Object> handle);
@@ -225,8 +320,11 @@ class MacroAssembler: public Assembler {
}
// Push four registers. Pushes leftmost register first (to highest address).
- void Push(Register src1, Register src2,
- Register src3, Register src4, Condition cond = al) {
+ void Push(Register src1,
+ Register src2,
+ Register src3,
+ Register src4,
+ Condition cond = al) {
ASSERT(!src1.is(src2));
ASSERT(!src2.is(src3));
ASSERT(!src1.is(src3));
@@ -265,6 +363,57 @@ class MacroAssembler: public Assembler {
}
}
+ // Pop three registers. Pops rightmost register first (from lower address).
+ void Pop(Register src1, Register src2, Register src3, Condition cond = al) {
+ ASSERT(!src1.is(src2));
+ ASSERT(!src2.is(src3));
+ ASSERT(!src1.is(src3));
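+    // ldm always loads the lowest-numbered register from the lowest address,
+    // so the list is split whenever the requested pop order disagrees with
+    // the register numbering.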
+ if (src1.code() > src2.code()) {
+ if (src2.code() > src3.code()) {
+ ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
+ } else {
+ ldr(src3, MemOperand(sp, 4, PostIndex), cond);
+ ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
+ }
+ } else {
+ Pop(src2, src3, cond);
+      ldr(src1, MemOperand(sp, 4, PostIndex), cond);
+ }
+ }
+
+ // Pop four registers. Pops rightmost register first (from lower address).
+ void Pop(Register src1,
+ Register src2,
+ Register src3,
+ Register src4,
+ Condition cond = al) {
+ ASSERT(!src1.is(src2));
+ ASSERT(!src2.is(src3));
+ ASSERT(!src1.is(src3));
+ ASSERT(!src1.is(src4));
+ ASSERT(!src2.is(src4));
+ ASSERT(!src3.is(src4));
+ if (src1.code() > src2.code()) {
+ if (src2.code() > src3.code()) {
+ if (src3.code() > src4.code()) {
+ ldm(ia_w,
+ sp,
+ src1.bit() | src2.bit() | src3.bit() | src4.bit(),
+ cond);
+ } else {
+ ldr(src4, MemOperand(sp, 4, PostIndex), cond);
+ ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
+ }
+ } else {
+ Pop(src3, src4, cond);
+ ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
+ }
+ } else {
+ Pop(src2, src3, src4, cond);
+ ldr(src1, MemOperand(sp, 4, PostIndex), cond);
+ }
+ }
+
// Push and pop the registers that can hold pointers, as defined by the
// RegList constant kSafepointSavedRegisters.
void PushSafepointRegisters();
@@ -318,16 +467,6 @@ class MacroAssembler: public Assembler {
const double imm,
const Condition cond = al);
-
- // ---------------------------------------------------------------------------
- // Activation frames
-
- void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
- void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
-
- void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
- void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
-
// Enter exit frame.
// stack_space - extra stack space, used for alignment before call to C.
void EnterExitFrame(bool save_doubles, int stack_space = 0);
@@ -381,7 +520,7 @@ class MacroAssembler: public Assembler {
const CallWrapper& call_wrapper,
CallKind call_kind);
- void InvokeFunction(JSFunction* function,
+ void InvokeFunction(Handle<JSFunction> function,
const ParameterCount& actual,
InvokeFlag flag,
CallKind call_kind);
@@ -410,9 +549,9 @@ class MacroAssembler: public Assembler {
// Exception handling
// Push a new try handler and link into try handler chain.
- // The return address must be passed in register lr.
- // On exit, r0 contains TOS (code slot).
- void PushTryHandler(CodeLocation try_location, HandlerType type);
+ void PushTryHandler(CodeLocation try_location,
+ HandlerType type,
+ int handler_index);
// Unlink the stack handler on top of the stack from the try handler chain.
// Must preserve the result register.
@@ -569,6 +708,13 @@ class MacroAssembler: public Assembler {
Register length,
Register scratch);
+  // Initialize fields with filler values. Fields from |start_offset| up to
+  // but not including |end_offset| are overwritten with the value in
+  // |filler|. At the end of the loop, |start_offset| equals |end_offset|.
+ void InitializeFieldsWithFiller(Register start_offset,
+ Register end_offset,
+ Register filler);
+
// ---------------------------------------------------------------------------
// Support functions.
@@ -580,7 +726,8 @@ class MacroAssembler: public Assembler {
void TryGetFunctionPrototype(Register function,
Register result,
Register scratch,
- Label* miss);
+ Label* miss,
+ bool miss_on_bound_function = false);
// Compare object type for heap object. heap_object contains a non-Smi
// whose object type should be compared with the given type. This both
@@ -608,6 +755,31 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* fail);
+ // Check if a map for a JSObject indicates that the object can have both smi
+ // and HeapObject elements. Jump to the specified label if it does not.
+ void CheckFastObjectElements(Register map,
+ Register scratch,
+ Label* fail);
+
+ // Check if a map for a JSObject indicates that the object has fast smi only
+ // elements. Jump to the specified label if it does not.
+ void CheckFastSmiOnlyElements(Register map,
+ Register scratch,
+ Label* fail);
+
+ // Check to see if maybe_number can be stored as a double in
+ // FastDoubleElements. If it can, store it at the index specified by key in
+ // the FastDoubleElements array elements, otherwise jump to fail.
+ void StoreNumberToDoubleElements(Register value_reg,
+ Register key_reg,
+ Register receiver_reg,
+ Register elements_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Label* fail);
+
// Check if the map of an object is equal to a specified map (either
// given directly or as an index into the root list) and branch to
// label if not. Skip the smi check if not required (object is known
@@ -754,20 +926,9 @@ class MacroAssembler: public Assembler {
// Call a code stub.
void CallStub(CodeStub* stub, Condition cond = al);
- // Call a code stub and return the code object called. Try to generate
- // the code if necessary. Do not perform a GC but instead return a retry
- // after GC failure.
- MUST_USE_RESULT MaybeObject* TryCallStub(CodeStub* stub, Condition cond = al);
-
// Call a code stub.
void TailCallStub(CodeStub* stub, Condition cond = al);
- // Tail call a code stub (jump) and return the code object called. Try to
- // generate the code if necessary. Do not perform a GC but instead return
- // a retry after GC failure.
- MUST_USE_RESULT MaybeObject* TryTailCallStub(CodeStub* stub,
- Condition cond = al);
-
// Call a runtime routine.
void CallRuntime(const Runtime::Function* f, int num_arguments);
void CallRuntimeSaveDoubles(Runtime::FunctionId id);
@@ -786,12 +947,6 @@ class MacroAssembler: public Assembler {
int num_arguments,
int result_size);
- // Tail call of a runtime routine (jump). Try to generate the code if
- // necessary. Do not perform a GC but instead return a retry after GC
- // failure.
- MUST_USE_RESULT MaybeObject* TryTailCallExternalReference(
- const ExternalReference& ext, int num_arguments, int result_size);
-
// Convenience function: tail call a runtime routine (jump).
void TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
@@ -830,28 +985,25 @@ class MacroAssembler: public Assembler {
// return address (unless this is somehow accounted for by the called
// function).
void CallCFunction(ExternalReference function, int num_arguments);
- void CallCFunction(Register function, Register scratch, int num_arguments);
+ void CallCFunction(Register function, int num_arguments);
void CallCFunction(ExternalReference function,
int num_reg_arguments,
int num_double_arguments);
- void CallCFunction(Register function, Register scratch,
+ void CallCFunction(Register function,
int num_reg_arguments,
int num_double_arguments);
void GetCFunctionDoubleResult(const DoubleRegister dst);
- // Calls an API function. Allocates HandleScope, extracts returned value
- // from handle and propagates exceptions. Restores context.
- // stack_space - space to be unwound on exit (includes the call js
- // arguments space and the additional space allocated for the fast call).
- MaybeObject* TryCallApiFunctionAndReturn(ExternalReference function,
- int stack_space);
+  // Calls an API function. Allocates HandleScope, extracts returned value
+  // from handle and propagates exceptions. Restores context. |stack_space|
+  // is the space to be unwound on exit (includes the call JS arguments space
+  // and the additional space allocated for the fast call).
+ void CallApiFunctionAndReturn(ExternalReference function, int stack_space);
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& builtin);
- MaybeObject* TryJumpToExternalReference(const ExternalReference& ext);
-
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.
void InvokeBuiltin(Builtins::JavaScript id,
@@ -902,6 +1054,9 @@ class MacroAssembler: public Assembler {
bool generating_stub() { return generating_stub_; }
void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
bool allow_stub_calls() { return allow_stub_calls_; }
+ void set_has_frame(bool value) { has_frame_ = value; }
+ bool has_frame() { return has_frame_; }
+ inline bool AllowThisStubCall(CodeStub* stub);
// EABI variant for double arguments in use.
bool use_eabi_hardfloat() {
@@ -1048,10 +1203,12 @@ class MacroAssembler: public Assembler {
void LoadInstanceDescriptors(Register map, Register descriptors);
+ // Activation support.
+ void EnterFrame(StackFrame::Type type);
+ void LeaveFrame(StackFrame::Type type);
+
private:
void CallCFunctionHelper(Register function,
- ExternalReference function_reference,
- Register scratch,
int num_reg_arguments,
int num_double_arguments);
@@ -1067,16 +1224,29 @@ class MacroAssembler: public Assembler {
const CallWrapper& call_wrapper,
CallKind call_kind);
- // Activation support.
- void EnterFrame(StackFrame::Type type);
- void LeaveFrame(StackFrame::Type type);
-
void InitializeNewString(Register string,
Register length,
Heap::RootListIndex map_index,
Register scratch1,
Register scratch2);
+ // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
+ void InNewSpace(Register object,
+ Register scratch,
+ Condition cond, // eq for new space, ne otherwise.
+ Label* branch);
+
+  // Helper for finding the mark bits for an address. Afterwards, the
+  // bitmap register points at the word with the mark bits, and the mask
+  // register holds a 1 at the position of the address's first mark bit.
+  // Leaves addr_reg unchanged.
+ inline void GetMarkBits(Register addr_reg,
+ Register bitmap_reg,
+ Register mask_reg);
+
+ // Helper for throwing exceptions. Compute a handler address and jump to
+ // it. See the implementation for register usage.
+ void JumpToHandlerEntry();
+
// Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code);
MemOperand SafepointRegisterSlot(Register reg);
@@ -1084,6 +1254,7 @@ class MacroAssembler: public Assembler {
bool generating_stub_;
bool allow_stub_calls_;
+ bool has_frame_;
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;
@@ -1129,12 +1300,12 @@ class CodePatcher {
// -----------------------------------------------------------------------------
// Static helper functions.
-static MemOperand ContextOperand(Register context, int index) {
+inline MemOperand ContextOperand(Register context, int index) {
return MemOperand(context, Context::SlotOffset(index));
}
-static inline MemOperand GlobalObjectOperand() {
+inline MemOperand GlobalObjectOperand() {
return ContextOperand(cp, Context::GLOBAL_INDEX);
}
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
index cd76edbf15..b212f9f6e6 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
@@ -371,9 +371,12 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
// Isolate.
__ mov(r3, Operand(ExternalReference::isolate_address()));
- ExternalReference function =
- ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
- __ CallCFunction(function, argument_count);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm_);
+ ExternalReference function =
+ ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
+ __ CallCFunction(function, argument_count);
+ }
// Check if function returned non-zero for success or zero for failure.
__ cmp(r0, Operand(0, RelocInfo::NONE));
@@ -611,6 +614,12 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Entry code:
__ bind(&entry_label_);
+
+ // Tell the system that we have a stack frame. Because the type is MANUAL, no
+  // code is generated.
+ FrameScope scope(masm_, StackFrame::MANUAL);
+
+ // Actually emit code to start a new stack frame.
// Push arguments
// Save callee-save registers.
// Start new stack frame.
@@ -1102,6 +1111,11 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
frame_entry<const String*>(re_frame, kInputString) = *subject;
frame_entry<const byte*>(re_frame, kInputStart) = new_address;
frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
+ } else if (frame_entry<const String*>(re_frame, kInputString) != *subject) {
+ // Subject string might have been a ConsString that underwent
+ // short-circuiting during GC. That will not change start_address but
+  // will change the pointer inside the subject handle.
+ frame_entry<const String*>(re_frame, kInputString) = *subject;
}
return 0;
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index 6af535553f..0525529fde 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -53,7 +53,7 @@ namespace internal {
// code.
class ArmDebugger {
public:
- explicit ArmDebugger(Simulator* sim);
+ explicit ArmDebugger(Simulator* sim) : sim_(sim) { }
~ArmDebugger();
void Stop(Instruction* instr);
@@ -84,11 +84,6 @@ class ArmDebugger {
};
-ArmDebugger::ArmDebugger(Simulator* sim) {
- sim_ = sim;
-}
-
-
ArmDebugger::~ArmDebugger() {
}
@@ -296,6 +291,13 @@ void ArmDebugger::Debug() {
if (line == NULL) {
break;
} else {
+ char* last_input = sim_->last_debugger_input();
+ if (strcmp(line, "\n") == 0 && last_input != NULL) {
+ line = last_input;
+ } else {
+  // Ownership is transferred to sim_.
+ sim_->set_last_debugger_input(line);
+ }
// Use sscanf to parse the individual parts of the command line. At the
// moment no command expects more than two parameters.
int argc = SScanF(line,
@@ -611,7 +613,6 @@ void ArmDebugger::Debug() {
PrintF("Unknown command: %s\n", cmd);
}
}
- DeleteArray(line);
}
// Add all the breakpoints back to stop execution and enter the debugger
@@ -645,6 +646,12 @@ static bool AllOnOnePage(uintptr_t start, int size) {
}
+void Simulator::set_last_debugger_input(char* input) {
+ DeleteArray(last_debugger_input_);
+ last_debugger_input_ = input;
+}
+
+
void Simulator::FlushICache(v8::internal::HashMap* i_cache,
void* start_addr,
size_t size) {
@@ -781,6 +788,8 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
registers_[pc] = bad_lr;
registers_[lr] = bad_lr;
InitializeCoverage();
+
+ last_debugger_input_ = NULL;
}
@@ -1268,9 +1277,9 @@ void Simulator::WriteDW(int32_t addr, int32_t value1, int32_t value2) {
// Returns the limit of the stack area to enable checking for stack overflows.
uintptr_t Simulator::StackLimit() const {
- // Leave a safety margin of 256 bytes to prevent overrunning the stack when
+ // Leave a safety margin of 512 bytes to prevent overrunning the stack when
// pushing values.
- return reinterpret_cast<uintptr_t>(stack_) + 256;
+ return reinterpret_cast<uintptr_t>(stack_) + 512;
}
@@ -1618,6 +1627,8 @@ void Simulator::HandleRList(Instruction* instr, bool load) {
ProcessPUW(instr, num_regs, kPointerSize, &start_address, &end_address);
intptr_t* address = reinterpret_cast<intptr_t*>(start_address);
+ // Catch null pointers a little earlier.
+ ASSERT(start_address > 8191 || start_address < 0);
int reg = 0;
while (rlist != 0) {
if ((rlist & 1) != 0) {
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h
index 391ef69f5e..585f1e0176 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/arm/simulator-arm.h
@@ -194,6 +194,10 @@ class Simulator {
// Pop an address from the JS stack.
uintptr_t PopAddress();
+ // Debugger input.
+ void set_last_debugger_input(char* input);
+ char* last_debugger_input() { return last_debugger_input_; }
+
// ICache checking.
static void FlushICache(v8::internal::HashMap* i_cache, void* start,
size_t size);
@@ -360,6 +364,9 @@ class Simulator {
bool pc_modified_;
int icount_;
+ // Debugger input.
+ char* last_debugger_input_;
+
// Icache simulation
v8::internal::HashMap* i_cache_;
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index f8565924b1..47778f580f 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -95,13 +95,12 @@ static void ProbeTable(Isolate* isolate,
// must always call a backup property check that is complete.
// This function is safe to call if the receiver has fast properties.
// Name must be a symbol and receiver must be a heap object.
-MUST_USE_RESULT static MaybeObject* GenerateDictionaryNegativeLookup(
- MacroAssembler* masm,
- Label* miss_label,
- Register receiver,
- String* name,
- Register scratch0,
- Register scratch1) {
+static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+ Label* miss_label,
+ Register receiver,
+ Handle<String> name,
+ Register scratch0,
+ Register scratch1) {
ASSERT(name->IsSymbol());
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
@@ -138,20 +137,15 @@ MUST_USE_RESULT static MaybeObject* GenerateDictionaryNegativeLookup(
__ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- MaybeObject* result = StringDictionaryLookupStub::GenerateNegativeLookup(
- masm,
- miss_label,
- &done,
- receiver,
- properties,
- name,
- scratch1);
- if (result->IsFailure()) return result;
-
+ StringDictionaryLookupStub::GenerateNegativeLookup(masm,
+ miss_label,
+ &done,
+ receiver,
+ properties,
+ name,
+ scratch1);
__ bind(&done);
__ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-
- return result;
}
@@ -238,7 +232,10 @@ void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm, int index, Register prototype, Label* miss) {
+ MacroAssembler* masm,
+ int index,
+ Register prototype,
+ Label* miss) {
Isolate* isolate = masm->isolate();
// Check we're still in the same context.
__ ldr(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
@@ -246,8 +243,8 @@ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
__ cmp(prototype, ip);
__ b(ne, miss);
// Get the global function with the given index.
- JSFunction* function =
- JSFunction::cast(isolate->global_context()->get(index));
+ Handle<JSFunction> function(
+ JSFunction::cast(isolate->global_context()->get(index)));
// Load its initial map. The global functions all have initial maps.
__ Move(prototype, Handle<Map>(function->initial_map()));
// Load the prototype from the initial map.
@@ -259,8 +256,10 @@ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
// are loaded directly otherwise the property is loaded from the properties
// fixed array.
void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst, Register src,
- JSObject* holder, int index) {
+ Register dst,
+ Register src,
+ Handle<JSObject> holder,
+ int index) {
// Adjust for the number of properties stored in the holder.
index -= holder->map()->inobject_properties();
if (index < 0) {
@@ -367,9 +366,9 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
// may be clobbered. Upon branch to miss_label, the receiver and name
// registers have their original values.
void StubCompiler::GenerateStoreField(MacroAssembler* masm,
- JSObject* object,
+ Handle<JSObject> object,
int index,
- Map* transition,
+ Handle<Map> transition,
Register receiver_reg,
Register name_reg,
Register scratch,
@@ -395,11 +394,11 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
// Perform map transition for the receiver if necessary.
- if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
+ if (!transition.is_null() && (object->map()->unused_property_fields() == 0)) {
// The properties must be extended before we can store the value.
// We jump to a runtime call that extends the properties array.
__ push(receiver_reg);
- __ mov(r2, Operand(Handle<Map>(transition)));
+ __ mov(r2, Operand(transition));
__ Push(r2, r0);
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
@@ -409,10 +408,10 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
return;
}
- if (transition != NULL) {
+ if (!transition.is_null()) {
// Update the map of the object; no write barrier updating is
// needed because the map is never in new space.
- __ mov(ip, Operand(Handle<Map>(transition)));
+ __ mov(ip, Operand(transition));
__ str(ip, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
}
@@ -431,7 +430,13 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Update the write barrier for the array address.
// Pass the now unused name_reg as a scratch register.
- __ RecordWrite(receiver_reg, Operand(offset), name_reg, scratch);
+ __ mov(name_reg, r0);
+ __ RecordWriteField(receiver_reg,
+ offset,
+ name_reg,
+ scratch,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
@@ -444,7 +449,13 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Update the write barrier for the array address.
// Ok to clobber receiver_reg and name_reg, since we return.
- __ RecordWrite(scratch, Operand(offset), name_reg, receiver_reg);
+ __ mov(name_reg, r0);
+ __ RecordWriteField(scratch,
+ offset,
+ name_reg,
+ receiver_reg,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
}
// Return the value (register r0).
@@ -455,20 +466,15 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
- Code* code = NULL;
- if (kind == Code::LOAD_IC) {
- code = masm->isolate()->builtins()->builtin(Builtins::kLoadIC_Miss);
- } else {
- code = masm->isolate()->builtins()->builtin(Builtins::kKeyedLoadIC_Miss);
- }
-
- Handle<Code> ic(code);
- __ Jump(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> code = (kind == Code::LOAD_IC)
+ ? masm->isolate()->builtins()->LoadIC_Miss()
+ : masm->isolate()->builtins()->KeyedLoadIC_Miss();
+ __ Jump(code, RelocInfo::CODE_TARGET);
}
static void GenerateCallFunction(MacroAssembler* masm,
- Object* object,
+ Handle<Object> object,
const ParameterCount& arguments,
Label* miss,
Code::ExtraICState extra_ic_state) {
@@ -501,12 +507,12 @@ static void PushInterceptorArguments(MacroAssembler* masm,
Register receiver,
Register holder,
Register name,
- JSObject* holder_obj) {
+ Handle<JSObject> holder_obj) {
__ push(name);
- InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
- ASSERT(!masm->isolate()->heap()->InNewSpace(interceptor));
+ Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+ ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
Register scratch = name;
- __ mov(scratch, Operand(Handle<Object>(interceptor)));
+ __ mov(scratch, Operand(interceptor));
__ push(scratch);
__ push(receiver);
__ push(holder);
@@ -515,11 +521,12 @@ static void PushInterceptorArguments(MacroAssembler* masm,
}
-static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- JSObject* holder_obj) {
+static void CompileCallLoadPropertyWithInterceptor(
+ MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register name,
+ Handle<JSObject> holder_obj) {
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
ExternalReference ref =
@@ -532,6 +539,7 @@ static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
__ CallStub(&stub);
}
+
static const int kFastApiCallArguments = 3;
// Reserves space for the extra arguments to FastHandleApiCall in the
@@ -553,7 +561,7 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
}
-static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
+static void GenerateFastApiDirectCall(MacroAssembler* masm,
const CallOptimization& optimization,
int argc) {
// ----------- S t a t e -------------
@@ -566,18 +574,18 @@ static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
// -- sp[(argc + 4) * 4] : receiver
// -----------------------------------
// Get the function and setup the context.
- JSFunction* function = optimization.constant_function();
- __ mov(r5, Operand(Handle<JSFunction>(function)));
+ Handle<JSFunction> function = optimization.constant_function();
+ __ mov(r5, Operand(function));
__ ldr(cp, FieldMemOperand(r5, JSFunction::kContextOffset));
// Pass the additional arguments FastHandleApiCall expects.
- Object* call_data = optimization.api_call_info()->data();
- Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
- if (masm->isolate()->heap()->InNewSpace(call_data)) {
- __ Move(r0, api_call_info_handle);
+ Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+ Handle<Object> call_data(api_call_info->data());
+ if (masm->isolate()->heap()->InNewSpace(*call_data)) {
+ __ Move(r0, api_call_info);
__ ldr(r6, FieldMemOperand(r0, CallHandlerInfo::kDataOffset));
} else {
- __ Move(r6, Handle<Object>(call_data));
+ __ Move(r6, call_data);
}
// Store js function and call data.
__ stm(ib, sp, r5.bit() | r6.bit());
@@ -586,11 +594,9 @@ static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
// (refer to layout above).
__ add(r2, sp, Operand(2 * kPointerSize));
- Object* callback = optimization.api_call_info()->callback();
- Address api_function_address = v8::ToCData<Address>(callback);
- ApiFunction fun(api_function_address);
-
const int kApiStackSpace = 4;
+
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
// r0 = v8::Arguments&
@@ -608,17 +614,18 @@ static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
__ mov(ip, Operand(0));
__ str(ip, MemOperand(r0, 3 * kPointerSize));
- // Emitting a stub call may try to allocate (if the code is not
- // already generated). Do not allow the assembler to perform a
- // garbage collection but instead return the allocation failure
- // object.
const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
+ Address function_address = v8::ToCData<Address>(api_call_info->callback());
+ ApiFunction fun(function_address);
ExternalReference ref = ExternalReference(&fun,
ExternalReference::DIRECT_API_CALL,
masm->isolate());
- return masm->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
+ AllowExternalCallThatCantCauseGC scope(masm);
+
+ __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
}
+
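With handles making allocation safe at any emit point, GenerateFastApiDirectCall stops returning MaybeObject*: previously, any helper that might allocate while emitting code had to thread a possible Failure back up through every caller. The contrast in control-flow shape, sketched with toy types (StepThatMayFail/EmitOld/EmitNew are illustrative, not V8 API):

    #include <optional>
    #include <string>

    // Toy contrast of the two calling conventions. Old style: each step
    // returns an optional failure every caller must check and forward.
    // New style: the step cannot fail, so the plumbing disappears.
    std::optional<std::string> StepThatMayFail(bool ok) {
      if (!ok) return std::nullopt;
      return std::string("code");
    }

    std::optional<std::string> EmitOld(bool ok) {
      std::optional<std::string> r = StepThatMayFail(ok);
      if (!r) return std::nullopt;  // manual propagation, at every site
      return r;
    }

    std::string EmitNew() {
      return std::string("code");   // infallible: callers just call it
    }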
class CallInterceptorCompiler BASE_EMBEDDED {
public:
CallInterceptorCompiler(StubCompiler* stub_compiler,
@@ -630,86 +637,63 @@ class CallInterceptorCompiler BASE_EMBEDDED {
name_(name),
extra_ic_state_(extra_ic_state) {}
- MaybeObject* Compile(MacroAssembler* masm,
- JSObject* object,
- JSObject* holder,
- String* name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss) {
+ void Compile(MacroAssembler* masm,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name,
+ LookupResult* lookup,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* miss) {
ASSERT(holder->HasNamedInterceptor());
ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, miss);
-
CallOptimization optimization(lookup);
-
if (optimization.is_constant_call()) {
- return CompileCacheable(masm,
- object,
- receiver,
- scratch1,
- scratch2,
- scratch3,
- holder,
- lookup,
- name,
- optimization,
- miss);
+ CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
+ holder, lookup, name, optimization, miss);
} else {
- CompileRegular(masm,
- object,
- receiver,
- scratch1,
- scratch2,
- scratch3,
- name,
- holder,
- miss);
- return masm->isolate()->heap()->undefined_value();
+ CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
+ name, holder, miss);
}
}
private:
- MaybeObject* CompileCacheable(MacroAssembler* masm,
- JSObject* object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- JSObject* interceptor_holder,
- LookupResult* lookup,
- String* name,
- const CallOptimization& optimization,
- Label* miss_label) {
+ void CompileCacheable(MacroAssembler* masm,
+ Handle<JSObject> object,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Handle<JSObject> interceptor_holder,
+ LookupResult* lookup,
+ Handle<String> name,
+ const CallOptimization& optimization,
+ Label* miss_label) {
ASSERT(optimization.is_constant_call());
ASSERT(!lookup->holder()->IsGlobalObject());
-
Counters* counters = masm->isolate()->counters();
-
int depth1 = kInvalidProtoDepth;
int depth2 = kInvalidProtoDepth;
bool can_do_fast_api_call = false;
if (optimization.is_simple_api_call() &&
- !lookup->holder()->IsGlobalObject()) {
- depth1 =
- optimization.GetPrototypeDepthOfExpectedType(object,
- interceptor_holder);
- if (depth1 == kInvalidProtoDepth) {
- depth2 =
- optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
- lookup->holder());
- }
- can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
- (depth2 != kInvalidProtoDepth);
+ !lookup->holder()->IsGlobalObject()) {
+ depth1 = optimization.GetPrototypeDepthOfExpectedType(
+ object, interceptor_holder);
+ if (depth1 == kInvalidProtoDepth) {
+ depth2 = optimization.GetPrototypeDepthOfExpectedType(
+ interceptor_holder, Handle<JSObject>(lookup->holder()));
+ }
+ can_do_fast_api_call =
+ depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
}
__ IncrementCounter(counters->call_const_interceptor(), 1,
- scratch1, scratch2);
+ scratch1, scratch2);
if (can_do_fast_api_call) {
__ IncrementCounter(counters->call_const_interceptor_fast_api(), 1,
@@ -722,9 +706,9 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Label miss_cleanup;
Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
Register holder =
- stub_compiler_->CheckPrototypes(object, receiver,
- interceptor_holder, scratch1,
- scratch2, scratch3, name, depth1, miss);
+ stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
+ scratch1, scratch2, scratch3,
+ name, depth1, miss);
// Invoke an interceptor and if it provides a value,
// branch to |regular_invoke|.
@@ -737,10 +721,11 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Check that the maps from interceptor's holder to constant function's
// holder haven't changed and thus we can use cached constant function.
- if (interceptor_holder != lookup->holder()) {
+ if (*interceptor_holder != lookup->holder()) {
stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
- lookup->holder(), scratch1,
- scratch2, scratch3, name, depth2, miss);
+ Handle<JSObject>(lookup->holder()),
+ scratch1, scratch2, scratch3,
+ name, depth2, miss);
} else {
// CheckPrototypes has a side effect of fetching a 'holder'
// for API (object which is instanceof for the signature). It's
@@ -751,10 +736,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Invoke function.
if (can_do_fast_api_call) {
- MaybeObject* result = GenerateFastApiDirectCall(masm,
- optimization,
- arguments_.immediate());
- if (result->IsFailure()) return result;
+ GenerateFastApiDirectCall(masm, optimization, arguments_.immediate());
} else {
CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
? CALL_AS_FUNCTION
@@ -775,64 +757,53 @@ class CallInterceptorCompiler BASE_EMBEDDED {
if (can_do_fast_api_call) {
FreeSpaceForFastApiCall(masm);
}
-
- return masm->isolate()->heap()->undefined_value();
}
void CompileRegular(MacroAssembler* masm,
- JSObject* object,
+ Handle<JSObject> object,
Register receiver,
Register scratch1,
Register scratch2,
Register scratch3,
- String* name,
- JSObject* interceptor_holder,
+ Handle<String> name,
+ Handle<JSObject> interceptor_holder,
Label* miss_label) {
Register holder =
stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3, name,
- miss_label);
+ scratch1, scratch2, scratch3,
+ name, miss_label);
// Call a runtime function to load the interceptor property.
- __ EnterInternalFrame();
+ FrameScope scope(masm, StackFrame::INTERNAL);
// Save the name_ register across the call.
__ push(name_);
-
- PushInterceptorArguments(masm,
- receiver,
- holder,
- name_,
- interceptor_holder);
-
+ PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
__ CallExternalReference(
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
masm->isolate()),
5);
-
// Restore the name_ register.
__ pop(name_);
- __ LeaveInternalFrame();
+ // Leave the internal frame.
}
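CompileRegular keeps the same enter/leave pairing, but expressed as a FrameScope whose destructor emits the frame teardown, so the explicit LeaveInternalFrame call (easy to miss on an early-exit path) goes away. A minimal RAII sketch with toy names (ToyAssembler is an assumption, not V8's MacroAssembler):

    #include <cassert>

    // Toy RAII frame scope: the constructor emits the "enter frame"
    // code and the destructor the "leave frame" code, so every path
    // that exits the C++ block restores the frame.
    class ToyAssembler {
     public:
      void EnterFrame() { frame_depth_++; }
      void LeaveFrame() { assert(frame_depth_ > 0); frame_depth_--; }
      bool has_frame() const { return frame_depth_ > 0; }
     private:
      int frame_depth_ = 0;
    };

    class FrameScope {
     public:
      explicit FrameScope(ToyAssembler* masm) : masm_(masm) {
        masm_->EnterFrame();
      }
      ~FrameScope() { masm_->LeaveFrame(); }
     private:
      ToyAssembler* masm_;
    };

    void Emit(ToyAssembler* masm) {
      FrameScope scope(masm);    // frame entered here
      assert(masm->has_frame()); // frame-requiring ops are legal now
    }                            // frame left automatically here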
void LoadWithInterceptor(MacroAssembler* masm,
Register receiver,
Register holder,
- JSObject* holder_obj,
+ Handle<JSObject> holder_obj,
Register scratch,
Label* interceptor_succeeded) {
- __ EnterInternalFrame();
- __ Push(holder, name_);
-
- CompileCallLoadPropertyWithInterceptor(masm,
- receiver,
- holder,
- name_,
- holder_obj);
-
- __ pop(name_); // Restore the name.
- __ pop(receiver); // Restore the holder.
- __ LeaveInternalFrame();
-
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(holder, name_);
+ CompileCallLoadPropertyWithInterceptor(masm,
+ receiver,
+ holder,
+ name_,
+ holder_obj);
+ __ pop(name_); // Restore the name.
+ __ pop(receiver); // Restore the holder.
+ }
// If interceptor returns no-result sentinel, call the constant function.
__ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
__ cmp(r0, scratch);
@@ -849,52 +820,42 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Generate code to check that a global property cell is empty. Create
// the property cell at compilation time if no cell exists for the
// property.
-MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCell(
- MacroAssembler* masm,
- GlobalObject* global,
- String* name,
- Register scratch,
- Label* miss) {
- Object* probe;
- { MaybeObject* maybe_probe = global->EnsurePropertyCell(name);
- if (!maybe_probe->ToObject(&probe)) return maybe_probe;
- }
- JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
+static void GenerateCheckPropertyCell(MacroAssembler* masm,
+ Handle<GlobalObject> global,
+ Handle<String> name,
+ Register scratch,
+ Label* miss) {
+ Handle<JSGlobalPropertyCell> cell =
+ GlobalObject::EnsurePropertyCell(global, name);
ASSERT(cell->value()->IsTheHole());
- __ mov(scratch, Operand(Handle<Object>(cell)));
+ __ mov(scratch, Operand(cell));
__ ldr(scratch,
FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(scratch, ip);
__ b(ne, miss);
- return cell;
}
+
// Calls GenerateCheckPropertyCell for each global object in the prototype chain
// from object to (but not including) holder.
-MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCells(
- MacroAssembler* masm,
- JSObject* object,
- JSObject* holder,
- String* name,
- Register scratch,
- Label* miss) {
- JSObject* current = object;
- while (current != holder) {
+static void GenerateCheckPropertyCells(MacroAssembler* masm,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name,
+ Register scratch,
+ Label* miss) {
+ Handle<JSObject> current = object;
+ while (!current.is_identical_to(holder)) {
if (current->IsGlobalObject()) {
- // Returns a cell or a failure.
- MaybeObject* result = GenerateCheckPropertyCell(
- masm,
- GlobalObject::cast(current),
- name,
- scratch,
- miss);
- if (result->IsFailure()) return result;
+ GenerateCheckPropertyCell(masm,
+ Handle<GlobalObject>::cast(current),
+ name,
+ scratch,
+ miss);
}
- ASSERT(current->IsJSObject());
- current = JSObject::cast(current->GetPrototype());
+ current = Handle<JSObject>(JSObject::cast(current->GetPrototype()));
}
- return NULL;
}
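GenerateCheckPropertyCells walks the prototype chain while EnsurePropertyCell may allocate, which is exactly the situation where a raw JSObject* could be left dangling by a moving collector; hence the handlified loop and the is_identical_to comparison. A runnable toy of the hazard (MovingHeap and the vector-backed "heap" are illustrative stand-ins):

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Toy moving heap: objects live in a vector that may reallocate
    // (move) on growth, like a compacting GC moving objects. A raw
    // pointer taken before the move dangles; an index ("handle") stays
    // valid because it is resolved through the table on every access.
    struct Obj { int value; };

    class MovingHeap {
     public:
      std::size_t Allocate(int v) {   // may move all existing objects
        objects_.push_back(Obj{v});
        return objects_.size() - 1;   // stable "handle"
      }
      Obj* Resolve(std::size_t handle) { return &objects_[handle]; }
     private:
      std::vector<Obj> objects_;
    };

    int main() {
      MovingHeap heap;
      std::size_t h = heap.Allocate(1);
      Obj* raw = heap.Resolve(h);     // raw pointer: unsafe to keep
      (void)raw;                      // may dangle after the loop below
      for (int i = 0; i < 1000; i++) heap.Allocate(i);  // forces moves
      std::printf("%d\n", heap.Resolve(h)->value);      // prints 1
      return 0;
    }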
@@ -1008,13 +969,13 @@ static void GenerateUInt2Double(MacroAssembler* masm,
#define __ ACCESS_MASM(masm())
-Register StubCompiler::CheckPrototypes(JSObject* object,
+Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
Register object_reg,
- JSObject* holder,
+ Handle<JSObject> holder,
Register holder_reg,
Register scratch1,
Register scratch2,
- String* name,
+ Handle<String> name,
int save_at_depth,
Label* miss) {
// Make sure there's no overlap between holder and object registers.
@@ -1032,83 +993,52 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
// Check the maps in the prototype chain.
// Traverse the prototype chain from the object and do map checks.
- JSObject* current = object;
- while (current != holder) {
- depth++;
+ Handle<JSObject> current = object;
+ while (!current.is_identical_to(holder)) {
+ ++depth;
// Only global objects and objects that do not require access
// checks are allowed in stubs.
ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
- ASSERT(current->GetPrototype()->IsJSObject());
- JSObject* prototype = JSObject::cast(current->GetPrototype());
+ Handle<JSObject> prototype(JSObject::cast(current->GetPrototype()));
if (!current->HasFastProperties() &&
!current->IsJSGlobalObject() &&
!current->IsJSGlobalProxy()) {
if (!name->IsSymbol()) {
- MaybeObject* maybe_lookup_result = heap()->LookupSymbol(name);
- Object* lookup_result = NULL; // Initialization to please compiler.
- if (!maybe_lookup_result->ToObject(&lookup_result)) {
- set_failure(Failure::cast(maybe_lookup_result));
- return reg;
- }
- name = String::cast(lookup_result);
+ name = factory()->LookupSymbol(name);
}
- ASSERT(current->property_dictionary()->FindEntry(name) ==
+ ASSERT(current->property_dictionary()->FindEntry(*name) ==
StringDictionary::kNotFound);
- MaybeObject* negative_lookup = GenerateDictionaryNegativeLookup(masm(),
- miss,
- reg,
- name,
- scratch1,
- scratch2);
- if (negative_lookup->IsFailure()) {
- set_failure(Failure::cast(negative_lookup));
- return reg;
- }
+ GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
+ scratch1, scratch2);
__ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- reg = holder_reg; // from now the object is in holder_reg
+ reg = holder_reg; // From now on the object will be in holder_reg.
__ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
- } else if (heap()->InNewSpace(prototype)) {
- // Get the map of the current object.
+ } else {
+ Handle<Map> current_map(current->map());
__ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- __ cmp(scratch1, Operand(Handle<Map>(current->map())));
-
+ __ cmp(scratch1, Operand(current_map));
// Branch on the result of the map check.
__ b(ne, miss);
-
- // Check access rights to the global object. This has to happen
- // after the map check so that we know that the object is
- // actually a global object.
+ // Check access rights to the global object. This has to happen after
+ // the map check so that we know that the object is actually a global
+ // object.
if (current->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
- // Restore scratch register to be the map of the object. In the
- // new space case below, we load the prototype from the map in
- // the scratch register.
- __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+ __ CheckAccessGlobalProxy(reg, scratch2, miss);
}
+ reg = holder_reg; // From now on the object will be in holder_reg.
- reg = holder_reg; // from now the object is in holder_reg
- // The prototype is in new space; we cannot store a reference
- // to it in the code. Load it from the map.
- __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
- } else {
- // Check the map of the current object.
- __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- __ cmp(scratch1, Operand(Handle<Map>(current->map())));
- // Branch on the result of the map check.
- __ b(ne, miss);
- // Check access rights to the global object. This has to happen
- // after the map check so that we know that the object is
- // actually a global object.
- if (current->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
+ if (heap()->InNewSpace(*prototype)) {
+ // The prototype is in new space; we cannot store a reference to it
+ // in the code. Load it from the map.
+ __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+ } else {
+ // The prototype is in old space; load it directly.
+ __ mov(reg, Operand(prototype));
}
- // The prototype is in old space; load it directly.
- reg = holder_reg; // from now the object is in holder_reg
- __ mov(reg, Operand(Handle<JSObject>(prototype)));
}
if (save_at_depth == depth) {
@@ -1119,143 +1049,131 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
current = prototype;
}
+ // Log the check depth.
+ LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1));
+
// Check the holder map.
__ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
__ cmp(scratch1, Operand(Handle<Map>(current->map())));
__ b(ne, miss);
- // Log the check depth.
- LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1));
-
// Perform security check for access to the global object.
ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
if (holder->IsJSGlobalProxy()) {
__ CheckAccessGlobalProxy(reg, scratch1, miss);
- };
-
- // If we've skipped any global objects, it's not enough to verify
- // that their maps haven't changed. We also need to check that the
- // property cell for the property is still empty.
- MaybeObject* result = GenerateCheckPropertyCells(masm(),
- object,
- holder,
- name,
- scratch1,
- miss);
- if (result->IsFailure()) set_failure(Failure::cast(result));
+ }
+
+ // If we've skipped any global objects, it's not enough to verify that
+ // their maps haven't changed. We also need to check that the property
+ // cell for the property is still empty.
+ GenerateCheckPropertyCells(masm(), object, holder, name, scratch1, miss);
// Return the register containing the holder.
return reg;
}
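The rewritten loop folds the global-proxy access check and the new-space-versus-old-space prototype load into a single else branch keyed off the current map. Stripped of the assembler details, the check the stub emits amounts to walking the chain and comparing each object's map against the one baked in at compile time; a toy version (Map, Obj, and the expected array are illustrative):

    #include <cstddef>

    // Toy map check down a prototype chain: a stub compiled against one
    // sequence of maps bails out (here: returns nullptr, playing the
    // role of the miss label) if any object's map has changed.
    struct Map;
    struct Obj { const Map* map; const Obj* prototype; };

    const Obj* CheckPrototypes(const Obj* receiver,
                               const Map* const* expected,
                               std::size_t n) {
      const Obj* current = receiver;
      for (std::size_t i = 0; i < n; i++) {
        if (current == nullptr || current->map != expected[i])
          return nullptr;                    // miss: map changed
        if (i + 1 < n) current = current->prototype;
      }
      return current;  // holder: every map along the chain matched
    }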
-void StubCompiler::GenerateLoadField(JSObject* object,
- JSObject* holder,
+void StubCompiler::GenerateLoadField(Handle<JSObject> object,
+ Handle<JSObject> holder,
Register receiver,
Register scratch1,
Register scratch2,
Register scratch3,
int index,
- String* name,
+ Handle<String> name,
Label* miss) {
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, miss);
// Check that the maps haven't changed.
- Register reg =
- CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3,
- name, miss);
+ Register reg = CheckPrototypes(
+ object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
GenerateFastPropertyLoad(masm(), r0, reg, holder, index);
__ Ret();
}
-void StubCompiler::GenerateLoadConstant(JSObject* object,
- JSObject* holder,
+void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
+ Handle<JSObject> holder,
Register receiver,
Register scratch1,
Register scratch2,
Register scratch3,
- Object* value,
- String* name,
+ Handle<Object> value,
+ Handle<String> name,
Label* miss) {
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, miss);
// Check that the maps haven't changed.
- CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3, name,
- miss);
+ CheckPrototypes(
+ object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
// Return the constant value.
- __ mov(r0, Operand(Handle<Object>(value)));
+ __ mov(r0, Operand(value));
__ Ret();
}
-MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
- JSObject* holder,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- AccessorInfo* callback,
- String* name,
- Label* miss) {
+void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Register receiver,
+ Register name_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Handle<AccessorInfo> callback,
+ Handle<String> name,
+ Label* miss) {
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, miss);
// Check that the maps haven't changed.
- Register reg =
- CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3,
- name, miss);
+ Register reg = CheckPrototypes(object, receiver, holder, scratch1,
+ scratch2, scratch3, name, miss);
// Build AccessorInfo::args_ list on the stack and push property name below
// the exit frame to make GC aware of them and store pointers to them.
__ push(receiver);
__ mov(scratch2, sp); // scratch2 = AccessorInfo::args_
- Handle<AccessorInfo> callback_handle(callback);
- if (heap()->InNewSpace(callback_handle->data())) {
- __ Move(scratch3, callback_handle);
+ if (heap()->InNewSpace(callback->data())) {
+ __ Move(scratch3, callback);
__ ldr(scratch3, FieldMemOperand(scratch3, AccessorInfo::kDataOffset));
} else {
- __ Move(scratch3, Handle<Object>(callback_handle->data()));
+ __ Move(scratch3, Handle<Object>(callback->data()));
}
__ Push(reg, scratch3, name_reg);
__ mov(r0, sp); // r0 = Handle<String>
- Address getter_address = v8::ToCData<Address>(callback->getter());
- ApiFunction fun(getter_address);
-
const int kApiStackSpace = 1;
+ FrameScope frame_scope(masm(), StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
+
// Create AccessorInfo instance on the stack above the exit frame with
// scratch2 (internal::Object **args_) as the data.
__ str(scratch2, MemOperand(sp, 1 * kPointerSize));
__ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo&
- // Emitting a stub call may try to allocate (if the code is not
- // already generated). Do not allow the assembler to perform a
- // garbage collection but instead return the allocation failure
- // object.
const int kStackUnwindSpace = 4;
+ Address getter_address = v8::ToCData<Address>(callback->getter());
+ ApiFunction fun(getter_address);
ExternalReference ref =
ExternalReference(&fun,
ExternalReference::DIRECT_GETTER_CALL,
masm()->isolate());
- return masm()->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
+ __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
}
-void StubCompiler::GenerateLoadInterceptor(JSObject* object,
- JSObject* interceptor_holder,
+void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
+ Handle<JSObject> interceptor_holder,
LookupResult* lookup,
Register receiver,
Register name_reg,
Register scratch1,
Register scratch2,
Register scratch3,
- String* name,
+ Handle<String> name,
Label* miss) {
ASSERT(interceptor_holder->HasNamedInterceptor());
ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
@@ -1271,9 +1189,9 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
if (lookup->type() == FIELD) {
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
- lookup->GetCallbackObject()->IsAccessorInfo() &&
- AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL) {
- compile_followup_inline = true;
+ lookup->GetCallbackObject()->IsAccessorInfo()) {
+ compile_followup_inline =
+ AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL;
}
}
@@ -1288,48 +1206,45 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
// Save necessary data before invoking an interceptor.
// Requires a frame to make GC aware of pushed pointers.
- __ EnterInternalFrame();
-
- if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
- // CALLBACKS case needs a receiver to be passed into C++ callback.
- __ Push(receiver, holder_reg, name_reg);
- } else {
- __ Push(holder_reg, name_reg);
- }
-
- // Invoke an interceptor. Note: map checks from receiver to
- // interceptor's holder has been compiled before (see a caller
- // of this method.)
- CompileCallLoadPropertyWithInterceptor(masm(),
- receiver,
- holder_reg,
- name_reg,
- interceptor_holder);
-
- // Check if interceptor provided a value for property. If it's
- // the case, return immediately.
- Label interceptor_failed;
- __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
- __ cmp(r0, scratch1);
- __ b(eq, &interceptor_failed);
- __ LeaveInternalFrame();
- __ Ret();
+ {
+ FrameScope frame_scope(masm(), StackFrame::INTERNAL);
+ if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+ // CALLBACKS case needs a receiver to be passed into C++ callback.
+ __ Push(receiver, holder_reg, name_reg);
+ } else {
+ __ Push(holder_reg, name_reg);
+ }
+ // Invoke an interceptor. Note: the map checks from the receiver
+ // to the interceptor's holder have been compiled before (see the
+ // caller of this method).
+ CompileCallLoadPropertyWithInterceptor(masm(),
+ receiver,
+ holder_reg,
+ name_reg,
+ interceptor_holder);
+ // If the interceptor provided a value for the property, return
+ // immediately.
+ Label interceptor_failed;
+ __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
+ __ cmp(r0, scratch1);
+ __ b(eq, &interceptor_failed);
+ frame_scope.GenerateLeaveFrame();
+ __ Ret();
- __ bind(&interceptor_failed);
- __ pop(name_reg);
- __ pop(holder_reg);
- if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
- __ pop(receiver);
+ __ bind(&interceptor_failed);
+ __ pop(name_reg);
+ __ pop(holder_reg);
+ if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+ __ pop(receiver);
+ }
+ // Leave the internal frame.
}
-
- __ LeaveInternalFrame();
-
// Check that the maps from interceptor's holder to lookup's holder
// haven't changed. And load lookup's holder into |holder| register.
- if (interceptor_holder != lookup->holder()) {
+ if (*interceptor_holder != lookup->holder()) {
holder_reg = CheckPrototypes(interceptor_holder,
holder_reg,
- lookup->holder(),
+ Handle<JSObject>(lookup->holder()),
scratch1,
scratch2,
scratch3,
@@ -1341,21 +1256,21 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
// We found FIELD property in prototype chain of interceptor's holder.
// Retrieve a field from field's holder.
GenerateFastPropertyLoad(masm(), r0, holder_reg,
- lookup->holder(), lookup->GetFieldIndex());
+ Handle<JSObject>(lookup->holder()),
+ lookup->GetFieldIndex());
__ Ret();
} else {
// We found CALLBACKS property in prototype chain of interceptor's
// holder.
ASSERT(lookup->type() == CALLBACKS);
- ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
- AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
- ASSERT(callback != NULL);
+ Handle<AccessorInfo> callback(
+ AccessorInfo::cast(lookup->GetCallbackObject()));
ASSERT(callback->getter() != NULL);
// Tail call to runtime.
// Important invariant in CALLBACKS case: the code above must be
// structured to never clobber |receiver| register.
- __ Move(scratch2, Handle<AccessorInfo>(callback));
+ __ Move(scratch2, callback);
// holder_reg is either receiver or scratch1.
if (!receiver.is(holder_reg)) {
ASSERT(scratch1.is(holder_reg));
@@ -1392,17 +1307,17 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
}
-void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
+void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) {
if (kind_ == Code::KEYED_CALL_IC) {
- __ cmp(r2, Operand(Handle<String>(name)));
+ __ cmp(r2, Operand(name));
__ b(ne, miss);
}
}
-void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
- JSObject* holder,
- String* name,
+void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name,
Label* miss) {
ASSERT(holder->IsGlobalObject());
@@ -1415,7 +1330,7 @@ void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
// If the object is the holder then we know that it's a global
// object which can only happen for contextual calls. In this case,
// the receiver cannot be a smi.
- if (object != holder) {
+ if (!object.is_identical_to(holder)) {
__ JumpIfSmi(r0, miss);
}
@@ -1424,15 +1339,16 @@ void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
}
-void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
- JSFunction* function,
- Label* miss) {
+void CallStubCompiler::GenerateLoadFunctionFromCell(
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Label* miss) {
// Get the value from the cell.
- __ mov(r3, Operand(Handle<JSGlobalPropertyCell>(cell)));
+ __ mov(r3, Operand(cell));
__ ldr(r1, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
// Check that the cell contains the same function.
- if (heap()->InNewSpace(function)) {
+ if (heap()->InNewSpace(*function)) {
// We can't embed a pointer to a function in new space so we have
// to verify that the shared function info is unchanged. This has
// the nice side effect that multiple closures based on the same
@@ -1446,30 +1362,26 @@ void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
__ Move(r3, Handle<SharedFunctionInfo>(function->shared()));
__ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ cmp(r4, r3);
- __ b(ne, miss);
} else {
- __ cmp(r1, Operand(Handle<JSFunction>(function)));
- __ b(ne, miss);
+ __ cmp(r1, Operand(function));
}
+ __ b(ne, miss);
}
-MaybeObject* CallStubCompiler::GenerateMissBranch() {
- MaybeObject* maybe_obj =
+void CallStubCompiler::GenerateMissBranch() {
+ Handle<Code> code =
isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
kind_,
- extra_ic_state_);
- Object* obj;
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- __ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
- return obj;
+ extra_state_);
+ __ Jump(code, RelocInfo::CODE_TARGET);
}
-MaybeObject* CallStubCompiler::CompileCallField(JSObject* object,
- JSObject* holder,
+Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
+ Handle<JSObject> holder,
int index,
- String* name) {
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
@@ -1489,23 +1401,23 @@ MaybeObject* CallStubCompiler::CompileCallField(JSObject* object,
Register reg = CheckPrototypes(object, r0, holder, r1, r3, r4, name, &miss);
GenerateFastPropertyLoad(masm(), r1, reg, holder, index);
- GenerateCallFunction(masm(), object, arguments(), &miss, extra_ic_state_);
+ GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
// Handle call cache miss.
__ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(FIELD, name);
}
-MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileArrayPushCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
@@ -1515,14 +1427,12 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
// -----------------------------------
// If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
+ if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
Label miss;
-
GenerateNameCheck(name, &miss);
Register receiver = r1;
-
// Get the receiver from the stack
const int argc = arguments().immediate();
__ ldr(receiver, MemOperand(sp, argc * kPointerSize));
@@ -1531,8 +1441,8 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ JumpIfSmi(receiver, &miss);
// Check that the maps haven't changed.
- CheckPrototypes(JSObject::cast(object), receiver,
- holder, r3, r0, r4, name, &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, r3, r0, r4,
+ name, &miss);
if (argc == 0) {
// Nothing to do, just return the length.
@@ -1541,10 +1451,8 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ Ret();
} else {
Label call_builtin;
-
Register elements = r3;
Register end_elements = r5;
-
// Get the elements array of the object.
__ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
@@ -1556,7 +1464,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
DONT_DO_SMI_CHECK);
if (argc == 1) { // Otherwise fall through to call the builtin.
- Label exit, with_write_barrier, attempt_to_grow_elements;
+ Label attempt_to_grow_elements;
// Get the array's length into r0 and calculate new length.
__ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
@@ -1571,11 +1479,15 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ cmp(r0, r4);
__ b(gt, &attempt_to_grow_elements);
+ // Check if value is a smi.
+ Label with_write_barrier;
+ __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
+ __ JumpIfNotSmi(r4, &with_write_barrier);
+
// Save new length.
__ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
// Push the element.
- __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
// We may need a register containing the address end_elements below,
// so write back the value in end_elements.
__ add(end_elements, elements,
@@ -1585,14 +1497,31 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
// Check for a smi.
- __ JumpIfNotSmi(r4, &with_write_barrier);
- __ bind(&exit);
__ Drop(argc + 1);
__ Ret();
__ bind(&with_write_barrier);
- __ InNewSpace(elements, r4, eq, &exit);
- __ RecordWriteHelper(elements, end_elements, r4);
+
+ __ ldr(r6, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ CheckFastObjectElements(r6, r6, &call_builtin);
+
+ // Save new length.
+ __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+
+ // Push the element.
+ // We may need a register containing the address end_elements below,
+ // so write back the value in end_elements.
+ __ add(end_elements, elements,
+ Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
+
+ __ RecordWrite(elements,
+ end_elements,
+ r4,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
__ Drop(argc + 1);
__ Ret();
@@ -1604,6 +1533,15 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ b(&call_builtin);
}
+ __ ldr(r2, MemOperand(sp, (argc - 1) * kPointerSize));
+ // Growing elements that are SMI-only requires special handling in case
+ // the new element is non-Smi. For now, delegate to the builtin.
+ Label no_fast_elements_check;
+ __ JumpIfSmi(r2, &no_fast_elements_check);
+ __ ldr(r7, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ CheckFastObjectElements(r7, r7, &call_builtin);
+ __ bind(&no_fast_elements_check);
+
Isolate* isolate = masm()->isolate();
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate);
@@ -1630,8 +1568,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
// Update new_space_allocation_top.
__ str(r6, MemOperand(r7));
// Push the argument.
- __ ldr(r6, MemOperand(sp, (argc - 1) * kPointerSize));
- __ str(r6, MemOperand(end_elements));
+ __ str(r2, MemOperand(end_elements));
// Fill the rest with holes.
__ LoadRoot(r6, Heap::kTheHoleValueRootIndex);
for (int i = 1; i < kAllocationDelta; i++) {
@@ -1656,19 +1593,19 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
// Handle call cache miss.
__ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
}
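The push fast path now tests the incoming element with JumpIfNotSmi before choosing between a plain store and the write-barrier path, since a small integer carries no pointer the collector needs to track. With kSmiTag == 0 the test is just the low bit; a toy encoding (Tagged and the helpers are illustrative of 32-bit V8's scheme, not its actual types):

    #include <cassert>
    #include <cstdint>

    // Toy smi (small integer) tagging with kSmiTag == 0: a value whose
    // low bit is 0 is an integer shifted left by one; a low bit of 1
    // marks a heap pointer. Storing a smi therefore needs no write
    // barrier, which is what the fast path above exploits.
    using Tagged = std::intptr_t;

    inline bool IsSmi(Tagged v) { return (v & 1) == 0; }
    inline Tagged SmiFromInt(std::intptr_t i) { return i << 1; }
    inline std::intptr_t SmiToInt(Tagged v) {
      assert(IsSmi(v));
      return v >> 1;
    }

    int main() {
      Tagged t = SmiFromInt(21);
      assert(IsSmi(t) && SmiToInt(t) == 21);
      return 0;
    }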
-MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileArrayPopCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
@@ -1678,25 +1615,22 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
// -----------------------------------
// If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
+ if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
Label miss, return_undefined, call_builtin;
-
Register receiver = r1;
Register elements = r3;
-
GenerateNameCheck(name, &miss);
// Get the receiver from the stack
const int argc = arguments().immediate();
__ ldr(receiver, MemOperand(sp, argc * kPointerSize));
-
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, &miss);
// Check that the maps haven't changed.
- CheckPrototypes(JSObject::cast(object),
- receiver, holder, elements, r4, r0, name, &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, elements,
+ r4, r0, name, &miss);
// Get the elements array of the object.
__ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
@@ -1745,20 +1679,19 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
// Handle call cache miss.
__ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
}
-MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r2 : function name
// -- lr : return address
@@ -1768,21 +1701,19 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
// -----------------------------------
// If object is not a string, bail out to regular call.
- if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+ if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
const int argc = arguments().immediate();
-
Label miss;
Label name_miss;
Label index_out_of_range;
Label* index_out_of_range_label = &index_out_of_range;
if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_ic_state_) ==
+ (CallICBase::StringStubState::decode(extra_state_) ==
DEFAULT_STRING_STUB)) {
index_out_of_range_label = &miss;
}
-
GenerateNameCheck(name, &name_miss);
// Check that the maps starting from the prototype haven't changed.
@@ -1790,13 +1721,12 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
Context::STRING_FUNCTION_INDEX,
r0,
&miss);
- ASSERT(object != holder);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder,
- r1, r3, r4, name, &miss);
+ ASSERT(!object.is_identical_to(holder));
+ CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ r0, holder, r1, r3, r4, name, &miss);
Register receiver = r1;
Register index = r4;
- Register scratch = r3;
Register result = r0;
__ ldr(receiver, MemOperand(sp, argc * kPointerSize));
if (argc > 0) {
@@ -1805,20 +1735,19 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
__ LoadRoot(index, Heap::kUndefinedValueRootIndex);
}
- StringCharCodeAtGenerator char_code_at_generator(receiver,
- index,
- scratch,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- char_code_at_generator.GenerateFast(masm());
+ StringCharCodeAtGenerator generator(receiver,
+ index,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ index_out_of_range_label,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm());
__ Drop(argc + 1);
__ Ret();
StubRuntimeCallHelper call_helper;
- char_code_at_generator.GenerateSlow(masm(), call_helper);
+ generator.GenerateSlow(masm(), call_helper);
if (index_out_of_range.is_linked()) {
__ bind(&index_out_of_range);
@@ -1829,22 +1758,21 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
__ bind(&miss);
// Restore function name in r2.
- __ Move(r2, Handle<String>(name));
+ __ Move(r2, name);
__ bind(&name_miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
}
-MaybeObject* CallStubCompiler::CompileStringCharAtCall(
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileStringCharAtCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r2 : function name
// -- lr : return address
@@ -1854,21 +1782,18 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
// -----------------------------------
// If object is not a string, bail out to regular call.
- if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+ if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
const int argc = arguments().immediate();
-
Label miss;
Label name_miss;
Label index_out_of_range;
Label* index_out_of_range_label = &index_out_of_range;
-
if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_ic_state_) ==
+ (CallICBase::StringStubState::decode(extra_state_) ==
DEFAULT_STRING_STUB)) {
index_out_of_range_label = &miss;
}
-
GenerateNameCheck(name, &name_miss);
// Check that the maps starting from the prototype haven't changed.
@@ -1876,14 +1801,13 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
Context::STRING_FUNCTION_INDEX,
r0,
&miss);
- ASSERT(object != holder);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder,
- r1, r3, r4, name, &miss);
+ ASSERT(!object.is_identical_to(holder));
+ CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ r0, holder, r1, r3, r4, name, &miss);
Register receiver = r0;
Register index = r4;
- Register scratch1 = r1;
- Register scratch2 = r3;
+ Register scratch = r3;
Register result = r0;
__ ldr(receiver, MemOperand(sp, argc * kPointerSize));
if (argc > 0) {
@@ -1892,21 +1816,20 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
__ LoadRoot(index, Heap::kUndefinedValueRootIndex);
}
- StringCharAtGenerator char_at_generator(receiver,
- index,
- scratch1,
- scratch2,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- char_at_generator.GenerateFast(masm());
+ StringCharAtGenerator generator(receiver,
+ index,
+ scratch,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ index_out_of_range_label,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm());
__ Drop(argc + 1);
__ Ret();
StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm(), call_helper);
+ generator.GenerateSlow(masm(), call_helper);
if (index_out_of_range.is_linked()) {
__ bind(&index_out_of_range);
@@ -1917,22 +1840,21 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
__ bind(&miss);
// Restore function name in r2.
- __ Move(r2, Handle<String>(name));
+ __ Move(r2, name);
__ bind(&name_miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
}
-MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r2 : function name
// -- lr : return address
@@ -1945,22 +1867,23 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+ if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
Label miss;
GenerateNameCheck(name, &miss);
- if (cell == NULL) {
+ if (cell.is_null()) {
__ ldr(r1, MemOperand(sp, 1 * kPointerSize));
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfSmi(r1, &miss);
- CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
- &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
+ name, &miss);
} else {
- ASSERT(cell->value() == function);
- GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+ ASSERT(cell->value() == *function);
+ GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+ &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
@@ -1976,13 +1899,13 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
// Convert the smi code to uint16.
__ and_(code, code, Operand(Smi::FromInt(0xffff)));
- StringCharFromCodeGenerator char_from_code_generator(code, r0);
- char_from_code_generator.GenerateFast(masm());
+ StringCharFromCodeGenerator generator(code, r0);
+ generator.GenerateFast(masm());
__ Drop(argc + 1);
__ Ret();
StubRuntimeCallHelper call_helper;
- char_from_code_generator.GenerateSlow(masm(), call_helper);
+ generator.GenerateSlow(masm(), call_helper);
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
@@ -1991,19 +1914,19 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
__ bind(&miss);
// r2: function name.
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
}
-MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileMathFloorCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r2 : function name
// -- lr : return address
@@ -2013,31 +1936,28 @@ MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
// -----------------------------------
if (!CpuFeatures::IsSupported(VFP3)) {
- return heap()->undefined_value();
+ return Handle<Code>::null();
}
CpuFeatures::Scope scope_vfp3(VFP3);
-
const int argc = arguments().immediate();
-
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+ if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
Label miss, slow;
GenerateNameCheck(name, &miss);
- if (cell == NULL) {
+ if (cell.is_null()) {
__ ldr(r1, MemOperand(sp, 1 * kPointerSize));
-
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfSmi(r1, &miss);
-
- CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
- &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
+ name, &miss);
} else {
- ASSERT(cell->value() == function);
- GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+ ASSERT(cell->value() == *function);
+ GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+ &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
@@ -2139,19 +2059,19 @@ MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
__ bind(&miss);
// r2: function name.
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
}
-MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileMathAbsCall(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r2 : function name
// -- lr : return address
@@ -2161,25 +2081,22 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
// -----------------------------------
const int argc = arguments().immediate();
-
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+ if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
Label miss;
GenerateNameCheck(name, &miss);
-
- if (cell == NULL) {
+ if (cell.is_null()) {
__ ldr(r1, MemOperand(sp, 1 * kPointerSize));
-
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfSmi(r1, &miss);
-
- CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
- &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
+ name, &miss);
} else {
- ASSERT(cell->value() == function);
- GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+ ASSERT(cell->value() == *function);
+ GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+ &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
@@ -2240,35 +2157,33 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
__ bind(&miss);
// r2: function name.
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
- return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
}
-MaybeObject* CallStubCompiler::CompileFastApiCall(
+Handle<Code> CallStubCompiler::CompileFastApiCall(
const CallOptimization& optimization,
- Object* object,
- JSObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
Counters* counters = isolate()->counters();
ASSERT(optimization.is_simple_api_call());
// Bail out if object is a global object as we don't want to
// repatch it to global receiver.
- if (object->IsGlobalObject()) return heap()->undefined_value();
- if (cell != NULL) return heap()->undefined_value();
- if (!object->IsJSObject()) return heap()->undefined_value();
+ if (object->IsGlobalObject()) return Handle<Code>::null();
+ if (!cell.is_null()) return Handle<Code>::null();
+ if (!object->IsJSObject()) return Handle<Code>::null();
int depth = optimization.GetPrototypeDepthOfExpectedType(
- JSObject::cast(object), holder);
- if (depth == kInvalidProtoDepth) return heap()->undefined_value();
+ Handle<JSObject>::cast(object), holder);
+ if (depth == kInvalidProtoDepth) return Handle<Code>::null();
Label miss, miss_before_stack_reserved;
-
GenerateNameCheck(name, &miss_before_stack_reserved);
// Get the receiver from the stack.
@@ -2284,44 +2199,40 @@ MaybeObject* CallStubCompiler::CompileFastApiCall(
ReserveSpaceForFastApiCall(masm(), r0);
// Check that the maps haven't changed and find a Holder as a side effect.
- CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
+ CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4, name,
depth, &miss);
- MaybeObject* result = GenerateFastApiDirectCall(masm(), optimization, argc);
- if (result->IsFailure()) return result;
+ GenerateFastApiDirectCall(masm(), optimization, argc);
__ bind(&miss);
FreeSpaceForFastApiCall(masm());
__ bind(&miss_before_stack_reserved);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
}
-MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
- JSObject* holder,
- JSFunction* function,
- String* name,
+Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<JSFunction> function,
+ Handle<String> name,
CheckType check) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -----------------------------------
if (HasCustomCallGenerator(function)) {
- MaybeObject* maybe_result = CompileCustomCall(
- object, holder, NULL, function, name);
- Object* result;
- if (!maybe_result->ToObject(&result)) return maybe_result;
- // undefined means bail out to regular compiler.
- if (!result->IsUndefined()) return result;
+ Handle<Code> code = CompileCustomCall(object, holder,
+ Handle<JSGlobalPropertyCell>::null(),
+ function, name);
+ // A null handle means bail out to the regular compiler code below.
+ if (!code.is_null()) return code;
}
Label miss;
-
GenerateNameCheck(name, &miss);
// Get the receiver from the stack
@@ -2336,16 +2247,14 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
// Make sure that it's okay not to patch the on stack receiver
// unless we're doing a receiver map check.
ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
-
- SharedFunctionInfo* function_info = function->shared();
switch (check) {
case RECEIVER_MAP_CHECK:
__ IncrementCounter(masm()->isolate()->counters()->call_const(),
1, r0, r3);
// Check that the maps haven't changed.
- CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
- &miss);
+ CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
+ name, &miss);
// Patch the receiver on the stack with the global proxy if
// necessary.
@@ -2356,28 +2265,25 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
break;
case STRING_CHECK:
- if (!function->IsBuiltin() && !function_info->strict_mode()) {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- } else {
+ if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
// Check that the object is a two-byte string or a symbol.
__ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
__ b(ge, &miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::STRING_FUNCTION_INDEX, r0, &miss);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
- r1, r4, name, &miss);
- }
- break;
-
- case NUMBER_CHECK: {
- if (!function->IsBuiltin() && !function_info->strict_mode()) {
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ r0, holder, r3, r1, r4, name, &miss);
+ } else {
// Calling non-strict non-builtins with a value as the receiver
// requires boxing.
__ jmp(&miss);
- } else {
+ }
+ break;
+
+ case NUMBER_CHECK:
+ if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
Label fast;
// Check that the object is a smi or a heap number.
__ JumpIfSmi(r1, &fast);
@@ -2387,18 +2293,18 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::NUMBER_FUNCTION_INDEX, r0, &miss);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
- r1, r4, name, &miss);
- }
- break;
- }
-
- case BOOLEAN_CHECK: {
- if (!function->IsBuiltin() && !function_info->strict_mode()) {
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ r0, holder, r3, r1, r4, name, &miss);
+ } else {
// Calling non-strict non-builtins with a value as the receiver
// requires boxing.
__ jmp(&miss);
- } else {
+ }
+ break;
+
+ case BOOLEAN_CHECK:
+ if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
Label fast;
// Check that the object is a boolean.
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
@@ -2411,112 +2317,91 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::BOOLEAN_FUNCTION_INDEX, r0, &miss);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
- r1, r4, name, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+ r0, holder, r3, r1, r4, name, &miss);
+ } else {
+ // Calling non-strict non-builtins with a value as the receiver
+ // requires boxing.
+ __ jmp(&miss);
}
break;
- }
-
- default:
- UNREACHABLE();
}
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
__ InvokeFunction(function, arguments(), JUMP_FUNCTION, call_kind);
// Handle call cache miss.
__ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(function);
}
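
The hunks above are part of a global switch from the MaybeObject* failure
protocol to handles: CompileCustomCall now returns a Handle<Code>, and a null
handle (instead of the old undefined sentinel) means "no custom stub, fall
through to the regular compiler". A minimal sketch of the new protocol, using
a toy Handle type rather than V8's real one:

#include <cstdio>

struct Code {};

template <typename T>
struct Handle {  // toy stand-in for v8::internal::Handle<T>
  T* location_;
  bool is_null() const { return location_ == nullptr; }
  static Handle<T> null() { return Handle<T>{nullptr}; }
};

Handle<Code> CompileCustomCall(bool have_custom_stub) {
  static Code code;
  return have_custom_stub ? Handle<Code>{&code} : Handle<Code>::null();
}

int main() {
  Handle<Code> code = CompileCustomCall(false);
  if (!code.is_null()) {
    std::printf("use the custom stub\n");
  } else {
    std::printf("bail out to the regular compiler path\n");  // taken here
  }
}
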
-MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
- JSObject* holder,
- String* name) {
+Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -----------------------------------
-
Label miss;
-
GenerateNameCheck(name, &miss);
// Get the number of arguments.
const int argc = arguments().immediate();
-
- LookupResult lookup;
+ LookupResult lookup(isolate());
LookupPostInterceptor(holder, name, &lookup);
// Get the receiver from the stack.
__ ldr(r1, MemOperand(sp, argc * kPointerSize));
- CallInterceptorCompiler compiler(this, arguments(), r2, extra_ic_state_);
- MaybeObject* result = compiler.Compile(masm(),
- object,
- holder,
- name,
- &lookup,
- r1,
- r3,
- r4,
- r0,
- &miss);
- if (result->IsFailure()) {
- return result;
- }
+ CallInterceptorCompiler compiler(this, arguments(), r2, extra_state_);
+ compiler.Compile(masm(), object, holder, name, &lookup, r1, r3, r4, r0,
+ &miss);
// Move returned value, the function to call, to r1.
__ mov(r1, r0);
// Restore receiver.
__ ldr(r0, MemOperand(sp, argc * kPointerSize));
- GenerateCallFunction(masm(), object, arguments(), &miss, extra_ic_state_);
+ GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
// Handle call cache miss.
__ bind(&miss);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(INTERCEPTOR, name);
}
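
For reference, the interceptor stub compiled above asks the receiver's named
interceptor for the property first and only then falls back to the
post-interceptor lookup; whatever callable it produces in r0 becomes the
callee in r1. A toy model of that flow, with plain callbacks standing in for
the V8 interceptor API:

#include <cstdio>
#include <cstring>

typedef void (*Fn)();

void InterceptedFn() { std::printf("function supplied by interceptor\n"); }

// A named interceptor: may or may not produce a value for a given name.
Fn QueryInterceptor(const char* name) {
  return std::strcmp(name, "hooked") == 0 ? InterceptedFn : nullptr;
}

void CallProperty(const char* name) {
  Fn callee = QueryInterceptor(name);  // interceptor consulted first
  if (callee == nullptr) {
    std::printf("post-interceptor lookup for '%s'\n", name);
    return;
  }
  callee();  // analogous to "mov r1, r0" followed by the call
}

int main() {
  CallProperty("hooked");
  CallProperty("plain");
}
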
-MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+Handle<Code> CallStubCompiler::CompileCallGlobal(
+ Handle<JSObject> object,
+ Handle<GlobalObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<JSFunction> function,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -----------------------------------
-
if (HasCustomCallGenerator(function)) {
- MaybeObject* maybe_result = CompileCustomCall(
- object, holder, cell, function, name);
- Object* result;
- if (!maybe_result->ToObject(&result)) return maybe_result;
- // undefined means bail out to regular compiler.
- if (!result->IsUndefined()) return result;
+ Handle<Code> code = CompileCustomCall(object, holder, cell, function, name);
+ // A null handle means bail out to the regular compiler code below.
+ if (!code.is_null()) return code;
}
Label miss;
-
GenerateNameCheck(name, &miss);
// Get the number of arguments.
const int argc = arguments().immediate();
-
GenerateGlobalReceiverCheck(object, holder, name, &miss);
-
GenerateLoadFunctionFromCell(cell, function, &miss);
// Patch the receiver on the stack with the global proxy if
@@ -2532,39 +2417,31 @@ MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
// Jump to the cached code (tail call).
Counters* counters = masm()->isolate()->counters();
__ IncrementCounter(counters->call_global_inline(), 1, r3, r4);
- ASSERT(function->is_compiled());
- Handle<Code> code(function->code());
ParameterCount expected(function->shared()->formal_parameter_count());
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
- if (V8::UseCrankshaft()) {
- // TODO(kasperl): For now, we always call indirectly through the
- // code field in the function to allow recompilation to take effect
- // without changing any of the call sites.
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
- __ InvokeCode(r3, expected, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
- } else {
- __ InvokeCode(code, expected, arguments(), RelocInfo::CODE_TARGET,
- JUMP_FUNCTION, call_kind);
- }
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+ __ InvokeCode(r3, expected, arguments(), JUMP_FUNCTION,
+ NullCallWrapper(), call_kind);
// Handle call cache miss.
__ bind(&miss);
__ IncrementCounter(counters->call_global_inline_miss(), 1, r1, r3);
- MaybeObject* maybe_result = GenerateMissBranch();
- if (maybe_result->IsFailure()) return maybe_result;
+ GenerateMissBranch();
// Return the generated code.
return GetCode(NORMAL, name);
}
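
Besides the handle conversion, this function drops the UseCrankshaft() branch:
the stub now always calls through the function's code-entry field, so
recompilation can swap in optimized code without patching any call site. A
toy model of that indirection (the names here are illustrative, not V8's):

#include <cstdio>

typedef void (*CodeEntry)();

void unoptimized() { std::printf("unoptimized code\n"); }
void optimized()   { std::printf("optimized code\n"); }

struct JSFunctionModel {
  CodeEntry code_entry;  // plays the role of JSFunction::kCodeEntryOffset
};

void Invoke(JSFunctionModel* f) {
  f->code_entry();  // indirect call, like "ldr r3, [r1, #code]" then jump
}

int main() {
  JSFunctionModel f{unoptimized};
  Invoke(&f);                // runs unoptimized code
  f.code_entry = optimized;  // recompilation rewrites the field...
  Invoke(&f);                // ...and the same call site picks it up
}
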
-MaybeObject* StoreStubCompiler::CompileStoreField(JSObject* object,
+Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
int index,
- Map* transition,
- String* name) {
+ Handle<Map> transition,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
@@ -2573,24 +2450,20 @@ MaybeObject* StoreStubCompiler::CompileStoreField(JSObject* object,
// -----------------------------------
Label miss;
- GenerateStoreField(masm(),
- object,
- index,
- transition,
- r1, r2, r3,
- &miss);
+ GenerateStoreField(masm(), object, index, transition, r1, r2, r3, &miss);
__ bind(&miss);
Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
+ return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
}
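
The stub kind is derived from the transition handle: a null transition means
the field already exists on the receiver's map (a FIELD stub), while a
non-null one means the store adds the property and must also move the object
to the new map (a MAP_TRANSITION stub). Sketched with toy types:

#include <cstdio>

struct Map {};
enum StubKind { FIELD, MAP_TRANSITION };

StubKind KindFor(const Map* transition) {
  return transition == nullptr ? FIELD : MAP_TRANSITION;
}

int main() {
  Map new_map;
  std::printf("%d\n", KindFor(nullptr));   // 0: FIELD
  std::printf("%d\n", KindFor(&new_map));  // 1: MAP_TRANSITION
}
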
-MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
- AccessorInfo* callback,
- String* name) {
+Handle<Code> StoreStubCompiler::CompileStoreCallback(
+ Handle<JSObject> object,
+ Handle<AccessorInfo> callback,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
@@ -2617,7 +2490,7 @@ MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
__ push(r1); // receiver
- __ mov(ip, Operand(Handle<AccessorInfo>(callback))); // callback info
+ __ mov(ip, Operand(callback)); // callback info
__ Push(ip, r2, r0);
// Do tail-call to the runtime system.
@@ -2636,8 +2509,9 @@ MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
}
-MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
- String* name) {
+Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
+ Handle<JSObject> receiver,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
@@ -2684,9 +2558,10 @@ MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
}
-MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
- JSGlobalPropertyCell* cell,
- String* name) {
+Handle<Code> StoreStubCompiler::CompileStoreGlobal(
+ Handle<GlobalObject> object,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
@@ -2704,7 +2579,7 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
// cell could have been deleted and reintroducing the global needs
// to update the property details in the property dictionary of the
// global object. We bail out to the runtime system to do that.
- __ mov(r4, Operand(Handle<JSGlobalPropertyCell>(cell)));
+ __ mov(r4, Operand(cell));
__ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
__ ldr(r6, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
__ cmp(r5, r6);
@@ -2713,6 +2588,15 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
// Store the value in the cell.
__ str(r0, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
+ __ mov(r1, r0);
+ __ RecordWriteField(r4,
+ JSGlobalPropertyCell::kValueOffset,
+ r1,
+ r2,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET);
+
Counters* counters = masm()->isolate()->counters();
__ IncrementCounter(counters->named_store_global_inline(), 1, r4, r3);
__ Ret();
@@ -2728,9 +2612,9 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
}
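
The newly inserted RecordWriteField call is what keeps incremental marking
correct here: writing an unmarked (white) value into an already-scanned
(black) cell would otherwise hide the value from the collector. A toy model
of the tri-color invariant the barrier protects, not V8's actual barrier:

#include <cstdio>

enum Color { WHITE, GREY, BLACK };

struct Obj {
  Color color;
  Obj* field;
};

void WriteBarrier(Obj* holder, Obj* value) {
  if (holder->color == BLACK && value != nullptr && value->color == WHITE) {
    value->color = GREY;  // restore the invariant: no black-to-white edges
  }
}

void StoreField(Obj* holder, Obj* value) {
  holder->field = value;
  WriteBarrier(holder, value);  // analogous to __ RecordWriteField(...)
}

int main() {
  Obj cell{BLACK, nullptr};
  Obj value{WHITE, nullptr};
  StoreField(&cell, &value);
  std::printf("value color after store: %d (grey)\n", value.color);
}
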
-MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
- JSObject* object,
- JSObject* last) {
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
+ Handle<JSObject> object,
+ Handle<JSObject> last) {
// ----------- S t a t e -------------
// -- r0 : receiver
// -- lr : return address
@@ -2746,15 +2630,8 @@ MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
// If the last object in the prototype chain is a global object,
// check that the global property cell is empty.
if (last->IsGlobalObject()) {
- MaybeObject* cell = GenerateCheckPropertyCell(masm(),
- GlobalObject::cast(last),
- name,
- r1,
- &miss);
- if (cell->IsFailure()) {
- miss.Unuse();
- return cell;
- }
+ GenerateCheckPropertyCell(
+ masm(), Handle<GlobalObject>::cast(last), name, r1, &miss);
}
// Return undefined if maps of the full prototype chain are still the
@@ -2766,14 +2643,14 @@ MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(NONEXISTENT, heap()->empty_string());
+ return GetCode(NONEXISTENT, factory()->empty_string());
}
-MaybeObject* LoadStubCompiler::CompileLoadField(JSObject* object,
- JSObject* holder,
+Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
+ Handle<JSObject> holder,
int index,
- String* name) {
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r0 : receiver
// -- r2 : name
@@ -2790,24 +2667,19 @@ MaybeObject* LoadStubCompiler::CompileLoadField(JSObject* object,
}
-MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
- JSObject* object,
- JSObject* holder,
- AccessorInfo* callback) {
+Handle<Code> LoadStubCompiler::CompileLoadCallback(
+ Handle<String> name,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<AccessorInfo> callback) {
// ----------- S t a t e -------------
// -- r0 : receiver
// -- r2 : name
// -- lr : return address
// -----------------------------------
Label miss;
-
- MaybeObject* result = GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4,
- callback, name, &miss);
- if (result->IsFailure()) {
- miss.Unuse();
- return result;
- }
-
+ GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4, callback, name,
+ &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -2816,10 +2688,10 @@ MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
}
-MaybeObject* LoadStubCompiler::CompileLoadConstant(JSObject* object,
- JSObject* holder,
- Object* value,
- String* name) {
+Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Object> value,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r0 : receiver
// -- r2 : name
@@ -2836,9 +2708,9 @@ MaybeObject* LoadStubCompiler::CompileLoadConstant(JSObject* object,
}
-MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
- JSObject* holder,
- String* name) {
+Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r0 : receiver
// -- r2 : name
@@ -2846,17 +2718,9 @@ MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
// -----------------------------------
Label miss;
- LookupResult lookup;
+ LookupResult lookup(isolate());
LookupPostInterceptor(holder, name, &lookup);
- GenerateLoadInterceptor(object,
- holder,
- &lookup,
- r0,
- r2,
- r3,
- r1,
- r4,
- name,
+ GenerateLoadInterceptor(object, holder, &lookup, r0, r2, r3, r1, r4, name,
&miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -2866,11 +2730,12 @@ MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
}
-MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- String* name,
- bool is_dont_delete) {
+Handle<Code> LoadStubCompiler::CompileLoadGlobal(
+ Handle<JSObject> object,
+ Handle<GlobalObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<String> name,
+ bool is_dont_delete) {
// ----------- S t a t e -------------
// -- r0 : receiver
// -- r2 : name
@@ -2881,7 +2746,7 @@ MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
// If the object is the holder then we know that it's a global
// object which can only happen for contextual calls. In this case,
// the receiver cannot be a smi.
- if (object != holder) {
+ if (!object.is_identical_to(holder)) {
__ JumpIfSmi(r0, &miss);
}
@@ -2889,7 +2754,7 @@ MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
CheckPrototypes(object, r0, holder, r3, r4, r1, name, &miss);
// Get the value from the cell.
- __ mov(r3, Operand(Handle<JSGlobalPropertyCell>(cell)));
+ __ mov(r3, Operand(cell));
__ ldr(r4, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
// Check for deleted property if property can actually be deleted.
@@ -2913,9 +2778,9 @@ MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
- JSObject* receiver,
- JSObject* holder,
+Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
int index) {
// ----------- S t a t e -------------
// -- lr : return address
@@ -2925,7 +2790,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
Label miss;
// Check the key is the cached one.
- __ cmp(r0, Operand(Handle<String>(name)));
+ __ cmp(r0, Operand(name));
__ b(ne, &miss);
GenerateLoadField(receiver, holder, r1, r2, r3, r4, index, name, &miss);
@@ -2936,11 +2801,11 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
}
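
Every monomorphic keyed stub in this file opens with the same guard seen
above: the stub was compiled for one specific property name, so it compares
the incoming key register against that cached name and misses otherwise. A
sketch of the idea, modelling interned names as pointer identity:

#include <cstdio>

const char* const kCachedName = "length";  // interned: compare by pointer

bool TryCachedStub(const char* key) {
  if (key != kCachedName) return false;  // miss: fall back to the generic IC
  std::printf("fast path for '%s'\n", key);
  return true;
}

int main() {
  TryCachedStub(kCachedName);   // hits the compiled stub
  TryCachedStub("other_name");  // distinct name: miss
}
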
-MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
- String* name,
- JSObject* receiver,
- JSObject* holder,
- AccessorInfo* callback) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<AccessorInfo> callback) {
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key
@@ -2949,16 +2814,11 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
Label miss;
// Check the key is the cached one.
- __ cmp(r0, Operand(Handle<String>(name)));
+ __ cmp(r0, Operand(name));
__ b(ne, &miss);
- MaybeObject* result = GenerateLoadCallback(receiver, holder, r1, r0, r2, r3,
- r4, callback, name, &miss);
- if (result->IsFailure()) {
- miss.Unuse();
- return result;
- }
-
+ GenerateLoadCallback(receiver, holder, r1, r0, r2, r3, r4, callback, name,
+ &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -2966,10 +2826,11 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
- JSObject* receiver,
- JSObject* holder,
- Object* value) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<Object> value) {
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key
@@ -2978,7 +2839,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
Label miss;
// Check the key is the cached one.
- __ cmp(r0, Operand(Handle<String>(name)));
+ __ cmp(r0, Operand(name));
__ b(ne, &miss);
GenerateLoadConstant(receiver, holder, r1, r2, r3, r4, value, name, &miss);
@@ -2990,9 +2851,10 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
- JSObject* holder,
- String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key
@@ -3001,20 +2863,12 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
Label miss;
// Check the key is the cached one.
- __ cmp(r0, Operand(Handle<String>(name)));
+ __ cmp(r0, Operand(name));
__ b(ne, &miss);
- LookupResult lookup;
+ LookupResult lookup(isolate());
LookupPostInterceptor(holder, name, &lookup);
- GenerateLoadInterceptor(receiver,
- holder,
- &lookup,
- r1,
- r0,
- r2,
- r3,
- r4,
- name,
+ GenerateLoadInterceptor(receiver, holder, &lookup, r1, r0, r2, r3, r4, name,
&miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -3023,7 +2877,8 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key
@@ -3032,7 +2887,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
Label miss;
// Check the key is the cached one.
- __ cmp(r0, Operand(Handle<String>(name)));
+ __ cmp(r0, Operand(name));
__ b(ne, &miss);
GenerateLoadArrayLength(masm(), r1, r2, &miss);
@@ -3043,7 +2898,8 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key
@@ -3055,7 +2911,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
__ IncrementCounter(counters->keyed_load_string_length(), 1, r2, r3);
// Check the key is the cached one.
- __ cmp(r0, Operand(Handle<String>(name)));
+ __ cmp(r0, Operand(name));
__ b(ne, &miss);
GenerateLoadStringLength(masm(), r1, r2, r3, &miss, true);
@@ -3068,7 +2924,8 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key
@@ -3080,7 +2937,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
__ IncrementCounter(counters->keyed_load_function_prototype(), 1, r2, r3);
// Check the name hasn't changed.
- __ cmp(r0, Operand(Handle<String>(name)));
+ __ cmp(r0, Operand(name));
__ b(ne, &miss);
GenerateLoadFunctionPrototype(masm(), r1, r2, r3, &miss);
@@ -3092,33 +2949,29 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
+ Handle<Map> receiver_map) {
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
- Code* stub;
ElementsKind elements_kind = receiver_map->elements_kind();
- MaybeObject* maybe_stub = KeyedLoadElementStub(elements_kind).TryGetCode();
- if (!maybe_stub->To(&stub)) return maybe_stub;
- __ DispatchMap(r1,
- r2,
- Handle<Map>(receiver_map),
- Handle<Code>(stub),
- DO_SMI_CHECK);
+ Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
+
+ __ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK);
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, NULL);
+ return GetCode(NORMAL, factory()->empty_string());
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
- MapList* receiver_maps,
- CodeList* handler_ics) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
+ MapHandleList* receiver_maps,
+ CodeHandleList* handler_ics) {
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key
@@ -3130,11 +2983,9 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
int receiver_count = receiver_maps->length();
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
for (int current = 0; current < receiver_count; ++current) {
- Handle<Map> map(receiver_maps->at(current));
- Handle<Code> code(handler_ics->at(current));
- __ mov(ip, Operand(map));
+ __ mov(ip, Operand(receiver_maps->at(current)));
__ cmp(r2, ip);
- __ Jump(code, RelocInfo::CODE_TARGET, eq);
+ __ Jump(handler_ics->at(current), RelocInfo::CODE_TARGET, eq);
}
__ bind(&miss);
@@ -3142,14 +2993,14 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
__ Jump(miss_ic, RelocInfo::CODE_TARGET, al);
// Return the generated code.
- return GetCode(NORMAL, NULL, MEGAMORPHIC);
+ return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
}
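
The polymorphic load stub is a linear dispatch: load the receiver's map once,
compare it against each cached map, and tail-jump to the matching handler,
falling through to the miss builtin when nothing matches. Sketched with toy
types:

#include <cstdio>

struct Map {};
typedef void (*Handler)();

void HandlerA() { std::printf("handler for map A\n"); }
void Miss()     { std::printf("KeyedLoadIC miss\n"); }

void Dispatch(const Map* receiver_map,
              const Map* const* maps,
              const Handler* handlers,
              int count) {
  for (int i = 0; i < count; ++i) {
    if (receiver_map == maps[i]) return handlers[i]();  // "Jump ..., eq"
  }
  Miss();
}

int main() {
  Map a, b;
  const Map* maps[] = {&a};
  Handler handlers[] = {HandlerA};
  Dispatch(&a, maps, handlers, 1);  // dispatches to HandlerA
  Dispatch(&b, maps, handlers, 1);  // unknown map: miss
}
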
-MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
+Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
int index,
- Map* transition,
- String* name) {
+ Handle<Map> transition,
+ Handle<String> name) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : name
@@ -3162,17 +3013,12 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
__ IncrementCounter(counters->keyed_store_field(), 1, r3, r4);
// Check that the name has not changed.
- __ cmp(r1, Operand(Handle<String>(name)));
+ __ cmp(r1, Operand(name));
__ b(ne, &miss);
  // r3 is used as a scratch register. r1 and r2 keep their values if a jump to
// the miss label is generated.
- GenerateStoreField(masm(),
- object,
- index,
- transition,
- r2, r1, r3,
- &miss);
+ GenerateStoreField(masm(), object, index, transition, r2, r1, r3, &miss);
__ bind(&miss);
__ DecrementCounter(counters->keyed_store_field(), 1, r3, r4);
@@ -3180,11 +3026,12 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
+ return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
}
-MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
+Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
+ Handle<Map> receiver_map) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
@@ -3192,29 +3039,25 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
// -- lr : return address
// -- r3 : scratch
// -----------------------------------
- Code* stub;
ElementsKind elements_kind = receiver_map->elements_kind();
bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
- MaybeObject* maybe_stub =
- KeyedStoreElementStub(is_js_array, elements_kind).TryGetCode();
- if (!maybe_stub->To(&stub)) return maybe_stub;
- __ DispatchMap(r2,
- r3,
- Handle<Map>(receiver_map),
- Handle<Code>(stub),
- DO_SMI_CHECK);
+ Handle<Code> stub =
+ KeyedStoreElementStub(is_js_array, elements_kind).GetCode();
+
+ __ DispatchMap(r2, r3, receiver_map, stub, DO_SMI_CHECK);
Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, NULL);
+ return GetCode(NORMAL, factory()->empty_string());
}
-MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
- MapList* receiver_maps,
- CodeList* handler_ics) {
+Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
+ MapHandleList* receiver_maps,
+ CodeHandleList* handler_stubs,
+ MapHandleList* transitioned_maps) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
@@ -3227,12 +3070,18 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
int receiver_count = receiver_maps->length();
__ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
- for (int current = 0; current < receiver_count; ++current) {
- Handle<Map> map(receiver_maps->at(current));
- Handle<Code> code(handler_ics->at(current));
- __ mov(ip, Operand(map));
+ for (int i = 0; i < receiver_count; ++i) {
+ __ mov(ip, Operand(receiver_maps->at(i)));
__ cmp(r3, ip);
- __ Jump(code, RelocInfo::CODE_TARGET, eq);
+ if (transitioned_maps->at(i).is_null()) {
+ __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq);
+ } else {
+ Label next_map;
+ __ b(ne, &next_map);
+ __ mov(r3, Operand(transitioned_maps->at(i)));
+ __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, al);
+ __ bind(&next_map);
+ }
}
__ bind(&miss);
@@ -3240,11 +3089,12 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
__ Jump(miss_ic, RelocInfo::CODE_TARGET, al);
// Return the generated code.
- return GetCode(NORMAL, NULL, MEGAMORPHIC);
+ return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
}
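
The store variant now carries a parallel list of transitioned maps: on a map
match, a null entry jumps straight to the handler, while a non-null entry
first materializes the target map (r3 above) so the handler can transition
the elements kind before storing. A toy model of that dispatch:

#include <cstdio>

struct Map { const char* name; };
typedef void (*Handler)(const Map* target_map);

void StoreHandler(const Map* target_map) {
  if (target_map != nullptr) {
    std::printf("transition to %s, then store\n", target_map->name);
  } else {
    std::printf("store in place\n");
  }
}

void Dispatch(const Map* receiver_map,
              const Map* cached_map,
              const Map* transitioned_map,
              Handler handler) {
  if (receiver_map == cached_map) {
    handler(transitioned_map);  // null means "no transition needed"
  } else {
    std::printf("KeyedStoreIC miss\n");
  }
}

int main() {
  Map smi_only{"FAST_SMI_ONLY"};
  Map fast{"FAST_ELEMENTS"};
  Dispatch(&smi_only, &smi_only, nullptr, StoreHandler);  // plain store
  Dispatch(&smi_only, &smi_only, &fast, StoreHandler);    // transitioning
}
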
-MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
+Handle<Code> ConstructStubCompiler::CompileConstructStub(
+ Handle<JSFunction> function) {
// ----------- S t a t e -------------
// -- r0 : argc
// -- r1 : constructor
@@ -3290,12 +3140,7 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
// r2: initial map
// r7: undefined
__ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
- __ AllocateInNewSpace(r3,
- r4,
- r5,
- r6,
- &generic_stub_call,
- SIZE_IN_WORDS);
+ __ AllocateInNewSpace(r3, r4, r5, r6, &generic_stub_call, SIZE_IN_WORDS);
// Allocated the JSObject, now initialize the fields. Map is set to initial
// map and properties and elements are set to empty fixed array.
@@ -3327,7 +3172,7 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
// r7: undefined
// Fill the initialized properties with a constant value or a passed argument
// depending on the this.x = ...; assignment in the function.
- SharedFunctionInfo* shared = function->shared();
+ Handle<SharedFunctionInfo> shared(function->shared());
for (int i = 0; i < shared->this_property_assignments_count(); i++) {
if (shared->IsThisPropertyAssignmentArgument(i)) {
Label not_passed, next;
@@ -3454,6 +3299,7 @@ static bool IsElementTypeSigned(ElementsKind elements_kind) {
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3540,6 +3386,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
}
break;
case FAST_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3784,9 +3631,9 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
__ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
__ bind(&miss_force_generic);
- Code* stub = masm->isolate()->builtins()->builtin(
- Builtins::kKeyedLoadIC_MissForceGeneric);
- __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET);
+ Handle<Code> stub =
+ masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+ __ Jump(stub, RelocInfo::CODE_TARGET);
}
@@ -3880,6 +3727,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
}
break;
case FAST_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3943,6 +3791,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -4082,6 +3931,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -4157,9 +4007,9 @@ void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
__ Ret();
__ bind(&miss_force_generic);
- Code* stub = masm->isolate()->builtins()->builtin(
- Builtins::kKeyedLoadIC_MissForceGeneric);
- __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET);
+ Handle<Code> stub =
+ masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+ __ Jump(stub, RelocInfo::CODE_TARGET);
}
@@ -4234,8 +4084,10 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
}
-void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
- bool is_js_array) {
+void KeyedStoreStubCompiler::GenerateStoreFastElement(
+ MacroAssembler* masm,
+ bool is_js_array,
+ ElementsKind elements_kind) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
@@ -4244,7 +4096,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
// -- r3 : scratch
// -- r4 : scratch (elements)
// -----------------------------------
- Label miss_force_generic;
+ Label miss_force_generic, transition_elements_kind;
Register value_reg = r0;
Register key_reg = r1;
@@ -4277,15 +4129,33 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
__ cmp(key_reg, scratch);
__ b(hs, &miss_force_generic);
- __ add(scratch,
- elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ str(value_reg,
- MemOperand(scratch, key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ RecordWrite(scratch,
- Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize),
- receiver_reg , elements_reg);
-
+ if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+ __ JumpIfNotSmi(value_reg, &transition_elements_kind);
+ __ add(scratch,
+ elements_reg,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+ __ add(scratch,
+ scratch,
+ Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ str(value_reg, MemOperand(scratch));
+ } else {
+ ASSERT(elements_kind == FAST_ELEMENTS);
+ __ add(scratch,
+ elements_reg,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+ __ add(scratch,
+ scratch,
+ Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ str(value_reg, MemOperand(scratch));
+ __ mov(receiver_reg, value_reg);
+ __ RecordWrite(elements_reg, // Object.
+ scratch, // Address.
+ receiver_reg, // Value.
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
+ }
// value_reg (r0) is preserved.
// Done.
__ Ret();
@@ -4294,6 +4164,10 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
Handle<Code> ic =
masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
__ Jump(ic, RelocInfo::CODE_TARGET);
+
+ __ bind(&transition_elements_kind);
+ Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
+ __ Jump(ic_miss, RelocInfo::CODE_TARGET);
}
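
The split above is why the FAST_SMI_ONLY_ELEMENTS path has no RecordWrite: a
smi is a tagged immediate, not a heap pointer, so storing one can never
create an edge the collector needs to see; non-smi values either take the
barrier (FAST_ELEMENTS) or bounce to the transition stub. The sketch below
mirrors V8's 32-bit smi tagging (low bit clear for smis):

#include <cstdint>
#include <cstdio>

inline bool IsSmi(uintptr_t tagged) { return (tagged & 1) == 0; }

void StoreElement(uintptr_t* slot, uintptr_t value, bool smi_only_kind) {
  if (smi_only_kind && !IsSmi(value)) {
    std::printf("transition elements kind, retry in the generic stub\n");
    return;
  }
  *slot = value;
  if (!IsSmi(value)) {
    std::printf("RecordWrite needed: a heap pointer was stored\n");
  }
  // Smi stores fall through with no write barrier at all.
}

int main() {
  uintptr_t backing[1] = {0};
  StoreElement(backing, 42u << 1, true);  // smi 42: stored, no barrier
  StoreElement(backing, 0x1001u, true);   // tagged pointer: must transition
}
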
@@ -4309,15 +4183,15 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// -- r4 : scratch
// -- r5 : scratch
// -----------------------------------
- Label miss_force_generic, smi_value, is_nan, maybe_nan, have_double_value;
+ Label miss_force_generic, transition_elements_kind;
Register value_reg = r0;
Register key_reg = r1;
Register receiver_reg = r2;
- Register scratch = r3;
- Register elements_reg = r4;
- Register mantissa_reg = r5;
- Register exponent_reg = r6;
+ Register elements_reg = r3;
+ Register scratch1 = r4;
+ Register scratch2 = r5;
+ Register scratch3 = r6;
Register scratch4 = r7;
// This stub is meant to be tail-jumped to, the receiver must already
@@ -4329,90 +4203,25 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// Check that the key is within bounds.
if (is_js_array) {
- __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+ __ ldr(scratch1, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
} else {
- __ ldr(scratch,
+ __ ldr(scratch1,
FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
}
// Compare smis, unsigned compare catches both negative and out-of-bound
// indexes.
- __ cmp(key_reg, scratch);
+ __ cmp(key_reg, scratch1);
__ b(hs, &miss_force_generic);
- // Handle smi values specially.
- __ JumpIfSmi(value_reg, &smi_value);
-
- // Ensure that the object is a heap number
- __ CheckMap(value_reg,
- scratch,
- masm->isolate()->factory()->heap_number_map(),
- &miss_force_generic,
- DONT_DO_SMI_CHECK);
-
- // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
- // in the exponent.
- __ mov(scratch, Operand(kNaNOrInfinityLowerBoundUpper32));
- __ ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
- __ cmp(exponent_reg, scratch);
- __ b(ge, &maybe_nan);
-
- __ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
-
- __ bind(&have_double_value);
- __ add(scratch, elements_reg,
- Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
- __ str(mantissa_reg, FieldMemOperand(scratch, FixedDoubleArray::kHeaderSize));
- uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
- __ str(exponent_reg, FieldMemOperand(scratch, offset));
- __ Ret();
-
- __ bind(&maybe_nan);
- // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
- // it's an Infinity, and the non-NaN code path applies.
- __ b(gt, &is_nan);
- __ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
- __ cmp(mantissa_reg, Operand(0));
- __ b(eq, &have_double_value);
- __ bind(&is_nan);
- // Load canonical NaN for storing into the double array.
- uint64_t nan_int64 = BitCast<uint64_t>(
- FixedDoubleArray::canonical_not_the_hole_nan_as_double());
- __ mov(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
- __ mov(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
- __ jmp(&have_double_value);
-
- __ bind(&smi_value);
- __ add(scratch, elements_reg,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
- __ add(scratch, scratch,
- Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
- // scratch is now effective address of the double element
-
- FloatingPointHelper::Destination destination;
- if (CpuFeatures::IsSupported(VFP3)) {
- destination = FloatingPointHelper::kVFPRegisters;
- } else {
- destination = FloatingPointHelper::kCoreRegisters;
- }
-
- Register untagged_value = receiver_reg;
- __ SmiUntag(untagged_value, value_reg);
- FloatingPointHelper::ConvertIntToDouble(
- masm,
- untagged_value,
- destination,
- d0,
- mantissa_reg,
- exponent_reg,
- scratch4,
- s2);
- if (destination == FloatingPointHelper::kVFPRegisters) {
- CpuFeatures::Scope scope(VFP3);
- __ vstr(d0, scratch, 0);
- } else {
- __ str(mantissa_reg, MemOperand(scratch, 0));
- __ str(exponent_reg, MemOperand(scratch, Register::kSizeInBytes));
- }
+ __ StoreNumberToDoubleElements(value_reg,
+ key_reg,
+ receiver_reg,
+ elements_reg,
+ scratch1,
+ scratch2,
+ scratch3,
+ scratch4,
+ &transition_elements_kind);
__ Ret();
// Handle store cache miss, replacing the ic with the generic stub.
@@ -4420,6 +4229,10 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
Handle<Code> ic =
masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
__ Jump(ic, RelocInfo::CODE_TARGET);
+
+ __ bind(&transition_elements_kind);
+ Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
+ __ Jump(ic_miss, RelocInfo::CODE_TARGET);
}