author     Ryan Dahl <ry@tinyclouds.org>    2011-10-10 17:58:30 -0700
committer  Ryan Dahl <ry@tinyclouds.org>    2011-10-10 17:58:30 -0700
commit     3b1d656da56bdd403d1625a0c6a44d75cde36cc1 (patch)
tree       80de8a68eacd596f0d120efc65dbbae3f324aea0 /deps/v8/src/arm
parent     9bbca99107652906a060679ee95bf1ad7381cbb5 (diff)
download   node-new-3b1d656da56bdd403d1625a0c6a44d75cde36cc1.tar.gz
Revert "Upgrade V8 to 3.6.6"
Not stable enough.
- Windows snapshot linking broken
- Linux crash on ./node_g test/simple/test-stream-pipe-multi.js
This reverts commit 56e6952e639ba1557a5b22333788583e9e39fa29.
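The reverted upgrade introduced incremental marking, and the first hunks below strip its write-barrier hooks back out of the ARM RelocInfo setters. A minimal sketch of that pattern, simplified from the assembler-arm-inl.h hunk itself (types and surrounding declarations omitted; the same shape recurs in set_target_address, set_target_cell and set_call_address):

    // Sketch only: the 3.6.6 form that this commit reverts.
    void RelocInfo::set_target_object(Object* target) {
      ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
      Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
      // Added in 3.6.6: notify the incremental marker about the embedded
      // pointer so a concurrent marking cycle cannot miss it. The revert
      // deletes this block and keeps only the raw patch above.
      if (host() != NULL && target->IsHeapObject()) {
        host()->GetHeap()->incremental_marking()->RecordWrite(
            host(), &Memory::Object_at(pc_), HeapObject::cast(target));
      }
    }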
Diffstat (limited to 'deps/v8/src/arm')
22 files changed, 1178 insertions, 2680 deletions
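Most of the churn in builtins-arm.cc and code-stubs-arm.cc below is mechanical: 3.6.6 wrapped runtime calls in an RAII FrameScope, and the revert restores explicit EnterInternalFrame()/LeaveInternalFrame() pairs. A side-by-side sketch under that assumption (the function names are invented for illustration; the __ macro expands to masm-> as in the V8 sources):

    // 3.6.6 style being reverted: the scope's destructor emits the teardown.
    void WithFrameScope(MacroAssembler* masm) {
      FrameScope scope(masm, StackFrame::INTERNAL);
      __ push(r0);
      __ CallRuntime(Runtime::kNumberAlloc, 0);
      __ pop(r0);
    }

    // Pre-3.6.6 style being restored: enter/leave emitted explicitly.
    void WithExplicitFrame(MacroAssembler* masm) {
      __ EnterInternalFrame();
      __ push(r0);
      __ CallRuntime(Runtime::kNumberAlloc, 0);
      __ pop(r0);
      __ LeaveInternalFrame();
    }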
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h index 54c291d412..3e19a45385 100644 --- a/deps/v8/src/arm/assembler-arm-inl.h +++ b/deps/v8/src/arm/assembler-arm-inl.h @@ -77,11 +77,6 @@ int RelocInfo::target_address_size() { void RelocInfo::set_target_address(Address target) { ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY); Assembler::set_target_address_at(pc_, target); - if (host() != NULL && IsCodeTarget(rmode_)) { - Object* target_code = Code::GetCodeFromTargetAddress(target); - host()->GetHeap()->incremental_marking()->RecordWriteIntoCode( - host(), this, HeapObject::cast(target_code)); - } } @@ -106,10 +101,6 @@ Object** RelocInfo::target_object_address() { void RelocInfo::set_target_object(Object* target) { ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target)); - if (host() != NULL && target->IsHeapObject()) { - host()->GetHeap()->incremental_marking()->RecordWrite( - host(), &Memory::Object_at(pc_), HeapObject::cast(target)); - } } @@ -140,12 +131,6 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) { ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL); Address address = cell->address() + JSGlobalPropertyCell::kValueOffset; Memory::Address_at(pc_) = address; - if (host() != NULL) { - // TODO(1550) We are passing NULL as a slot because cell can never be on - // evacuation candidate. - host()->GetHeap()->incremental_marking()->RecordWrite( - host(), NULL, cell); - } } @@ -162,11 +147,6 @@ void RelocInfo::set_call_address(Address target) { ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence())); Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target; - if (host() != NULL) { - Object* target_code = Code::GetCodeFromTargetAddress(target); - host()->GetHeap()->incremental_marking()->RecordWriteIntoCode( - host(), this, HeapObject::cast(target_code)); - } } @@ -215,7 +195,7 @@ bool RelocInfo::IsPatchedDebugBreakSlotSequence() { void RelocInfo::Visit(ObjectVisitor* visitor) { RelocInfo::Mode mode = rmode(); if (mode == RelocInfo::EMBEDDED_OBJECT) { - visitor->VisitEmbeddedPointer(host(), target_object_address()); + visitor->VisitPointer(target_object_address()); } else if (RelocInfo::IsCodeTarget(mode)) { visitor->VisitCodeTarget(this); } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) { @@ -241,7 +221,7 @@ template<typename StaticVisitor> void RelocInfo::Visit(Heap* heap) { RelocInfo::Mode mode = rmode(); if (mode == RelocInfo::EMBEDDED_OBJECT) { - StaticVisitor::VisitEmbeddedPointer(heap, host(), target_object_address()); + StaticVisitor::VisitPointer(heap, target_object_address()); } else if (RelocInfo::IsCodeTarget(mode)) { StaticVisitor::VisitCodeTarget(heap, this); } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) { diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc index 329493a340..0ec36921ab 100644 --- a/deps/v8/src/arm/assembler-arm.cc +++ b/deps/v8/src/arm/assembler-arm.cc @@ -78,9 +78,7 @@ static uint64_t CpuFeaturesImpliedByCompiler() { void CpuFeatures::Probe() { - unsigned standard_features = (OS::CpuFeaturesImpliedByPlatform() | - CpuFeaturesImpliedByCompiler()); - ASSERT(supported_ == 0 || supported_ == standard_features); + ASSERT(!initialized_); #ifdef DEBUG initialized_ = true; #endif @@ -88,7 +86,8 @@ void CpuFeatures::Probe() { // Get the features implied by the OS and the compiler settings. 
This is the // minimal set of features which is also alowed for generated code in the // snapshot. - supported_ |= standard_features; + supported_ |= OS::CpuFeaturesImpliedByPlatform(); + supported_ |= CpuFeaturesImpliedByCompiler(); if (Serializer::enabled()) { // No probing for features if we might serialize (generate snapshot). @@ -2506,8 +2505,7 @@ void Assembler::dd(uint32_t data) { void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { - // We do not try to reuse pool constants. - RelocInfo rinfo(pc_, rmode, data, NULL); + RelocInfo rinfo(pc_, rmode, data); // we do not try to reuse pool constants if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) { // Adjust code for new modes. ASSERT(RelocInfo::IsDebugBreakSlot(rmode) @@ -2539,7 +2537,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { } ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here if (rmode == RelocInfo::CODE_TARGET_WITH_ID) { - RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId(), NULL); + RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId()); ClearRecordedAstId(); reloc_info_writer.Write(&reloc_info_with_ast_id); } else { diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h index d19b64da54..9a586936fe 100644 --- a/deps/v8/src/arm/assembler-arm.h +++ b/deps/v8/src/arm/assembler-arm.h @@ -1209,10 +1209,6 @@ class Assembler : public AssemblerBase { PositionsRecorder* positions_recorder() { return &positions_recorder_; } // Read/patch instructions - Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); } - void instr_at_put(int pos, Instr instr) { - *reinterpret_cast<Instr*>(buffer_ + pos) = instr; - } static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); } static void instr_at_put(byte* pc, Instr instr) { *reinterpret_cast<Instr*>(pc) = instr; @@ -1267,6 +1263,12 @@ class Assembler : public AssemblerBase { int buffer_space() const { return reloc_info_writer.pos() - pc_; } + // Read/patch instructions + Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); } + void instr_at_put(int pos, Instr instr) { + *reinterpret_cast<Instr*>(buffer_ + pos) = instr; + } + // Decode branch instruction at pos and return branch target pos int target_at(int pos); diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc index 32b7896a52..60d2081c29 100644 --- a/deps/v8/src/arm/builtins-arm.cc +++ b/deps/v8/src/arm/builtins-arm.cc @@ -582,11 +582,10 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { __ bind(&convert_argument); __ push(function); // Preserve the function. __ IncrementCounter(counters->string_ctor_conversions(), 1, r3, r4); - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ push(r0); - __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION); - } + __ EnterInternalFrame(); + __ push(r0); + __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION); + __ LeaveInternalFrame(); __ pop(function); __ mov(argument, r0); __ b(&argument_is_string); @@ -602,11 +601,10 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { // create a string wrapper. 
__ bind(&gc_required); __ IncrementCounter(counters->string_ctor_gc_required(), 1, r3, r4); - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ push(argument); - __ CallRuntime(Runtime::kNewStringWrapper, 1); - } + __ EnterInternalFrame(); + __ push(argument); + __ CallRuntime(Runtime::kNewStringWrapper, 1); + __ LeaveInternalFrame(); __ Ret(); } @@ -619,12 +617,12 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) { // -- sp[...]: constructor arguments // ----------------------------------- - Label slow, non_function_call; + Label non_function_call; // Check that the function is not a smi. __ JumpIfSmi(r1, &non_function_call); // Check that the function is a JSFunction. __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE); - __ b(ne, &slow); + __ b(ne, &non_function_call); // Jump to the function-specific construct stub. __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); @@ -633,19 +631,10 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) { // r0: number of arguments // r1: called object - // r2: object type - Label do_call; - __ bind(&slow); - __ cmp(r2, Operand(JS_FUNCTION_PROXY_TYPE)); - __ b(ne, &non_function_call); - __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR); - __ jmp(&do_call); - __ bind(&non_function_call); - __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR); - __ bind(&do_call); // Set expected number of arguments to zero (not changing r0). __ mov(r2, Operand(0, RelocInfo::NONE)); + __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR); __ SetCallKind(r5, CALL_AS_METHOD); __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), RelocInfo::CODE_TARGET); @@ -661,329 +650,321 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, Isolate* isolate = masm->isolate(); // Enter a construct frame. - { - FrameScope scope(masm, StackFrame::CONSTRUCT); - - // Preserve the two incoming parameters on the stack. - __ mov(r0, Operand(r0, LSL, kSmiTagSize)); - __ push(r0); // Smi-tagged arguments count. - __ push(r1); // Constructor function. - - // Try to allocate the object without transitioning into C code. If any of - // the preconditions is not met, the code bails out to the runtime call. - Label rt_call, allocated; - if (FLAG_inline_new) { - Label undo_allocation; + __ EnterConstructFrame(); + + // Preserve the two incoming parameters on the stack. + __ mov(r0, Operand(r0, LSL, kSmiTagSize)); + __ push(r0); // Smi-tagged arguments count. + __ push(r1); // Constructor function. + + // Try to allocate the object without transitioning into C code. If any of the + // preconditions is not met, the code bails out to the runtime call. + Label rt_call, allocated; + if (FLAG_inline_new) { + Label undo_allocation; #ifdef ENABLE_DEBUGGER_SUPPORT - ExternalReference debug_step_in_fp = - ExternalReference::debug_step_in_fp_address(isolate); - __ mov(r2, Operand(debug_step_in_fp)); - __ ldr(r2, MemOperand(r2)); - __ tst(r2, r2); - __ b(ne, &rt_call); + ExternalReference debug_step_in_fp = + ExternalReference::debug_step_in_fp_address(isolate); + __ mov(r2, Operand(debug_step_in_fp)); + __ ldr(r2, MemOperand(r2)); + __ tst(r2, r2); + __ b(ne, &rt_call); #endif - // Load the initial map and verify that it is in fact a map. 
- // r1: constructor function - __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset)); - __ JumpIfSmi(r2, &rt_call); - __ CompareObjectType(r2, r3, r4, MAP_TYPE); - __ b(ne, &rt_call); - - // Check that the constructor is not constructing a JSFunction (see - // comments in Runtime_NewObject in runtime.cc). In which case the - // initial map's instance type would be JS_FUNCTION_TYPE. - // r1: constructor function - // r2: initial map - __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE); - __ b(eq, &rt_call); + // Load the initial map and verify that it is in fact a map. + // r1: constructor function + __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset)); + __ JumpIfSmi(r2, &rt_call); + __ CompareObjectType(r2, r3, r4, MAP_TYPE); + __ b(ne, &rt_call); - if (count_constructions) { - Label allocate; - // Decrease generous allocation count. - __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); - MemOperand constructor_count = - FieldMemOperand(r3, SharedFunctionInfo::kConstructionCountOffset); - __ ldrb(r4, constructor_count); - __ sub(r4, r4, Operand(1), SetCC); - __ strb(r4, constructor_count); - __ b(ne, &allocate); - - __ Push(r1, r2); - - __ push(r1); // constructor - // The call will replace the stub, so the countdown is only done once. - __ CallRuntime(Runtime::kFinalizeInstanceSize, 1); - - __ pop(r2); - __ pop(r1); - - __ bind(&allocate); - } + // Check that the constructor is not constructing a JSFunction (see comments + // in Runtime_NewObject in runtime.cc). In which case the initial map's + // instance type would be JS_FUNCTION_TYPE. + // r1: constructor function + // r2: initial map + __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE); + __ b(eq, &rt_call); + + if (count_constructions) { + Label allocate; + // Decrease generous allocation count. + __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); + MemOperand constructor_count = + FieldMemOperand(r3, SharedFunctionInfo::kConstructionCountOffset); + __ ldrb(r4, constructor_count); + __ sub(r4, r4, Operand(1), SetCC); + __ strb(r4, constructor_count); + __ b(ne, &allocate); + + __ Push(r1, r2); + + __ push(r1); // constructor + // The call will replace the stub, so the countdown is only done once. + __ CallRuntime(Runtime::kFinalizeInstanceSize, 1); + + __ pop(r2); + __ pop(r1); + + __ bind(&allocate); + } - // Now allocate the JSObject on the heap. - // r1: constructor function - // r2: initial map - __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset)); - __ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS); - - // Allocated the JSObject, now initialize the fields. Map is set to - // initial map and properties and elements are set to empty fixed array. - // r1: constructor function - // r2: initial map - // r3: object size - // r4: JSObject (not tagged) - __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex); - __ mov(r5, r4); - ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset); - __ str(r2, MemOperand(r5, kPointerSize, PostIndex)); - ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset); - __ str(r6, MemOperand(r5, kPointerSize, PostIndex)); - ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset); - __ str(r6, MemOperand(r5, kPointerSize, PostIndex)); - - // Fill all the in-object properties with the appropriate filler. 
- // r1: constructor function - // r2: initial map - // r3: object size (in words) - // r4: JSObject (not tagged) - // r5: First in-object property of JSObject (not tagged) - __ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object. - ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize); - __ LoadRoot(r7, Heap::kUndefinedValueRootIndex); + // Now allocate the JSObject on the heap. + // r1: constructor function + // r2: initial map + __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset)); + __ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS); + + // Allocated the JSObject, now initialize the fields. Map is set to initial + // map and properties and elements are set to empty fixed array. + // r1: constructor function + // r2: initial map + // r3: object size + // r4: JSObject (not tagged) + __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex); + __ mov(r5, r4); + ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset); + __ str(r2, MemOperand(r5, kPointerSize, PostIndex)); + ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset); + __ str(r6, MemOperand(r5, kPointerSize, PostIndex)); + ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset); + __ str(r6, MemOperand(r5, kPointerSize, PostIndex)); + + // Fill all the in-object properties with the appropriate filler. + // r1: constructor function + // r2: initial map + // r3: object size (in words) + // r4: JSObject (not tagged) + // r5: First in-object property of JSObject (not tagged) + __ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object. + ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize); + { Label loop, entry; if (count_constructions) { - __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset)); - __ Ubfx(r0, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte, - kBitsPerByte); - __ add(r0, r5, Operand(r0, LSL, kPointerSizeLog2)); - // r0: offset of first field after pre-allocated fields - if (FLAG_debug_code) { - __ cmp(r0, r6); - __ Assert(le, "Unexpected number of pre-allocated property fields."); - } - __ InitializeFieldsWithFiller(r5, r0, r7); // To allow for truncation. __ LoadRoot(r7, Heap::kOnePointerFillerMapRootIndex); + } else { + __ LoadRoot(r7, Heap::kUndefinedValueRootIndex); } - __ InitializeFieldsWithFiller(r5, r6, r7); - - // Add the object tag to make the JSObject real, so that we can continue - // and jump into the continuation code at any time from now on. Any - // failures need to undo the allocation, so that the heap is in a - // consistent state and verifiable. - __ add(r4, r4, Operand(kHeapObjectTag)); - - // Check if a non-empty properties array is needed. Continue with - // allocated object if not fall through to runtime call if it is. - // r1: constructor function - // r4: JSObject - // r5: start of next object (not tagged) - __ ldrb(r3, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset)); - // The field instance sizes contains both pre-allocated property fields - // and in-object properties. - __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset)); - __ Ubfx(r6, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte, - kBitsPerByte); - __ add(r3, r3, Operand(r6)); - __ Ubfx(r6, r0, Map::kInObjectPropertiesByte * kBitsPerByte, - kBitsPerByte); - __ sub(r3, r3, Operand(r6), SetCC); - - // Done if no extra properties are to be allocated. - __ b(eq, &allocated); - __ Assert(pl, "Property allocation count failed."); - - // Scale the number of elements by pointer size and add the header for - // FixedArrays to the start of the next object calculation from above. 
- // r1: constructor - // r3: number of elements in properties array - // r4: JSObject - // r5: start of next object - __ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize)); - __ AllocateInNewSpace( - r0, - r5, - r6, - r2, - &undo_allocation, - static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS)); - - // Initialize the FixedArray. - // r1: constructor - // r3: number of elements in properties array - // r4: JSObject - // r5: FixedArray (not tagged) - __ LoadRoot(r6, Heap::kFixedArrayMapRootIndex); - __ mov(r2, r5); - ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset); - __ str(r6, MemOperand(r2, kPointerSize, PostIndex)); - ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset); - __ mov(r0, Operand(r3, LSL, kSmiTagSize)); - __ str(r0, MemOperand(r2, kPointerSize, PostIndex)); - - // Initialize the fields to undefined. - // r1: constructor function - // r2: First element of FixedArray (not tagged) - // r3: number of elements in properties array - // r4: JSObject - // r5: FixedArray (not tagged) - __ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2)); // End of object. - ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize); - { Label loop, entry; - if (count_constructions) { - __ LoadRoot(r7, Heap::kUndefinedValueRootIndex); - } else if (FLAG_debug_code) { - __ LoadRoot(r8, Heap::kUndefinedValueRootIndex); - __ cmp(r7, r8); - __ Assert(eq, "Undefined value not loaded."); - } - __ b(&entry); - __ bind(&loop); - __ str(r7, MemOperand(r2, kPointerSize, PostIndex)); - __ bind(&entry); - __ cmp(r2, r6); - __ b(lt, &loop); - } - - // Store the initialized FixedArray into the properties field of - // the JSObject - // r1: constructor function - // r4: JSObject - // r5: FixedArray (not tagged) - __ add(r5, r5, Operand(kHeapObjectTag)); // Add the heap tag. - __ str(r5, FieldMemOperand(r4, JSObject::kPropertiesOffset)); - - // Continue with JSObject being successfully allocated - // r1: constructor function - // r4: JSObject - __ jmp(&allocated); - - // Undo the setting of the new top so that the heap is verifiable. For - // example, the map's unused properties potentially do not match the - // allocated objects unused properties. - // r4: JSObject (previous new top) - __ bind(&undo_allocation); - __ UndoAllocationInNewSpace(r4, r5); + __ b(&entry); + __ bind(&loop); + __ str(r7, MemOperand(r5, kPointerSize, PostIndex)); + __ bind(&entry); + __ cmp(r5, r6); + __ b(lt, &loop); } - // Allocate the new receiver object using the runtime call. - // r1: constructor function - __ bind(&rt_call); - __ push(r1); // argument for Runtime_NewObject - __ CallRuntime(Runtime::kNewObject, 1); - __ mov(r4, r0); + // Add the object tag to make the JSObject real, so that we can continue and + // jump into the continuation code at any time from now on. Any failures + // need to undo the allocation, so that the heap is in a consistent state + // and verifiable. + __ add(r4, r4, Operand(kHeapObjectTag)); - // Receiver for constructor call allocated. + // Check if a non-empty properties array is needed. Continue with allocated + // object if not fall through to runtime call if it is. + // r1: constructor function + // r4: JSObject + // r5: start of next object (not tagged) + __ ldrb(r3, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset)); + // The field instance sizes contains both pre-allocated property fields and + // in-object properties. 
+ __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset)); + __ Ubfx(r6, r0, Map::kPreAllocatedPropertyFieldsByte * 8, 8); + __ add(r3, r3, Operand(r6)); + __ Ubfx(r6, r0, Map::kInObjectPropertiesByte * 8, 8); + __ sub(r3, r3, Operand(r6), SetCC); + + // Done if no extra properties are to be allocated. + __ b(eq, &allocated); + __ Assert(pl, "Property allocation count failed."); + + // Scale the number of elements by pointer size and add the header for + // FixedArrays to the start of the next object calculation from above. + // r1: constructor + // r3: number of elements in properties array + // r4: JSObject + // r5: start of next object + __ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize)); + __ AllocateInNewSpace( + r0, + r5, + r6, + r2, + &undo_allocation, + static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS)); + + // Initialize the FixedArray. + // r1: constructor + // r3: number of elements in properties array // r4: JSObject - __ bind(&allocated); - __ push(r4); - - // Push the function and the allocated receiver from the stack. - // sp[0]: receiver (newly allocated object) - // sp[1]: constructor function - // sp[2]: number of arguments (smi-tagged) - __ ldr(r1, MemOperand(sp, kPointerSize)); - __ push(r1); // Constructor function. - __ push(r4); // Receiver. - - // Reload the number of arguments from the stack. + // r5: FixedArray (not tagged) + __ LoadRoot(r6, Heap::kFixedArrayMapRootIndex); + __ mov(r2, r5); + ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset); + __ str(r6, MemOperand(r2, kPointerSize, PostIndex)); + ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset); + __ mov(r0, Operand(r3, LSL, kSmiTagSize)); + __ str(r0, MemOperand(r2, kPointerSize, PostIndex)); + + // Initialize the fields to undefined. // r1: constructor function - // sp[0]: receiver - // sp[1]: constructor function - // sp[2]: receiver - // sp[3]: constructor function - // sp[4]: number of arguments (smi-tagged) - __ ldr(r3, MemOperand(sp, 4 * kPointerSize)); - - // Setup pointer to last argument. - __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset)); - - // Setup number of arguments for function call below - __ mov(r0, Operand(r3, LSR, kSmiTagSize)); - - // Copy arguments and receiver to the expression stack. - // r0: number of arguments - // r2: address of last argument (caller sp) + // r2: First element of FixedArray (not tagged) + // r3: number of elements in properties array + // r4: JSObject + // r5: FixedArray (not tagged) + __ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2)); // End of object. 
+ ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize); + { Label loop, entry; + if (count_constructions) { + __ LoadRoot(r7, Heap::kUndefinedValueRootIndex); + } else if (FLAG_debug_code) { + __ LoadRoot(r8, Heap::kUndefinedValueRootIndex); + __ cmp(r7, r8); + __ Assert(eq, "Undefined value not loaded."); + } + __ b(&entry); + __ bind(&loop); + __ str(r7, MemOperand(r2, kPointerSize, PostIndex)); + __ bind(&entry); + __ cmp(r2, r6); + __ b(lt, &loop); + } + + // Store the initialized FixedArray into the properties field of + // the JSObject // r1: constructor function - // r3: number of arguments (smi-tagged) - // sp[0]: receiver - // sp[1]: constructor function - // sp[2]: receiver - // sp[3]: constructor function - // sp[4]: number of arguments (smi-tagged) - Label loop, entry; - __ b(&entry); - __ bind(&loop); - __ ldr(ip, MemOperand(r2, r3, LSL, kPointerSizeLog2 - 1)); - __ push(ip); - __ bind(&entry); - __ sub(r3, r3, Operand(2), SetCC); - __ b(ge, &loop); + // r4: JSObject + // r5: FixedArray (not tagged) + __ add(r5, r5, Operand(kHeapObjectTag)); // Add the heap tag. + __ str(r5, FieldMemOperand(r4, JSObject::kPropertiesOffset)); - // Call the function. - // r0: number of arguments + // Continue with JSObject being successfully allocated // r1: constructor function - if (is_api_function) { - __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); - Handle<Code> code = - masm->isolate()->builtins()->HandleApiCallConstruct(); - ParameterCount expected(0); - __ InvokeCode(code, expected, expected, - RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD); - } else { - ParameterCount actual(r0); - __ InvokeFunction(r1, actual, CALL_FUNCTION, - NullCallWrapper(), CALL_AS_METHOD); - } + // r4: JSObject + __ jmp(&allocated); + + // Undo the setting of the new top so that the heap is verifiable. For + // example, the map's unused properties potentially do not match the + // allocated objects unused properties. + // r4: JSObject (previous new top) + __ bind(&undo_allocation); + __ UndoAllocationInNewSpace(r4, r5); + } - // Pop the function from the stack. - // sp[0]: constructor function - // sp[2]: receiver - // sp[3]: constructor function - // sp[4]: number of arguments (smi-tagged) - __ pop(); + // Allocate the new receiver object using the runtime call. + // r1: constructor function + __ bind(&rt_call); + __ push(r1); // argument for Runtime_NewObject + __ CallRuntime(Runtime::kNewObject, 1); + __ mov(r4, r0); + + // Receiver for constructor call allocated. + // r4: JSObject + __ bind(&allocated); + __ push(r4); + + // Push the function and the allocated receiver from the stack. + // sp[0]: receiver (newly allocated object) + // sp[1]: constructor function + // sp[2]: number of arguments (smi-tagged) + __ ldr(r1, MemOperand(sp, kPointerSize)); + __ push(r1); // Constructor function. + __ push(r4); // Receiver. + + // Reload the number of arguments from the stack. + // r1: constructor function + // sp[0]: receiver + // sp[1]: constructor function + // sp[2]: receiver + // sp[3]: constructor function + // sp[4]: number of arguments (smi-tagged) + __ ldr(r3, MemOperand(sp, 4 * kPointerSize)); + + // Setup pointer to last argument. + __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset)); + + // Setup number of arguments for function call below + __ mov(r0, Operand(r3, LSR, kSmiTagSize)); + + // Copy arguments and receiver to the expression stack. 
+ // r0: number of arguments + // r2: address of last argument (caller sp) + // r1: constructor function + // r3: number of arguments (smi-tagged) + // sp[0]: receiver + // sp[1]: constructor function + // sp[2]: receiver + // sp[3]: constructor function + // sp[4]: number of arguments (smi-tagged) + Label loop, entry; + __ b(&entry); + __ bind(&loop); + __ ldr(ip, MemOperand(r2, r3, LSL, kPointerSizeLog2 - 1)); + __ push(ip); + __ bind(&entry); + __ sub(r3, r3, Operand(2), SetCC); + __ b(ge, &loop); - // Restore context from the frame. - // r0: result - // sp[0]: receiver - // sp[1]: constructor function - // sp[2]: number of arguments (smi-tagged) - __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); - - // If the result is an object (in the ECMA sense), we should get rid - // of the receiver and use the result; see ECMA-262 section 13.2.2-7 - // on page 74. - Label use_receiver, exit; - - // If the result is a smi, it is *not* an object in the ECMA sense. - // r0: result - // sp[0]: receiver (newly allocated object) - // sp[1]: constructor function - // sp[2]: number of arguments (smi-tagged) - __ JumpIfSmi(r0, &use_receiver); - - // If the type of the result (stored in its map) is less than - // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense. - __ CompareObjectType(r0, r3, r3, FIRST_SPEC_OBJECT_TYPE); - __ b(ge, &exit); - - // Throw away the result of the constructor invocation and use the - // on-stack receiver as the result. - __ bind(&use_receiver); - __ ldr(r0, MemOperand(sp)); - - // Remove receiver from the stack, remove caller arguments, and - // return. - __ bind(&exit); - // r0: result - // sp[0]: receiver (newly allocated object) - // sp[1]: constructor function - // sp[2]: number of arguments (smi-tagged) - __ ldr(r1, MemOperand(sp, 2 * kPointerSize)); - - // Leave construct frame. + // Call the function. + // r0: number of arguments + // r1: constructor function + if (is_api_function) { + __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); + Handle<Code> code = + masm->isolate()->builtins()->HandleApiCallConstruct(); + ParameterCount expected(0); + __ InvokeCode(code, expected, expected, + RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD); + } else { + ParameterCount actual(r0); + __ InvokeFunction(r1, actual, CALL_FUNCTION, + NullCallWrapper(), CALL_AS_METHOD); } + // Pop the function from the stack. + // sp[0]: constructor function + // sp[2]: receiver + // sp[3]: constructor function + // sp[4]: number of arguments (smi-tagged) + __ pop(); + + // Restore context from the frame. + // r0: result + // sp[0]: receiver + // sp[1]: constructor function + // sp[2]: number of arguments (smi-tagged) + __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); + + // If the result is an object (in the ECMA sense), we should get rid + // of the receiver and use the result; see ECMA-262 section 13.2.2-7 + // on page 74. + Label use_receiver, exit; + + // If the result is a smi, it is *not* an object in the ECMA sense. + // r0: result + // sp[0]: receiver (newly allocated object) + // sp[1]: constructor function + // sp[2]: number of arguments (smi-tagged) + __ JumpIfSmi(r0, &use_receiver); + + // If the type of the result (stored in its map) is less than + // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense. + __ CompareObjectType(r0, r3, r3, FIRST_SPEC_OBJECT_TYPE); + __ b(ge, &exit); + + // Throw away the result of the constructor invocation and use the + // on-stack receiver as the result. 
+ __ bind(&use_receiver); + __ ldr(r0, MemOperand(sp)); + + // Remove receiver from the stack, remove caller arguments, and + // return. + __ bind(&exit); + // r0: result + // sp[0]: receiver (newly allocated object) + // sp[1]: constructor function + // sp[2]: number of arguments (smi-tagged) + __ ldr(r1, MemOperand(sp, 2 * kPointerSize)); + __ LeaveConstructFrame(); __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1)); __ add(sp, sp, Operand(kPointerSize)); __ IncrementCounter(isolate->counters()->constructed_objects(), 1, r1, r2); @@ -1016,64 +997,63 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, // r4: argv // r5-r7, cp may be clobbered - // Clear the context before we push it when entering the internal frame. + // Clear the context before we push it when entering the JS frame. __ mov(cp, Operand(0, RelocInfo::NONE)); // Enter an internal frame. - { - FrameScope scope(masm, StackFrame::INTERNAL); + __ EnterInternalFrame(); - // Set up the context from the function argument. - __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); + // Set up the context from the function argument. + __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); - // Set up the roots register. - ExternalReference roots_address = - ExternalReference::roots_address(masm->isolate()); - __ mov(r10, Operand(roots_address)); + // Set up the roots register. + ExternalReference roots_address = + ExternalReference::roots_address(masm->isolate()); + __ mov(r10, Operand(roots_address)); - // Push the function and the receiver onto the stack. - __ push(r1); - __ push(r2); + // Push the function and the receiver onto the stack. + __ push(r1); + __ push(r2); - // Copy arguments to the stack in a loop. - // r1: function - // r3: argc - // r4: argv, i.e. points to first arg - Label loop, entry; - __ add(r2, r4, Operand(r3, LSL, kPointerSizeLog2)); - // r2 points past last arg. - __ b(&entry); - __ bind(&loop); - __ ldr(r0, MemOperand(r4, kPointerSize, PostIndex)); // read next parameter - __ ldr(r0, MemOperand(r0)); // dereference handle - __ push(r0); // push parameter - __ bind(&entry); - __ cmp(r4, r2); - __ b(ne, &loop); - - // Initialize all JavaScript callee-saved registers, since they will be seen - // by the garbage collector as part of handlers. - __ LoadRoot(r4, Heap::kUndefinedValueRootIndex); - __ mov(r5, Operand(r4)); - __ mov(r6, Operand(r4)); - __ mov(r7, Operand(r4)); - if (kR9Available == 1) { - __ mov(r9, Operand(r4)); - } + // Copy arguments to the stack in a loop. + // r1: function + // r3: argc + // r4: argv, i.e. points to first arg + Label loop, entry; + __ add(r2, r4, Operand(r3, LSL, kPointerSizeLog2)); + // r2 points past last arg. + __ b(&entry); + __ bind(&loop); + __ ldr(r0, MemOperand(r4, kPointerSize, PostIndex)); // read next parameter + __ ldr(r0, MemOperand(r0)); // dereference handle + __ push(r0); // push parameter + __ bind(&entry); + __ cmp(r4, r2); + __ b(ne, &loop); + + // Initialize all JavaScript callee-saved registers, since they will be seen + // by the garbage collector as part of handlers. + __ LoadRoot(r4, Heap::kUndefinedValueRootIndex); + __ mov(r5, Operand(r4)); + __ mov(r6, Operand(r4)); + __ mov(r7, Operand(r4)); + if (kR9Available == 1) { + __ mov(r9, Operand(r4)); + } - // Invoke the code and pass argc as r0. 
- __ mov(r0, Operand(r3)); - if (is_construct) { - __ Call(masm->isolate()->builtins()->JSConstructCall()); - } else { - ParameterCount actual(r0); - __ InvokeFunction(r1, actual, CALL_FUNCTION, - NullCallWrapper(), CALL_AS_METHOD); - } - // Exit the JS frame and remove the parameters (except function), and - // return. - // Respect ABI stack constraint. + // Invoke the code and pass argc as r0. + __ mov(r0, Operand(r3)); + if (is_construct) { + __ Call(masm->isolate()->builtins()->JSConstructCall()); + } else { + ParameterCount actual(r0); + __ InvokeFunction(r1, actual, CALL_FUNCTION, + NullCallWrapper(), CALL_AS_METHOD); } + + // Exit the JS frame and remove the parameters (except function), and return. + // Respect ABI stack constraint. + __ LeaveInternalFrame(); __ Jump(lr); // r0: result @@ -1092,27 +1072,26 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) { void Builtins::Generate_LazyCompile(MacroAssembler* masm) { // Enter an internal frame. - { - FrameScope scope(masm, StackFrame::INTERNAL); + __ EnterInternalFrame(); - // Preserve the function. - __ push(r1); - // Push call kind information. - __ push(r5); + // Preserve the function. + __ push(r1); + // Push call kind information. + __ push(r5); - // Push the function on the stack as the argument to the runtime function. - __ push(r1); - __ CallRuntime(Runtime::kLazyCompile, 1); - // Calculate the entry point. - __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); + // Push the function on the stack as the argument to the runtime function. + __ push(r1); + __ CallRuntime(Runtime::kLazyCompile, 1); + // Calculate the entry point. + __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); - // Restore call kind information. - __ pop(r5); - // Restore saved function. - __ pop(r1); + // Restore call kind information. + __ pop(r5); + // Restore saved function. + __ pop(r1); - // Tear down internal frame. - } + // Tear down temporary frame. + __ LeaveInternalFrame(); // Do a tail-call of the compiled function. __ Jump(r2); @@ -1121,27 +1100,26 @@ void Builtins::Generate_LazyCompile(MacroAssembler* masm) { void Builtins::Generate_LazyRecompile(MacroAssembler* masm) { // Enter an internal frame. - { - FrameScope scope(masm, StackFrame::INTERNAL); + __ EnterInternalFrame(); - // Preserve the function. - __ push(r1); - // Push call kind information. - __ push(r5); + // Preserve the function. + __ push(r1); + // Push call kind information. + __ push(r5); - // Push the function on the stack as the argument to the runtime function. - __ push(r1); - __ CallRuntime(Runtime::kLazyRecompile, 1); - // Calculate the entry point. - __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); + // Push the function on the stack as the argument to the runtime function. + __ push(r1); + __ CallRuntime(Runtime::kLazyRecompile, 1); + // Calculate the entry point. + __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); - // Restore call kind information. - __ pop(r5); - // Restore saved function. - __ pop(r1); + // Restore call kind information. + __ pop(r5); + // Restore saved function. + __ pop(r1); - // Tear down internal frame. - } + // Tear down temporary frame. + __ LeaveInternalFrame(); // Do a tail-call of the compiled function. 
__ Jump(r2); @@ -1150,13 +1128,12 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) { static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm, Deoptimizer::BailoutType type) { - { - FrameScope scope(masm, StackFrame::INTERNAL); - // Pass the function and deoptimization type to the runtime system. - __ mov(r0, Operand(Smi::FromInt(static_cast<int>(type)))); - __ push(r0); - __ CallRuntime(Runtime::kNotifyDeoptimized, 1); - } + __ EnterInternalFrame(); + // Pass the function and deoptimization type to the runtime system. + __ mov(r0, Operand(Smi::FromInt(static_cast<int>(type)))); + __ push(r0); + __ CallRuntime(Runtime::kNotifyDeoptimized, 1); + __ LeaveInternalFrame(); // Get the full codegen state from the stack and untag it -> r6. __ ldr(r6, MemOperand(sp, 0 * kPointerSize)); @@ -1196,10 +1173,9 @@ void Builtins::Generate_NotifyOSR(MacroAssembler* masm) { // the registers without worrying about which of them contain // pointers. This seems a bit fragile. __ stm(db_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit()); - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ CallRuntime(Runtime::kNotifyOSR, 0); - } + __ EnterInternalFrame(); + __ CallRuntime(Runtime::kNotifyOSR, 0); + __ LeaveInternalFrame(); __ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit()); __ Ret(); } @@ -1215,11 +1191,10 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { // Lookup the function in the JavaScript frame and push it as an // argument to the on-stack replacement function. __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ push(r0); - __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1); - } + __ EnterInternalFrame(); + __ push(r0); + __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1); + __ LeaveInternalFrame(); // If the result was -1 it means that we couldn't optimize the // function. Just return and continue in the unoptimized version. @@ -1301,23 +1276,17 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { __ b(ge, &shift_arguments); __ bind(&convert_to_object); + __ EnterInternalFrame(); // In order to preserve argument count. + __ mov(r0, Operand(r0, LSL, kSmiTagSize)); // Smi-tagged. + __ push(r0); - { - // Enter an internal frame in order to preserve argument count. - FrameScope scope(masm, StackFrame::INTERNAL); - __ mov(r0, Operand(r0, LSL, kSmiTagSize)); // Smi-tagged. - __ push(r0); - - __ push(r2); - __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); - __ mov(r2, r0); - - __ pop(r0); - __ mov(r0, Operand(r0, ASR, kSmiTagSize)); - - // Exit the internal frame. - } + __ push(r2); + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); + __ mov(r2, r0); + __ pop(r0); + __ mov(r0, Operand(r0, ASR, kSmiTagSize)); + __ LeaveInternalFrame(); // Restore the function to r1, and the flag to r4. __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2)); __ mov(r4, Operand(0, RelocInfo::NONE)); @@ -1437,157 +1406,156 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) { const int kRecvOffset = 3 * kPointerSize; const int kFunctionOffset = 4 * kPointerSize; - { - FrameScope frame_scope(masm, StackFrame::INTERNAL); + __ EnterInternalFrame(); - __ ldr(r0, MemOperand(fp, kFunctionOffset)); // get the function - __ push(r0); - __ ldr(r0, MemOperand(fp, kArgsOffset)); // get the args array - __ push(r0); - __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION); - - // Check the stack for overflow. 
We are not trying to catch - // interruptions (e.g. debug break and preemption) here, so the "real stack - // limit" is checked. - Label okay; - __ LoadRoot(r2, Heap::kRealStackLimitRootIndex); - // Make r2 the space we have left. The stack might already be overflowed - // here which will cause r2 to become negative. - __ sub(r2, sp, r2); - // Check if the arguments will overflow the stack. - __ cmp(r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ b(gt, &okay); // Signed comparison. - - // Out of stack space. - __ ldr(r1, MemOperand(fp, kFunctionOffset)); - __ push(r1); - __ push(r0); - __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION); - // End of stack check. - - // Push current limit and index. - __ bind(&okay); - __ push(r0); // limit - __ mov(r1, Operand(0, RelocInfo::NONE)); // initial index - __ push(r1); - - // Get the receiver. - __ ldr(r0, MemOperand(fp, kRecvOffset)); - - // Check that the function is a JS function (otherwise it must be a proxy). - Label push_receiver; - __ ldr(r1, MemOperand(fp, kFunctionOffset)); - __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE); - __ b(ne, &push_receiver); - - // Change context eagerly to get the right global object if necessary. - __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); - // Load the shared function info while the function is still in r1. - __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); - - // Compute the receiver. - // Do not transform the receiver for strict mode functions. - Label call_to_object, use_global_receiver; - __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset)); - __ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction + - kSmiTagSize))); - __ b(ne, &push_receiver); - - // Do not transform the receiver for strict mode functions. - __ tst(r2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize))); - __ b(ne, &push_receiver); + __ ldr(r0, MemOperand(fp, kFunctionOffset)); // get the function + __ push(r0); + __ ldr(r0, MemOperand(fp, kArgsOffset)); // get the args array + __ push(r0); + __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION); + + // Check the stack for overflow. We are not trying to catch + // interruptions (e.g. debug break and preemption) here, so the "real stack + // limit" is checked. + Label okay; + __ LoadRoot(r2, Heap::kRealStackLimitRootIndex); + // Make r2 the space we have left. The stack might already be overflowed + // here which will cause r2 to become negative. + __ sub(r2, sp, r2); + // Check if the arguments will overflow the stack. + __ cmp(r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ b(gt, &okay); // Signed comparison. + + // Out of stack space. + __ ldr(r1, MemOperand(fp, kFunctionOffset)); + __ push(r1); + __ push(r0); + __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION); + // End of stack check. - // Compute the receiver in non-strict mode. - __ JumpIfSmi(r0, &call_to_object); - __ LoadRoot(r1, Heap::kNullValueRootIndex); - __ cmp(r0, r1); - __ b(eq, &use_global_receiver); - __ LoadRoot(r1, Heap::kUndefinedValueRootIndex); - __ cmp(r0, r1); - __ b(eq, &use_global_receiver); + // Push current limit and index. + __ bind(&okay); + __ push(r0); // limit + __ mov(r1, Operand(0, RelocInfo::NONE)); // initial index + __ push(r1); - // Check if the receiver is already a JavaScript object. - // r0: receiver - STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); - __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE); - __ b(ge, &push_receiver); + // Get the receiver. 
+ __ ldr(r0, MemOperand(fp, kRecvOffset)); - // Convert the receiver to a regular object. - // r0: receiver - __ bind(&call_to_object); - __ push(r0); - __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); - __ b(&push_receiver); + // Check that the function is a JS function (otherwise it must be a proxy). + Label push_receiver; + __ ldr(r1, MemOperand(fp, kFunctionOffset)); + __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE); + __ b(ne, &push_receiver); - // Use the current global receiver object as the receiver. - __ bind(&use_global_receiver); - const int kGlobalOffset = - Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize; - __ ldr(r0, FieldMemOperand(cp, kGlobalOffset)); - __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset)); - __ ldr(r0, FieldMemOperand(r0, kGlobalOffset)); - __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset)); - - // Push the receiver. - // r0: receiver - __ bind(&push_receiver); - __ push(r0); + // Change context eagerly to get the right global object if necessary. + __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); + // Load the shared function info while the function is still in r1. + __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); - // Copy all arguments from the array to the stack. - Label entry, loop; - __ ldr(r0, MemOperand(fp, kIndexOffset)); - __ b(&entry); + // Compute the receiver. + // Do not transform the receiver for strict mode functions. + Label call_to_object, use_global_receiver; + __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset)); + __ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction + + kSmiTagSize))); + __ b(ne, &push_receiver); + + // Do not transform the receiver for strict mode functions. + __ tst(r2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize))); + __ b(ne, &push_receiver); + + // Compute the receiver in non-strict mode. + __ JumpIfSmi(r0, &call_to_object); + __ LoadRoot(r1, Heap::kNullValueRootIndex); + __ cmp(r0, r1); + __ b(eq, &use_global_receiver); + __ LoadRoot(r1, Heap::kUndefinedValueRootIndex); + __ cmp(r0, r1); + __ b(eq, &use_global_receiver); + + // Check if the receiver is already a JavaScript object. + // r0: receiver + STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); + __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE); + __ b(ge, &push_receiver); + + // Convert the receiver to a regular object. + // r0: receiver + __ bind(&call_to_object); + __ push(r0); + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); + __ b(&push_receiver); + + // Use the current global receiver object as the receiver. + __ bind(&use_global_receiver); + const int kGlobalOffset = + Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize; + __ ldr(r0, FieldMemOperand(cp, kGlobalOffset)); + __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset)); + __ ldr(r0, FieldMemOperand(r0, kGlobalOffset)); + __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset)); + + // Push the receiver. + // r0: receiver + __ bind(&push_receiver); + __ push(r0); - // Load the current argument from the arguments array and push it to the - // stack. - // r0: current argument index - __ bind(&loop); - __ ldr(r1, MemOperand(fp, kArgsOffset)); - __ push(r1); - __ push(r0); + // Copy all arguments from the array to the stack. + Label entry, loop; + __ ldr(r0, MemOperand(fp, kIndexOffset)); + __ b(&entry); - // Call the runtime to access the property in the arguments array. 
- __ CallRuntime(Runtime::kGetProperty, 2); - __ push(r0); + // Load the current argument from the arguments array and push it to the + // stack. + // r0: current argument index + __ bind(&loop); + __ ldr(r1, MemOperand(fp, kArgsOffset)); + __ push(r1); + __ push(r0); - // Use inline caching to access the arguments. - __ ldr(r0, MemOperand(fp, kIndexOffset)); - __ add(r0, r0, Operand(1 << kSmiTagSize)); - __ str(r0, MemOperand(fp, kIndexOffset)); + // Call the runtime to access the property in the arguments array. + __ CallRuntime(Runtime::kGetProperty, 2); + __ push(r0); - // Test if the copy loop has finished copying all the elements from the - // arguments object. - __ bind(&entry); - __ ldr(r1, MemOperand(fp, kLimitOffset)); - __ cmp(r0, r1); - __ b(ne, &loop); + // Use inline caching to access the arguments. + __ ldr(r0, MemOperand(fp, kIndexOffset)); + __ add(r0, r0, Operand(1 << kSmiTagSize)); + __ str(r0, MemOperand(fp, kIndexOffset)); - // Invoke the function. - Label call_proxy; - ParameterCount actual(r0); - __ mov(r0, Operand(r0, ASR, kSmiTagSize)); - __ ldr(r1, MemOperand(fp, kFunctionOffset)); - __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE); - __ b(ne, &call_proxy); - __ InvokeFunction(r1, actual, CALL_FUNCTION, - NullCallWrapper(), CALL_AS_METHOD); + // Test if the copy loop has finished copying all the elements from the + // arguments object. + __ bind(&entry); + __ ldr(r1, MemOperand(fp, kLimitOffset)); + __ cmp(r0, r1); + __ b(ne, &loop); + + // Invoke the function. + Label call_proxy; + ParameterCount actual(r0); + __ mov(r0, Operand(r0, ASR, kSmiTagSize)); + __ ldr(r1, MemOperand(fp, kFunctionOffset)); + __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE); + __ b(ne, &call_proxy); + __ InvokeFunction(r1, actual, CALL_FUNCTION, + NullCallWrapper(), CALL_AS_METHOD); - frame_scope.GenerateLeaveFrame(); - __ add(sp, sp, Operand(3 * kPointerSize)); - __ Jump(lr); + // Tear down the internal frame and remove function, receiver and args. + __ LeaveInternalFrame(); + __ add(sp, sp, Operand(3 * kPointerSize)); + __ Jump(lr); - // Invoke the function proxy. - __ bind(&call_proxy); - __ push(r1); // add function proxy as last argument - __ add(r0, r0, Operand(1)); - __ mov(r2, Operand(0, RelocInfo::NONE)); - __ SetCallKind(r5, CALL_AS_METHOD); - __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY); - __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), - RelocInfo::CODE_TARGET); + // Invoke the function proxy. + __ bind(&call_proxy); + __ push(r1); // add function proxy as last argument + __ add(r0, r0, Operand(1)); + __ mov(r2, Operand(0, RelocInfo::NONE)); + __ SetCallKind(r5, CALL_AS_METHOD); + __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY); + __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), + RelocInfo::CODE_TARGET); - // Tear down the internal frame and remove function, receiver and args. - } + __ LeaveInternalFrame(); __ add(sp, sp, Operand(3 * kPointerSize)); __ Jump(lr); } diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc index e2a313372e..e65f6d9b69 100644 --- a/deps/v8/src/arm/code-stubs-arm.cc +++ b/deps/v8/src/arm/code-stubs-arm.cc @@ -189,72 +189,6 @@ void FastNewContextStub::Generate(MacroAssembler* masm) { } -void FastNewBlockContextStub::Generate(MacroAssembler* masm) { - // Stack layout on entry: - // - // [sp]: function. - // [sp + kPointerSize]: serialized scope info - - // Try to allocate the context in new space. 
- Label gc; - int length = slots_ + Context::MIN_CONTEXT_SLOTS; - __ AllocateInNewSpace(FixedArray::SizeFor(length), - r0, r1, r2, &gc, TAG_OBJECT); - - // Load the function from the stack. - __ ldr(r3, MemOperand(sp, 0)); - - // Load the serialized scope info from the stack. - __ ldr(r1, MemOperand(sp, 1 * kPointerSize)); - - // Setup the object header. - __ LoadRoot(r2, Heap::kBlockContextMapRootIndex); - __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); - __ mov(r2, Operand(Smi::FromInt(length))); - __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset)); - - // If this block context is nested in the global context we get a smi - // sentinel instead of a function. The block context should get the - // canonical empty function of the global context as its closure which - // we still have to look up. - Label after_sentinel; - __ JumpIfNotSmi(r3, &after_sentinel); - if (FLAG_debug_code) { - const char* message = "Expected 0 as a Smi sentinel"; - __ cmp(r3, Operand::Zero()); - __ Assert(eq, message); - } - __ ldr(r3, GlobalObjectOperand()); - __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalContextOffset)); - __ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX)); - __ bind(&after_sentinel); - - // Setup the fixed slots. - __ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX)); - __ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX)); - __ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX)); - - // Copy the global object from the previous context. - __ ldr(r1, ContextOperand(cp, Context::GLOBAL_INDEX)); - __ str(r1, ContextOperand(r0, Context::GLOBAL_INDEX)); - - // Initialize the rest of the slots to the hole value. - __ LoadRoot(r1, Heap::kTheHoleValueRootIndex); - for (int i = 0; i < slots_; i++) { - __ str(r1, ContextOperand(r0, i + Context::MIN_CONTEXT_SLOTS)); - } - - // Remove the on-stack argument and return. - __ mov(cp, r0); - __ add(sp, sp, Operand(2 * kPointerSize)); - __ Ret(); - - // Need to collect. Call into runtime system. - __ bind(&gc); - __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1); -} - - void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { // Stack layout on entry: // @@ -904,11 +838,9 @@ void FloatingPointHelper::CallCCodeForDoubleOperation( __ vmov(d0, r0, r1); __ vmov(d1, r2, r3); } - { - AllowExternalCallThatCantCauseGC scope(masm); - __ CallCFunction( - ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2); - } + // Call C routine that may not cause GC or other trouble. + __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()), + 0, 2); // Store answer in the overwritable heap number. Double returned in // registers r0 and r1 or in d0. if (masm->use_eabi_hardfloat()) { @@ -925,29 +857,6 @@ void FloatingPointHelper::CallCCodeForDoubleOperation( } -bool WriteInt32ToHeapNumberStub::IsPregenerated() { - // These variants are compiled ahead of time. See next method. - if (the_int_.is(r1) && the_heap_number_.is(r0) && scratch_.is(r2)) { - return true; - } - if (the_int_.is(r2) && the_heap_number_.is(r0) && scratch_.is(r3)) { - return true; - } - // Other register combinations are generated as and when they are needed, - // so it is unsafe to call them from stubs (we can't generate a stub while - // we are generating a stub). 
- return false; -} - - -void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() { - WriteInt32ToHeapNumberStub stub1(r1, r0, r2); - WriteInt32ToHeapNumberStub stub2(r2, r0, r3); - stub1.GetCode()->set_is_pregenerated(true); - stub2.GetCode()->set_is_pregenerated(true); -} - - // See comment for class. void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) { Label max_negative_int; @@ -1288,8 +1197,6 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, __ vmov(d0, r0, r1); __ vmov(d1, r2, r3); } - - AllowExternalCallThatCantCauseGC scope(masm); __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), 0, 2); __ pop(pc); // Return. @@ -1307,7 +1214,7 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, // If either operand is a JS object or an oddball value, then they are // not equal since their pointers are different. // There is no test for undetectability in strict equality. - STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE); + STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE); Label first_non_object; // Get the type of the first operand into r2 and compare it with // FIRST_SPEC_OBJECT_TYPE. @@ -1699,8 +1606,6 @@ void CompareStub::Generate(MacroAssembler* masm) { // The stub expects its argument in the tos_ register and returns its result in // it, too: zero for false, and a non-zero value for true. void ToBooleanStub::Generate(MacroAssembler* masm) { - // This stub overrides SometimesSetsUpAFrame() to return false. That means - // we cannot call anything that could cause a GC from this stub. // This stub uses VFP3 instructions. CpuFeatures::Scope scope(VFP3); @@ -1808,41 +1713,6 @@ void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) { } -void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { - // We don't allow a GC during a store buffer overflow so there is no need to - // store the registers in any particular way, but we do have to store and - // restore them. - __ stm(db_w, sp, kCallerSaved | lr.bit()); - if (save_doubles_ == kSaveFPRegs) { - CpuFeatures::Scope scope(VFP3); - __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters)); - for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) { - DwVfpRegister reg = DwVfpRegister::from_code(i); - __ vstr(reg, MemOperand(sp, i * kDoubleSize)); - } - } - const int argument_count = 1; - const int fp_argument_count = 0; - const Register scratch = r1; - - AllowExternalCallThatCantCauseGC scope(masm); - __ PrepareCallCFunction(argument_count, fp_argument_count, scratch); - __ mov(r0, Operand(ExternalReference::isolate_address())); - __ CallCFunction( - ExternalReference::store_buffer_overflow_function(masm->isolate()), - argument_count); - if (save_doubles_ == kSaveFPRegs) { - CpuFeatures::Scope scope(VFP3); - for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) { - DwVfpRegister reg = DwVfpRegister::from_code(i); - __ vldr(reg, MemOperand(sp, i * kDoubleSize)); - } - __ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters)); - } - __ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0). -} - - void UnaryOpStub::PrintName(StringStream* stream) { const char* op_name = Token::Name(op_); const char* overwrite_name = NULL; // Make g++ happy. 
@@ -1996,13 +1866,12 @@ void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm, __ jmp(&heapnumber_allocated); __ bind(&slow_allocate_heapnumber); - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ push(r0); - __ CallRuntime(Runtime::kNumberAlloc, 0); - __ mov(r1, Operand(r0)); - __ pop(r0); - } + __ EnterInternalFrame(); + __ push(r0); + __ CallRuntime(Runtime::kNumberAlloc, 0); + __ mov(r1, Operand(r0)); + __ pop(r0); + __ LeaveInternalFrame(); __ bind(&heapnumber_allocated); __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); @@ -2043,14 +1912,13 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot( __ jmp(&heapnumber_allocated); __ bind(&slow_allocate_heapnumber); - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ push(r0); // Push the heap number, not the untagged int32. - __ CallRuntime(Runtime::kNumberAlloc, 0); - __ mov(r2, r0); // Move the new heap number into r2. - // Get the heap number into r0, now that the new heap number is in r2. - __ pop(r0); - } + __ EnterInternalFrame(); + __ push(r0); // Push the heap number, not the untagged int32. + __ CallRuntime(Runtime::kNumberAlloc, 0); + __ mov(r2, r0); // Move the new heap number into r2. + // Get the heap number into r0, now that the new heap number is in r2. + __ pop(r0); + __ LeaveInternalFrame(); // Convert the heap number in r0 to an untagged integer in r1. // This can't go slow-case because it's the same number we already @@ -2160,10 +2028,6 @@ void BinaryOpStub::GenerateTypeTransitionWithSavedArgs( void BinaryOpStub::Generate(MacroAssembler* masm) { - // Explicitly allow generation of nested stubs. It is safe here because - // generation code does not use any raw pointers. - AllowStubCallsScope allow_stub_calls(masm, true); - switch (operands_type_) { case BinaryOpIC::UNINITIALIZED: GenerateTypeTransition(masm); @@ -3269,11 +3133,10 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex); __ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache); __ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset)); - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ push(r0); - __ CallRuntime(RuntimeFunction(), 1); - } + __ EnterInternalFrame(); + __ push(r0); + __ CallRuntime(RuntimeFunction(), 1); + __ LeaveInternalFrame(); __ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset)); __ Ret(); @@ -3286,15 +3149,14 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { // We return the value in d2 without adding it to the cache, but // we cause a scavenging GC so that future allocations will succeed. - { - FrameScope scope(masm, StackFrame::INTERNAL); - - // Allocate an aligned object larger than a HeapNumber. - ASSERT(4 * kPointerSize >= HeapNumber::kSize); - __ mov(scratch0, Operand(4 * kPointerSize)); - __ push(scratch0); - __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace); - } + __ EnterInternalFrame(); + + // Allocate an aligned object larger than a HeapNumber. 
+ ASSERT(4 * kPointerSize >= HeapNumber::kSize); + __ mov(scratch0, Operand(4 * kPointerSize)); + __ push(scratch0); + __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace); + __ LeaveInternalFrame(); __ Ret(); } } @@ -3311,7 +3173,6 @@ void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm, } else { __ vmov(r0, r1, d2); } - AllowExternalCallThatCantCauseGC scope(masm); switch (type_) { case TranscendentalCache::SIN: __ CallCFunction(ExternalReference::math_sin_double_function(isolate), @@ -3407,14 +3268,11 @@ void MathPowStub::Generate(MacroAssembler* masm) { __ push(lr); __ PrepareCallCFunction(1, 1, scratch); __ SetCallCDoubleArguments(double_base, exponent); - { - AllowExternalCallThatCantCauseGC scope(masm); - __ CallCFunction( - ExternalReference::power_double_int_function(masm->isolate()), - 1, 1); - __ pop(lr); - __ GetCFunctionDoubleResult(double_result); - } + __ CallCFunction( + ExternalReference::power_double_int_function(masm->isolate()), + 1, 1); + __ pop(lr); + __ GetCFunctionDoubleResult(double_result); __ vstr(double_result, FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); __ mov(r0, heapnumber); @@ -3440,14 +3298,11 @@ void MathPowStub::Generate(MacroAssembler* masm) { __ push(lr); __ PrepareCallCFunction(0, 2, scratch); __ SetCallCDoubleArguments(double_base, double_exponent); - { - AllowExternalCallThatCantCauseGC scope(masm); - __ CallCFunction( - ExternalReference::power_double_double_function(masm->isolate()), - 0, 2); - __ pop(lr); - __ GetCFunctionDoubleResult(double_result); - } + __ CallCFunction( + ExternalReference::power_double_double_function(masm->isolate()), + 0, 2); + __ pop(lr); + __ GetCFunctionDoubleResult(double_result); __ vstr(double_result, FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); __ mov(r0, heapnumber); @@ -3464,37 +3319,6 @@ bool CEntryStub::NeedsImmovableCode() { } -bool CEntryStub::IsPregenerated() { - return (!save_doubles_ || ISOLATE->fp_stubs_generated()) && - result_size_ == 1; -} - - -void CodeStub::GenerateStubsAheadOfTime() { - CEntryStub::GenerateAheadOfTime(); - WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(); - StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(); - RecordWriteStub::GenerateFixedRegStubsAheadOfTime(); -} - - -void CodeStub::GenerateFPStubs() { - CEntryStub save_doubles(1, kSaveFPRegs); - Handle<Code> code = save_doubles.GetCode(); - code->set_is_pregenerated(true); - StoreBufferOverflowStub stub(kSaveFPRegs); - stub.GetCode()->set_is_pregenerated(true); - code->GetIsolate()->set_fp_stubs_generated(true); -} - - -void CEntryStub::GenerateAheadOfTime() { - CEntryStub stub(1, kDontSaveFPRegs); - Handle<Code> code = stub.GetCode(); - code->set_is_pregenerated(true); -} - - void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { __ Throw(r0); } @@ -3606,7 +3430,8 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, __ b(eq, throw_out_of_memory_exception); // Retrieve the pending exception and clear the variable. - __ mov(r3, Operand(isolate->factory()->the_hole_value())); + __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate))); + __ ldr(r3, MemOperand(ip)); __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, isolate))); __ ldr(r0, MemOperand(ip)); @@ -3644,7 +3469,6 @@ void CEntryStub::Generate(MacroAssembler* masm) { __ sub(r6, r6, Operand(kPointerSize)); // Enter the exit frame that transitions from JavaScript to C++. 
- FrameScope scope(masm, StackFrame::MANUAL); __ EnterExitFrame(save_doubles_); // Setup argc and the builtin function in callee-saved registers. @@ -3789,7 +3613,8 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { // saved values before returning a failure to C. // Clear any pending exceptions. - __ mov(r5, Operand(isolate->factory()->the_hole_value())); + __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate))); + __ ldr(r5, MemOperand(ip)); __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, isolate))); __ str(r5, MemOperand(ip)); @@ -4026,11 +3851,10 @@ void InstanceofStub::Generate(MacroAssembler* masm) { } __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); } else { - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ Push(r0, r1); - __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION); - } + __ EnterInternalFrame(); + __ Push(r0, r1); + __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION); + __ LeaveInternalFrame(); __ cmp(r0, Operand::Zero()); __ LoadRoot(r0, Heap::kTrueValueRootIndex, eq); __ LoadRoot(r0, Heap::kFalseValueRootIndex, ne); @@ -4656,7 +4480,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // For arguments 4 and 3 get string length, calculate start of string data and // calculate the shift of the index (0 for ASCII and 1 for two byte). - __ add(r8, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag)); + STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize); + __ add(r8, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); __ eor(r3, r3, Operand(1)); // Load the length from the original subject string from the previous stack // frame. Therefore we have to use fp, which points exactly to two pointer @@ -4707,7 +4532,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // stack overflow (on the backtrack stack) was detected in RegExp code but // haven't created the exception yet. Handle that in the runtime system. // TODO(592): Rerunning the RegExp to get the stack overflow exception. - __ mov(r1, Operand(isolate->factory()->the_hole_value())); + __ mov(r1, Operand(ExternalReference::the_hole_value_location(isolate))); + __ ldr(r1, MemOperand(r1, 0)); __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress, isolate))); __ ldr(r0, MemOperand(r2, 0)); @@ -4749,25 +4575,16 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ str(r2, FieldMemOperand(last_match_info_elements, RegExpImpl::kLastCaptureCountOffset)); // Store last subject and last input. + __ mov(r3, last_match_info_elements); // Moved up to reduce latency. __ str(subject, FieldMemOperand(last_match_info_elements, RegExpImpl::kLastSubjectOffset)); - __ mov(r2, subject); - __ RecordWriteField(last_match_info_elements, - RegExpImpl::kLastSubjectOffset, - r2, - r7, - kLRHasNotBeenSaved, - kDontSaveFPRegs); + __ RecordWrite(r3, Operand(RegExpImpl::kLastSubjectOffset), r2, r7); __ str(subject, FieldMemOperand(last_match_info_elements, RegExpImpl::kLastInputOffset)); - __ RecordWriteField(last_match_info_elements, - RegExpImpl::kLastInputOffset, - subject, - r7, - kLRHasNotBeenSaved, - kDontSaveFPRegs); + __ mov(r3, last_match_info_elements); + __ RecordWrite(r3, Operand(RegExpImpl::kLastInputOffset), r2, r7); // Get the static offsets vector filled by the native regexp code. 
ExternalReference address_of_static_offsets_vector = @@ -4895,22 +4712,6 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) { } -void CallFunctionStub::FinishCode(Code* code) { - code->set_has_function_cache(false); -} - - -void CallFunctionStub::Clear(Heap* heap, Address address) { - UNREACHABLE(); -} - - -Object* CallFunctionStub::GetCachedValue(Address address) { - UNREACHABLE(); - return NULL; -} - - void CallFunctionStub::Generate(MacroAssembler* masm) { Label slow, non_function; @@ -6624,13 +6425,12 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) { // Call the runtime system in a fresh internal frame. ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate()); - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ Push(r1, r0); - __ mov(ip, Operand(Smi::FromInt(op_))); - __ push(ip); - __ CallExternalReference(miss, 3); - } + __ EnterInternalFrame(); + __ Push(r1, r0); + __ mov(ip, Operand(Smi::FromInt(op_))); + __ push(ip); + __ CallExternalReference(miss, 3); + __ LeaveInternalFrame(); // Compute the entry point of the rewritten stub. __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); // Restore registers. @@ -6813,8 +6613,6 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { - // This stub overrides SometimesSetsUpAFrame() to return false. That means - // we cannot call anything that could cause a GC from this stub. // Registers: // result: StringDictionary to probe // r1: key @@ -6904,267 +6702,6 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { } -struct AheadOfTimeWriteBarrierStubList { - Register object, value, address; - RememberedSetAction action; -}; - - -struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = { - // Used in RegExpExecStub. - { r6, r4, r7, EMIT_REMEMBERED_SET }, - { r6, r2, r7, EMIT_REMEMBERED_SET }, - // Used in CompileArrayPushCall. - // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore. - // Also used in KeyedStoreIC::GenerateGeneric. - { r3, r4, r5, EMIT_REMEMBERED_SET }, - // Used in CompileStoreGlobal. - { r4, r1, r2, OMIT_REMEMBERED_SET }, - // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField. - { r1, r2, r3, EMIT_REMEMBERED_SET }, - { r3, r2, r1, EMIT_REMEMBERED_SET }, - // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField. - { r2, r1, r3, EMIT_REMEMBERED_SET }, - { r3, r1, r2, EMIT_REMEMBERED_SET }, - // KeyedStoreStubCompiler::GenerateStoreFastElement. - { r4, r2, r3, EMIT_REMEMBERED_SET }, - // Null termination. 
- { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET} -}; - - -bool RecordWriteStub::IsPregenerated() { - for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime; - !entry->object.is(no_reg); - entry++) { - if (object_.is(entry->object) && - value_.is(entry->value) && - address_.is(entry->address) && - remembered_set_action_ == entry->action && - save_fp_regs_mode_ == kDontSaveFPRegs) { - return true; - } - } - return false; -} - - -bool StoreBufferOverflowStub::IsPregenerated() { - return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated(); -} - - -void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() { - StoreBufferOverflowStub stub1(kDontSaveFPRegs); - stub1.GetCode()->set_is_pregenerated(true); -} - - -void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() { - for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime; - !entry->object.is(no_reg); - entry++) { - RecordWriteStub stub(entry->object, - entry->value, - entry->address, - entry->action, - kDontSaveFPRegs); - stub.GetCode()->set_is_pregenerated(true); - } -} - - -// Takes the input in 3 registers: address_ value_ and object_. A pointer to -// the value has just been written into the object, now this stub makes sure -// we keep the GC informed. The word in the object where the value has been -// written is in the address register. -void RecordWriteStub::Generate(MacroAssembler* masm) { - Label skip_to_incremental_noncompacting; - Label skip_to_incremental_compacting; - - // The first two instructions are generated with labels so as to get the - // offset fixed up correctly by the bind(Label*) call. We patch it back and - // forth between a compare instructions (a nop in this position) and the - // real branch when we start and stop incremental heap marking. - // See RecordWriteStub::Patch for details. - __ b(&skip_to_incremental_noncompacting); - __ b(&skip_to_incremental_compacting); - - if (remembered_set_action_ == EMIT_REMEMBERED_SET) { - __ RememberedSetHelper(object_, - address_, - value_, - save_fp_regs_mode_, - MacroAssembler::kReturnAtEnd); - } - __ Ret(); - - __ bind(&skip_to_incremental_noncompacting); - GenerateIncremental(masm, INCREMENTAL); - - __ bind(&skip_to_incremental_compacting); - GenerateIncremental(masm, INCREMENTAL_COMPACTION); - - // Initial mode of the stub is expected to be STORE_BUFFER_ONLY. - // Will be checked in IncrementalMarking::ActivateGeneratedStub. - ASSERT(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12)); - ASSERT(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12)); - PatchBranchIntoNop(masm, 0); - PatchBranchIntoNop(masm, Assembler::kInstrSize); -} - - -void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) { - regs_.Save(masm); - - if (remembered_set_action_ == EMIT_REMEMBERED_SET) { - Label dont_need_remembered_set; - - __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0)); - __ JumpIfNotInNewSpace(regs_.scratch0(), // Value. - regs_.scratch0(), - &dont_need_remembered_set); - - __ CheckPageFlag(regs_.object(), - regs_.scratch0(), - 1 << MemoryChunk::SCAN_ON_SCAVENGE, - ne, - &dont_need_remembered_set); - - // First notify the incremental marker if necessary, then update the - // remembered set. 
- CheckNeedsToInformIncrementalMarker( - masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode); - InformIncrementalMarker(masm, mode); - regs_.Restore(masm); - __ RememberedSetHelper(object_, - address_, - value_, - save_fp_regs_mode_, - MacroAssembler::kReturnAtEnd); - - __ bind(&dont_need_remembered_set); - } - - CheckNeedsToInformIncrementalMarker( - masm, kReturnOnNoNeedToInformIncrementalMarker, mode); - InformIncrementalMarker(masm, mode); - regs_.Restore(masm); - __ Ret(); -} - - -void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) { - regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_); - int argument_count = 3; - __ PrepareCallCFunction(argument_count, regs_.scratch0()); - Register address = - r0.is(regs_.address()) ? regs_.scratch0() : regs_.address(); - ASSERT(!address.is(regs_.object())); - ASSERT(!address.is(r0)); - __ Move(address, regs_.address()); - __ Move(r0, regs_.object()); - if (mode == INCREMENTAL_COMPACTION) { - __ Move(r1, address); - } else { - ASSERT(mode == INCREMENTAL); - __ ldr(r1, MemOperand(address, 0)); - } - __ mov(r2, Operand(ExternalReference::isolate_address())); - - AllowExternalCallThatCantCauseGC scope(masm); - if (mode == INCREMENTAL_COMPACTION) { - __ CallCFunction( - ExternalReference::incremental_evacuation_record_write_function( - masm->isolate()), - argument_count); - } else { - ASSERT(mode == INCREMENTAL); - __ CallCFunction( - ExternalReference::incremental_marking_record_write_function( - masm->isolate()), - argument_count); - } - regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_); -} - - -void RecordWriteStub::CheckNeedsToInformIncrementalMarker( - MacroAssembler* masm, - OnNoNeedToInformIncrementalMarker on_no_need, - Mode mode) { - Label on_black; - Label need_incremental; - Label need_incremental_pop_scratch; - - // Let's look at the color of the object: If it is not black we don't have - // to inform the incremental marker. - __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black); - - regs_.Restore(masm); - if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { - __ RememberedSetHelper(object_, - address_, - value_, - save_fp_regs_mode_, - MacroAssembler::kReturnAtEnd); - } else { - __ Ret(); - } - - __ bind(&on_black); - - // Get the value from the slot. - __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0)); - - if (mode == INCREMENTAL_COMPACTION) { - Label ensure_not_white; - - __ CheckPageFlag(regs_.scratch0(), // Contains value. - regs_.scratch1(), // Scratch. - MemoryChunk::kEvacuationCandidateMask, - eq, - &ensure_not_white); - - __ CheckPageFlag(regs_.object(), - regs_.scratch1(), // Scratch. - MemoryChunk::kSkipEvacuationSlotsRecordingMask, - eq, - &need_incremental); - - __ bind(&ensure_not_white); - } - - // We need extra registers for this, so we push the object and the address - // register temporarily. - __ Push(regs_.object(), regs_.address()); - __ EnsureNotWhite(regs_.scratch0(), // The value. - regs_.scratch1(), // Scratch. - regs_.object(), // Scratch. - regs_.address(), // Scratch. 
- &need_incremental_pop_scratch); - __ Pop(regs_.object(), regs_.address()); - - regs_.Restore(masm); - if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { - __ RememberedSetHelper(object_, - address_, - value_, - save_fp_regs_mode_, - MacroAssembler::kReturnAtEnd); - } else { - __ Ret(); - } - - __ bind(&need_incremental_pop_scratch); - __ Pop(regs_.object(), regs_.address()); - - __ bind(&need_incremental); - - // Fall through when we need to inform the incremental marker. -} - - #undef __ } } // namespace v8::internal diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h index 3ba75bab13..557f7e6d41 100644 --- a/deps/v8/src/arm/code-stubs-arm.h +++ b/deps/v8/src/arm/code-stubs-arm.h @@ -58,25 +58,6 @@ class TranscendentalCacheStub: public CodeStub { }; -class StoreBufferOverflowStub: public CodeStub { - public: - explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp) - : save_doubles_(save_fp) { } - - void Generate(MacroAssembler* masm); - - virtual bool IsPregenerated(); - static void GenerateFixedRegStubsAheadOfTime(); - virtual bool SometimesSetsUpAFrame() { return false; } - - private: - SaveFPRegsMode save_doubles_; - - Major MajorKey() { return StoreBufferOverflow; } - int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; } -}; - - class UnaryOpStub: public CodeStub { public: UnaryOpStub(Token::Value op, @@ -342,9 +323,6 @@ class WriteInt32ToHeapNumberStub : public CodeStub { the_heap_number_(the_heap_number), scratch_(scratch) { } - bool IsPregenerated(); - static void GenerateFixedRegStubsAheadOfTime(); - private: Register the_int_; Register the_heap_number_; @@ -393,225 +371,6 @@ class NumberToStringStub: public CodeStub { }; -class RecordWriteStub: public CodeStub { - public: - RecordWriteStub(Register object, - Register value, - Register address, - RememberedSetAction remembered_set_action, - SaveFPRegsMode fp_mode) - : object_(object), - value_(value), - address_(address), - remembered_set_action_(remembered_set_action), - save_fp_regs_mode_(fp_mode), - regs_(object, // An input reg. - address, // An input reg. - value) { // One scratch reg. 
- } - - enum Mode { - STORE_BUFFER_ONLY, - INCREMENTAL, - INCREMENTAL_COMPACTION - }; - - virtual bool IsPregenerated(); - static void GenerateFixedRegStubsAheadOfTime(); - virtual bool SometimesSetsUpAFrame() { return false; } - - static void PatchBranchIntoNop(MacroAssembler* masm, int pos) { - masm->instr_at_put(pos, (masm->instr_at(pos) & ~B27) | (B24 | B20)); - ASSERT(Assembler::IsTstImmediate(masm->instr_at(pos))); - } - - static void PatchNopIntoBranch(MacroAssembler* masm, int pos) { - masm->instr_at_put(pos, (masm->instr_at(pos) & ~(B24 | B20)) | B27); - ASSERT(Assembler::IsBranch(masm->instr_at(pos))); - } - - static Mode GetMode(Code* stub) { - Instr first_instruction = Assembler::instr_at(stub->instruction_start()); - Instr second_instruction = Assembler::instr_at(stub->instruction_start() + - Assembler::kInstrSize); - - if (Assembler::IsBranch(first_instruction)) { - return INCREMENTAL; - } - - ASSERT(Assembler::IsTstImmediate(first_instruction)); - - if (Assembler::IsBranch(second_instruction)) { - return INCREMENTAL_COMPACTION; - } - - ASSERT(Assembler::IsTstImmediate(second_instruction)); - - return STORE_BUFFER_ONLY; - } - - static void Patch(Code* stub, Mode mode) { - MacroAssembler masm(NULL, - stub->instruction_start(), - stub->instruction_size()); - switch (mode) { - case STORE_BUFFER_ONLY: - ASSERT(GetMode(stub) == INCREMENTAL || - GetMode(stub) == INCREMENTAL_COMPACTION); - PatchBranchIntoNop(&masm, 0); - PatchBranchIntoNop(&masm, Assembler::kInstrSize); - break; - case INCREMENTAL: - ASSERT(GetMode(stub) == STORE_BUFFER_ONLY); - PatchNopIntoBranch(&masm, 0); - break; - case INCREMENTAL_COMPACTION: - ASSERT(GetMode(stub) == STORE_BUFFER_ONLY); - PatchNopIntoBranch(&masm, Assembler::kInstrSize); - break; - } - ASSERT(GetMode(stub) == mode); - CPU::FlushICache(stub->instruction_start(), 2 * Assembler::kInstrSize); - } - - private: - // This is a helper class for freeing up 3 scratch registers. The input is - // two registers that must be preserved and one scratch register provided by - // the caller. - class RegisterAllocation { - public: - RegisterAllocation(Register object, - Register address, - Register scratch0) - : object_(object), - address_(address), - scratch0_(scratch0) { - ASSERT(!AreAliased(scratch0, object, address, no_reg)); - scratch1_ = GetRegThatIsNotOneOf(object_, address_, scratch0_); - } - - void Save(MacroAssembler* masm) { - ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_)); - // We don't have to save scratch0_ because it was given to us as - // a scratch register. - masm->push(scratch1_); - } - - void Restore(MacroAssembler* masm) { - masm->pop(scratch1_); - } - - // If we have to call into C then we need to save and restore all caller- - // saved registers that were not already preserved. The scratch registers - // will be restored by other means so we don't bother pushing them here. - void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) { - masm->stm(db_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit()); - if (mode == kSaveFPRegs) { - CpuFeatures::Scope scope(VFP3); - masm->sub(sp, - sp, - Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1))); - // Save all VFP registers except d0. 
- for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) { - DwVfpRegister reg = DwVfpRegister::from_code(i); - masm->vstr(reg, MemOperand(sp, (i - 1) * kDoubleSize)); - } - } - } - - inline void RestoreCallerSaveRegisters(MacroAssembler*masm, - SaveFPRegsMode mode) { - if (mode == kSaveFPRegs) { - CpuFeatures::Scope scope(VFP3); - // Restore all VFP registers except d0. - for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) { - DwVfpRegister reg = DwVfpRegister::from_code(i); - masm->vldr(reg, MemOperand(sp, (i - 1) * kDoubleSize)); - } - masm->add(sp, - sp, - Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1))); - } - masm->ldm(ia_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit()); - } - - inline Register object() { return object_; } - inline Register address() { return address_; } - inline Register scratch0() { return scratch0_; } - inline Register scratch1() { return scratch1_; } - - private: - Register object_; - Register address_; - Register scratch0_; - Register scratch1_; - - Register GetRegThatIsNotOneOf(Register r1, - Register r2, - Register r3) { - for (int i = 0; i < Register::kNumAllocatableRegisters; i++) { - Register candidate = Register::FromAllocationIndex(i); - if (candidate.is(r1)) continue; - if (candidate.is(r2)) continue; - if (candidate.is(r3)) continue; - return candidate; - } - UNREACHABLE(); - return no_reg; - } - friend class RecordWriteStub; - }; - - enum OnNoNeedToInformIncrementalMarker { - kReturnOnNoNeedToInformIncrementalMarker, - kUpdateRememberedSetOnNoNeedToInformIncrementalMarker - }; - - void Generate(MacroAssembler* masm); - void GenerateIncremental(MacroAssembler* masm, Mode mode); - void CheckNeedsToInformIncrementalMarker( - MacroAssembler* masm, - OnNoNeedToInformIncrementalMarker on_no_need, - Mode mode); - void InformIncrementalMarker(MacroAssembler* masm, Mode mode); - - Major MajorKey() { return RecordWrite; } - - int MinorKey() { - return ObjectBits::encode(object_.code()) | - ValueBits::encode(value_.code()) | - AddressBits::encode(address_.code()) | - RememberedSetActionBits::encode(remembered_set_action_) | - SaveFPRegsModeBits::encode(save_fp_regs_mode_); - } - - bool MustBeInStubCache() { - // All stubs must be registered in the stub cache - // otherwise IncrementalMarker would not be able to find - // and patch it. - return true; - } - - void Activate(Code* code) { - code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code); - } - - class ObjectBits: public BitField<int, 0, 4> {}; - class ValueBits: public BitField<int, 4, 4> {}; - class AddressBits: public BitField<int, 8, 4> {}; - class RememberedSetActionBits: public BitField<RememberedSetAction, 12, 1> {}; - class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 13, 1> {}; - - Register object_; - Register value_; - Register address_; - RememberedSetAction remembered_set_action_; - SaveFPRegsMode save_fp_regs_mode_; - Label slow_; - RegisterAllocation regs_; -}; - - // Enter C code from generated RegExp code in a way that allows // the C code to fix the return address in case of a GC. // Currently only needed on ARM. 
@@ -816,8 +575,6 @@ class StringDictionaryLookupStub: public CodeStub { Register r0, Register r1); - virtual bool SometimesSetsUpAFrame() { return false; } - private: static const int kInlinedProbes = 4; static const int kTotalProbes = 20; @@ -830,7 +587,7 @@ class StringDictionaryLookupStub: public CodeStub { StringDictionary::kHeaderSize + StringDictionary::kElementsStartIndex * kPointerSize; - Major MajorKey() { return StringDictionaryLookup; } + Major MajorKey() { return StringDictionaryNegativeLookup; } int MinorKey() { return LookupModeBits::encode(mode_); diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc index 3993ed02be..bf748a9b6a 100644 --- a/deps/v8/src/arm/codegen-arm.cc +++ b/deps/v8/src/arm/codegen-arm.cc @@ -38,16 +38,12 @@ namespace internal { // Platform-specific RuntimeCallHelper functions. void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const { - masm->EnterFrame(StackFrame::INTERNAL); - ASSERT(!masm->has_frame()); - masm->set_has_frame(true); + masm->EnterInternalFrame(); } void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { - masm->LeaveFrame(StackFrame::INTERNAL); - ASSERT(masm->has_frame()); - masm->set_has_frame(false); + masm->LeaveInternalFrame(); } diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h index 1c0d508d2d..d27982abac 100644 --- a/deps/v8/src/arm/codegen-arm.h +++ b/deps/v8/src/arm/codegen-arm.h @@ -69,6 +69,16 @@ class CodeGenerator: public AstVisitor { int pos, bool right_here = false); + // Constants related to patching of inlined load/store. + static int GetInlinedKeyedLoadInstructionsAfterPatch() { + return FLAG_debug_code ? 32 : 13; + } + static const int kInlinedKeyedStoreInstructionsAfterPatch = 8; + static int GetInlinedNamedStoreInstructionsAfterPatch() { + ASSERT(Isolate::Current()->inlined_write_barrier_size() != -1); + return Isolate::Current()->inlined_write_barrier_size() + 4; + } + private: DISALLOW_COPY_AND_ASSIGN(CodeGenerator); }; diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc index b866f9cc8d..07a22722c8 100644 --- a/deps/v8/src/arm/debug-arm.cc +++ b/deps/v8/src/arm/debug-arm.cc @@ -132,58 +132,56 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() { static void Generate_DebugBreakCallHelper(MacroAssembler* masm, RegList object_regs, RegList non_object_regs) { - { - FrameScope scope(masm, StackFrame::INTERNAL); - - // Store the registers containing live values on the expression stack to - // make sure that these are correctly updated during GC. Non object values - // are stored as a smi causing it to be untouched by GC. - ASSERT((object_regs & ~kJSCallerSaved) == 0); - ASSERT((non_object_regs & ~kJSCallerSaved) == 0); - ASSERT((object_regs & non_object_regs) == 0); - if ((object_regs | non_object_regs) != 0) { - for (int i = 0; i < kNumJSCallerSaved; i++) { - int r = JSCallerSavedCode(i); - Register reg = { r }; - if ((non_object_regs & (1 << r)) != 0) { - if (FLAG_debug_code) { - __ tst(reg, Operand(0xc0000000)); - __ Assert(eq, "Unable to encode value as smi"); - } - __ mov(reg, Operand(reg, LSL, kSmiTagSize)); + __ EnterInternalFrame(); + + // Store the registers containing live values on the expression stack to + // make sure that these are correctly updated during GC. Non object values + // are stored as a smi causing it to be untouched by GC. 
+ ASSERT((object_regs & ~kJSCallerSaved) == 0); + ASSERT((non_object_regs & ~kJSCallerSaved) == 0); + ASSERT((object_regs & non_object_regs) == 0); + if ((object_regs | non_object_regs) != 0) { + for (int i = 0; i < kNumJSCallerSaved; i++) { + int r = JSCallerSavedCode(i); + Register reg = { r }; + if ((non_object_regs & (1 << r)) != 0) { + if (FLAG_debug_code) { + __ tst(reg, Operand(0xc0000000)); + __ Assert(eq, "Unable to encode value as smi"); } + __ mov(reg, Operand(reg, LSL, kSmiTagSize)); } - __ stm(db_w, sp, object_regs | non_object_regs); } + __ stm(db_w, sp, object_regs | non_object_regs); + } #ifdef DEBUG - __ RecordComment("// Calling from debug break to runtime - come in - over"); + __ RecordComment("// Calling from debug break to runtime - come in - over"); #endif - __ mov(r0, Operand(0, RelocInfo::NONE)); // no arguments - __ mov(r1, Operand(ExternalReference::debug_break(masm->isolate()))); - - CEntryStub ceb(1); - __ CallStub(&ceb); - - // Restore the register values from the expression stack. - if ((object_regs | non_object_regs) != 0) { - __ ldm(ia_w, sp, object_regs | non_object_regs); - for (int i = 0; i < kNumJSCallerSaved; i++) { - int r = JSCallerSavedCode(i); - Register reg = { r }; - if ((non_object_regs & (1 << r)) != 0) { - __ mov(reg, Operand(reg, LSR, kSmiTagSize)); - } - if (FLAG_debug_code && - (((object_regs |non_object_regs) & (1 << r)) == 0)) { - __ mov(reg, Operand(kDebugZapValue)); - } + __ mov(r0, Operand(0, RelocInfo::NONE)); // no arguments + __ mov(r1, Operand(ExternalReference::debug_break(masm->isolate()))); + + CEntryStub ceb(1); + __ CallStub(&ceb); + + // Restore the register values from the expression stack. + if ((object_regs | non_object_regs) != 0) { + __ ldm(ia_w, sp, object_regs | non_object_regs); + for (int i = 0; i < kNumJSCallerSaved; i++) { + int r = JSCallerSavedCode(i); + Register reg = { r }; + if ((non_object_regs & (1 << r)) != 0) { + __ mov(reg, Operand(reg, LSR, kSmiTagSize)); + } + if (FLAG_debug_code && + (((object_regs |non_object_regs) & (1 << r)) == 0)) { + __ mov(reg, Operand(kDebugZapValue)); } } - - // Leave the internal frame. } + __ LeaveInternalFrame(); + // Now that the break point has been handled, resume normal execution by // jumping to the target address intended by the caller and that was // overwritten by the address of DebugBreakXXX. diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc index bb03d740d1..00357f76db 100644 --- a/deps/v8/src/arm/deoptimizer-arm.cc +++ b/deps/v8/src/arm/deoptimizer-arm.cc @@ -112,19 +112,12 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { } #endif - Isolate* isolate = code->GetIsolate(); - // Add the deoptimizing code to the list. DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code); - DeoptimizerData* data = isolate->deoptimizer_data(); + DeoptimizerData* data = code->GetIsolate()->deoptimizer_data(); node->set_next(data->deoptimizing_code_list_); data->deoptimizing_code_list_ = node; - // We might be in the middle of incremental marking with compaction. - // Tell collector to treat this code object in a special way and - // ignore all slots that might have been recorded on it. - isolate->heap()->mark_compact_collector()->InvalidateCode(code); - // Set the code for the function to non-optimized version. 
function->ReplaceCode(function->shared()->code()); @@ -141,8 +134,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { } -void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code, - Address pc_after, +void Deoptimizer::PatchStackCheckCodeAt(Address pc_after, Code* check_code, Code* replacement_code) { const int kInstrSize = Assembler::kInstrSize; @@ -177,13 +169,6 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code, reinterpret_cast<uint32_t>(check_code->entry())); Memory::uint32_at(stack_check_address_pointer) = reinterpret_cast<uint32_t>(replacement_code->entry()); - - RelocInfo rinfo(pc_after - 2 * kInstrSize, - RelocInfo::CODE_TARGET, - 0, - unoptimized_code); - unoptimized_code->GetHeap()->incremental_marking()->RecordWriteIntoCode( - unoptimized_code, &rinfo, replacement_code); } @@ -208,9 +193,6 @@ void Deoptimizer::RevertStackCheckCodeAt(Address pc_after, reinterpret_cast<uint32_t>(replacement_code->entry())); Memory::uint32_at(stack_check_address_pointer) = reinterpret_cast<uint32_t>(check_code->entry()); - - check_code->GetHeap()->incremental_marking()-> - RecordCodeTargetPatch(pc_after - 2 * kInstrSize, check_code); } @@ -650,10 +632,7 @@ void Deoptimizer::EntryGenerator::Generate() { __ mov(r5, Operand(ExternalReference::isolate_address())); __ str(r5, MemOperand(sp, 1 * kPointerSize)); // Isolate. // Call Deoptimizer::New(). - { - AllowExternalCallThatCantCauseGC scope(masm()); - __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6); - } + __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6); // Preserve "deoptimizer" object in register r0 and get the input // frame descriptor pointer to r1 (deoptimizer->input_); @@ -707,11 +686,8 @@ void Deoptimizer::EntryGenerator::Generate() { // r0: deoptimizer object; r1: scratch. __ PrepareCallCFunction(1, r1); // Call Deoptimizer::ComputeOutputFrames(). - { - AllowExternalCallThatCantCauseGC scope(masm()); - __ CallCFunction( - ExternalReference::compute_output_frames_function(isolate), 1); - } + __ CallCFunction( + ExternalReference::compute_output_frames_function(isolate), 1); __ pop(r0); // Restore deoptimizer object (class Deoptimizer). // Replace the current (input) frame with the output frames. diff --git a/deps/v8/src/arm/frames-arm.h b/deps/v8/src/arm/frames-arm.h index c66ceee931..26bbd82d00 100644 --- a/deps/v8/src/arm/frames-arm.h +++ b/deps/v8/src/arm/frames-arm.h @@ -70,16 +70,6 @@ static const RegList kCalleeSaved = 1 << 10 | // r10 v7 1 << 11; // r11 v8 (fp in JavaScript code) -// When calling into C++ (only for C++ calls that can't cause a GC). -// The call code will take care of lr, fp, etc. -static const RegList kCallerSaved = - 1 << 0 | // r0 - 1 << 1 | // r1 - 1 << 2 | // r2 - 1 << 3 | // r3 - 1 << 9; // r9 - - static const int kNumCalleeSaved = 7 + kR9Available; // Double registers d8 to d15 are callee-saved. diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc index f9a880f56b..50ed8b1da7 100644 --- a/deps/v8/src/arm/full-codegen-arm.cc +++ b/deps/v8/src/arm/full-codegen-arm.cc @@ -39,7 +39,6 @@ #include "stub-cache.h" #include "arm/code-stubs-arm.h" -#include "arm/macro-assembler-arm.h" namespace v8 { namespace internal { @@ -156,11 +155,6 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { __ bind(&ok); } - // Open a frame scope to indicate that there is a frame on the stack. The - // MANUAL indicates that the scope shouldn't actually generate code to set up - // the frame (that is done below). 
- FrameScope frame_scope(masm_, StackFrame::MANUAL); - int locals_count = info->scope()->num_stack_slots(); __ Push(lr, fp, cp, r1); @@ -206,12 +200,13 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { // Load parameter from stack. __ ldr(r0, MemOperand(fp, parameter_offset)); // Store it in the context. - MemOperand target = ContextOperand(cp, var->index()); - __ str(r0, target); - - // Update the write barrier. - __ RecordWriteContextSlot( - cp, target.offset(), r0, r3, kLRHasBeenSaved, kDontSaveFPRegs); + __ mov(r1, Operand(Context::SlotOffset(var->index()))); + __ str(r0, MemOperand(cp, r1)); + // Update the write barrier. This clobbers all involved + // registers, so we have to use two more registers to avoid + // clobbering cp. + __ mov(r2, Operand(cp)); + __ RecordWrite(r2, Operand(r1), r3, r0); } } } @@ -670,15 +665,12 @@ void FullCodeGenerator::SetVar(Variable* var, ASSERT(!scratch1.is(src)); MemOperand location = VarOperand(var, scratch0); __ str(src, location); - // Emit the write barrier code if the location is in the heap. if (var->IsContextSlot()) { - __ RecordWriteContextSlot(scratch0, - location.offset(), - src, - scratch1, - kLRHasBeenSaved, - kDontSaveFPRegs); + __ RecordWrite(scratch0, + Operand(Context::SlotOffset(var->index())), + scratch1, + src); } } @@ -754,14 +746,8 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, __ str(result_register(), ContextOperand(cp, variable->index())); int offset = Context::SlotOffset(variable->index()); // We know that we have written a function, which is not a smi. - __ RecordWriteContextSlot(cp, - offset, - result_register(), - r2, - kLRHasBeenSaved, - kDontSaveFPRegs, - EMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); + __ mov(r1, Operand(cp)); + __ RecordWrite(r1, Operand(offset), r2, result_register()); PrepareForBailoutForId(proxy->id(), NO_REGISTERS); } else if (mode == Variable::CONST || mode == Variable::LET) { Comment cmnt(masm_, "[ Declaration"); @@ -1225,17 +1211,9 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var, } else if (var->mode() == Variable::DYNAMIC_LOCAL) { Variable* local = var->local_if_not_shadowed(); __ ldr(r0, ContextSlotOperandCheckExtensions(local, slow)); - if (local->mode() == Variable::CONST || - local->mode() == Variable::LET) { + if (local->mode() == Variable::CONST) { __ CompareRoot(r0, Heap::kTheHoleValueRootIndex); - if (local->mode() == Variable::CONST) { - __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq); - } else { // Variable::LET - __ b(ne, done); - __ mov(r0, Operand(var->name())); - __ push(r0); - __ CallRuntime(Runtime::kThrowReferenceError, 1); - } + __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq); } __ jmp(done); } @@ -1512,23 +1490,14 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { VisitForAccumulatorValue(subexpr); // Store the subexpression value in the array's elements. - __ ldr(r6, MemOperand(sp)); // Copy of array literal. - __ ldr(r1, FieldMemOperand(r6, JSObject::kElementsOffset)); + __ ldr(r1, MemOperand(sp)); // Copy of array literal. + __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset)); int offset = FixedArray::kHeaderSize + (i * kPointerSize); __ str(result_register(), FieldMemOperand(r1, offset)); - Label no_map_change; - __ JumpIfSmi(result_register(), &no_map_change); // Update the write barrier for the array store with r0 as the scratch // register. 
- __ RecordWriteField( - r1, offset, result_register(), r2, kLRHasBeenSaved, kDontSaveFPRegs, - EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); - __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset)); - __ CheckFastSmiOnlyElements(r3, r2, &no_map_change); - __ push(r6); // Copy of array literal. - __ CallRuntime(Runtime::kNonSmiElementStored, 1); - __ bind(&no_map_change); + __ RecordWrite(r1, Operand(offset), r2, result_register()); PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS); } @@ -1900,8 +1869,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, // RecordWrite may destroy all its register arguments. __ mov(r3, result_register()); int offset = Context::SlotOffset(var->index()); - __ RecordWriteContextSlot( - r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs); + __ RecordWrite(r1, Operand(offset), r2, r3); } } @@ -1919,9 +1887,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, __ str(r0, location); if (var->IsContextSlot()) { __ mov(r3, r0); - int offset = Context::SlotOffset(var->index()); - __ RecordWriteContextSlot( - r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs); + __ RecordWrite(r1, Operand(Context::SlotOffset(var->index())), r2, r3); } } else { ASSERT(var->IsLookupSlot()); @@ -2696,24 +2662,20 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) { // Check that the object is a JS object but take special care of JS // functions to make sure they have 'Function' as their class. - // Assume that there are only two callable types, and one of them is at - // either end of the type range for JS object types. Saves extra comparisons. - STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); __ CompareObjectType(r0, r0, r1, FIRST_SPEC_OBJECT_TYPE); // Map is now in r0. __ b(lt, &null); - STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE == - FIRST_SPEC_OBJECT_TYPE + 1); - __ b(eq, &function); - - __ cmp(r1, Operand(LAST_SPEC_OBJECT_TYPE)); - STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == - LAST_SPEC_OBJECT_TYPE - 1); - __ b(eq, &function); - // Assume that there is no larger type. - STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1); - - // Check if the constructor in the map is a JS function. + + // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and + // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after + // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter. + STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE); + STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE == + LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1); + __ cmp(r1, Operand(FIRST_CALLABLE_SPEC_OBJECT_TYPE)); + __ b(ge, &function); + + // Check if the constructor in the map is a function. __ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset)); __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE); __ b(ne, &non_function_constructor); @@ -2891,9 +2853,7 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) { __ str(r0, FieldMemOperand(r1, JSValue::kValueOffset)); // Update the write barrier. Save the value as it will be // overwritten by the write barrier code and is needed afterward. 
- __ mov(r2, r0); - __ RecordWriteField( - r1, JSValue::kValueOffset, r2, r3, kLRHasBeenSaved, kDontSaveFPRegs); + __ RecordWrite(r1, Operand(JSValue::kValueOffset - kHeapObjectTag), r2, r3); __ bind(&done); context()->Plug(r0); @@ -3181,31 +3141,16 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) { __ str(scratch1, MemOperand(index2, 0)); __ str(scratch2, MemOperand(index1, 0)); - Label no_remembered_set; - __ CheckPageFlag(elements, - scratch1, - 1 << MemoryChunk::SCAN_ON_SCAVENGE, - ne, - &no_remembered_set); + Label new_space; + __ InNewSpace(elements, scratch1, eq, &new_space); // Possible optimization: do a check that both values are Smis // (or them and test against Smi mask.) - // We are swapping two objects in an array and the incremental marker never - // pauses in the middle of scanning a single object. Therefore the - // incremental marker is not disturbed, so we don't need to call the - // RecordWrite stub that notifies the incremental marker. - __ RememberedSetHelper(elements, - index1, - scratch2, - kDontSaveFPRegs, - MacroAssembler::kFallThroughAtEnd); - __ RememberedSetHelper(elements, - index2, - scratch2, - kDontSaveFPRegs, - MacroAssembler::kFallThroughAtEnd); + __ mov(scratch1, elements); + __ RecordWriteHelper(elements, index1, scratch2); + __ RecordWriteHelper(scratch1, index2, scratch2); // scratch1 holds elements. - __ bind(&no_remembered_set); + __ bind(&new_space); // We are done. Drop elements from the stack, and return undefined. __ Drop(3); __ LoadRoot(r0, Heap::kUndefinedValueRootIndex); @@ -3953,14 +3898,10 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) { void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, - Handle<String> check) { - Label materialize_true, materialize_false; - Label* if_true = NULL; - Label* if_false = NULL; - Label* fall_through = NULL; - context()->PrepareTest(&materialize_true, &materialize_false, - &if_true, &if_false, &fall_through); - + Handle<String> check, + Label* if_true, + Label* if_false, + Label* fall_through) { { AccumulatorValueContext context(this); VisitForTypeofValue(expr); } @@ -4001,11 +3942,9 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, } else if (check->Equals(isolate()->heap()->function_symbol())) { __ JumpIfSmi(r0, if_false); - STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); - __ CompareObjectType(r0, r0, r1, JS_FUNCTION_TYPE); - __ b(eq, if_true); - __ cmp(r1, Operand(JS_FUNCTION_PROXY_TYPE)); - Split(eq, if_true, if_false, fall_through); + __ CompareObjectType(r0, r1, r0, FIRST_CALLABLE_SPEC_OBJECT_TYPE); + Split(ge, if_true, if_false, fall_through); + } else if (check->Equals(isolate()->heap()->object_symbol())) { __ JumpIfSmi(r0, if_false); if (!FLAG_harmony_typeof) { @@ -4024,7 +3963,18 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, } else { if (if_false != fall_through) __ jmp(if_false); } - context()->Plug(if_true, if_false); +} + + +void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr, + Label* if_true, + Label* if_false, + Label* fall_through) { + VisitForAccumulatorValue(expr); + PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); + + __ CompareRoot(r0, Heap::kUndefinedValueRootIndex); + Split(eq, if_true, if_false, fall_through); } @@ -4032,12 +3982,9 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { Comment cmnt(masm_, "[ CompareOperation"); SetSourcePosition(expr->position()); - // First we try a fast inlined version of the compare when one 
of - // the operands is a literal. - if (TryLiteralCompare(expr)) return; - // Always perform the comparison for its control flow. Pack the result // into the expression's context after the comparison is performed. + Label materialize_true, materialize_false; Label* if_true = NULL; Label* if_false = NULL; @@ -4045,6 +3992,13 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { context()->PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false, &fall_through); + // First we try a fast inlined version of the compare when one of + // the operands is a literal. + if (TryLiteralCompare(expr, if_true, if_false, fall_through)) { + context()->Plug(if_true, if_false); + return; + } + Token::Value op = expr->op(); VisitForStackValue(expr->left()); switch (op) { @@ -4131,9 +4085,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { } -void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr, - Expression* sub_expr, - NilValue nil) { +void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) { + Comment cmnt(masm_, "[ CompareToNull"); Label materialize_true, materialize_false; Label* if_true = NULL; Label* if_false = NULL; @@ -4141,21 +4094,15 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr, context()->PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false, &fall_through); - VisitForAccumulatorValue(sub_expr); + VisitForAccumulatorValue(expr->expression()); PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); - Heap::RootListIndex nil_value = nil == kNullValue ? - Heap::kNullValueRootIndex : - Heap::kUndefinedValueRootIndex; - __ LoadRoot(r1, nil_value); + __ LoadRoot(r1, Heap::kNullValueRootIndex); __ cmp(r0, r1); - if (expr->op() == Token::EQ_STRICT) { + if (expr->is_strict()) { Split(eq, if_true, if_false, fall_through); } else { - Heap::RootListIndex other_nil_value = nil == kNullValue ? - Heap::kUndefinedValueRootIndex : - Heap::kNullValueRootIndex; __ b(eq, if_true); - __ LoadRoot(r1, other_nil_value); + __ LoadRoot(r1, Heap::kUndefinedValueRootIndex); __ cmp(r0, r1); __ b(eq, if_true); __ JumpIfSmi(r0, if_false); diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc index 6e0badca1d..2e49cae928 100644 --- a/deps/v8/src/arm/ic-arm.cc +++ b/deps/v8/src/arm/ic-arm.cc @@ -208,8 +208,7 @@ static void GenerateDictionaryStore(MacroAssembler* masm, // Update the write barrier. Make sure not to clobber the value. __ mov(scratch1, value); - __ RecordWrite( - elements, scratch2, scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs); + __ RecordWrite(elements, scratch2, scratch1); } @@ -505,22 +504,21 @@ static void GenerateCallMiss(MacroAssembler* masm, // Get the receiver of the function from the stack. __ ldr(r3, MemOperand(sp, argc * kPointerSize)); - { - FrameScope scope(masm, StackFrame::INTERNAL); + __ EnterInternalFrame(); - // Push the receiver and the name of the function. - __ Push(r3, r2); + // Push the receiver and the name of the function. + __ Push(r3, r2); - // Call the entry. - __ mov(r0, Operand(2)); - __ mov(r1, Operand(ExternalReference(IC_Utility(id), isolate))); + // Call the entry. + __ mov(r0, Operand(2)); + __ mov(r1, Operand(ExternalReference(IC_Utility(id), isolate))); - CEntryStub stub(1); - __ CallStub(&stub); + CEntryStub stub(1); + __ CallStub(&stub); - // Move result to r1 and leave the internal frame. - __ mov(r1, Operand(r0)); - } + // Move result to r1 and leave the internal frame. 
+ __ mov(r1, Operand(r0)); + __ LeaveInternalFrame(); // Check if the receiver is a global object of some sort. // This can happen only for regular CallIC but not KeyedCallIC. @@ -652,13 +650,12 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) { // This branch is taken when calling KeyedCallIC_Miss is neither required // nor beneficial. __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, r0, r3); - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ push(r2); // save the key - __ Push(r1, r2); // pass the receiver and the key - __ CallRuntime(Runtime::kKeyedGetProperty, 2); - __ pop(r2); // restore the key - } + __ EnterInternalFrame(); + __ push(r2); // save the key + __ Push(r1, r2); // pass the receiver and the key + __ CallRuntime(Runtime::kKeyedGetProperty, 2); + __ pop(r2); // restore the key + __ LeaveInternalFrame(); __ mov(r1, r0); __ jmp(&do_call); @@ -911,8 +908,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) { GenerateMappedArgumentsLookup(masm, r2, r1, r3, r4, r5, ¬in, &slow); __ str(r0, mapped_location); __ add(r6, r3, r5); - __ mov(r9, r0); - __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs); + __ RecordWrite(r3, r6, r9); __ Ret(); __ bind(¬in); // The unmapped lookup expects that the parameter map is in r3. @@ -920,8 +916,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) { GenerateUnmappedArgumentsLookup(masm, r1, r3, r4, &slow); __ str(r0, unmapped_location); __ add(r6, r3, r4); - __ mov(r9, r0); - __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs); + __ RecordWrite(r3, r6, r9); __ Ret(); __ bind(&slow); GenerateMiss(masm, false); @@ -1272,17 +1267,13 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, // -- r2 : receiver // -- lr : return address // ----------------------------------- - Label slow, array, extra, check_if_double_array; - Label fast_object_with_map_check, fast_object_without_map_check; - Label fast_double_with_map_check, fast_double_without_map_check; + Label slow, fast, array, extra; // Register usage. Register value = r0; Register key = r1; Register receiver = r2; Register elements = r3; // Elements array of the receiver. - Register elements_map = r6; - Register receiver_map = r7; // r4 and r5 are used as general scratch registers. // Check that the key is a smi. @@ -1290,26 +1281,35 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, // Check that the object isn't a smi. __ JumpIfSmi(receiver, &slow); // Get the map of the object. - __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); + __ ldr(r4, FieldMemOperand(receiver, HeapObject::kMapOffset)); // Check that the receiver does not require access checks. We need // to do this because this generic stub does not perform map checks. - __ ldrb(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset)); + __ ldrb(ip, FieldMemOperand(r4, Map::kBitFieldOffset)); __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded)); __ b(ne, &slow); // Check if the object is a JS array or not. - __ ldrb(r4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset)); + __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); __ cmp(r4, Operand(JS_ARRAY_TYPE)); __ b(eq, &array); // Check that the object is some kind of JSObject. 
- __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE)); + __ cmp(r4, Operand(FIRST_JS_RECEIVER_TYPE)); __ b(lt, &slow); + __ cmp(r4, Operand(JS_PROXY_TYPE)); + __ b(eq, &slow); + __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE)); + __ b(eq, &slow); // Object case: Check key against length in the elements array. __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); + // Check that the object is in fast mode and writable. + __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset)); + __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); + __ cmp(r4, ip); + __ b(ne, &slow); // Check array bounds. Both the key and the length of FixedArray are smis. __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset)); __ cmp(key, Operand(ip)); - __ b(lo, &fast_object_with_map_check); + __ b(lo, &fast); // Slow case, handle jump to runtime. __ bind(&slow); @@ -1330,31 +1330,21 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset)); __ cmp(key, Operand(ip)); __ b(hs, &slow); - __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); - __ cmp(elements_map, - Operand(masm->isolate()->factory()->fixed_array_map())); - __ b(ne, &check_if_double_array); // Calculate key + 1 as smi. STATIC_ASSERT(kSmiTag == 0); __ add(r4, key, Operand(Smi::FromInt(1))); __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset)); - __ b(&fast_object_without_map_check); - - __ bind(&check_if_double_array); - __ cmp(elements_map, - Operand(masm->isolate()->factory()->fixed_double_array_map())); - __ b(ne, &slow); - // Add 1 to key, and go to common element store code for doubles. - STATIC_ASSERT(kSmiTag == 0); - __ add(r4, key, Operand(Smi::FromInt(1))); - __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset)); - __ jmp(&fast_double_without_map_check); + __ b(&fast); // Array case: Get the length and the elements array from the JS // array. Check that the array is in fast mode (and writable); if it // is the length is always a smi. __ bind(&array); __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); + __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset)); + __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); + __ cmp(r4, ip); + __ b(ne, &slow); // Check the key against the length in the array. __ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset)); @@ -1362,57 +1352,18 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, __ b(hs, &extra); // Fall through to fast case. - __ bind(&fast_object_with_map_check); - Register scratch_value = r4; - Register address = r5; - __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); - __ cmp(elements_map, - Operand(masm->isolate()->factory()->fixed_array_map())); - __ b(ne, &fast_double_with_map_check); - __ bind(&fast_object_without_map_check); - // Smi stores don't require further checks. - Label non_smi_value; - __ JumpIfNotSmi(value, &non_smi_value); - // It's irrelevant whether array is smi-only or not when writing a smi. - __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ str(value, MemOperand(address)); - __ Ret(); - - __ bind(&non_smi_value); - // Escape to slow case when writing non-smi into smi-only array. - __ CheckFastObjectElements(receiver_map, scratch_value, &slow); - // Fast elements array, store the value to the elements backing store. 
- __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ str(value, MemOperand(address)); + __ bind(&fast); + // Fast case, store the value to the elements backing store. + __ add(r5, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ add(r5, r5, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ str(value, MemOperand(r5)); + // Skip write barrier if the written value is a smi. + __ tst(value, Operand(kSmiTagMask)); + __ Ret(eq); // Update write barrier for the elements array address. - __ mov(scratch_value, value); // Preserve the value which is returned. - __ RecordWrite(elements, - address, - scratch_value, - kLRHasNotBeenSaved, - kDontSaveFPRegs, - EMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); - __ Ret(); + __ sub(r4, r5, Operand(elements)); + __ RecordWrite(elements, Operand(r4), r5, r6); - __ bind(&fast_double_with_map_check); - // Check for fast double array case. If this fails, call through to the - // runtime. - __ cmp(elements_map, - Operand(masm->isolate()->factory()->fixed_double_array_map())); - __ b(ne, &slow); - __ bind(&fast_double_without_map_check); - __ StoreNumberToDoubleElements(value, - key, - receiver, - elements, - r4, - r5, - r6, - r7, - &slow); __ Ret(); } diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc index 84959397b6..30ccd05bee 100644 --- a/deps/v8/src/arm/lithium-arm.cc +++ b/deps/v8/src/arm/lithium-arm.cc @@ -212,11 +212,10 @@ void LCmpIDAndBranch::PrintDataTo(StringStream* stream) { } -void LIsNilAndBranch::PrintDataTo(StringStream* stream) { +void LIsNullAndBranch::PrintDataTo(StringStream* stream) { stream->Add("if "); InputAt(0)->PrintTo(stream); - stream->Add(kind() == kStrictEquality ? " === " : " == "); - stream->Add(nil() == kNullValue ? "null" : "undefined"); + stream->Add(is_strict() ? 
" === null" : " == null"); stream->Add(" then B%d else B%d", true_block_id(), false_block_id()); } @@ -712,9 +711,7 @@ LInstruction* LChunkBuilder::DefineFixedDouble( LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) { HEnvironment* hydrogen_env = current_block_->last_environment(); - int argument_index_accumulator = 0; - instr->set_environment(CreateEnvironment(hydrogen_env, - &argument_index_accumulator)); + instr->set_environment(CreateEnvironment(hydrogen_env)); return instr; } @@ -997,13 +994,10 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) { } -LEnvironment* LChunkBuilder::CreateEnvironment( - HEnvironment* hydrogen_env, - int* argument_index_accumulator) { +LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) { if (hydrogen_env == NULL) return NULL; - LEnvironment* outer = - CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator); + LEnvironment* outer = CreateEnvironment(hydrogen_env->outer()); int ast_id = hydrogen_env->ast_id(); ASSERT(ast_id != AstNode::kNoNumber); int value_count = hydrogen_env->length(); @@ -1013,6 +1007,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment( argument_count_, value_count, outer); + int argument_index = 0; for (int i = 0; i < value_count; ++i) { if (hydrogen_env->is_special_index(i)) continue; @@ -1021,7 +1016,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment( if (value->IsArgumentsObject()) { op = NULL; } else if (value->IsPushArgument()) { - op = new LArgument((*argument_index_accumulator)++); + op = new LArgument(argument_index++); } else { op = UseAny(value); } @@ -1449,9 +1444,9 @@ LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch( } -LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) { +LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) { ASSERT(instr->value()->representation().IsTagged()); - return new LIsNilAndBranch(UseRegisterAtStart(instr->value())); + return new LIsNullAndBranch(UseRegisterAtStart(instr->value())); } @@ -1739,7 +1734,7 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) { LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) { LLoadGlobalCell* result = new LLoadGlobalCell; - return instr->RequiresHoleCheck() + return instr->check_hole_value() ? 
AssignEnvironment(DefineAsRegister(result)) : DefineAsRegister(result); } @@ -1753,11 +1748,14 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) { LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) { - LOperand* temp = TempRegister(); - LOperand* value = UseTempRegister(instr->value()); - LInstruction* result = new LStoreGlobalCell(value, temp); - if (instr->RequiresHoleCheck()) result = AssignEnvironment(result); - return result; + if (instr->check_hole_value()) { + LOperand* temp = TempRegister(); + LOperand* value = UseRegister(instr->value()); + return AssignEnvironment(new LStoreGlobalCell(value, temp)); + } else { + LOperand* value = UseRegisterAtStart(instr->value()); + return new LStoreGlobalCell(value, NULL); + } } diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h index 73c7e459c3..8c18760fd1 100644 --- a/deps/v8/src/arm/lithium-arm.h +++ b/deps/v8/src/arm/lithium-arm.h @@ -107,7 +107,7 @@ class LCodeGen; V(Integer32ToDouble) \ V(InvokeFunction) \ V(IsConstructCallAndBranch) \ - V(IsNilAndBranch) \ + V(IsNullAndBranch) \ V(IsObjectAndBranch) \ V(IsSmiAndBranch) \ V(IsUndetectableAndBranch) \ @@ -627,17 +627,16 @@ class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> { }; -class LIsNilAndBranch: public LControlInstruction<1, 0> { +class LIsNullAndBranch: public LControlInstruction<1, 0> { public: - explicit LIsNilAndBranch(LOperand* value) { + explicit LIsNullAndBranch(LOperand* value) { inputs_[0] = value; } - DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch") - DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch) + DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch") + DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch) - EqualityKind kind() const { return hydrogen()->kind(); } - NilValue nil() const { return hydrogen()->nil(); } + bool is_strict() const { return hydrogen()->is_strict(); } virtual void PrintDataTo(StringStream* stream); }; @@ -2160,8 +2159,7 @@ class LChunkBuilder BASE_EMBEDDED { LInstruction* instr, int ast_id); void ClearInstructionPendingDeoptimizationEnvironment(); - LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env, - int* argument_index_accumulator); + LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env); void VisitInstruction(HInstruction* current); diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc index 70ef884816..f5d7449149 100644 --- a/deps/v8/src/arm/lithium-codegen-arm.cc +++ b/deps/v8/src/arm/lithium-codegen-arm.cc @@ -82,14 +82,6 @@ bool LCodeGen::GenerateCode() { status_ = GENERATING; CpuFeatures::Scope scope1(VFP3); CpuFeatures::Scope scope2(ARMv7); - - CodeStub::GenerateFPStubs(); - - // Open a frame scope to indicate that there is a frame on the stack. The - // NONE indicates that the scope shouldn't actually generate code to set up - // the frame (that is done in GeneratePrologue). - FrameScope frame_scope(masm_, StackFrame::NONE); - return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() && @@ -214,11 +206,13 @@ bool LCodeGen::GeneratePrologue() { // Load parameter from stack. __ ldr(r0, MemOperand(fp, parameter_offset)); // Store it in the context. - MemOperand target = ContextOperand(cp, var->index()); - __ str(r0, target); - // Update the write barrier. This clobbers r3 and r0. 
- __ RecordWriteContextSlot( - cp, target.offset(), r0, r3, kLRHasBeenSaved, kSaveFPRegs); + __ mov(r1, Operand(Context::SlotOffset(var->index()))); + __ str(r0, MemOperand(cp, r1)); + // Update the write barrier. This clobbers all involved + // registers, so we have to use two more registers to avoid + // clobbering cp. + __ mov(r2, Operand(cp)); + __ RecordWrite(r2, Operand(r1), r3, r0); } } Comment(";;; End allocate local context"); @@ -268,9 +262,6 @@ bool LCodeGen::GenerateDeferredCode() { for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { LDeferredCode* code = deferred_[i]; __ bind(code->entry()); - Comment(";;; Deferred code @%d: %s.", - code->instruction_index(), - code->instr()->Mnemonic()); code->Generate(); __ jmp(code->exit()); } @@ -748,7 +739,7 @@ void LCodeGen::RecordSafepoint( int deoptimization_index) { ASSERT(expected_safepoint_kind_ == kind); - const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands(); + const ZoneList<LOperand*>* operands = pointers->operands(); Safepoint safepoint = safepoints_.DefineSafepoint(masm(), kind, arguments, deoptimization_index); for (int i = 0; i < operands->length(); i++) { @@ -1041,7 +1032,6 @@ void LCodeGen::DoDivI(LDivI* instr) { virtual void Generate() { codegen()->DoDeferredBinaryOpStub(instr_, Token::DIV); } - virtual LInstruction* instr() { return instr_; } private: LDivI* instr_; }; @@ -1753,35 +1743,25 @@ void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) { } -void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) { +void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) { Register scratch = scratch0(); Register reg = ToRegister(instr->InputAt(0)); - int false_block = chunk_->LookupDestination(instr->false_block_id()); - // If the expression is known to be untagged or a smi, then it's definitely - // not null, and it can't be a an undetectable object. - if (instr->hydrogen()->representation().IsSpecialization() || - instr->hydrogen()->type().IsSmi()) { - EmitGoto(false_block); - return; - } + // TODO(fsc): If the expression is known to be a smi, then it's + // definitely not null. Jump to the false block. int true_block = chunk_->LookupDestination(instr->true_block_id()); - Heap::RootListIndex nil_value = instr->nil() == kNullValue ? - Heap::kNullValueRootIndex : - Heap::kUndefinedValueRootIndex; - __ LoadRoot(ip, nil_value); + int false_block = chunk_->LookupDestination(instr->false_block_id()); + + __ LoadRoot(ip, Heap::kNullValueRootIndex); __ cmp(reg, ip); - if (instr->kind() == kStrictEquality) { + if (instr->is_strict()) { EmitBranch(true_block, false_block, eq); } else { - Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ? - Heap::kUndefinedValueRootIndex : - Heap::kNullValueRootIndex; Label* true_label = chunk_->GetAssemblyLabel(true_block); Label* false_label = chunk_->GetAssemblyLabel(false_block); __ b(eq, true_label); - __ LoadRoot(ip, other_nil_value); + __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); __ cmp(reg, ip); __ b(eq, true_label); __ JumpIfSmi(reg, false_label); @@ -1938,36 +1918,28 @@ void LCodeGen::EmitClassOfTest(Label* is_true, ASSERT(!input.is(temp)); ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register. __ JumpIfSmi(input, is_false); + __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE); + __ b(lt, is_false); + // Map is now in temp. + // Functions have class 'Function'. 
+ __ CompareInstanceType(temp, temp2, FIRST_CALLABLE_SPEC_OBJECT_TYPE); if (class_name->IsEqualTo(CStrVector("Function"))) { - // Assuming the following assertions, we can use the same compares to test - // for both being a function type and being in the object type range. - STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); - STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE == - FIRST_SPEC_OBJECT_TYPE + 1); - STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == - LAST_SPEC_OBJECT_TYPE - 1); - STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); - __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE); - __ b(lt, is_false); - __ b(eq, is_true); - __ cmp(temp2, Operand(LAST_SPEC_OBJECT_TYPE)); - __ b(eq, is_true); + __ b(ge, is_true); } else { - // Faster code path to avoid two compares: subtract lower bound from the - // actual type and do a signed compare with the width of the type range. - __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset)); - __ ldrb(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset)); - __ sub(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); - __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE - - FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); - __ b(gt, is_false); + __ b(ge, is_false); } - // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range. // Check if the constructor in the map is a function. __ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset)); + // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type and + // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after + // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter. + STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE); + STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE == + LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1); + // Objects with a non-function constructor have class 'Object'. __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE); if (class_name->IsEqualTo(CStrVector("Object"))) { @@ -2044,8 +2016,9 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { virtual void Generate() { codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_); } - virtual LInstruction* instr() { return instr_; } + Label* map_check() { return &map_check_; } + private: LInstanceOfKnownGlobal* instr_; Label map_check_; @@ -2207,7 +2180,7 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { Register result = ToRegister(instr->result()); __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell()))); __ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset)); - if (instr->hydrogen()->RequiresHoleCheck()) { + if (instr->hydrogen()->check_hole_value()) { __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); __ cmp(result, ip); DeoptimizeIf(eq, instr->environment()); @@ -2230,7 +2203,6 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { Register value = ToRegister(instr->InputAt(0)); Register scratch = scratch0(); - Register scratch2 = ToRegister(instr->TempAt(0)); // Load the cell. __ mov(scratch, Operand(Handle<Object>(instr->hydrogen()->cell()))); @@ -2239,7 +2211,8 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { // been deleted from the property dictionary. In that case, we need // to update the property details in the property dictionary to mark // it as no longer deleted. 
- if (instr->hydrogen()->RequiresHoleCheck()) { + if (instr->hydrogen()->check_hole_value()) { + Register scratch2 = ToRegister(instr->TempAt(0)); __ ldr(scratch2, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset)); __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); @@ -2249,15 +2222,6 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { // Store the value. __ str(value, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset)); - - // Cells are always in the remembered set. - __ RecordWriteField(scratch, - JSGlobalPropertyCell::kValueOffset, - value, - scratch2, - kLRHasBeenSaved, - kSaveFPRegs, - OMIT_REMEMBERED_SET); } @@ -2283,15 +2247,10 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { Register context = ToRegister(instr->context()); Register value = ToRegister(instr->value()); - MemOperand target = ContextOperand(context, instr->slot_index()); - __ str(value, target); + __ str(value, ContextOperand(context, instr->slot_index())); if (instr->needs_write_barrier()) { - __ RecordWriteContextSlot(context, - target.offset(), - value, - scratch0(), - kLRHasBeenSaved, - kSaveFPRegs); + int offset = Context::SlotOffset(instr->slot_index()); + __ RecordWrite(context, Operand(offset), value, scratch0()); } } @@ -2541,9 +2500,13 @@ void LCodeGen::DoLoadKeyedFastDoubleElement( Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); } - __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32))); - __ cmp(scratch, Operand(kHoleNanUpper32)); - DeoptimizeIf(eq, instr->environment()); + if (instr->hydrogen()->RequiresHoleCheck()) { + // TODO(danno): If no hole check is required, there is no need to allocate + // elements into a temporary register, instead scratch can be used. + __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32))); + __ cmp(scratch, Operand(kHoleNanUpper32)); + DeoptimizeIf(eq, instr->environment()); + } __ vldr(result, elements, 0); } @@ -2614,7 +2577,6 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement( case EXTERNAL_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -2944,7 +2906,6 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { virtual void Generate() { codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); } - virtual LInstruction* instr() { return instr_; } private: LUnaryMathOperation* instr_; }; @@ -3241,7 +3202,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) { ASSERT(ToRegister(instr->result()).is(r0)); int arity = instr->arity(); - CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS); + CallFunctionStub stub(arity, RECEIVER_MIGHT_BE_IMPLICIT); CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); __ Drop(1); __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); @@ -3301,8 +3262,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { __ str(value, FieldMemOperand(object, offset)); if (instr->needs_write_barrier()) { // Update the write barrier for the object for in-object properties. - __ RecordWriteField( - object, offset, value, scratch, kLRHasBeenSaved, kSaveFPRegs); + __ RecordWrite(object, Operand(offset), value, scratch); } } else { __ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset)); @@ -3310,8 +3270,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { if (instr->needs_write_barrier()) { // Update the write barrier for the properties array. 
// object is used as a scratch register. - __ RecordWriteField( - scratch, offset, value, object, kLRHasBeenSaved, kSaveFPRegs); + __ RecordWrite(scratch, Operand(offset), value, object); } } } @@ -3342,13 +3301,6 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) { Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg; Register scratch = scratch0(); - // This instruction cannot handle the FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS - // conversion, so it deopts in that case. - if (instr->hydrogen()->ValueNeedsSmiCheck()) { - __ tst(value, Operand(kSmiTagMask)); - DeoptimizeIf(ne, instr->environment()); - } - // Do the store. if (instr->key()->IsConstantOperand()) { ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); @@ -3363,8 +3315,8 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) { if (instr->hydrogen()->NeedsWriteBarrier()) { // Compute address of modified element and store it into key register. - __ add(key, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ RecordWrite(elements, key, value, kLRHasBeenSaved, kSaveFPRegs); + __ add(key, scratch, Operand(FixedArray::kHeaderSize)); + __ RecordWrite(elements, key, value); } } @@ -3465,7 +3417,6 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement( case EXTERNAL_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -3501,7 +3452,6 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); } - virtual LInstruction* instr() { return instr_; } private: LStringCharCodeAt* instr_; }; @@ -3625,7 +3575,6 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); } - virtual LInstruction* instr() { return instr_; } private: LStringCharFromCode* instr_; }; @@ -3697,7 +3646,6 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) { DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); } - virtual LInstruction* instr() { return instr_; } private: LNumberTagI* instr_; }; @@ -3763,7 +3711,6 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) { DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); } - virtual LInstruction* instr() { return instr_; } private: LNumberTagD* instr_; }; @@ -3872,6 +3819,16 @@ void LCodeGen::EmitNumberUntagD(Register input_reg, } +class DeferredTaggedToI: public LDeferredCode { + public: + DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) + : LDeferredCode(codegen), instr_(instr) { } + virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); } + private: + LTaggedToI* instr_; +}; + + void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { Register input_reg = ToRegister(instr->InputAt(0)); Register scratch1 = scratch0(); @@ -3954,16 +3911,6 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { void LCodeGen::DoTaggedToI(LTaggedToI* instr) { - class DeferredTaggedToI: public LDeferredCode { - public: 
- DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LTaggedToI* instr_;
- };
-
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
ASSERT(input->Equals(instr->result()));
@@ -4396,12 +4343,10 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
final_branch_condition = ne;
} else if (type_name->Equals(heap()->function_symbol())) {
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ JumpIfSmi(input, false_label);
- __ CompareObjectType(input, scratch, input, JS_FUNCTION_TYPE);
- __ b(eq, true_label);
- __ cmp(input, Operand(JS_FUNCTION_PROXY_TYPE));
- final_branch_condition = eq;
+ __ CompareObjectType(input, input, scratch,
+ FIRST_CALLABLE_SPEC_OBJECT_TYPE);
+ final_branch_condition = ge;
} else if (type_name->Equals(heap()->object_symbol())) {
__ JumpIfSmi(input, false_label);
@@ -4523,7 +4468,6 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
- virtual LInstruction* instr() { return instr_; }
private:
LStackCheck* instr_;
};
diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h
index 711e4595e7..ead8489034 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.h
+++ b/deps/v8/src/arm/lithium-codegen-arm.h
@@ -376,20 +376,16 @@ class LCodeGen BASE_EMBEDDED {
class LDeferredCode: public ZoneObject {
public:
explicit LDeferredCode(LCodeGen* codegen)
- : codegen_(codegen),
- external_exit_(NULL),
- instruction_index_(codegen->current_instruction_) {
+ : codegen_(codegen), external_exit_(NULL) {
codegen->AddDeferredCode(this);
}
virtual ~LDeferredCode() { }
virtual void Generate() = 0;
- virtual LInstruction* instr() = 0;
void SetExit(Label *exit) { external_exit_ = exit; }
Label* entry() { return &entry_; }
Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
- int instruction_index() const { return instruction_index_; }
protected:
LCodeGen* codegen() const { return codegen_; }
@@ -400,7 +396,6 @@ class LDeferredCode: public ZoneObject {
Label entry_;
Label exit_;
Label* external_exit_;
- int instruction_index_;
};
} } // namespace v8::internal
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index b274b2fe42..f37f310218 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -42,8 +42,7 @@ namespace internal {
MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
- allow_stub_calls_(true),
- has_frame_(false) {
+ allow_stub_calls_(true) {
if (isolate() != NULL) {
code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
isolate());
@@ -407,6 +406,32 @@ void MacroAssembler::StoreRoot(Register source,
}
+void MacroAssembler::RecordWriteHelper(Register object,
+ Register address,
+ Register scratch) {
+ if (emit_debug_code()) {
+ // Check that the object is not in new space.
+ Label not_in_new_space;
+ InNewSpace(object, scratch, ne, &not_in_new_space);
+ Abort("new-space object passed to RecordWriteHelper");
+ bind(&not_in_new_space);
+ }
+
+ // Calculate page address.
+ Bfc(object, 0, kPageSizeBits);
+
+ // Calculate region number.
+ Ubfx(address, address, Page::kRegionSizeLog2, + kPageSizeBits - Page::kRegionSizeLog2); + + // Mark region dirty. + ldr(scratch, MemOperand(object, Page::kDirtyFlagOffset)); + mov(ip, Operand(1)); + orr(scratch, scratch, Operand(ip, LSL, address)); + str(scratch, MemOperand(object, Page::kDirtyFlagOffset)); +} + + void MacroAssembler::InNewSpace(Register object, Register scratch, Condition cond, @@ -418,52 +443,38 @@ void MacroAssembler::InNewSpace(Register object, } -void MacroAssembler::RecordWriteField( - Register object, - int offset, - Register value, - Register dst, - LinkRegisterStatus lr_status, - SaveFPRegsMode save_fp, - RememberedSetAction remembered_set_action, - SmiCheck smi_check) { - // First, check if a write barrier is even needed. The tests below - // catch stores of Smis. - Label done; +// Will clobber 4 registers: object, offset, scratch, ip. The +// register 'object' contains a heap object pointer. The heap object +// tag is shifted away. +void MacroAssembler::RecordWrite(Register object, + Operand offset, + Register scratch0, + Register scratch1) { + // The compiled code assumes that record write doesn't change the + // context register, so we check that none of the clobbered + // registers are cp. + ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp)); - // Skip barrier if writing a smi. - if (smi_check == INLINE_SMI_CHECK) { - JumpIfSmi(value, &done); - } + Label done; - // Although the object register is tagged, the offset is relative to the start - // of the object, so so offset must be a multiple of kPointerSize. - ASSERT(IsAligned(offset, kPointerSize)); + // First, test that the object is not in the new space. We cannot set + // region marks for new space pages. + InNewSpace(object, scratch0, eq, &done); - add(dst, object, Operand(offset - kHeapObjectTag)); - if (emit_debug_code()) { - Label ok; - tst(dst, Operand((1 << kPointerSizeLog2) - 1)); - b(eq, &ok); - stop("Unaligned cell in write barrier"); - bind(&ok); - } + // Add offset into the object. + add(scratch0, object, offset); - RecordWrite(object, - dst, - value, - lr_status, - save_fp, - remembered_set_action, - OMIT_SMI_CHECK); + // Record the actual write. + RecordWriteHelper(object, scratch0, scratch1); bind(&done); - // Clobber clobbered input registers when running with the debug-code flag + // Clobber all input registers when running with the debug-code flag // turned on to provoke errors. if (emit_debug_code()) { - mov(value, Operand(BitCast<int32_t>(kZapValue + 4))); - mov(dst, Operand(BitCast<int32_t>(kZapValue + 8))); + mov(object, Operand(BitCast<int32_t>(kZapValue))); + mov(scratch0, Operand(BitCast<int32_t>(kZapValue))); + mov(scratch1, Operand(BitCast<int32_t>(kZapValue))); } } @@ -473,94 +484,29 @@ void MacroAssembler::RecordWriteField( // tag is shifted away. void MacroAssembler::RecordWrite(Register object, Register address, - Register value, - LinkRegisterStatus lr_status, - SaveFPRegsMode fp_mode, - RememberedSetAction remembered_set_action, - SmiCheck smi_check) { + Register scratch) { // The compiled code assumes that record write doesn't change the // context register, so we check that none of the clobbered // registers are cp. - ASSERT(!address.is(cp) && !value.is(cp)); + ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp)); Label done; - if (smi_check == INLINE_SMI_CHECK) { - ASSERT_EQ(0, kSmiTag); - tst(value, Operand(kSmiTagMask)); - b(eq, &done); - } - - CheckPageFlag(value, - value, // Used as scratch. 
- MemoryChunk::kPointersToHereAreInterestingMask, - eq, - &done); - CheckPageFlag(object, - value, // Used as scratch. - MemoryChunk::kPointersFromHereAreInterestingMask, - eq, - &done); + // First, test that the object is not in the new space. We cannot set + // region marks for new space pages. + InNewSpace(object, scratch, eq, &done); // Record the actual write. - if (lr_status == kLRHasNotBeenSaved) { - push(lr); - } - RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode); - CallStub(&stub); - if (lr_status == kLRHasNotBeenSaved) { - pop(lr); - } + RecordWriteHelper(object, address, scratch); bind(&done); - // Clobber clobbered registers when running with the debug-code flag + // Clobber all input registers when running with the debug-code flag // turned on to provoke errors. if (emit_debug_code()) { - mov(address, Operand(BitCast<int32_t>(kZapValue + 12))); - mov(value, Operand(BitCast<int32_t>(kZapValue + 16))); - } -} - - -void MacroAssembler::RememberedSetHelper(Register object, // For debug tests. - Register address, - Register scratch, - SaveFPRegsMode fp_mode, - RememberedSetFinalAction and_then) { - Label done; - if (FLAG_debug_code) { - Label ok; - JumpIfNotInNewSpace(object, scratch, &ok); - stop("Remembered set pointer is in new space"); - bind(&ok); - } - // Load store buffer top. - ExternalReference store_buffer = - ExternalReference::store_buffer_top(isolate()); - mov(ip, Operand(store_buffer)); - ldr(scratch, MemOperand(ip)); - // Store pointer to buffer and increment buffer top. - str(address, MemOperand(scratch, kPointerSize, PostIndex)); - // Write back new top of buffer. - str(scratch, MemOperand(ip)); - // Call stub on end of buffer. - // Check for end of buffer. - tst(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit)); - if (and_then == kFallThroughAtEnd) { - b(eq, &done); - } else { - ASSERT(and_then == kReturnAtEnd); - Ret(ne); - } - push(lr); - StoreBufferOverflowStub store_buffer_overflow = - StoreBufferOverflowStub(fp_mode); - CallStub(&store_buffer_overflow); - pop(lr); - bind(&done); - if (and_then == kReturnAtEnd) { - Ret(); + mov(object, Operand(BitCast<int32_t>(kZapValue))); + mov(address, Operand(BitCast<int32_t>(kZapValue))); + mov(scratch, Operand(BitCast<int32_t>(kZapValue))); } } @@ -1015,9 +961,6 @@ void MacroAssembler::InvokeCode(Register code, InvokeFlag flag, const CallWrapper& call_wrapper, CallKind call_kind) { - // You can't call a function without a valid frame. - ASSERT(flag == JUMP_FUNCTION || has_frame()); - Label done; InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag, @@ -1045,9 +988,6 @@ void MacroAssembler::InvokeCode(Handle<Code> code, RelocInfo::Mode rmode, InvokeFlag flag, CallKind call_kind) { - // You can't call a function without a valid frame. - ASSERT(flag == JUMP_FUNCTION || has_frame()); - Label done; InvokePrologue(expected, actual, code, no_reg, &done, flag, @@ -1071,9 +1011,6 @@ void MacroAssembler::InvokeFunction(Register fun, InvokeFlag flag, const CallWrapper& call_wrapper, CallKind call_kind) { - // You can't call a function without a valid frame. - ASSERT(flag == JUMP_FUNCTION || has_frame()); - // Contract with called JS functions requires that function is passed in r1. ASSERT(fun.is(r1)); @@ -1098,9 +1035,6 @@ void MacroAssembler::InvokeFunction(JSFunction* function, const ParameterCount& actual, InvokeFlag flag, CallKind call_kind) { - // You can't call a function without a valid frame. 
- ASSERT(flag == JUMP_FUNCTION || has_frame()); - ASSERT(function->is_compiled()); // Get the function and setup the context. @@ -1156,10 +1090,10 @@ void MacroAssembler::IsObjectJSStringType(Register object, #ifdef ENABLE_DEBUGGER_SUPPORT void MacroAssembler::DebugBreak() { + ASSERT(allow_stub_calls()); mov(r0, Operand(0, RelocInfo::NONE)); mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate()))); CEntryStub ces(1); - ASSERT(AllowThisStubCall(&ces)); Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); } #endif @@ -1859,127 +1793,13 @@ void MacroAssembler::CompareRoot(Register obj, void MacroAssembler::CheckFastElements(Register map, Register scratch, Label* fail) { - STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); - STATIC_ASSERT(FAST_ELEMENTS == 1); + STATIC_ASSERT(FAST_ELEMENTS == 0); ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue)); b(hi, fail); } -void MacroAssembler::CheckFastObjectElements(Register map, - Register scratch, - Label* fail) { - STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); - STATIC_ASSERT(FAST_ELEMENTS == 1); - ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); - cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue)); - b(ls, fail); - cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue)); - b(hi, fail); -} - - -void MacroAssembler::CheckFastSmiOnlyElements(Register map, - Register scratch, - Label* fail) { - STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); - ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); - cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue)); - b(hi, fail); -} - - -void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, - Register key_reg, - Register receiver_reg, - Register elements_reg, - Register scratch1, - Register scratch2, - Register scratch3, - Register scratch4, - Label* fail) { - Label smi_value, maybe_nan, have_double_value, is_nan, done; - Register mantissa_reg = scratch2; - Register exponent_reg = scratch3; - - // Handle smi values specially. - JumpIfSmi(value_reg, &smi_value); - - // Ensure that the object is a heap number - CheckMap(value_reg, - scratch1, - isolate()->factory()->heap_number_map(), - fail, - DONT_DO_SMI_CHECK); - - // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000 - // in the exponent. - mov(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32)); - ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset)); - cmp(exponent_reg, scratch1); - b(ge, &maybe_nan); - - ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset)); - - bind(&have_double_value); - add(scratch1, elements_reg, - Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize)); - str(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize)); - uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32); - str(exponent_reg, FieldMemOperand(scratch1, offset)); - jmp(&done); - - bind(&maybe_nan); - // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise - // it's an Infinity, and the non-NaN code path applies. - b(gt, &is_nan); - ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset)); - cmp(mantissa_reg, Operand(0)); - b(eq, &have_double_value); - bind(&is_nan); - // Load canonical NaN for storing into the double array. 
- uint64_t nan_int64 = BitCast<uint64_t>( - FixedDoubleArray::canonical_not_the_hole_nan_as_double()); - mov(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64))); - mov(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32))); - jmp(&have_double_value); - - bind(&smi_value); - add(scratch1, elements_reg, - Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); - add(scratch1, scratch1, - Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize)); - // scratch1 is now effective address of the double element - - FloatingPointHelper::Destination destination; - if (CpuFeatures::IsSupported(VFP3)) { - destination = FloatingPointHelper::kVFPRegisters; - } else { - destination = FloatingPointHelper::kCoreRegisters; - } - - Register untagged_value = receiver_reg; - SmiUntag(untagged_value, value_reg); - FloatingPointHelper::ConvertIntToDouble(this, - untagged_value, - destination, - d0, - mantissa_reg, - exponent_reg, - scratch4, - s2); - if (destination == FloatingPointHelper::kVFPRegisters) { - CpuFeatures::Scope scope(VFP3); - vstr(d0, scratch1, 0); - } else { - str(mantissa_reg, MemOperand(scratch1, 0)); - str(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes)); - } - bind(&done); -} - - void MacroAssembler::CheckMap(Register obj, Register scratch, Handle<Map> map, @@ -2075,13 +1895,13 @@ void MacroAssembler::TryGetFunctionPrototype(Register function, void MacroAssembler::CallStub(CodeStub* stub, Condition cond) { - ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs. + ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs. Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond); } MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond) { - ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs. + ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs. Object* result; { MaybeObject* maybe_result = stub->TryGetCode(); if (!maybe_result->ToObject(&result)) return maybe_result; @@ -2093,12 +1913,13 @@ MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond) { void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) { - ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe()); + ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs. Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond); } MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub, Condition cond) { + ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs. 
Object* result; { MaybeObject* maybe_result = stub->TryGetCode(); if (!maybe_result->ToObject(&result)) return maybe_result; @@ -2201,12 +2022,6 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn( } -bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { - if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false; - return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(); -} - - void MacroAssembler::IllegalOperation(int num_arguments) { if (num_arguments > 0) { add(sp, sp, Operand(num_arguments * kPointerSize)); @@ -2602,7 +2417,8 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) { const Runtime::Function* function = Runtime::FunctionForId(id); mov(r0, Operand(function->nargs)); mov(r1, Operand(ExternalReference(function, isolate()))); - CEntryStub stub(1, kSaveFPRegs); + CEntryStub stub(1); + stub.SaveDoubles(); CallStub(&stub); } @@ -2675,9 +2491,6 @@ MaybeObject* MacroAssembler::TryJumpToExternalReference( void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag, const CallWrapper& call_wrapper) { - // You can't call a builtin without a valid frame. - ASSERT(flag == JUMP_FUNCTION || has_frame()); - GetBuiltinEntry(r2, id); if (flag == CALL_FUNCTION) { call_wrapper.BeforeCall(CallSize(r2)); @@ -2809,20 +2622,14 @@ void MacroAssembler::Abort(const char* msg) { RecordComment(msg); } #endif + // Disable stub call restrictions to always allow calls to abort. + AllowStubCallsScope allow_scope(this, true); mov(r0, Operand(p0)); push(r0); mov(r0, Operand(Smi::FromInt(p1 - p0))); push(r0); - // Disable stub call restrictions to always allow calls to abort. - if (!has_frame_) { - // We don't actually want to generate a pile of code for this, so just - // claim there is a stack frame, without generating one. - FrameScope scope(this, StackFrame::NONE); - CallRuntime(Runtime::kAbort, 2); - } else { - CallRuntime(Runtime::kAbort, 2); - } + CallRuntime(Runtime::kAbort, 2); // will not return here if (is_const_pool_blocked()) { // If the calling code cares about the exact number of @@ -3123,19 +2930,6 @@ void MacroAssembler::CopyBytes(Register src, } -void MacroAssembler::InitializeFieldsWithFiller(Register start_offset, - Register end_offset, - Register filler) { - Label loop, entry; - b(&entry); - bind(&loop); - str(filler, MemOperand(start_offset, kPointerSize, PostIndex)); - bind(&entry); - cmp(start_offset, end_offset); - b(lt, &loop); -} - - void MacroAssembler::CountLeadingZeros(Register zeros, // Answer. Register source, // Input. 
Register scratch) { @@ -3295,15 +3089,23 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg, void MacroAssembler::CallCFunction(ExternalReference function, int num_reg_arguments, int num_double_arguments) { - mov(ip, Operand(function)); - CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments); + CallCFunctionHelper(no_reg, + function, + ip, + num_reg_arguments, + num_double_arguments); } void MacroAssembler::CallCFunction(Register function, - int num_reg_arguments, - int num_double_arguments) { - CallCFunctionHelper(function, num_reg_arguments, num_double_arguments); + Register scratch, + int num_reg_arguments, + int num_double_arguments) { + CallCFunctionHelper(function, + ExternalReference::the_hole_value_location(isolate()), + scratch, + num_reg_arguments, + num_double_arguments); } @@ -3314,15 +3116,17 @@ void MacroAssembler::CallCFunction(ExternalReference function, void MacroAssembler::CallCFunction(Register function, + Register scratch, int num_arguments) { - CallCFunction(function, num_arguments, 0); + CallCFunction(function, scratch, num_arguments, 0); } void MacroAssembler::CallCFunctionHelper(Register function, + ExternalReference function_reference, + Register scratch, int num_reg_arguments, int num_double_arguments) { - ASSERT(has_frame()); // Make sure that the stack is aligned before calling a C function unless // running in the simulator. The simulator has its own alignment check which // provides more information. @@ -3346,6 +3150,10 @@ void MacroAssembler::CallCFunctionHelper(Register function, // Just call directly. The function called cannot cause a GC, or // allow preemption, so the return address in the link register // stays correct. + if (function.is(no_reg)) { + mov(scratch, Operand(function_reference)); + function = scratch; + } Call(function); int stack_passed_arguments = CalculateStackPassedWords( num_reg_arguments, num_double_arguments); @@ -3377,185 +3185,6 @@ void MacroAssembler::GetRelocatedValueLocation(Register ldr_location, } -void MacroAssembler::CheckPageFlag( - Register object, - Register scratch, - int mask, - Condition cc, - Label* condition_met) { - and_(scratch, object, Operand(~Page::kPageAlignmentMask)); - ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); - tst(scratch, Operand(mask)); - b(cc, condition_met); -} - - -void MacroAssembler::JumpIfBlack(Register object, - Register scratch0, - Register scratch1, - Label* on_black) { - HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern. - ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); -} - - -void MacroAssembler::HasColor(Register object, - Register bitmap_scratch, - Register mask_scratch, - Label* has_color, - int first_bit, - int second_bit) { - ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg)); - - GetMarkBits(object, bitmap_scratch, mask_scratch); - - Label other_color, word_boundary; - ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); - tst(ip, Operand(mask_scratch)); - b(first_bit == 1 ? eq : ne, &other_color); - // Shift left 1 by adding. - add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC); - b(eq, &word_boundary); - tst(ip, Operand(mask_scratch)); - b(second_bit == 1 ? ne : eq, has_color); - jmp(&other_color); - - bind(&word_boundary); - ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize)); - tst(ip, Operand(1)); - b(second_bit == 1 ? ne : eq, has_color); - bind(&other_color); -} - - -// Detect some, but not all, common pointer-free objects. 
This is used by the -// incremental write barrier which doesn't care about oddballs (they are always -// marked black immediately so this code is not hit). -void MacroAssembler::JumpIfDataObject(Register value, - Register scratch, - Label* not_data_object) { - Label is_data_object; - ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset)); - CompareRoot(scratch, Heap::kHeapNumberMapRootIndex); - b(eq, &is_data_object); - ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); - ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); - // If it's a string and it's not a cons string then it's an object containing - // no GC pointers. - ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); - tst(scratch, Operand(kIsIndirectStringMask | kIsNotStringMask)); - b(ne, not_data_object); - bind(&is_data_object); -} - - -void MacroAssembler::GetMarkBits(Register addr_reg, - Register bitmap_reg, - Register mask_reg) { - ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg)); - and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask)); - Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2); - const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2; - Ubfx(ip, addr_reg, kLowBits, kPageSizeBits - kLowBits); - add(bitmap_reg, bitmap_reg, Operand(ip, LSL, kPointerSizeLog2)); - mov(ip, Operand(1)); - mov(mask_reg, Operand(ip, LSL, mask_reg)); -} - - -void MacroAssembler::EnsureNotWhite( - Register value, - Register bitmap_scratch, - Register mask_scratch, - Register load_scratch, - Label* value_is_white_and_not_data) { - ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ip)); - GetMarkBits(value, bitmap_scratch, mask_scratch); - - // If the value is black or grey we don't need to do anything. - ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0); - ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); - ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0); - ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0); - - Label done; - - // Since both black and grey have a 1 in the first position and white does - // not have a 1 there we only need to check one bit. - ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); - tst(mask_scratch, load_scratch); - b(ne, &done); - - if (FLAG_debug_code) { - // Check for impossible bit pattern. - Label ok; - // LSL may overflow, making the check conservative. - tst(load_scratch, Operand(mask_scratch, LSL, 1)); - b(eq, &ok); - stop("Impossible marking bit pattern"); - bind(&ok); - } - - // Value is white. We check whether it is data that doesn't need scanning. - // Currently only checks for HeapNumber and non-cons strings. - Register map = load_scratch; // Holds map while checking type. - Register length = load_scratch; // Holds length of object after testing type. - Label is_data_object; - - // Check for heap-number - ldr(map, FieldMemOperand(value, HeapObject::kMapOffset)); - CompareRoot(map, Heap::kHeapNumberMapRootIndex); - mov(length, Operand(HeapNumber::kSize), LeaveCC, eq); - b(eq, &is_data_object); - - // Check for strings. - ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); - ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); - // If it's a string and it's not a cons string then it's an object containing - // no GC pointers. 
- Register instance_type = load_scratch; - ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset)); - tst(instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask)); - b(ne, value_is_white_and_not_data); - // It's a non-indirect (non-cons and non-slice) string. - // If it's external, the length is just ExternalString::kSize. - // Otherwise it's String::kHeaderSize + string->length() * (1 or 2). - // External strings are the only ones with the kExternalStringTag bit - // set. - ASSERT_EQ(0, kSeqStringTag & kExternalStringTag); - ASSERT_EQ(0, kConsStringTag & kExternalStringTag); - tst(instance_type, Operand(kExternalStringTag)); - mov(length, Operand(ExternalString::kSize), LeaveCC, ne); - b(ne, &is_data_object); - - // Sequential string, either ASCII or UC16. - // For ASCII (char-size of 1) we shift the smi tag away to get the length. - // For UC16 (char-size of 2) we just leave the smi tag in place, thereby - // getting the length multiplied by 2. - ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4); - ASSERT(kSmiTag == 0 && kSmiTagSize == 1); - ldr(ip, FieldMemOperand(value, String::kLengthOffset)); - tst(instance_type, Operand(kStringEncodingMask)); - mov(ip, Operand(ip, LSR, 1), LeaveCC, ne); - add(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask)); - and_(length, length, Operand(~kObjectAlignmentMask)); - - bind(&is_data_object); - // Value is a data object, and it is white. Mark it black. Since we know - // that the object is white we can make it black by flipping one bit. - ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); - orr(ip, ip, Operand(mask_scratch)); - str(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); - - and_(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask)); - ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset)); - add(ip, ip, Operand(length)); - str(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset)); - - bind(&done); -} - - void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) { Usat(output_reg, 8, Operand(input_reg)); } @@ -3605,17 +3234,6 @@ void MacroAssembler::LoadInstanceDescriptors(Register map, } -bool AreAliased(Register r1, Register r2, Register r3, Register r4) { - if (r1.is(r2)) return true; - if (r1.is(r3)) return true; - if (r1.is(r4)) return true; - if (r2.is(r3)) return true; - if (r2.is(r4)) return true; - if (r3.is(r4)) return true; - return false; -} - - CodePatcher::CodePatcher(byte* address, int instructions) : address_(address), instructions_(instructions), diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h index 8ee468a917..6084fde2d3 100644 --- a/deps/v8/src/arm/macro-assembler-arm.h +++ b/deps/v8/src/arm/macro-assembler-arm.h @@ -29,7 +29,6 @@ #define V8_ARM_MACRO_ASSEMBLER_ARM_H_ #include "assembler.h" -#include "frames.h" #include "v8globals.h" namespace v8 { @@ -80,14 +79,6 @@ enum ObjectToDoubleFlags { }; -enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET }; -enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK }; -enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved }; - - -bool AreAliased(Register r1, Register r2, Register r3, Register r4); - - // MacroAssembler implements a collection of frequently used macros. 
class MacroAssembler: public Assembler { public: @@ -166,126 +157,40 @@ class MacroAssembler: public Assembler { Heap::RootListIndex index, Condition cond = al); - // --------------------------------------------------------------------------- - // GC Support - - void IncrementalMarkingRecordWriteHelper(Register object, - Register value, - Register address); - - enum RememberedSetFinalAction { - kReturnAtEnd, - kFallThroughAtEnd - }; - - // Record in the remembered set the fact that we have a pointer to new space - // at the address pointed to by the addr register. Only works if addr is not - // in new space. - void RememberedSetHelper(Register object, // Used for debug code. - Register addr, - Register scratch, - SaveFPRegsMode save_fp, - RememberedSetFinalAction and_then); - - void CheckPageFlag(Register object, - Register scratch, - int mask, - Condition cc, - Label* condition_met); - - // Check if object is in new space. Jumps if the object is not in new space. - // The register scratch can be object itself, but scratch will be clobbered. - void JumpIfNotInNewSpace(Register object, - Register scratch, - Label* branch) { - InNewSpace(object, scratch, ne, branch); - } - // Check if object is in new space. Jumps if the object is in new space. - // The register scratch can be object itself, but it will be clobbered. - void JumpIfInNewSpace(Register object, - Register scratch, - Label* branch) { - InNewSpace(object, scratch, eq, branch); - } + // Check if object is in new space. + // scratch can be object itself, but it will be clobbered. + void InNewSpace(Register object, + Register scratch, + Condition cond, // eq for new space, ne otherwise + Label* branch); - // Check if an object has a given incremental marking color. - void HasColor(Register object, - Register scratch0, - Register scratch1, - Label* has_color, - int first_bit, - int second_bit); - void JumpIfBlack(Register object, - Register scratch0, - Register scratch1, - Label* on_black); - - // Checks the color of an object. If the object is already grey or black - // then we just fall through, since it is already live. If it is white and - // we can determine that it doesn't need to be scanned, then we just mark it - // black and fall through. For the rest we jump to the label so the - // incremental marker can fix its assumptions. - void EnsureNotWhite(Register object, - Register scratch1, - Register scratch2, - Register scratch3, - Label* object_is_white_and_not_data); + // For the page containing |object| mark the region covering [address] + // dirty. The object address must be in the first 8K of an allocated page. + void RecordWriteHelper(Register object, + Register address, + Register scratch); - // Detects conservatively whether an object is data-only, ie it does need to - // be scanned by the garbage collector. - void JumpIfDataObject(Register value, - Register scratch, - Label* not_data_object); - - // Notify the garbage collector that we wrote a pointer into an object. - // |object| is the object being stored into, |value| is the object being - // stored. value and scratch registers are clobbered by the operation. - // The offset is the offset from the start of the object, not the offset from - // the tagged HeapObject pointer. For use with FieldOperand(reg, off). 
- void RecordWriteField( - Register object, - int offset, - Register value, - Register scratch, - LinkRegisterStatus lr_status, - SaveFPRegsMode save_fp, - RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, - SmiCheck smi_check = INLINE_SMI_CHECK); - - // As above, but the offset has the tag presubtracted. For use with - // MemOperand(reg, off). - inline void RecordWriteContextSlot( - Register context, - int offset, - Register value, - Register scratch, - LinkRegisterStatus lr_status, - SaveFPRegsMode save_fp, - RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, - SmiCheck smi_check = INLINE_SMI_CHECK) { - RecordWriteField(context, - offset + kHeapObjectTag, - value, - scratch, - lr_status, - save_fp, - remembered_set_action, - smi_check); - } + // For the page containing |object| mark the region covering + // [object+offset] dirty. The object address must be in the first 8K + // of an allocated page. The 'scratch' registers are used in the + // implementation and all 3 registers are clobbered by the + // operation, as well as the ip register. RecordWrite updates the + // write barrier even when storing smis. + void RecordWrite(Register object, + Operand offset, + Register scratch0, + Register scratch1); - // For a given |object| notify the garbage collector that the slot |address| - // has been written. |value| is the object being stored. The value and - // address registers are clobbered by the operation. - void RecordWrite( - Register object, - Register address, - Register value, - LinkRegisterStatus lr_status, - SaveFPRegsMode save_fp, - RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, - SmiCheck smi_check = INLINE_SMI_CHECK); + // For the page containing |object| mark the region covering + // [address] dirty. The object address must be in the first 8K of an + // allocated page. All 3 registers are clobbered by the operation, + // as well as the ip register. RecordWrite updates the write barrier + // even when storing smis. + void RecordWrite(Register object, + Register address, + Register scratch); // Push a handle. void Push(Handle<Object> handle); @@ -413,6 +318,16 @@ class MacroAssembler: public Assembler { const double imm, const Condition cond = al); + + // --------------------------------------------------------------------------- + // Activation frames + + void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); } + void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); } + + void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); } + void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); } + // Enter exit frame. // stack_space - extra stack space, used for alignment before call to C. void EnterExitFrame(bool save_doubles, int stack_space = 0); @@ -654,13 +569,6 @@ class MacroAssembler: public Assembler { Register length, Register scratch); - // Initialize fields with filler values. Fields starting at |start_offset| - // not including end_offset are overwritten with the value in |filler|. At - // the end the loop, |start_offset| takes the value of |end_offset|. - void InitializeFieldsWithFiller(Register start_offset, - Register end_offset, - Register filler); - // --------------------------------------------------------------------------- // Support functions. @@ -700,31 +608,6 @@ class MacroAssembler: public Assembler { Register scratch, Label* fail); - // Check if a map for a JSObject indicates that the object can have both smi - // and HeapObject elements. Jump to the specified label if it does not. 
- void CheckFastObjectElements(Register map, - Register scratch, - Label* fail); - - // Check if a map for a JSObject indicates that the object has fast smi only - // elements. Jump to the specified label if it does not. - void CheckFastSmiOnlyElements(Register map, - Register scratch, - Label* fail); - - // Check to see if maybe_number can be stored as a double in - // FastDoubleElements. If it can, store it at the index specified by key in - // the FastDoubleElements array elements, otherwise jump to fail. - void StoreNumberToDoubleElements(Register value_reg, - Register key_reg, - Register receiver_reg, - Register elements_reg, - Register scratch1, - Register scratch2, - Register scratch3, - Register scratch4, - Label* fail); - // Check if the map of an object is equal to a specified map (either // given directly or as an index into the root list) and branch to // label if not. Skip the smi check if not required (object is known @@ -947,11 +830,11 @@ class MacroAssembler: public Assembler { // return address (unless this is somehow accounted for by the called // function). void CallCFunction(ExternalReference function, int num_arguments); - void CallCFunction(Register function, int num_arguments); + void CallCFunction(Register function, Register scratch, int num_arguments); void CallCFunction(ExternalReference function, int num_reg_arguments, int num_double_arguments); - void CallCFunction(Register function, + void CallCFunction(Register function, Register scratch, int num_reg_arguments, int num_double_arguments); @@ -1019,9 +902,6 @@ class MacroAssembler: public Assembler { bool generating_stub() { return generating_stub_; } void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; } bool allow_stub_calls() { return allow_stub_calls_; } - void set_has_frame(bool value) { has_frame_ = value; } - bool has_frame() { return has_frame_; } - inline bool AllowThisStubCall(CodeStub* stub); // EABI variant for double arguments in use. bool use_eabi_hardfloat() { @@ -1168,12 +1048,10 @@ class MacroAssembler: public Assembler { void LoadInstanceDescriptors(Register map, Register descriptors); - // Activation support. - void EnterFrame(StackFrame::Type type); - void LeaveFrame(StackFrame::Type type); - private: void CallCFunctionHelper(Register function, + ExternalReference function_reference, + Register scratch, int num_reg_arguments, int num_double_arguments); @@ -1189,25 +1067,16 @@ class MacroAssembler: public Assembler { const CallWrapper& call_wrapper, CallKind call_kind); + // Activation support. + void EnterFrame(StackFrame::Type type); + void LeaveFrame(StackFrame::Type type); + void InitializeNewString(Register string, Register length, Heap::RootListIndex map_index, Register scratch1, Register scratch2); - // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace. - void InNewSpace(Register object, - Register scratch, - Condition cond, // eq for new space, ne otherwise. - Label* branch); - - // Helper for finding the mark bits for an address. Afterwards, the - // bitmap register points at the word with the mark bits and the mask - // the position of the first bit. Leaves addr_reg unchanged. - inline void GetMarkBits(Register addr_reg, - Register bitmap_reg, - Register mask_reg); - // Compute memory operands for safepoint stack slots. 
static int SafepointRegisterStackIndex(int reg_code); MemOperand SafepointRegisterSlot(Register reg); @@ -1215,7 +1084,6 @@ class MacroAssembler: public Assembler { bool generating_stub_; bool allow_stub_calls_; - bool has_frame_; // This handle will be patched with the code object on installation. Handle<Object> code_object_; diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc index c876467938..cd76edbf15 100644 --- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc +++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc @@ -371,12 +371,9 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase( // Isolate. __ mov(r3, Operand(ExternalReference::isolate_address())); - { - AllowExternalCallThatCantCauseGC scope(masm_); - ExternalReference function = - ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate()); - __ CallCFunction(function, argument_count); - } + ExternalReference function = + ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate()); + __ CallCFunction(function, argument_count); // Check if function returned non-zero for success or zero for failure. __ cmp(r0, Operand(0, RelocInfo::NONE)); @@ -614,12 +611,6 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) { // Entry code: __ bind(&entry_label_); - - // Tell the system that we have a stack frame. Because the type is MANUAL, no - // is generated. - FrameScope scope(masm_, StackFrame::MANUAL); - - // Actually emit code to start a new stack frame. // Push arguments // Save callee-save registers. // Start new stack frame. diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc index 5704202622..6af535553f 100644 --- a/deps/v8/src/arm/simulator-arm.cc +++ b/deps/v8/src/arm/simulator-arm.cc @@ -1618,8 +1618,6 @@ void Simulator::HandleRList(Instruction* instr, bool load) { ProcessPUW(instr, num_regs, kPointerSize, &start_address, &end_address); intptr_t* address = reinterpret_cast<intptr_t*>(start_address); - // Catch null pointers a little earlier. - ASSERT(start_address > 8191 || start_address < 0); int reg = 0; while (rlist != 0) { if ((rlist & 1) != 0) { diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc index 09ecc798c5..f8565924b1 100644 --- a/deps/v8/src/arm/stub-cache-arm.cc +++ b/deps/v8/src/arm/stub-cache-arm.cc @@ -431,13 +431,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, // Update the write barrier for the array address. // Pass the now unused name_reg as a scratch register. - __ mov(name_reg, r0); - __ RecordWriteField(receiver_reg, - offset, - name_reg, - scratch, - kLRHasNotBeenSaved, - kDontSaveFPRegs); + __ RecordWrite(receiver_reg, Operand(offset), name_reg, scratch); } else { // Write to the properties array. int offset = index * kPointerSize + FixedArray::kHeaderSize; @@ -450,13 +444,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, // Update the write barrier for the array address. // Ok to clobber receiver_reg and name_reg, since we return. - __ mov(name_reg, r0); - __ RecordWriteField(scratch, - offset, - name_reg, - receiver_reg, - kLRHasNotBeenSaved, - kDontSaveFPRegs); + __ RecordWrite(scratch, Operand(offset), name_reg, receiver_reg); } // Return the value (register r0). 
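The MacroAssembler hunks above drop the incremental-marking write barrier (RecordWriteField and the three-register RecordWrite taking LinkRegisterStatus, SaveFPRegsMode, RememberedSetAction and SmiCheck, plus JumpIfBlack, EnsureNotWhite and the remembered-set helpers) and restore the older barrier: RecordWriteHelper and the Operand-offset RecordWrite simply mark the region of the object's page that covers the written slot as dirty, with the object required to sit in the first 8K of its page, and GenerateStoreField in stub-cache-arm.cc goes back to calling that form. The listing below is a minimal, self-contained C++ sketch of that region-dirtying idea only, not V8 code: the 8 KB page, the 32-region split, and the Page/RecordWriteHelper names and signatures are illustrative assumptions chosen to match the restored comments, not the engine's actual heap layout or API.

#include <cinttypes>
#include <cstdint>
#include <cstdio>

namespace sketch {

// Assumed layout: 8 KB pages, one dirty bit per 256-byte region.
constexpr uintptr_t kPageSize = 8 * 1024;
constexpr uintptr_t kPageMask = ~(kPageSize - 1);
constexpr int kRegionsPerPage = 32;
constexpr uintptr_t kRegionSize = kPageSize / kRegionsPerPage;

struct alignas(8 * 1024) Page {
  uint32_t dirty_regions = 0;  // bit i set => region i must be rescanned by the GC
  unsigned char payload[kPageSize - sizeof(uint32_t)];

  // Recover the page header from any address inside the page by masking the
  // low bits; this is why the sketch keeps objects within a single page.
  static Page* FromAddress(uintptr_t addr) {
    return reinterpret_cast<Page*>(addr & kPageMask);
  }
};

// Rough counterpart of RecordWriteHelper(object, address, scratch): mark the
// region of |object|'s page that covers |slot_address| dirty. (The scratch
// register has no analogue in plain C++.)
inline void RecordWriteHelper(uintptr_t object, uintptr_t slot_address) {
  Page* page = Page::FromAddress(object);
  uintptr_t offset_in_page = slot_address & ~kPageMask;
  page->dirty_regions |= 1u << (offset_in_page / kRegionSize);
}

}  // namespace sketch

int main() {
  static sketch::Page page;  // stands in for one allocated heap page
  uintptr_t object = reinterpret_cast<uintptr_t>(&page.payload[0x40]);
  uintptr_t slot = object + 0x200;  // field at object+offset being stored into

  sketch::RecordWriteHelper(object, slot);
  std::printf("dirty_regions = 0x%08" PRIx32 "\n", page.dirty_regions);
  return 0;
}

Compared with the removed API, note what this model cannot express: there is no smi check, no choice about saving FP registers or the link register, and no remembered-set action, which is the state the revert returns the ARM port to.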
@@ -565,10 +553,9 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm) { } -static MaybeObject* GenerateFastApiDirectCall( - MacroAssembler* masm, - const CallOptimization& optimization, - int argc) { +static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm, + const CallOptimization& optimization, + int argc) { // ----------- S t a t e ------------- // -- sp[0] : holder (set by CheckPrototypes) // -- sp[4] : callee js function @@ -604,8 +591,6 @@ static MaybeObject* GenerateFastApiDirectCall( ApiFunction fun(api_function_address); const int kApiStackSpace = 4; - - FrameScope frame_scope(masm, StackFrame::MANUAL); __ EnterExitFrame(false, kApiStackSpace); // r0 = v8::Arguments& @@ -631,11 +616,9 @@ static MaybeObject* GenerateFastApiDirectCall( ExternalReference ref = ExternalReference(&fun, ExternalReference::DIRECT_API_CALL, masm->isolate()); - AllowExternalCallThatCantCauseGC scope(masm); return masm->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace); } - class CallInterceptorCompiler BASE_EMBEDDED { public: CallInterceptorCompiler(StubCompiler* stub_compiler, @@ -811,7 +794,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { miss_label); // Call a runtime function to load the interceptor property. - FrameScope scope(masm, StackFrame::INTERNAL); + __ EnterInternalFrame(); // Save the name_ register across the call. __ push(name_); @@ -828,8 +811,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { // Restore the name_ register. __ pop(name_); - - // Leave the internal frame. + __ LeaveInternalFrame(); } void LoadWithInterceptor(MacroAssembler* masm, @@ -838,19 +820,18 @@ class CallInterceptorCompiler BASE_EMBEDDED { JSObject* holder_obj, Register scratch, Label* interceptor_succeeded) { - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ Push(holder, name_); - - CompileCallLoadPropertyWithInterceptor(masm, - receiver, - holder, - name_, - holder_obj); - - __ pop(name_); // Restore the name. - __ pop(receiver); // Restore the holder. - } + __ EnterInternalFrame(); + __ Push(holder, name_); + + CompileCallLoadPropertyWithInterceptor(masm, + receiver, + holder, + name_, + holder_obj); + + __ pop(name_); // Restore the name. + __ pop(receiver); // Restore the holder. + __ LeaveInternalFrame(); // If interceptor returns no-result sentinel, call the constant function. __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex); @@ -1247,10 +1228,7 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object, ApiFunction fun(getter_address); const int kApiStackSpace = 1; - - FrameScope frame_scope(masm(), StackFrame::MANUAL); __ EnterExitFrame(false, kApiStackSpace); - // Create AccessorInfo instance on the stack above the exit frame with // scratch2 (internal::Object **args_) as the data. __ str(scratch2, MemOperand(sp, 1 * kPointerSize)); @@ -1310,44 +1288,42 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object, // Save necessary data before invoking an interceptor. // Requires a frame to make GC aware of pushed pointers. - { - FrameScope frame_scope(masm(), StackFrame::INTERNAL); + __ EnterInternalFrame(); - if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) { - // CALLBACKS case needs a receiver to be passed into C++ callback. - __ Push(receiver, holder_reg, name_reg); - } else { - __ Push(holder_reg, name_reg); - } - - // Invoke an interceptor. Note: map checks from receiver to - // interceptor's holder has been compiled before (see a caller - // of this method.) 
- CompileCallLoadPropertyWithInterceptor(masm(), - receiver, - holder_reg, - name_reg, - interceptor_holder); - - // Check if interceptor provided a value for property. If it's - // the case, return immediately. - Label interceptor_failed; - __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex); - __ cmp(r0, scratch1); - __ b(eq, &interceptor_failed); - frame_scope.GenerateLeaveFrame(); - __ Ret(); + if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) { + // CALLBACKS case needs a receiver to be passed into C++ callback. + __ Push(receiver, holder_reg, name_reg); + } else { + __ Push(holder_reg, name_reg); + } - __ bind(&interceptor_failed); - __ pop(name_reg); - __ pop(holder_reg); - if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) { - __ pop(receiver); - } + // Invoke an interceptor. Note: map checks from receiver to + // interceptor's holder has been compiled before (see a caller + // of this method.) + CompileCallLoadPropertyWithInterceptor(masm(), + receiver, + holder_reg, + name_reg, + interceptor_holder); + + // Check if interceptor provided a value for property. If it's + // the case, return immediately. + Label interceptor_failed; + __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex); + __ cmp(r0, scratch1); + __ b(eq, &interceptor_failed); + __ LeaveInternalFrame(); + __ Ret(); - // Leave the internal frame. + __ bind(&interceptor_failed); + __ pop(name_reg); + __ pop(holder_reg); + if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) { + __ pop(receiver); } + __ LeaveInternalFrame(); + // Check that the maps from interceptor's holder to lookup's holder // haven't changed. And load lookup's holder into |holder| register. if (interceptor_holder != lookup->holder()) { @@ -1580,7 +1556,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, DONT_DO_SMI_CHECK); if (argc == 1) { // Otherwise fall through to call the builtin. - Label attempt_to_grow_elements; + Label exit, with_write_barrier, attempt_to_grow_elements; // Get the array's length into r0 and calculate new length. __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); @@ -1595,15 +1571,11 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, __ cmp(r0, r4); __ b(gt, &attempt_to_grow_elements); - // Check if value is a smi. - Label with_write_barrier; - __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize)); - __ JumpIfNotSmi(r4, &with_write_barrier); - // Save new length. __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); // Push the element. + __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize)); // We may need a register containing the address end_elements below, // so write back the value in end_elements. __ add(end_elements, elements, @@ -1613,31 +1585,14 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex)); // Check for a smi. + __ JumpIfNotSmi(r4, &with_write_barrier); + __ bind(&exit); __ Drop(argc + 1); __ Ret(); __ bind(&with_write_barrier); - - __ ldr(r6, FieldMemOperand(receiver, HeapObject::kMapOffset)); - __ CheckFastSmiOnlyElements(r6, r6, &call_builtin); - - // Save new length. - __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); - - // Push the element. - // We may need a register containing the address end_elements below, - // so write back the value in end_elements. 
- __ add(end_elements, elements, - Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex)); - - __ RecordWrite(elements, - end_elements, - r4, - kLRHasNotBeenSaved, - kDontSaveFPRegs, - EMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); + __ InNewSpace(elements, r4, eq, &exit); + __ RecordWriteHelper(elements, end_elements, r4); __ Drop(argc + 1); __ Ret(); @@ -1649,15 +1604,6 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, __ b(&call_builtin); } - __ ldr(r2, MemOperand(sp, (argc - 1) * kPointerSize)); - // Growing elements that are SMI-only requires special handling in case - // the new element is non-Smi. For now, delegate to the builtin. - Label no_fast_elements_check; - __ JumpIfSmi(r2, &no_fast_elements_check); - __ ldr(r7, FieldMemOperand(receiver, HeapObject::kMapOffset)); - __ CheckFastObjectElements(r7, r7, &call_builtin); - __ bind(&no_fast_elements_check); - Isolate* isolate = masm()->isolate(); ExternalReference new_space_allocation_top = ExternalReference::new_space_allocation_top_address(isolate); @@ -1684,7 +1630,8 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, // Update new_space_allocation_top. __ str(r6, MemOperand(r7)); // Push the argument. - __ str(r2, MemOperand(end_elements)); + __ ldr(r6, MemOperand(sp, (argc - 1) * kPointerSize)); + __ str(r6, MemOperand(end_elements)); // Fill the rest with holes. __ LoadRoot(r6, Heap::kTheHoleValueRootIndex); for (int i = 1; i < kAllocationDelta; i++) { @@ -2766,15 +2713,6 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object, // Store the value in the cell. __ str(r0, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset)); - __ mov(r1, r0); - __ RecordWriteField(r4, - JSGlobalPropertyCell::kValueOffset, - r1, - r2, - kLRHasNotBeenSaved, - kDontSaveFPRegs, - OMIT_REMEMBERED_SET); - Counters* counters = masm()->isolate()->counters(); __ IncrementCounter(counters->named_store_global_inline(), 1, r4, r3); __ Ret(); @@ -3516,7 +3454,6 @@ static bool IsElementTypeSigned(ElementsKind elements_kind) { case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: @@ -3603,7 +3540,6 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray( } break; case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: @@ -3944,7 +3880,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( } break; case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: @@ -4008,7 +3943,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: @@ -4148,7 +4082,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: @@ -4301,10 +4234,8 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement( } -void KeyedStoreStubCompiler::GenerateStoreFastElement( - MacroAssembler* masm, - bool is_js_array, - ElementsKind elements_kind) { +void 
KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm, + bool is_js_array) { // ----------- S t a t e ------------- // -- r0 : value // -- r1 : key @@ -4346,33 +4277,15 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( __ cmp(key_reg, scratch); __ b(hs, &miss_force_generic); - if (elements_kind == FAST_SMI_ONLY_ELEMENTS) { - __ JumpIfNotSmi(value_reg, &miss_force_generic); - __ add(scratch, - elements_reg, - Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); - __ add(scratch, - scratch, - Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ str(value_reg, MemOperand(scratch)); - } else { - ASSERT(elements_kind == FAST_ELEMENTS); - __ add(scratch, - elements_reg, - Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); - __ add(scratch, - scratch, - Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ str(value_reg, MemOperand(scratch)); - __ mov(receiver_reg, value_reg); - __ RecordWrite(elements_reg, // Object. - scratch, // Address. - receiver_reg, // Value. - kLRHasNotBeenSaved, - kDontSaveFPRegs); - } + __ add(scratch, + elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); + __ str(value_reg, + MemOperand(scratch, key_reg, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ RecordWrite(scratch, + Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize), + receiver_reg , elements_reg); + // value_reg (r0) is preserved. // Done. __ Ret(); @@ -4396,15 +4309,15 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( // -- r4 : scratch // -- r5 : scratch // ----------------------------------- - Label miss_force_generic; + Label miss_force_generic, smi_value, is_nan, maybe_nan, have_double_value; Register value_reg = r0; Register key_reg = r1; Register receiver_reg = r2; - Register elements_reg = r3; - Register scratch1 = r4; - Register scratch2 = r5; - Register scratch3 = r6; + Register scratch = r3; + Register elements_reg = r4; + Register mantissa_reg = r5; + Register exponent_reg = r6; Register scratch4 = r7; // This stub is meant to be tail-jumped to, the receiver must already @@ -4416,25 +4329,90 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( // Check that the key is within bounds. if (is_js_array) { - __ ldr(scratch1, FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); + __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); } else { - __ ldr(scratch1, + __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset)); } // Compare smis, unsigned compare catches both negative and out-of-bound // indexes. - __ cmp(key_reg, scratch1); + __ cmp(key_reg, scratch); __ b(hs, &miss_force_generic); - __ StoreNumberToDoubleElements(value_reg, - key_reg, - receiver_reg, - elements_reg, - scratch1, - scratch2, - scratch3, - scratch4, - &miss_force_generic); + // Handle smi values specially. + __ JumpIfSmi(value_reg, &smi_value); + + // Ensure that the object is a heap number + __ CheckMap(value_reg, + scratch, + masm->isolate()->factory()->heap_number_map(), + &miss_force_generic, + DONT_DO_SMI_CHECK); + + // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000 + // in the exponent. 
+ __ mov(scratch, Operand(kNaNOrInfinityLowerBoundUpper32)); + __ ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset)); + __ cmp(exponent_reg, scratch); + __ b(ge, &maybe_nan); + + __ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset)); + + __ bind(&have_double_value); + __ add(scratch, elements_reg, + Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize)); + __ str(mantissa_reg, FieldMemOperand(scratch, FixedDoubleArray::kHeaderSize)); + uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32); + __ str(exponent_reg, FieldMemOperand(scratch, offset)); + __ Ret(); + + __ bind(&maybe_nan); + // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise + // it's an Infinity, and the non-NaN code path applies. + __ b(gt, &is_nan); + __ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset)); + __ cmp(mantissa_reg, Operand(0)); + __ b(eq, &have_double_value); + __ bind(&is_nan); + // Load canonical NaN for storing into the double array. + uint64_t nan_int64 = BitCast<uint64_t>( + FixedDoubleArray::canonical_not_the_hole_nan_as_double()); + __ mov(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64))); + __ mov(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32))); + __ jmp(&have_double_value); + + __ bind(&smi_value); + __ add(scratch, elements_reg, + Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); + __ add(scratch, scratch, + Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize)); + // scratch is now effective address of the double element + + FloatingPointHelper::Destination destination; + if (CpuFeatures::IsSupported(VFP3)) { + destination = FloatingPointHelper::kVFPRegisters; + } else { + destination = FloatingPointHelper::kCoreRegisters; + } + + Register untagged_value = receiver_reg; + __ SmiUntag(untagged_value, value_reg); + FloatingPointHelper::ConvertIntToDouble( + masm, + untagged_value, + destination, + d0, + mantissa_reg, + exponent_reg, + scratch4, + s2); + if (destination == FloatingPointHelper::kVFPRegisters) { + CpuFeatures::Scope scope(VFP3); + __ vstr(d0, scratch, 0); + } else { + __ str(mantissa_reg, MemOperand(scratch, 0)); + __ str(exponent_reg, MemOperand(scratch, Register::kSizeInBytes)); + } __ Ret(); // Handle store cache miss, replacing the ic with the generic stub.
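The restored GenerateStoreFastDoubleElement above folds back in the logic that the newer code had moved into the removed StoreNumberToDoubleElements helper: smis are untagged and converted to a double before being stored, heap-number doubles are written as two 32-bit words, and any value whose exponent word compares (signed) at or above 0x7ff00000, per the comment above, is either +Infinity (zero mantissa, stored unchanged) or a NaN, in which case FixedDoubleArray::canonical_not_the_hole_nan_as_double() is stored instead so the array never holds a NaN bit pattern that could be mistaken for its hole marker. The following is a small, self-contained C++ sketch of that decision logic only; the constant's value, the canonical NaN bits, and the function names are illustrative assumptions rather than V8's definitions, and the smi model assumes the one-bit tag this 32-bit port uses.

#include <cinttypes>
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

namespace sketch {

// Assumed value of the stub's kNaNOrInfinityLowerBoundUpper32: the upper
// 32 bits of the smallest double whose exponent field is all ones.
constexpr uint32_t kNaNOrInfinityLowerBoundUpper32 = 0x7ff00000;

// Stand-in for canonical_not_the_hole_nan_as_double(): one fixed quiet NaN.
constexpr uint64_t kCanonicalNaNBits = 0x7ff8000000000000ull;

inline uint64_t BitsOf(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  return bits;
}

// Returns the 64-bit pattern that would be written into the double array for
// a value that is either a smi (tagged small integer) or a heap-number double.
uint64_t DoubleElementBits(bool is_smi, int32_t tagged_smi, double heap_number) {
  if (is_smi) {
    // Smi path: untag (drop the low tag bit) and convert to double, as the
    // stub does with SmiUntag + ConvertIntToDouble.
    return BitsOf(static_cast<double>(tagged_smi >> 1));
  }
  uint64_t bits = BitsOf(heap_number);
  uint32_t upper = static_cast<uint32_t>(bits >> 32);
  // Signed compare of the exponent word, mirroring cmp + b(ge, &maybe_nan).
  if (static_cast<int32_t>(upper) >=
      static_cast<int32_t>(kNaNOrInfinityLowerBoundUpper32)) {
    // Greater than the bound, or equal with a nonzero low mantissa word,
    // means NaN; equal with a zero mantissa word is +Infinity and falls through.
    bool is_nan = upper > kNaNOrInfinityLowerBoundUpper32 ||
                  static_cast<uint32_t>(bits) != 0;
    if (is_nan) return kCanonicalNaNBits;  // canonicalize before storing
  }
  return bits;  // ordinary double (or +Infinity) is stored unchanged
}

}  // namespace sketch

int main() {
  std::printf("smi 7  -> %016" PRIx64 "\n",
              sketch::DoubleElementBits(true, 7 << 1, 0.0));
  std::printf("1.5    -> %016" PRIx64 "\n",
              sketch::DoubleElementBits(false, 0, 1.5));
  std::printf("a NaN  -> %016" PRIx64 "\n",
              sketch::DoubleElementBits(false, 0, std::nan("0x123")));
  return 0;
}

In the stub itself the same distinction drives register traffic rather than a return value: the NaN and Infinity cases reuse mantissa_reg/exponent_reg and branch back to have_double_value, while the smi case converts in VFP registers when VFP3 is available and in core registers otherwise.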