Diffstat (limited to 'deps/v8/src/arm/code-stubs-arm.cc')
-rw-r--r-- | deps/v8/src/arm/code-stubs-arm.cc | 1926
1 file changed, 1323 insertions, 603 deletions
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc index 36450c945f..c65f5bdf84 100644 --- a/deps/v8/src/arm/code-stubs-arm.cc +++ b/deps/v8/src/arm/code-stubs-arm.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -98,9 +98,9 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) { &gc, TAG_OBJECT); - int map_index = strict_mode_ == kStrictMode - ? Context::STRICT_MODE_FUNCTION_MAP_INDEX - : Context::FUNCTION_MAP_INDEX; + int map_index = (language_mode_ == CLASSIC_MODE) + ? Context::FUNCTION_MAP_INDEX + : Context::STRICT_MODE_FUNCTION_MAP_INDEX; // Compute the function map in the current global context and set that // as the map of the allocated object. @@ -122,7 +122,6 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) { __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset)); __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset)); - // Initialize the code pointer in the function to be the one // found in the shared function info object. __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset)); @@ -156,21 +155,19 @@ void FastNewContextStub::Generate(MacroAssembler* masm) { // Load the function from the stack. __ ldr(r3, MemOperand(sp, 0)); - // Setup the object header. - __ LoadRoot(r2, Heap::kFunctionContextMapRootIndex); - __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); + // Set up the object header. + __ LoadRoot(r1, Heap::kFunctionContextMapRootIndex); __ mov(r2, Operand(Smi::FromInt(length))); __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset)); + __ str(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); - // Setup the fixed slots. + // Set up the fixed slots, copy the global object from the previous context. + __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); __ mov(r1, Operand(Smi::FromInt(0))); __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX))); __ str(cp, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX))); __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX))); - - // Copy the global object from the previous context. - __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); - __ str(r1, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX))); + __ str(r2, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX))); // Initialize the rest of the slots to undefined. __ LoadRoot(r1, Heap::kUndefinedValueRootIndex); @@ -189,6 +186,119 @@ void FastNewContextStub::Generate(MacroAssembler* masm) { } +void FastNewBlockContextStub::Generate(MacroAssembler* masm) { + // Stack layout on entry: + // + // [sp]: function. + // [sp + kPointerSize]: serialized scope info + + // Try to allocate the context in new space. + Label gc; + int length = slots_ + Context::MIN_CONTEXT_SLOTS; + __ AllocateInNewSpace(FixedArray::SizeFor(length), + r0, r1, r2, &gc, TAG_OBJECT); + + // Load the function from the stack. + __ ldr(r3, MemOperand(sp, 0)); + + // Load the serialized scope info from the stack. + __ ldr(r1, MemOperand(sp, 1 * kPointerSize)); + + // Set up the object header. 
+ __ LoadRoot(r2, Heap::kBlockContextMapRootIndex); + __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); + __ mov(r2, Operand(Smi::FromInt(length))); + __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset)); + + // If this block context is nested in the global context we get a smi + // sentinel instead of a function. The block context should get the + // canonical empty function of the global context as its closure which + // we still have to look up. + Label after_sentinel; + __ JumpIfNotSmi(r3, &after_sentinel); + if (FLAG_debug_code) { + const char* message = "Expected 0 as a Smi sentinel"; + __ cmp(r3, Operand::Zero()); + __ Assert(eq, message); + } + __ ldr(r3, GlobalObjectOperand()); + __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalContextOffset)); + __ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX)); + __ bind(&after_sentinel); + + // Set up the fixed slots, copy the global object from the previous context. + __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX)); + __ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX)); + __ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX)); + __ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX)); + __ str(r2, ContextOperand(r0, Context::GLOBAL_INDEX)); + + // Initialize the rest of the slots to the hole value. + __ LoadRoot(r1, Heap::kTheHoleValueRootIndex); + for (int i = 0; i < slots_; i++) { + __ str(r1, ContextOperand(r0, i + Context::MIN_CONTEXT_SLOTS)); + } + + // Remove the on-stack argument and return. + __ mov(cp, r0); + __ add(sp, sp, Operand(2 * kPointerSize)); + __ Ret(); + + // Need to collect. Call into runtime system. + __ bind(&gc); + __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1); +} + + +static void GenerateFastCloneShallowArrayCommon( + MacroAssembler* masm, + int length, + FastCloneShallowArrayStub::Mode mode, + Label* fail) { + // Registers on entry: + // + // r3: boilerplate literal array. + ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS); + + // All sizes here are multiples of kPointerSize. + int elements_size = 0; + if (length > 0) { + elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS + ? FixedDoubleArray::SizeFor(length) + : FixedArray::SizeFor(length); + } + int size = JSArray::kSize + elements_size; + + // Allocate both the JS array and the elements array in one big + // allocation. This avoids multiple limit checks. + __ AllocateInNewSpace(size, + r0, + r1, + r2, + fail, + TAG_OBJECT); + + // Copy the JS array part. + for (int i = 0; i < JSArray::kSize; i += kPointerSize) { + if ((i != JSArray::kElementsOffset) || (length == 0)) { + __ ldr(r1, FieldMemOperand(r3, i)); + __ str(r1, FieldMemOperand(r0, i)); + } + } + + if (length > 0) { + // Get hold of the elements array of the boilerplate and setup the + // elements pointer in the resulting object. + __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset)); + __ add(r2, r0, Operand(JSArray::kSize)); + __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset)); + + // Copy the elements array. + ASSERT((elements_size % kPointerSize) == 0); + __ CopyFields(r2, r3, r1.bit(), elements_size / kPointerSize); + } +} + void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { // Stack layout on entry: // @@ -196,10 +306,6 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { // [sp + kPointerSize]: literal index. // [sp + (2 * kPointerSize)]: literals array. - // All sizes here are multiples of kPointerSize. - int elements_size = (length_ > 0) ? 
FixedArray::SizeFor(length_) : 0; - int size = JSArray::kSize + elements_size; - // Load boilerplate object into r3 and check if we need to create a // boilerplate. Label slow_case; @@ -207,64 +313,109 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); - __ cmp(r3, ip); + __ CompareRoot(r3, Heap::kUndefinedValueRootIndex); __ b(eq, &slow_case); + FastCloneShallowArrayStub::Mode mode = mode_; + if (mode == CLONE_ANY_ELEMENTS) { + Label double_elements, check_fast_elements; + __ ldr(r0, FieldMemOperand(r3, JSArray::kElementsOffset)); + __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); + __ CompareRoot(r0, Heap::kFixedCOWArrayMapRootIndex); + __ b(ne, &check_fast_elements); + GenerateFastCloneShallowArrayCommon(masm, 0, + COPY_ON_WRITE_ELEMENTS, &slow_case); + // Return and remove the on-stack parameters. + __ add(sp, sp, Operand(3 * kPointerSize)); + __ Ret(); + + __ bind(&check_fast_elements); + __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex); + __ b(ne, &double_elements); + GenerateFastCloneShallowArrayCommon(masm, length_, + CLONE_ELEMENTS, &slow_case); + // Return and remove the on-stack parameters. + __ add(sp, sp, Operand(3 * kPointerSize)); + __ Ret(); + + __ bind(&double_elements); + mode = CLONE_DOUBLE_ELEMENTS; + // Fall through to generate the code to handle double elements. + } + if (FLAG_debug_code) { const char* message; Heap::RootListIndex expected_map_index; - if (mode_ == CLONE_ELEMENTS) { + if (mode == CLONE_ELEMENTS) { message = "Expected (writable) fixed array"; expected_map_index = Heap::kFixedArrayMapRootIndex; + } else if (mode == CLONE_DOUBLE_ELEMENTS) { + message = "Expected (writable) fixed double array"; + expected_map_index = Heap::kFixedDoubleArrayMapRootIndex; } else { - ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS); + ASSERT(mode == COPY_ON_WRITE_ELEMENTS); message = "Expected copy-on-write fixed array"; expected_map_index = Heap::kFixedCOWArrayMapRootIndex; } __ push(r3); __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset)); __ ldr(r3, FieldMemOperand(r3, HeapObject::kMapOffset)); - __ LoadRoot(ip, expected_map_index); - __ cmp(r3, ip); + __ CompareRoot(r3, expected_map_index); __ Assert(eq, message); __ pop(r3); } - // Allocate both the JS array and the elements array in one big - // allocation. This avoids multiple limit checks. - __ AllocateInNewSpace(size, - r0, - r1, - r2, - &slow_case, - TAG_OBJECT); + GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case); - // Copy the JS array part. - for (int i = 0; i < JSArray::kSize; i += kPointerSize) { - if ((i != JSArray::kElementsOffset) || (length_ == 0)) { - __ ldr(r1, FieldMemOperand(r3, i)); - __ str(r1, FieldMemOperand(r0, i)); - } - } + // Return and remove the on-stack parameters. + __ add(sp, sp, Operand(3 * kPointerSize)); + __ Ret(); - if (length_ > 0) { - // Get hold of the elements array of the boilerplate and setup the - // elements pointer in the resulting object. - __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset)); - __ add(r2, r0, Operand(JSArray::kSize)); - __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset)); + __ bind(&slow_case); + __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1); +} - // Copy the elements array. 
- __ CopyFields(r2, r3, r1.bit(), elements_size / kPointerSize); + +void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) { + // Stack layout on entry: + // + // [sp]: object literal flags. + // [sp + kPointerSize]: constant properties. + // [sp + (2 * kPointerSize)]: literal index. + // [sp + (3 * kPointerSize)]: literals array. + + // Load boilerplate object into r3 and check if we need to create a + // boilerplate. + Label slow_case; + __ ldr(r3, MemOperand(sp, 3 * kPointerSize)); + __ ldr(r0, MemOperand(sp, 2 * kPointerSize)); + __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ CompareRoot(r3, Heap::kUndefinedValueRootIndex); + __ b(eq, &slow_case); + + // Check that the boilerplate contains only fast properties and we can + // statically determine the instance size. + int size = JSObject::kHeaderSize + length_ * kPointerSize; + __ ldr(r0, FieldMemOperand(r3, HeapObject::kMapOffset)); + __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceSizeOffset)); + __ cmp(r0, Operand(size >> kPointerSizeLog2)); + __ b(ne, &slow_case); + + // Allocate the JS object and copy header together with all in-object + // properties from the boilerplate. + __ AllocateInNewSpace(size, r0, r1, r2, &slow_case, TAG_OBJECT); + for (int i = 0; i < size; i += kPointerSize) { + __ ldr(r1, FieldMemOperand(r3, i)); + __ str(r1, FieldMemOperand(r0, i)); } // Return and remove the on-stack parameters. - __ add(sp, sp, Operand(3 * kPointerSize)); + __ add(sp, sp, Operand(4 * kPointerSize)); __ Ret(); __ bind(&slow_case); - __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1); + __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1); } @@ -432,7 +583,9 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm, Label is_smi, done; - __ JumpIfSmi(object, &is_smi); + // Smi-check + __ UntagAndJumpIfSmi(scratch1, object, &is_smi); + // Heap number check __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); // Handle loading a double from a heap number. @@ -454,7 +607,6 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm, if (CpuFeatures::IsSupported(VFP3)) { CpuFeatures::Scope scope(VFP3); // Convert smi to double using VFP instructions. - __ SmiUntag(scratch1, object); __ vmov(dst.high(), scratch1); __ vcvt_f64_s32(dst, dst.high()); if (destination == kCoreRegisters) { @@ -489,11 +641,10 @@ void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm, Heap::kHeapNumberMapRootIndex, "HeapNumberMap register clobbered."); } - Label is_smi; Label done; Label not_in_int32_range; - __ JumpIfSmi(object, &is_smi); + __ UntagAndJumpIfSmi(dst, object, &done); __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset)); __ cmp(scratch1, heap_number_map); __ b(ne, not_number); @@ -513,10 +664,6 @@ void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm, scratch1, scratch2, scratch3); - __ jmp(&done); - - __ bind(&is_smi); - __ SmiUntag(dst, object); __ bind(&done); } @@ -559,7 +706,7 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm, // Get the absolute value of the object (as an unsigned integer). __ rsb(int_scratch, int_scratch, Operand::Zero(), SetCC, mi); - // Get mantisssa[51:20]. + // Get mantissa[51:20]. // Get the position of the first set bit. __ CountLeadingZeros(dst1, int_scratch, scratch2); @@ -689,10 +836,7 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm, Label done; - // Untag the object into the destination register. 
- __ SmiUntag(dst, object); - // Just return if the object is a smi. - __ JumpIfSmi(object, &done); + __ UntagAndJumpIfSmi(dst, object, &done); if (FLAG_debug_code) { __ AbortIfNotRootValue(heap_number_map, @@ -793,7 +937,7 @@ void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm, // non zero bits left. So we need the (30 - exponent) last bits of the // 31 higher bits of the mantissa to be null. // Because bits [21:0] are null, we can check instead that the - // (32 - exponent) last bits of the 32 higher bits of the mantisssa are null. + // (32 - exponent) last bits of the 32 higher bits of the mantissa are null. // Get the 32 higher bits of the mantissa in dst. __ Ubfx(dst, @@ -838,9 +982,11 @@ void FloatingPointHelper::CallCCodeForDoubleOperation( __ vmov(d0, r0, r1); __ vmov(d1, r2, r3); } - // Call C routine that may not cause GC or other trouble. - __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()), - 0, 2); + { + AllowExternalCallThatCantCauseGC scope(masm); + __ CallCFunction( + ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2); + } // Store answer in the overwritable heap number. Double returned in // registers r0 and r1 or in d0. if (masm->use_eabi_hardfloat()) { @@ -857,6 +1003,29 @@ void FloatingPointHelper::CallCCodeForDoubleOperation( } +bool WriteInt32ToHeapNumberStub::IsPregenerated() { + // These variants are compiled ahead of time. See next method. + if (the_int_.is(r1) && the_heap_number_.is(r0) && scratch_.is(r2)) { + return true; + } + if (the_int_.is(r2) && the_heap_number_.is(r0) && scratch_.is(r3)) { + return true; + } + // Other register combinations are generated as and when they are needed, + // so it is unsafe to call them from stubs (we can't generate a stub while + // we are generating a stub). + return false; +} + + +void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() { + WriteInt32ToHeapNumberStub stub1(r1, r0, r2); + WriteInt32ToHeapNumberStub stub2(r2, r0, r3); + stub1.GetCode()->set_is_pregenerated(true); + stub2.GetCode()->set_is_pregenerated(true); +} + + // See comment for class. void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) { Label max_negative_int; @@ -1197,6 +1366,8 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, __ vmov(d0, r0, r1); __ vmov(d1, r2, r3); } + + AllowExternalCallThatCantCauseGC scope(masm); __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), 0, 2); __ pop(pc); // Return. @@ -1214,7 +1385,7 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, // If either operand is a JS object or an oddball value, then they are // not equal since their pointers are different. // There is no test for undetectability in strict equality. - STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE); + STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE); Label first_non_object; // Get the type of the first operand into r2 and compare it with // FIRST_SPEC_OBJECT_TYPE. @@ -1606,6 +1777,8 @@ void CompareStub::Generate(MacroAssembler* masm) { // The stub expects its argument in the tos_ register and returns its result in // it, too: zero for false, and a non-zero value for true. void ToBooleanStub::Generate(MacroAssembler* masm) { + // This stub overrides SometimesSetsUpAFrame() to return false. That means + // we cannot call anything that could cause a GC from this stub. // This stub uses VFP3 instructions. 
CpuFeatures::Scope scope(VFP3); @@ -1713,6 +1886,41 @@ void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) { } +void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { + // We don't allow a GC during a store buffer overflow so there is no need to + // store the registers in any particular way, but we do have to store and + // restore them. + __ stm(db_w, sp, kCallerSaved | lr.bit()); + if (save_doubles_ == kSaveFPRegs) { + CpuFeatures::Scope scope(VFP3); + __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters)); + for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) { + DwVfpRegister reg = DwVfpRegister::from_code(i); + __ vstr(reg, MemOperand(sp, i * kDoubleSize)); + } + } + const int argument_count = 1; + const int fp_argument_count = 0; + const Register scratch = r1; + + AllowExternalCallThatCantCauseGC scope(masm); + __ PrepareCallCFunction(argument_count, fp_argument_count, scratch); + __ mov(r0, Operand(ExternalReference::isolate_address())); + __ CallCFunction( + ExternalReference::store_buffer_overflow_function(masm->isolate()), + argument_count); + if (save_doubles_ == kSaveFPRegs) { + CpuFeatures::Scope scope(VFP3); + for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) { + DwVfpRegister reg = DwVfpRegister::from_code(i); + __ vldr(reg, MemOperand(sp, i * kDoubleSize)); + } + __ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters)); + } + __ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0). +} + + void UnaryOpStub::PrintName(StringStream* stream) { const char* op_name = Token::Name(op_); const char* overwrite_name = NULL; // Make g++ happy. @@ -1866,12 +2074,13 @@ void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm, __ jmp(&heapnumber_allocated); __ bind(&slow_allocate_heapnumber); - __ EnterInternalFrame(); - __ push(r0); - __ CallRuntime(Runtime::kNumberAlloc, 0); - __ mov(r1, Operand(r0)); - __ pop(r0); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(r0); + __ CallRuntime(Runtime::kNumberAlloc, 0); + __ mov(r1, Operand(r0)); + __ pop(r0); + } __ bind(&heapnumber_allocated); __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); @@ -1912,13 +2121,14 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot( __ jmp(&heapnumber_allocated); __ bind(&slow_allocate_heapnumber); - __ EnterInternalFrame(); - __ push(r0); // Push the heap number, not the untagged int32. - __ CallRuntime(Runtime::kNumberAlloc, 0); - __ mov(r2, r0); // Move the new heap number into r2. - // Get the heap number into r0, now that the new heap number is in r2. - __ pop(r0); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(r0); // Push the heap number, not the untagged int32. + __ CallRuntime(Runtime::kNumberAlloc, 0); + __ mov(r2, r0); // Move the new heap number into r2. + // Get the heap number into r0, now that the new heap number is in r2. + __ pop(r0); + } // Convert the heap number in r0 to an untagged integer in r1. // This can't go slow-case because it's the same number we already @@ -2028,6 +2238,10 @@ void BinaryOpStub::GenerateTypeTransitionWithSavedArgs( void BinaryOpStub::Generate(MacroAssembler* masm) { + // Explicitly allow generation of nested stubs. It is safe here because + // generation code does not use any raw pointers. 
+ AllowStubCallsScope allow_stub_calls(masm, true); + switch (operands_type_) { case BinaryOpIC::UNINITIALIZED: GenerateTypeTransition(masm); @@ -2110,7 +2324,7 @@ void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) { __ cmp(ip, Operand(scratch2)); __ b(ne, ¬_smi_result); // Go slow on zero result to handle -0. - __ tst(scratch1, Operand(scratch1)); + __ cmp(scratch1, Operand(0)); __ mov(right, Operand(scratch1), LeaveCC, ne); __ Ret(ne); // We need -0 if we were multiplying a negative number with 0 to get 0. @@ -3082,10 +3296,12 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { // Check if cache matches: Double value is stored in uint32_t[2] array. __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit()); __ cmp(r2, r4); - __ b(ne, &calculate); - __ cmp(r3, r5); + __ cmp(r3, r5, eq); __ b(ne, &calculate); // Cache hit. Load result, cleanup and return. + Counters* counters = masm->isolate()->counters(); + __ IncrementCounter( + counters->transcendental_cache_hit(), 1, scratch0, scratch1); if (tagged) { // Pop input value from stack and load result into r0. __ pop(); @@ -3098,6 +3314,9 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { } // if (CpuFeatures::IsSupported(VFP3)) __ bind(&calculate); + Counters* counters = masm->isolate()->counters(); + __ IncrementCounter( + counters->transcendental_cache_miss(), 1, scratch0, scratch1); if (tagged) { __ bind(&invalid_cache); ExternalReference runtime_function = @@ -3133,10 +3352,11 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex); __ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache); __ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset)); - __ EnterInternalFrame(); - __ push(r0); - __ CallRuntime(RuntimeFunction(), 1); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(r0); + __ CallRuntime(RuntimeFunction(), 1); + } __ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset)); __ Ret(); @@ -3149,14 +3369,15 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { // We return the value in d2 without adding it to the cache, but // we cause a scavenging GC so that future allocations will succeed. - __ EnterInternalFrame(); - - // Allocate an aligned object larger than a HeapNumber. - ASSERT(4 * kPointerSize >= HeapNumber::kSize); - __ mov(scratch0, Operand(4 * kPointerSize)); - __ push(scratch0); - __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + + // Allocate an aligned object larger than a HeapNumber. 
+ ASSERT(4 * kPointerSize >= HeapNumber::kSize); + __ mov(scratch0, Operand(4 * kPointerSize)); + __ push(scratch0); + __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace); + } __ Ret(); } } @@ -3173,6 +3394,7 @@ void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm, } else { __ vmov(r0, r1, d2); } + AllowExternalCallThatCantCauseGC scope(masm); switch (type_) { case TranscendentalCache::SIN: __ CallCFunction(ExternalReference::math_sin_double_function(isolate), @@ -3182,6 +3404,10 @@ void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm, __ CallCFunction(ExternalReference::math_cos_double_function(isolate), 0, 1); break; + case TranscendentalCache::TAN: + __ CallCFunction(ExternalReference::math_tan_double_function(isolate), + 0, 1); + break; case TranscendentalCache::LOG: __ CallCFunction(ExternalReference::math_log_double_function(isolate), 0, 1); @@ -3199,6 +3425,7 @@ Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() { // Add more cases when necessary. case TranscendentalCache::SIN: return Runtime::kMath_sin; case TranscendentalCache::COS: return Runtime::kMath_cos; + case TranscendentalCache::TAN: return Runtime::kMath_tan; case TranscendentalCache::LOG: return Runtime::kMath_log; default: UNIMPLEMENTED(); @@ -3213,104 +3440,201 @@ void StackCheckStub::Generate(MacroAssembler* masm) { void MathPowStub::Generate(MacroAssembler* masm) { - Label call_runtime; - - if (CpuFeatures::IsSupported(VFP3)) { - CpuFeatures::Scope scope(VFP3); - - Label base_not_smi; - Label exponent_not_smi; - Label convert_exponent; - - const Register base = r0; - const Register exponent = r1; - const Register heapnumbermap = r5; - const Register heapnumber = r6; - const DoubleRegister double_base = d0; - const DoubleRegister double_exponent = d1; - const DoubleRegister double_result = d2; - const SwVfpRegister single_scratch = s0; - const Register scratch = r9; - const Register scratch2 = r7; - - __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex); + CpuFeatures::Scope vfp3_scope(VFP3); + const Register base = r1; + const Register exponent = r2; + const Register heapnumbermap = r5; + const Register heapnumber = r0; + const DoubleRegister double_base = d1; + const DoubleRegister double_exponent = d2; + const DoubleRegister double_result = d3; + const DoubleRegister double_scratch = d0; + const SwVfpRegister single_scratch = s0; + const Register scratch = r9; + const Register scratch2 = r7; + + Label call_runtime, done, int_exponent; + if (exponent_type_ == ON_STACK) { + Label base_is_smi, unpack_exponent; + // The exponent and base are supplied as arguments on the stack. + // This can only happen if the stub is called from non-optimized code. + // Load input parameters from stack to double registers. __ ldr(base, MemOperand(sp, 1 * kPointerSize)); __ ldr(exponent, MemOperand(sp, 0 * kPointerSize)); - // Convert base to double value and store it in d0. - __ JumpIfNotSmi(base, &base_not_smi); - // Base is a Smi. Untag and convert it. - __ SmiUntag(base); - __ vmov(single_scratch, base); - __ vcvt_f64_s32(double_base, single_scratch); - __ b(&convert_exponent); + __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex); - __ bind(&base_not_smi); + __ UntagAndJumpIfSmi(scratch, base, &base_is_smi); __ ldr(scratch, FieldMemOperand(base, JSObject::kMapOffset)); __ cmp(scratch, heapnumbermap); __ b(ne, &call_runtime); - // Base is a heapnumber. Load it into double register. 
+ __ vldr(double_base, FieldMemOperand(base, HeapNumber::kValueOffset)); + __ jmp(&unpack_exponent); - __ bind(&convert_exponent); - __ JumpIfNotSmi(exponent, &exponent_not_smi); - __ SmiUntag(exponent); - - // The base is in a double register and the exponent is - // an untagged smi. Allocate a heap number and call a - // C function for integer exponents. The register containing - // the heap number is callee-saved. - __ AllocateHeapNumber(heapnumber, - scratch, - scratch2, - heapnumbermap, - &call_runtime); - __ push(lr); - __ PrepareCallCFunction(1, 1, scratch); - __ SetCallCDoubleArguments(double_base, exponent); - __ CallCFunction( - ExternalReference::power_double_int_function(masm->isolate()), - 1, 1); - __ pop(lr); - __ GetCFunctionDoubleResult(double_result); - __ vstr(double_result, - FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); - __ mov(r0, heapnumber); - __ Ret(2 * kPointerSize); + __ bind(&base_is_smi); + __ vmov(single_scratch, scratch); + __ vcvt_f64_s32(double_base, single_scratch); + __ bind(&unpack_exponent); + + __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent); - __ bind(&exponent_not_smi); __ ldr(scratch, FieldMemOperand(exponent, JSObject::kMapOffset)); __ cmp(scratch, heapnumbermap); __ b(ne, &call_runtime); - // Exponent is a heapnumber. Load it into double register. __ vldr(double_exponent, FieldMemOperand(exponent, HeapNumber::kValueOffset)); + } else if (exponent_type_ == TAGGED) { + // Base is already in double_base. + __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent); + + __ vldr(double_exponent, + FieldMemOperand(exponent, HeapNumber::kValueOffset)); + } + + if (exponent_type_ != INTEGER) { + Label int_exponent_convert; + // Detect integer exponents stored as double. + __ vcvt_u32_f64(single_scratch, double_exponent); + // We do not check for NaN or Infinity here because comparing numbers on + // ARM correctly distinguishes NaNs. We end up calling the built-in. + __ vcvt_f64_u32(double_scratch, single_scratch); + __ VFPCompareAndSetFlags(double_scratch, double_exponent); + __ b(eq, &int_exponent_convert); + + if (exponent_type_ == ON_STACK) { + // Detect square root case. Crankshaft detects constant +/-0.5 at + // compile time and uses DoMathPowHalf instead. We then skip this check + // for non-constant cases of +/-0.5 as these hardly occur. + Label not_plus_half; + + // Test for 0.5. + __ vmov(double_scratch, 0.5); + __ VFPCompareAndSetFlags(double_exponent, double_scratch); + __ b(ne, ¬_plus_half); + + // Calculates square root of base. Check for the special case of + // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13). + __ vmov(double_scratch, -V8_INFINITY); + __ VFPCompareAndSetFlags(double_base, double_scratch); + __ vneg(double_result, double_scratch, eq); + __ b(eq, &done); + + // Add +0 to convert -0 to +0. + __ vadd(double_scratch, double_base, kDoubleRegZero); + __ vsqrt(double_result, double_scratch); + __ jmp(&done); + + __ bind(¬_plus_half); + __ vmov(double_scratch, -0.5); + __ VFPCompareAndSetFlags(double_exponent, double_scratch); + __ b(ne, &call_runtime); + + // Calculates square root of base. Check for the special case of + // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13). + __ vmov(double_scratch, -V8_INFINITY); + __ VFPCompareAndSetFlags(double_base, double_scratch); + __ vmov(double_result, kDoubleRegZero, eq); + __ b(eq, &done); + + // Add +0 to convert -0 to +0. 
+ __ vadd(double_scratch, double_base, kDoubleRegZero); + __ vmov(double_result, 1); + __ vsqrt(double_scratch, double_scratch); + __ vdiv(double_result, double_result, double_scratch); + __ jmp(&done); + } - // The base and the exponent are in double registers. - // Allocate a heap number and call a C function for - // double exponents. The register containing - // the heap number is callee-saved. - __ AllocateHeapNumber(heapnumber, - scratch, - scratch2, - heapnumbermap, - &call_runtime); __ push(lr); - __ PrepareCallCFunction(0, 2, scratch); - __ SetCallCDoubleArguments(double_base, double_exponent); - __ CallCFunction( - ExternalReference::power_double_double_function(masm->isolate()), - 0, 2); + { + AllowExternalCallThatCantCauseGC scope(masm); + __ PrepareCallCFunction(0, 2, scratch); + __ SetCallCDoubleArguments(double_base, double_exponent); + __ CallCFunction( + ExternalReference::power_double_double_function(masm->isolate()), + 0, 2); + } __ pop(lr); __ GetCFunctionDoubleResult(double_result); + __ jmp(&done); + + __ bind(&int_exponent_convert); + __ vcvt_u32_f64(single_scratch, double_exponent); + __ vmov(scratch, single_scratch); + } + + // Calculate power with integer exponent. + __ bind(&int_exponent); + + // Get two copies of exponent in the registers scratch and exponent. + if (exponent_type_ == INTEGER) { + __ mov(scratch, exponent); + } else { + // Exponent has previously been stored into scratch as untagged integer. + __ mov(exponent, scratch); + } + __ vmov(double_scratch, double_base); // Back up base. + __ vmov(double_result, 1.0); + + // Get absolute value of exponent. + __ cmp(scratch, Operand(0)); + __ mov(scratch2, Operand(0), LeaveCC, mi); + __ sub(scratch, scratch2, scratch, LeaveCC, mi); + + Label while_true; + __ bind(&while_true); + __ mov(scratch, Operand(scratch, ASR, 1), SetCC); + __ vmul(double_result, double_result, double_scratch, cs); + __ vmul(double_scratch, double_scratch, double_scratch, ne); + __ b(ne, &while_true); + + __ cmp(exponent, Operand(0)); + __ b(ge, &done); + __ vmov(double_scratch, 1.0); + __ vdiv(double_result, double_scratch, double_result); + // Test whether result is zero. Bail out to check for subnormal result. + // Due to subnormals, x^-y == (1/x)^y does not hold in all cases. + __ VFPCompareAndSetFlags(double_result, 0.0); + __ b(ne, &done); + // double_exponent may not containe the exponent value if the input was a + // smi. We set it with exponent value before bailing out. + __ vmov(single_scratch, exponent); + __ vcvt_f64_s32(double_exponent, single_scratch); + + // Returning or bailing out. + Counters* counters = masm->isolate()->counters(); + if (exponent_type_ == ON_STACK) { + // The arguments are still on the stack. + __ bind(&call_runtime); + __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1); + + // The stub is called from non-optimized code, which expects the result + // as heap number in exponent. 
+ __ bind(&done); + __ AllocateHeapNumber( + heapnumber, scratch, scratch2, heapnumbermap, &call_runtime); __ vstr(double_result, FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); - __ mov(r0, heapnumber); - __ Ret(2 * kPointerSize); - } + ASSERT(heapnumber.is(r0)); + __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2); + __ Ret(2); + } else { + __ push(lr); + { + AllowExternalCallThatCantCauseGC scope(masm); + __ PrepareCallCFunction(0, 2, scratch); + __ SetCallCDoubleArguments(double_base, double_exponent); + __ CallCFunction( + ExternalReference::power_double_double_function(masm->isolate()), + 0, 2); + } + __ pop(lr); + __ GetCFunctionDoubleResult(double_result); - __ bind(&call_runtime); - __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1); + __ bind(&done); + __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2); + __ Ret(); + } } @@ -3319,6 +3643,37 @@ bool CEntryStub::NeedsImmovableCode() { } +bool CEntryStub::IsPregenerated() { + return (!save_doubles_ || ISOLATE->fp_stubs_generated()) && + result_size_ == 1; +} + + +void CodeStub::GenerateStubsAheadOfTime() { + CEntryStub::GenerateAheadOfTime(); + WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(); + StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(); + RecordWriteStub::GenerateFixedRegStubsAheadOfTime(); +} + + +void CodeStub::GenerateFPStubs() { + CEntryStub save_doubles(1, kSaveFPRegs); + Handle<Code> code = save_doubles.GetCode(); + code->set_is_pregenerated(true); + StoreBufferOverflowStub stub(kSaveFPRegs); + stub.GetCode()->set_is_pregenerated(true); + code->GetIsolate()->set_fp_stubs_generated(true); +} + + +void CEntryStub::GenerateAheadOfTime() { + CEntryStub stub(1, kDontSaveFPRegs); + Handle<Code> code = stub.GetCode(); + code->set_is_pregenerated(true); +} + + void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { __ Throw(r0); } @@ -3430,8 +3785,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, __ b(eq, throw_out_of_memory_exception); // Retrieve the pending exception and clear the variable. - __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate))); - __ ldr(r3, MemOperand(ip)); + __ mov(r3, Operand(isolate->factory()->the_hole_value())); __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, isolate))); __ ldr(r0, MemOperand(ip)); @@ -3469,9 +3823,10 @@ void CEntryStub::Generate(MacroAssembler* masm) { __ sub(r6, r6, Operand(kPointerSize)); // Enter the exit frame that transitions from JavaScript to C++. + FrameScope scope(masm, StackFrame::MANUAL); __ EnterExitFrame(save_doubles_); - // Setup argc and the builtin function in callee-saved registers. + // Set up argc and the builtin function in callee-saved registers. __ mov(r4, Operand(r0)); __ mov(r5, Operand(r1)); @@ -3527,7 +3882,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { // r3: argc // [sp+0]: argv - Label invoke, exit; + Label invoke, handler_entry, exit; // Called from C, so do not pop argc and args on exit (preserve sp) // No need to save register-passed args @@ -3548,7 +3903,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { // r2: receiver // r3: argc - // Setup argv in r4. + // Set up argv in r4. 
int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize; if (CpuFeatures::IsSupported(VFP3)) { offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize; @@ -3571,7 +3926,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { __ ldr(r5, MemOperand(r5)); __ Push(r8, r7, r6, r5); - // Setup frame pointer for the frame to be pushed. + // Set up frame pointer for the frame to be pushed. __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); // If this is the outermost JS call, set js_entry_sp value. @@ -3590,31 +3945,33 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { __ bind(&cont); __ push(ip); - // Call a faked try-block that does the invoke. - __ bl(&invoke); - - // Caught exception: Store result (exception) in the pending - // exception field in the JSEnv and return a failure sentinel. - // Coming in here the fp will be invalid because the PushTryHandler below - // sets it to 0 to signal the existence of the JSEntry frame. + // Jump to a faked try block that does the invoke, with a faked catch + // block that sets the pending exception. + __ jmp(&invoke); + __ bind(&handler_entry); + handler_offset_ = handler_entry.pos(); + // Caught exception: Store result (exception) in the pending exception + // field in the JSEnv and return a failure sentinel. Coming in here the + // fp will be invalid because the PushTryHandler below sets it to 0 to + // signal the existence of the JSEntry frame. __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, isolate))); __ str(r0, MemOperand(ip)); __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception()))); __ b(&exit); - // Invoke: Link this frame into the handler chain. + // Invoke: Link this frame into the handler chain. There's only one + // handler block in this code object, so its index is 0. __ bind(&invoke); // Must preserve r0-r4, r5-r7 are available. - __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER); + __ PushTryHandler(StackHandler::JS_ENTRY, 0); // If an exception not caught by another handler occurs, this handler // returns control to the code after the bl(&invoke) above, which // restores all kCalleeSaved registers (including cp and fp) to their // saved values before returning a failure to C. // Clear any pending exceptions. - __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate))); - __ ldr(r5, MemOperand(ip)); + __ mov(r5, Operand(isolate->factory()->the_hole_value())); __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, isolate))); __ str(r5, MemOperand(ip)); @@ -3708,7 +4065,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) { const Register inline_site = r9; const Register scratch = r2; - const int32_t kDeltaToLoadBoolResult = 3 * kPointerSize; + const int32_t kDeltaToLoadBoolResult = 4 * kPointerSize; Label slow, loop, is_instance, is_not_instance, not_js_object; @@ -3725,11 +4082,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) { // real lookup and update the call site cache. if (!HasCallSiteInlineCheck()) { Label miss; - __ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex); - __ cmp(function, ip); + __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex); __ b(ne, &miss); - __ LoadRoot(ip, Heap::kInstanceofCacheMapRootIndex); - __ cmp(map, ip); + __ CompareRoot(map, Heap::kInstanceofCacheMapRootIndex); __ b(ne, &miss); __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); __ Ret(HasArgsInRegisters() ? 
0 : 2); @@ -3738,7 +4093,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) { } // Get the prototype of the function. - __ TryGetFunctionPrototype(function, prototype, scratch, &slow); + __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true); // Check that the function prototype is a JS object. __ JumpIfSmi(prototype, &slow); @@ -3759,7 +4114,8 @@ void InstanceofStub::Generate(MacroAssembler* masm) { __ sub(inline_site, lr, scratch); // Get the map location in scratch and patch it. __ GetRelocatedValueLocation(inline_site, scratch); - __ str(map, MemOperand(scratch)); + __ ldr(scratch, MemOperand(scratch)); + __ str(map, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset)); } // Register mapping: r3 is object map and r4 is function prototype. @@ -3851,10 +4207,11 @@ void InstanceofStub::Generate(MacroAssembler* masm) { } __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); } else { - __ EnterInternalFrame(); - __ Push(r0, r1); - __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ Push(r0, r1); + __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION); + } __ cmp(r0, Operand::Zero()); __ LoadRoot(r0, Heap::kTrueValueRootIndex, eq); __ LoadRoot(r0, Heap::kFalseValueRootIndex, ne); @@ -4027,7 +4384,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { __ str(r3, FieldMemOperand(r0, i)); } - // Setup the callee in-object property. + // Set up the callee in-object property. STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1); __ ldr(r3, MemOperand(sp, 2 * kPointerSize)); const int kCalleeOffset = JSObject::kHeaderSize + @@ -4040,7 +4397,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { Heap::kArgumentsLengthIndex * kPointerSize; __ str(r2, FieldMemOperand(r0, kLengthOffset)); - // Setup the elements pointer in the allocated arguments object. + // Set up the elements pointer in the allocated arguments object. // If we allocated a parameter map, r4 will point there, otherwise // it will point to the backing store. __ add(r4, r0, Operand(Heap::kArgumentsObjectSize)); @@ -4135,7 +4492,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { __ Ret(); // Do the runtime call to allocate the arguments object. - // r2 = argument count (taggged) + // r2 = argument count (tagged) __ bind(&runtime); __ str(r2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count. __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); @@ -4208,7 +4565,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { // Get the parameters pointer from the stack. __ ldr(r2, MemOperand(sp, 1 * kPointerSize)); - // Setup the elements pointer in the allocated arguments object and + // Set up the elements pointer in the allocated arguments object and // initialize the header in the elements fixed array. __ add(r4, r0, Operand(Heap::kArgumentsObjectSizeStrict)); __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset)); @@ -4220,7 +4577,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { // Copy the fixed array slots. Label loop; - // Setup r4 to point to the first array slot. + // Set up r4 to point to the first array slot. __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); __ bind(&loop); // Pre-decrement r2 with kPointerSize on each iteration. 
@@ -4250,10 +4607,6 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { #ifdef V8_INTERPRETED_REGEXP __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); #else // V8_INTERPRETED_REGEXP - if (!FLAG_regexp_entry_native) { - __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); - return; - } // Stack frame on entry. // sp[0]: last_match_info (expected JSArray) @@ -4285,7 +4638,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { ExternalReference::address_of_regexp_stack_memory_size(isolate); __ mov(r0, Operand(address_of_regexp_stack_memory_size)); __ ldr(r0, MemOperand(r0, 0)); - __ tst(r0, Operand(r0)); + __ cmp(r0, Operand(0)); __ b(eq, &runtime); // Check that the first argument is a JSRegExp object. @@ -4356,8 +4709,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ ldr(last_match_info_elements, FieldMemOperand(r0, JSArray::kElementsOffset)); __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset)); - __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); - __ cmp(r0, ip); + __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex); __ b(ne, &runtime); // Check that the last match info has space for the capture registers and the // additional information. @@ -4375,25 +4727,39 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { Label seq_string; __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset)); __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset)); - // First check for flat string. - __ and_(r1, r0, Operand(kIsNotStringMask | kStringRepresentationMask), SetCC); + // First check for flat string. None of the following string type tests will + // succeed if subject is not a string or a short external string. + __ and_(r1, + r0, + Operand(kIsNotStringMask | + kStringRepresentationMask | + kShortExternalStringMask), + SetCC); STATIC_ASSERT((kStringTag | kSeqStringTag) == 0); __ b(eq, &seq_string); // subject: Subject string // regexp_data: RegExp data (FixedArray) + // r1: whether subject is a string and if yes, its string representation // Check for flat cons string or sliced string. // A flat cons string is a cons string where the second part is the empty // string. In that case the subject string is just the first part of the cons // string. Also in this case the first part of the cons string is known to be // a sequential string or an external string. // In the case of a sliced string its offset has to be taken into account. - Label cons_string, check_encoding; + Label cons_string, external_string, check_encoding; STATIC_ASSERT(kConsStringTag < kExternalStringTag); STATIC_ASSERT(kSlicedStringTag > kExternalStringTag); + STATIC_ASSERT(kIsNotStringMask > kExternalStringTag); + STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag); __ cmp(r1, Operand(kExternalStringTag)); __ b(lt, &cons_string); - __ b(eq, &runtime); + __ b(eq, &external_string); + + // Catch non-string subject or short external string. + STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0); + __ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask)); + __ b(ne, &runtime); // String is sliced. __ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset)); @@ -4404,8 +4770,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // String is a cons string, check whether it is flat. 
__ bind(&cons_string); __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset)); - __ LoadRoot(r1, Heap::kEmptyStringRootIndex); - __ cmp(r0, r1); + __ CompareRoot(r0, Heap::kEmptyStringRootIndex); __ b(ne, &runtime); __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset)); // Is first part of cons or parent of slice a flat string? @@ -4414,7 +4779,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset)); STATIC_ASSERT(kSeqStringTag == 0); __ tst(r0, Operand(kStringRepresentationMask)); - __ b(ne, &runtime); + __ b(ne, &external_string); + __ bind(&seq_string); // subject: Subject string // regexp_data: RegExp data (FixedArray) @@ -4480,8 +4846,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // For arguments 4 and 3 get string length, calculate start of string data and // calculate the shift of the index (0 for ASCII and 1 for two byte). - STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize); - __ add(r8, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ add(r8, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag)); __ eor(r3, r3, Operand(1)); // Load the length from the original subject string from the previous stack // frame. Therefore we have to use fp, which points exactly to two pointer @@ -4532,8 +4897,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // stack overflow (on the backtrack stack) was detected in RegExp code but // haven't created the exception yet. Handle that in the runtime system. // TODO(592): Rerunning the RegExp to get the stack overflow exception. - __ mov(r1, Operand(ExternalReference::the_hole_value_location(isolate))); - __ ldr(r1, MemOperand(r1, 0)); + __ mov(r1, Operand(isolate->factory()->the_hole_value())); __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress, isolate))); __ ldr(r0, MemOperand(r2, 0)); @@ -4575,16 +4939,25 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ str(r2, FieldMemOperand(last_match_info_elements, RegExpImpl::kLastCaptureCountOffset)); // Store last subject and last input. - __ mov(r3, last_match_info_elements); // Moved up to reduce latency. __ str(subject, FieldMemOperand(last_match_info_elements, RegExpImpl::kLastSubjectOffset)); - __ RecordWrite(r3, Operand(RegExpImpl::kLastSubjectOffset), r2, r7); + __ mov(r2, subject); + __ RecordWriteField(last_match_info_elements, + RegExpImpl::kLastSubjectOffset, + r2, + r7, + kLRHasNotBeenSaved, + kDontSaveFPRegs); __ str(subject, FieldMemOperand(last_match_info_elements, RegExpImpl::kLastInputOffset)); - __ mov(r3, last_match_info_elements); - __ RecordWrite(r3, Operand(RegExpImpl::kLastInputOffset), r2, r7); + __ RecordWriteField(last_match_info_elements, + RegExpImpl::kLastInputOffset, + subject, + r7, + kLRHasNotBeenSaved, + kDontSaveFPRegs); // Get the static offsets vector filled by the native regexp code. ExternalReference address_of_static_offsets_vector = @@ -4615,6 +4988,26 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ add(sp, sp, Operand(4 * kPointerSize)); __ Ret(); + // External string. Short external strings have already been ruled out. + // r0: scratch + __ bind(&external_string); + __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset)); + __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset)); + if (FLAG_debug_code) { + // Assert that we do not have a cons or slice (indirect strings) here. + // Sequential strings have already been ruled out. 
+ __ tst(r0, Operand(kIsIndirectStringMask)); + __ Assert(eq, "external string expected, but not found"); + } + __ ldr(subject, + FieldMemOperand(subject, ExternalString::kResourceDataOffset)); + // Move the pointer so that offset-wise, it looks like a sequential string. + STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize); + __ sub(subject, + subject, + Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); + __ jmp(&seq_string); + // Do the runtime call to execute the regexp. __ bind(&runtime); __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); @@ -4670,11 +5063,11 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) { // Set input, index and length fields from arguments. __ ldr(r1, MemOperand(sp, kPointerSize * 0)); + __ ldr(r2, MemOperand(sp, kPointerSize * 1)); + __ ldr(r6, MemOperand(sp, kPointerSize * 2)); __ str(r1, FieldMemOperand(r0, JSRegExpResult::kInputOffset)); - __ ldr(r1, MemOperand(sp, kPointerSize * 1)); - __ str(r1, FieldMemOperand(r0, JSRegExpResult::kIndexOffset)); - __ ldr(r1, MemOperand(sp, kPointerSize * 2)); - __ str(r1, FieldMemOperand(r0, JSArray::kLengthOffset)); + __ str(r2, FieldMemOperand(r0, JSRegExpResult::kIndexOffset)); + __ str(r6, FieldMemOperand(r0, JSArray::kLengthOffset)); // Fill out the elements FixedArray. // r0: JSArray, tagged. @@ -4696,9 +5089,9 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) { // r3: Start of elements in FixedArray. // r5: Number of elements to fill. Label loop; - __ tst(r5, Operand(r5)); + __ cmp(r5, Operand(0)); __ bind(&loop); - __ b(le, &done); // Jump if r1 is negative or zero. + __ b(le, &done); // Jump if r5 is negative or zero. __ sub(r5, r5, Operand(1), SetCC); __ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2)); __ jmp(&loop); @@ -4712,7 +5105,48 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) { } +static void GenerateRecordCallTarget(MacroAssembler* masm) { + // Cache the called function in a global property cell. Cache states + // are uninitialized, monomorphic (indicated by a JSFunction), and + // megamorphic. + // r1 : the function to call + // r2 : cache cell for call target + Label done; + + ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()), + masm->isolate()->heap()->undefined_value()); + ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()), + masm->isolate()->heap()->the_hole_value()); + + // Load the cache state into r3. + __ ldr(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset)); + + // A monomorphic cache hit or an already megamorphic state: invoke the + // function without changing the state. + __ cmp(r3, r1); + __ b(eq, &done); + __ CompareRoot(r3, Heap::kUndefinedValueRootIndex); + __ b(eq, &done); + + // A monomorphic miss (i.e, here the cache is not uninitialized) goes + // megamorphic. + __ CompareRoot(r3, Heap::kTheHoleValueRootIndex); + // MegamorphicSentinel is an immortal immovable object (undefined) so no + // write-barrier is needed. + __ LoadRoot(ip, Heap::kUndefinedValueRootIndex, ne); + __ str(ip, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset), ne); + + // An uninitialized cache is patched with the function. + __ str(r1, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset), eq); + // No need for a write barrier here - cells are rescanned. 
+ + __ bind(&done); +} + + void CallFunctionStub::Generate(MacroAssembler* masm) { + // r1 : the function to call + // r2 : cache cell for call target Label slow, non_function; // The receiver might implicitly be the global object. This is @@ -4727,16 +5161,12 @@ void CallFunctionStub::Generate(MacroAssembler* masm) { __ CompareRoot(r4, Heap::kTheHoleValueRootIndex); __ b(ne, &call); // Patch the receiver on the stack with the global receiver object. - __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); - __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset)); - __ str(r1, MemOperand(sp, argc_ * kPointerSize)); + __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); + __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset)); + __ str(r2, MemOperand(sp, argc_ * kPointerSize)); __ bind(&call); } - // Get the function to call from the stack. - // function, receiver [, arguments] - __ ldr(r1, MemOperand(sp, (argc_ + 1) * kPointerSize)); - // Check that the function is really a JavaScript function. // r1: pushed function (to be verified) __ JumpIfSmi(r1, &non_function); @@ -4774,7 +5204,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) { __ mov(r0, Operand(argc_ + 1, RelocInfo::NONE)); __ mov(r2, Operand(0, RelocInfo::NONE)); __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY); - __ SetCallKind(r5, CALL_AS_FUNCTION); + __ SetCallKind(r5, CALL_AS_METHOD); { Handle<Code> adaptor = masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(); @@ -4785,7 +5215,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) { // of the original receiver from the call site). __ bind(&non_function); __ str(r1, MemOperand(sp, argc_ * kPointerSize)); - __ mov(r0, Operand(argc_)); // Setup the number of arguments. + __ mov(r0, Operand(argc_)); // Set up the number of arguments. __ mov(r2, Operand(0, RelocInfo::NONE)); __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION); __ SetCallKind(r5, CALL_AS_METHOD); @@ -4794,6 +5224,48 @@ void CallFunctionStub::Generate(MacroAssembler* masm) { } +void CallConstructStub::Generate(MacroAssembler* masm) { + // r0 : number of arguments + // r1 : the function to call + // r2 : cache cell for call target + Label slow, non_function_call; + + // Check that the function is not a smi. + __ JumpIfSmi(r1, &non_function_call); + // Check that the function is a JSFunction. + __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE); + __ b(ne, &slow); + + if (RecordCallTarget()) { + GenerateRecordCallTarget(masm); + } + + // Jump to the function-specific construct stub. + __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); + __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kConstructStubOffset)); + __ add(pc, r2, Operand(Code::kHeaderSize - kHeapObjectTag)); + + // r0: number of arguments + // r1: called object + // r3: object type + Label do_call; + __ bind(&slow); + __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE)); + __ b(ne, &non_function_call); + __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR); + __ jmp(&do_call); + + __ bind(&non_function_call); + __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR); + __ bind(&do_call); + // Set expected number of arguments to zero (not changing r0). 
+ __ mov(r2, Operand(0, RelocInfo::NONE)); + __ SetCallKind(r5, CALL_AS_METHOD); + __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), + RelocInfo::CODE_TARGET); +} + + // Unfortunately you have to run without snapshots to see most of these // names in the profile since most compare stubs end up in the snapshot. void CompareStub::PrintName(StringStream* stream) { @@ -4855,100 +5327,41 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { // If the index is non-smi trigger the non-smi case. __ JumpIfNotSmi(index_, &index_not_smi_); - - // Put smi-tagged index into scratch register. - __ mov(scratch_, index_); __ bind(&got_smi_index_); // Check for index out of range. __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset)); - __ cmp(ip, Operand(scratch_)); + __ cmp(ip, Operand(index_)); __ b(ls, index_out_of_range_); - // We need special handling for non-flat strings. - STATIC_ASSERT(kSeqStringTag == 0); - __ tst(result_, Operand(kStringRepresentationMask)); - __ b(eq, &flat_string); + __ mov(index_, Operand(index_, ASR, kSmiTagSize)); - // Handle non-flat strings. - __ and_(result_, result_, Operand(kStringRepresentationMask)); - STATIC_ASSERT(kConsStringTag < kExternalStringTag); - STATIC_ASSERT(kSlicedStringTag > kExternalStringTag); - __ cmp(result_, Operand(kExternalStringTag)); - __ b(gt, &sliced_string); - __ b(eq, &call_runtime_); - - // ConsString. - // Check whether the right hand side is the empty string (i.e. if - // this is really a flat string in a cons string). If that is not - // the case we would rather go to the runtime system now to flatten - // the string. - Label assure_seq_string; - __ ldr(result_, FieldMemOperand(object_, ConsString::kSecondOffset)); - __ LoadRoot(ip, Heap::kEmptyStringRootIndex); - __ cmp(result_, Operand(ip)); - __ b(ne, &call_runtime_); - // Get the first of the two strings and load its instance type. - __ ldr(object_, FieldMemOperand(object_, ConsString::kFirstOffset)); - __ jmp(&assure_seq_string); - - // SlicedString, unpack and add offset. - __ bind(&sliced_string); - __ ldr(result_, FieldMemOperand(object_, SlicedString::kOffsetOffset)); - __ add(scratch_, scratch_, result_); - __ ldr(object_, FieldMemOperand(object_, SlicedString::kParentOffset)); + StringCharLoadGenerator::Generate(masm, + object_, + index_, + result_, + &call_runtime_); - // Assure that we are dealing with a sequential string. Go to runtime if not. - __ bind(&assure_seq_string); - __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); - __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); - // Check that parent is not an external string. Go to runtime otherwise. - STATIC_ASSERT(kSeqStringTag == 0); - __ tst(result_, Operand(kStringRepresentationMask)); - __ b(ne, &call_runtime_); - - // Check for 1-byte or 2-byte string. - __ bind(&flat_string); - STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0); - STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0); - __ tst(result_, Operand(kStringEncodingMask)); - __ b(ne, &ascii_string); - - // 2-byte string. - // Load the 2-byte character code into the result register. We can - // add without shifting since the smi tag size is the log2 of the - // number of bytes in a two-byte character. - STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0); - __ add(scratch_, object_, Operand(scratch_)); - __ ldrh(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize)); - __ jmp(&got_char_code); - - // ASCII string. 
- // Load the byte into the result register. - __ bind(&ascii_string); - __ add(scratch_, object_, Operand(scratch_, LSR, kSmiTagSize)); - __ ldrb(result_, FieldMemOperand(scratch_, SeqAsciiString::kHeaderSize)); - - __ bind(&got_char_code); __ mov(result_, Operand(result_, LSL, kSmiTagSize)); __ bind(&exit_); } void StringCharCodeAtGenerator::GenerateSlow( - MacroAssembler* masm, const RuntimeCallHelper& call_helper) { + MacroAssembler* masm, + const RuntimeCallHelper& call_helper) { __ Abort("Unexpected fallthrough to CharCodeAt slow case"); // Index is not a smi. __ bind(&index_not_smi_); // If index is a heap number, try converting it to an integer. __ CheckMap(index_, - scratch_, + result_, Heap::kHeapNumberMapRootIndex, index_not_number_, DONT_DO_SMI_CHECK); call_helper.BeforeCall(masm); - __ Push(object_, index_); + __ push(object_); __ push(index_); // Consumed by runtime conversion function. if (index_flags_ == STRING_INDEX_IS_NUMBER) { __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1); @@ -4959,15 +5372,14 @@ void StringCharCodeAtGenerator::GenerateSlow( } // Save the conversion result before the pop instructions below // have a chance to overwrite it. - __ Move(scratch_, r0); - __ pop(index_); + __ Move(index_, r0); __ pop(object_); // Reload the instance type. __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); call_helper.AfterCall(masm); // If index is still not a smi, it must be out of range. - __ JumpIfNotSmi(scratch_, index_out_of_range_); + __ JumpIfNotSmi(index_, index_out_of_range_); // Otherwise, return to the fast path. __ jmp(&got_smi_index_); @@ -4976,6 +5388,7 @@ void StringCharCodeAtGenerator::GenerateSlow( // is too complex (e.g., when the string needs to be flattened). __ bind(&call_runtime_); call_helper.BeforeCall(masm); + __ mov(index_, Operand(index_, LSL, kSmiTagSize)); __ Push(object_, index_); __ CallRuntime(Runtime::kStringCharCodeAt, 2); __ Move(result_, r0); @@ -5004,15 +5417,15 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { STATIC_ASSERT(kSmiTag == 0); __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize)); __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize)); - __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); - __ cmp(result_, Operand(ip)); + __ CompareRoot(result_, Heap::kUndefinedValueRootIndex); __ b(eq, &slow_case_); __ bind(&exit_); } void StringCharFromCodeGenerator::GenerateSlow( - MacroAssembler* masm, const RuntimeCallHelper& call_helper) { + MacroAssembler* masm, + const RuntimeCallHelper& call_helper) { __ Abort("Unexpected fallthrough to CharFromCode slow case"); __ bind(&slow_case_); @@ -5037,7 +5450,8 @@ void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) { void StringCharAtGenerator::GenerateSlow( - MacroAssembler* masm, const RuntimeCallHelper& call_helper) { + MacroAssembler* masm, + const RuntimeCallHelper& call_helper) { char_code_at_generator_.GenerateSlow(masm, call_helper); char_from_code_generator_.GenerateSlow(masm, call_helper); } @@ -5321,11 +5735,11 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, __ cmp(undefined, candidate); __ b(eq, not_found); - // Must be null (deleted entry). + // Must be the hole (deleted entry). 
if (FLAG_debug_code) { - __ LoadRoot(ip, Heap::kNullValueRootIndex); + __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); __ cmp(ip, candidate); - __ Assert(eq, "oddball in symbol table is not undefined or null"); + __ Assert(eq, "oddball in symbol table is not undefined or the hole"); } __ jmp(&next_probe[i]); @@ -5421,37 +5835,24 @@ void SubStringStub::Generate(MacroAssembler* masm) { static const int kFromOffset = 1 * kPointerSize; static const int kStringOffset = 2 * kPointerSize; - // Check bounds and smi-ness. - Register to = r6; - Register from = r7; - - __ Ldrd(to, from, MemOperand(sp, kToOffset)); + __ Ldrd(r2, r3, MemOperand(sp, kToOffset)); STATIC_ASSERT(kFromOffset == kToOffset + 4); STATIC_ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); // I.e., arithmetic shift right by one un-smi-tags. - __ mov(r2, Operand(to, ASR, 1), SetCC); - __ mov(r3, Operand(from, ASR, 1), SetCC, cc); + __ mov(r2, Operand(r2, ASR, 1), SetCC); + __ mov(r3, Operand(r3, ASR, 1), SetCC, cc); // If either to or from had the smi tag bit set, then carry is set now. __ b(cs, &runtime); // Either "from" or "to" is not a smi. - __ b(mi, &runtime); // From is negative. - - // Both to and from are smis. - __ sub(r2, r2, Operand(r3), SetCC); + // We want to bailout to runtime here if From is negative. In that case, the + // next instruction is not executed and we fall through to bailing out to + // runtime. pl is the opposite of mi. + // Both r2 and r3 are untagged integers. + __ sub(r2, r2, Operand(r3), SetCC, pl); __ b(mi, &runtime); // Fail if from > to. - // Special handling of sub-strings of length 1 and 2. One character strings - // are handled in the runtime system (looked up in the single character - // cache). Two character strings are looked for in the symbol cache in - // generated code. - __ cmp(r2, Operand(2)); - __ b(lt, &runtime); - // r2: result string length - // r3: from index (untagged smi) - // r6 (a.k.a. to): to (smi) - // r7 (a.k.a. from): from offset (smi) - // Make sure first argument is a sequential (or flat) string. + // Make sure first argument is a string. __ ldr(r0, MemOperand(sp, kStringOffset)); STATIC_ASSERT(kSmiTag == 0); __ JumpIfSmi(r0, &runtime); @@ -5466,67 +5867,15 @@ void SubStringStub::Generate(MacroAssembler* masm) { __ cmp(r2, Operand(r4, ASR, 1)); __ b(eq, &return_r0); - Label create_slice; - if (FLAG_string_slices) { - __ cmp(r2, Operand(SlicedString::kMinLength)); - __ b(ge, &create_slice); - } - - // r0: original string - // r1: instance type - // r2: result string length - // r3: from index (untagged smi) - // r6 (a.k.a. to): to (smi) - // r7 (a.k.a. from): from offset (smi) - Label seq_string; - __ and_(r4, r1, Operand(kStringRepresentationMask)); - STATIC_ASSERT(kSeqStringTag < kConsStringTag); - STATIC_ASSERT(kConsStringTag < kExternalStringTag); - STATIC_ASSERT(kConsStringTag < kSlicedStringTag); - __ cmp(r4, Operand(kConsStringTag)); - __ b(gt, &runtime); // Slices and external strings go to runtime. - __ b(lt, &seq_string); // Sequential strings are handled directly. - - // Cons string. Try to recurse (once) on the first substring. - // (This adds a little more generality than necessary to handle flattened - // cons strings, but not much). 
- __ ldr(r0, FieldMemOperand(r0, ConsString::kFirstOffset)); - __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); - __ ldrb(r1, FieldMemOperand(r4, Map::kInstanceTypeOffset)); - __ tst(r1, Operand(kStringRepresentationMask)); - STATIC_ASSERT(kSeqStringTag == 0); - __ b(ne, &runtime); // Cons, slices and external strings go to runtime. - - // Definitly a sequential string. - __ bind(&seq_string); - - // r0: original string - // r1: instance type - // r2: result string length - // r3: from index (untagged smi) - // r6 (a.k.a. to): to (smi) - // r7 (a.k.a. from): from offset (smi) - __ ldr(r4, FieldMemOperand(r0, String::kLengthOffset)); - __ cmp(r4, Operand(to)); - __ b(lt, &runtime); // Fail if to > length. - to = no_reg; - - // r0: original string or left hand side of the original cons string. - // r1: instance type - // r2: result string length - // r3: from index (untagged smi) - // r7 (a.k.a. from): from offset (smi) - // Check for flat ASCII string. - Label non_ascii_flat; - __ tst(r1, Operand(kStringEncodingMask)); - STATIC_ASSERT(kTwoByteStringTag == 0); - __ b(eq, &non_ascii_flat); - Label result_longer_than_two; + // Check for special case of two character ASCII string, in which case + // we do a lookup in the symbol table first. __ cmp(r2, Operand(2)); __ b(gt, &result_longer_than_two); + __ b(lt, &runtime); + + __ JumpIfInstanceTypeIsNotSequentialAscii(r1, r1, &runtime); - // Sub string of length 2 requested. // Get the two characters forming the sub string. __ add(r0, r0, Operand(r3)); __ ldrb(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize)); @@ -5536,7 +5885,6 @@ void SubStringStub::Generate(MacroAssembler* masm) { Label make_two_character_string; StringHelper::GenerateTwoCharacterSymbolTableProbe( masm, r3, r4, r1, r5, r6, r7, r9, &make_two_character_string); - Counters* counters = masm->isolate()->counters(); __ jmp(&return_r0); // r2: result string length. @@ -5547,18 +5895,114 @@ void SubStringStub::Generate(MacroAssembler* masm) { __ jmp(&return_r0); __ bind(&result_longer_than_two); + // Deal with different string types: update the index if necessary + // and put the underlying string into r5. + // r0: original string + // r1: instance type + // r2: length + // r3: from index (untagged) + Label underlying_unpacked, sliced_string, seq_or_external_string; + // If the string is not indirect, it can only be sequential or external. + STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag)); + STATIC_ASSERT(kIsIndirectStringMask != 0); + __ tst(r1, Operand(kIsIndirectStringMask)); + __ b(eq, &seq_or_external_string); + + __ tst(r1, Operand(kSlicedNotConsMask)); + __ b(ne, &sliced_string); + // Cons string. Check whether it is flat, then fetch first part. + __ ldr(r5, FieldMemOperand(r0, ConsString::kSecondOffset)); + __ CompareRoot(r5, Heap::kEmptyStringRootIndex); + __ b(ne, &runtime); + __ ldr(r5, FieldMemOperand(r0, ConsString::kFirstOffset)); + // Update instance type. + __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset)); + __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset)); + __ jmp(&underlying_unpacked); - // Locate 'from' character of string. - __ add(r5, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); - __ add(r5, r5, Operand(from, ASR, 1)); + __ bind(&sliced_string); + // Sliced string. Fetch parent and correct start index by offset. 
+ __ ldr(r4, FieldMemOperand(r0, SlicedString::kOffsetOffset)); + __ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset)); + __ add(r3, r3, Operand(r4, ASR, 1)); // Add offset to index. + // Update instance type. + __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset)); + __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset)); + __ jmp(&underlying_unpacked); - // Allocate the result. - __ AllocateAsciiString(r0, r2, r3, r4, r1, &runtime); + __ bind(&seq_or_external_string); + // Sequential or external string. Just move string to the expected register. + __ mov(r5, r0); - // r0: result string - // r2: result string length - // r5: first character of substring to copy - // r7 (a.k.a. from): from offset (smi) + __ bind(&underlying_unpacked); + + if (FLAG_string_slices) { + Label copy_routine; + // r5: underlying subject string + // r1: instance type of underlying subject string + // r2: length + // r3: adjusted start index (untagged) + __ cmp(r2, Operand(SlicedString::kMinLength)); + // Short slice. Copy instead of slicing. + __ b(lt, ©_routine); + // Allocate new sliced string. At this point we do not reload the instance + // type including the string encoding because we simply rely on the info + // provided by the original string. It does not matter if the original + // string's encoding is wrong because we always have to recheck encoding of + // the newly created string's parent anyways due to externalized strings. + Label two_byte_slice, set_slice_header; + STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0); + STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0); + __ tst(r1, Operand(kStringEncodingMask)); + __ b(eq, &two_byte_slice); + __ AllocateAsciiSlicedString(r0, r2, r6, r7, &runtime); + __ jmp(&set_slice_header); + __ bind(&two_byte_slice); + __ AllocateTwoByteSlicedString(r0, r2, r6, r7, &runtime); + __ bind(&set_slice_header); + __ mov(r3, Operand(r3, LSL, 1)); + __ str(r3, FieldMemOperand(r0, SlicedString::kOffsetOffset)); + __ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset)); + __ jmp(&return_r0); + + __ bind(©_routine); + } + + // r5: underlying subject string + // r1: instance type of underlying subject string + // r2: length + // r3: adjusted start index (untagged) + Label two_byte_sequential, sequential_string, allocate_result; + STATIC_ASSERT(kExternalStringTag != 0); + STATIC_ASSERT(kSeqStringTag == 0); + __ tst(r1, Operand(kExternalStringTag)); + __ b(eq, &sequential_string); + + // Handle external string. + // Rule out short external strings. + STATIC_CHECK(kShortExternalStringTag != 0); + __ tst(r1, Operand(kShortExternalStringTag)); + __ b(ne, &runtime); + __ ldr(r5, FieldMemOperand(r5, ExternalString::kResourceDataOffset)); + // r5 already points to the first character of underlying string. + __ jmp(&allocate_result); + + __ bind(&sequential_string); + // Locate first character of underlying subject string. + STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize); + __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + + __ bind(&allocate_result); + // Sequential acii string. Allocate the result. + STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0); + __ tst(r1, Operand(kStringEncodingMask)); + __ b(eq, &two_byte_sequential); + + // Allocate and copy the resulting ASCII string. + __ AllocateAsciiString(r0, r2, r4, r6, r7, &runtime); + + // Locate first character of substring to copy. + __ add(r5, r5, r3); // Locate first character of result. 
__ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); @@ -5571,30 +6015,16 @@ void SubStringStub::Generate(MacroAssembler* masm) { COPY_ASCII | DEST_ALWAYS_ALIGNED); __ jmp(&return_r0); - __ bind(&non_ascii_flat); - // r0: original string - // r2: result string length - // r7 (a.k.a. from): from offset (smi) - // Check for flat two byte string. + // Allocate and copy the resulting two-byte string. + __ bind(&two_byte_sequential); + __ AllocateTwoByteString(r0, r2, r4, r6, r7, &runtime); - // Locate 'from' character of string. - __ add(r5, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); - // As "from" is a smi it is 2 times the value which matches the size of a two - // byte character. + // Locate first character of substring to copy. STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); - __ add(r5, r5, Operand(from)); - - // Allocate the result. - __ AllocateTwoByteString(r0, r2, r1, r3, r4, &runtime); - - // r0: result string - // r2: result string length - // r5: first character of substring to copy + __ add(r5, r5, Operand(r3, LSL, 1)); // Locate first character of result. __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); - from = no_reg; - // r0: result string. // r1: first character of result. // r2: result length. @@ -5602,72 +6032,9 @@ void SubStringStub::Generate(MacroAssembler* masm) { STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); StringHelper::GenerateCopyCharactersLong( masm, r1, r5, r2, r3, r4, r6, r7, r9, DEST_ALWAYS_ALIGNED); - __ jmp(&return_r0); - - if (FLAG_string_slices) { - __ bind(&create_slice); - // r0: original string - // r1: instance type - // r2: length - // r3: from index (untagged smi) - // r6 (a.k.a. to): to (smi) - // r7 (a.k.a. from): from offset (smi) - Label allocate_slice, sliced_string, seq_string; - STATIC_ASSERT(kSeqStringTag == 0); - __ tst(r1, Operand(kStringRepresentationMask)); - __ b(eq, &seq_string); - STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag)); - STATIC_ASSERT(kIsIndirectStringMask != 0); - __ tst(r1, Operand(kIsIndirectStringMask)); - // External string. Jump to runtime. - __ b(eq, &runtime); - - __ tst(r1, Operand(kSlicedNotConsMask)); - __ b(ne, &sliced_string); - // Cons string. Check whether it is flat, then fetch first part. - __ ldr(r5, FieldMemOperand(r0, ConsString::kSecondOffset)); - __ LoadRoot(r9, Heap::kEmptyStringRootIndex); - __ cmp(r5, r9); - __ b(ne, &runtime); - __ ldr(r5, FieldMemOperand(r0, ConsString::kFirstOffset)); - __ jmp(&allocate_slice); - - __ bind(&sliced_string); - // Sliced string. Fetch parent and correct start index by offset. - __ ldr(r5, FieldMemOperand(r0, SlicedString::kOffsetOffset)); - __ add(r7, r7, r5); - __ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset)); - __ jmp(&allocate_slice); - - __ bind(&seq_string); - // Sequential string. Just move string to the right register. - __ mov(r5, r0); - - __ bind(&allocate_slice); - // r1: instance type of original string - // r2: length - // r5: underlying subject string - // r7 (a.k.a. from): from offset (smi) - // Allocate new sliced string. At this point we do not reload the instance - // type including the string encoding because we simply rely on the info - // provided by the original string. It does not matter if the original - // string's encoding is wrong because we always have to recheck encoding of - // the newly created string's parent anyways due to externalized strings. 
- Label two_byte_slice, set_slice_header; - STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0); - STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0); - __ tst(r1, Operand(kStringEncodingMask)); - __ b(eq, &two_byte_slice); - __ AllocateAsciiSlicedString(r0, r2, r3, r4, &runtime); - __ jmp(&set_slice_header); - __ bind(&two_byte_slice); - __ AllocateTwoByteSlicedString(r0, r2, r3, r4, &runtime); - __ bind(&set_slice_header); - __ str(r7, FieldMemOperand(r0, SlicedString::kOffsetOffset)); - __ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset)); - } __ bind(&return_r0); + Counters* counters = masm->isolate()->counters(); __ IncrementCounter(counters->sub_string_native(), 1, r3, r4); __ add(sp, sp, Operand(3 * kPointerSize)); __ Ret(); @@ -5700,7 +6067,7 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm, Label compare_chars; __ bind(&check_zero_length); STATIC_ASSERT(kSmiTag == 0); - __ tst(length, Operand(length)); + __ cmp(length, Operand(0)); __ b(ne, &compare_chars); __ mov(r0, Operand(Smi::FromInt(EQUAL))); __ Ret(); @@ -5733,7 +6100,7 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, __ mov(scratch1, scratch2, LeaveCC, gt); Register min_length = scratch1; STATIC_ASSERT(kSmiTag == 0); - __ tst(min_length, Operand(min_length)); + __ cmp(min_length, Operand(0)); __ b(eq, &compare_lengths); // Compare loop. @@ -5824,7 +6191,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) { void StringAddStub::Generate(MacroAssembler* masm) { - Label string_add_runtime, call_builtin; + Label call_runtime, call_builtin; Builtins::JavaScript builtin_id = Builtins::ADD; Counters* counters = masm->isolate()->counters(); @@ -5839,7 +6206,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { // Make sure that both arguments are strings if not known in advance. if (flags_ == NO_STRING_ADD_FLAGS) { - __ JumpIfEitherSmi(r0, r1, &string_add_runtime); + __ JumpIfEitherSmi(r0, r1, &call_runtime); // Load instance types. __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); @@ -5849,7 +6216,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { // If either is not a string, go to runtime. __ tst(r4, Operand(kIsNotStringMask)); __ tst(r5, Operand(kIsNotStringMask), eq); - __ b(ne, &string_add_runtime); + __ b(ne, &call_runtime); } else { // Here at least one of the arguments is definitely a string. // We convert the one that is not known to be a string. @@ -5918,7 +6285,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); } __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7, - &string_add_runtime); + &call_runtime); // Get the two characters forming the sub string. __ ldrb(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize)); @@ -5940,7 +6307,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { // halfword store instruction (which assumes that processor is // in a little endian mode) __ mov(r6, Operand(2)); - __ AllocateAsciiString(r0, r6, r4, r5, r9, &string_add_runtime); + __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime); __ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize)); __ IncrementCounter(counters->string_add_native(), 1, r2, r3); __ add(sp, sp, Operand(2 * kPointerSize)); @@ -5948,14 +6315,14 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ bind(&longer_than_two); // Check if resulting string will be flat. 
- __ cmp(r6, Operand(String::kMinNonFlatLength)); + __ cmp(r6, Operand(ConsString::kMinLength)); __ b(lt, &string_add_flat_result); // Handle exceptionally long strings in the runtime system. STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0); ASSERT(IsPowerOf2(String::kMaxLength + 1)); // kMaxLength + 1 is representable as shifted literal, kMaxLength is not. __ cmp(r6, Operand(String::kMaxLength + 1)); - __ b(hs, &string_add_runtime); + __ b(hs, &call_runtime); // If result is not supposed to be flat, allocate a cons string object. // If both strings are ASCII the result is an ASCII cons string. @@ -5973,7 +6340,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { // Allocate an ASCII cons string. __ bind(&ascii_data); - __ AllocateAsciiConsString(r7, r6, r4, r5, &string_add_runtime); + __ AllocateAsciiConsString(r7, r6, r4, r5, &call_runtime); __ bind(&allocated); // Fill the fields of the cons string. __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset)); @@ -5998,11 +6365,13 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ b(eq, &ascii_data); // Allocate a two byte cons string. - __ AllocateTwoByteConsString(r7, r6, r4, r5, &string_add_runtime); + __ AllocateTwoByteConsString(r7, r6, r4, r5, &call_runtime); __ jmp(&allocated); - // Handle creating a flat result. First check that both strings are - // sequential and that they have the same encoding. + // We cannot encounter sliced strings or cons strings here since: + STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength); + // Handle creating a flat result from either external or sequential strings. + // Locate the first characters' locations. // r0: first string // r1: second string // r2: length of first string @@ -6010,6 +6379,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS) // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS) // r6: sum of lengths. + Label first_prepared, second_prepared; __ bind(&string_add_flat_result); if (flags_ != NO_STRING_ADD_FLAGS) { __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); @@ -6017,97 +6387,88 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); } - // Check that both strings are sequential. + + // Check whether both strings have same encoding + __ eor(r7, r4, Operand(r5)); + __ tst(r7, Operand(kStringEncodingMask)); + __ b(ne, &call_runtime); + STATIC_ASSERT(kSeqStringTag == 0); __ tst(r4, Operand(kStringRepresentationMask)); - __ tst(r5, Operand(kStringRepresentationMask), eq); - __ b(ne, &string_add_runtime); - // Now check if both strings have the same encoding (ASCII/Two-byte). - // r0: first string. - // r1: second string. + STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize); + __ add(r7, + r0, + Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag), + LeaveCC, + eq); + __ b(eq, &first_prepared); + // External string: rule out short external string and load string resource. 
+ STATIC_ASSERT(kShortExternalStringTag != 0); + __ tst(r4, Operand(kShortExternalStringMask)); + __ b(ne, &call_runtime); + __ ldr(r7, FieldMemOperand(r0, ExternalString::kResourceDataOffset)); + __ bind(&first_prepared); + + STATIC_ASSERT(kSeqStringTag == 0); + __ tst(r5, Operand(kStringRepresentationMask)); + STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize); + __ add(r1, + r1, + Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag), + LeaveCC, + eq); + __ b(eq, &second_prepared); + // External string: rule out short external string and load string resource. + STATIC_ASSERT(kShortExternalStringTag != 0); + __ tst(r5, Operand(kShortExternalStringMask)); + __ b(ne, &call_runtime); + __ ldr(r1, FieldMemOperand(r1, ExternalString::kResourceDataOffset)); + __ bind(&second_prepared); + + Label non_ascii_string_add_flat_result; + // r7: first character of first string + // r1: first character of second string // r2: length of first string. // r3: length of second string. - // r6: sum of lengths.. - Label non_ascii_string_add_flat_result; - ASSERT(IsPowerOf2(kStringEncodingMask)); // Just one bit to test. - __ eor(r7, r4, Operand(r5)); - __ tst(r7, Operand(kStringEncodingMask)); - __ b(ne, &string_add_runtime); - // And see if it's ASCII or two-byte. - __ tst(r4, Operand(kStringEncodingMask)); + // r6: sum of lengths. + // Both strings have the same encoding. + STATIC_ASSERT(kTwoByteStringTag == 0); + __ tst(r5, Operand(kStringEncodingMask)); __ b(eq, &non_ascii_string_add_flat_result); - // Both strings are sequential ASCII strings. We also know that they are - // short (since the sum of the lengths is less than kMinNonFlatLength). - // r6: length of resulting flat string - __ AllocateAsciiString(r7, r6, r4, r5, r9, &string_add_runtime); - // Locate first character of result. - __ add(r6, r7, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); - // Locate first character of first argument. - __ add(r0, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); - // r0: first character of first string. - // r1: second string. + __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime); + __ add(r6, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + // r0: result string. + // r7: first character of first string. + // r1: first character of second string. // r2: length of first string. // r3: length of second string. // r6: first character of result. - // r7: result string. - StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, true); - - // Load second argument and locate first character. - __ add(r1, r1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); - // r1: first character of second string. - // r3: length of second string. + StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, true); // r6: next character of result. - // r7: result string. StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true); - __ mov(r0, Operand(r7)); __ IncrementCounter(counters->string_add_native(), 1, r2, r3); __ add(sp, sp, Operand(2 * kPointerSize)); __ Ret(); __ bind(&non_ascii_string_add_flat_result); - // Both strings are sequential two byte strings. - // r0: first string. - // r1: second string. - // r2: length of first string. - // r3: length of second string. - // r6: sum of length of strings. - __ AllocateTwoByteString(r7, r6, r4, r5, r9, &string_add_runtime); - // r0: first string. - // r1: second string. - // r2: length of first string. - // r3: length of second string. - // r7: result string. - - // Locate first character of result. 
- __ add(r6, r7, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); - // Locate first character of first argument. - __ add(r0, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); - - // r0: first character of first string. - // r1: second string. + __ AllocateTwoByteString(r0, r6, r4, r5, r9, &call_runtime); + __ add(r6, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); + // r0: result string. + // r7: first character of first string. + // r1: first character of second string. // r2: length of first string. // r3: length of second string. // r6: first character of result. - // r7: result string. - StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, false); - - // Locate first character of second argument. - __ add(r1, r1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); - - // r1: first character of second string. - // r3: length of second string. - // r6: next character of result (after copy of first string). - // r7: result string. + StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, false); + // r6: next character of result. StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false); - - __ mov(r0, Operand(r7)); __ IncrementCounter(counters->string_add_native(), 1, r2, r3); __ add(sp, sp, Operand(2 * kPointerSize)); __ Ret(); // Just jump to runtime to add the two strings. - __ bind(&string_add_runtime); + __ bind(&call_runtime); __ TailCallRuntime(Runtime::kStringAdd, 2, 1); if (call_builtin.is_linked()) { @@ -6359,25 +6720,47 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) { } +void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) { + Label miss; + __ and_(r2, r1, Operand(r0)); + __ JumpIfSmi(r2, &miss); + __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); + __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset)); + __ cmp(r2, Operand(known_map_)); + __ b(ne, &miss); + __ cmp(r3, Operand(known_map_)); + __ b(ne, &miss); + + __ sub(r0, r0, Operand(r1)); + __ Ret(); + + __ bind(&miss); + GenerateMiss(masm); +} + + + void ICCompareStub::GenerateMiss(MacroAssembler* masm) { - __ Push(r1, r0); - __ push(lr); + { + // Call the runtime system in a fresh internal frame. + ExternalReference miss = + ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate()); + + FrameScope scope(masm, StackFrame::INTERNAL); + __ Push(r1, r0); + __ push(lr); + __ Push(r1, r0); + __ mov(ip, Operand(Smi::FromInt(op_))); + __ push(ip); + __ CallExternalReference(miss, 3); + // Compute the entry point of the rewritten stub. + __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); + // Restore registers. + __ pop(lr); + __ pop(r0); + __ pop(r1); + } - // Call the runtime system in a fresh internal frame. - ExternalReference miss = - ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate()); - __ EnterInternalFrame(); - __ Push(r1, r0); - __ mov(ip, Operand(Smi::FromInt(op_))); - __ push(ip); - __ CallExternalReference(miss, 3); - __ LeaveInternalFrame(); - // Compute the entry point of the rewritten stub. - __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); - // Restore registers. 
- __ pop(lr); - __ pop(r0); - __ pop(r1); __ Jump(r2); } @@ -6410,14 +6793,13 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm, } -MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup( - MacroAssembler* masm, - Label* miss, - Label* done, - Register receiver, - Register properties, - String* name, - Register scratch0) { +void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, + Label* miss, + Label* done, + Register receiver, + Register properties, + Handle<String> name, + Register scratch0) { // If names of slots in range from 1 to kProbes - 1 for the hash value are // not equal to the name and kProbes-th slot is not used (its name is the // undefined value), it guarantees the hash table doesn't contain the @@ -6475,14 +6857,12 @@ MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup( __ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); __ mov(r1, Operand(Handle<String>(name))); StringDictionaryLookupStub stub(NEGATIVE_LOOKUP); - MaybeObject* result = masm->TryCallStub(&stub); - if (result->IsFailure()) return result; - __ tst(r0, Operand(r0)); + __ CallStub(&stub); + __ cmp(r0, Operand(0)); __ ldm(ia_w, sp, spill_mask); __ b(eq, done); __ b(ne, miss); - return result; } @@ -6497,6 +6877,11 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, Register name, Register scratch1, Register scratch2) { + ASSERT(!elements.is(scratch1)); + ASSERT(!elements.is(scratch2)); + ASSERT(!name.is(scratch1)); + ASSERT(!name.is(scratch2)); + // Assert that name contains a string. if (FLAG_debug_code) __ AbortIfNotString(name); @@ -6540,11 +6925,17 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, ~(scratch1.bit() | scratch2.bit()); __ stm(db_w, sp, spill_mask); - __ Move(r0, elements); - __ Move(r1, name); + if (name.is(r0)) { + ASSERT(!elements.is(r1)); + __ Move(r1, name); + __ Move(r0, elements); + } else { + __ Move(r0, elements); + __ Move(r1, name); + } StringDictionaryLookupStub stub(POSITIVE_LOOKUP); __ CallStub(&stub); - __ tst(r0, Operand(r0)); + __ cmp(r0, Operand(0)); __ mov(scratch2, Operand(r2)); __ ldm(ia_w, sp, spill_mask); @@ -6554,6 +6945,8 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { + // This stub overrides SometimesSetsUpAFrame() to return false. That means + // we cannot call anything that could cause a GC from this stub. // Registers: // result: StringDictionary to probe // r1: key @@ -6643,6 +7036,333 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { } +struct AheadOfTimeWriteBarrierStubList { + Register object, value, address; + RememberedSetAction action; +}; + + +struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = { + // Used in RegExpExecStub. + { r6, r4, r7, EMIT_REMEMBERED_SET }, + { r6, r2, r7, EMIT_REMEMBERED_SET }, + // Used in CompileArrayPushCall. + // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore. + // Also used in KeyedStoreIC::GenerateGeneric. + { r3, r4, r5, EMIT_REMEMBERED_SET }, + // Used in CompileStoreGlobal. + { r4, r1, r2, OMIT_REMEMBERED_SET }, + // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField. + { r1, r2, r3, EMIT_REMEMBERED_SET }, + { r3, r2, r1, EMIT_REMEMBERED_SET }, + // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField. 
+ { r2, r1, r3, EMIT_REMEMBERED_SET }, + { r3, r1, r2, EMIT_REMEMBERED_SET }, + // KeyedStoreStubCompiler::GenerateStoreFastElement. + { r4, r2, r3, EMIT_REMEMBERED_SET }, + // ElementsTransitionGenerator::GenerateSmiOnlyToObject + // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble + // and ElementsTransitionGenerator::GenerateDoubleToObject + { r2, r3, r9, EMIT_REMEMBERED_SET }, + // ElementsTransitionGenerator::GenerateDoubleToObject + { r6, r2, r0, EMIT_REMEMBERED_SET }, + { r2, r6, r9, EMIT_REMEMBERED_SET }, + // StoreArrayLiteralElementStub::Generate + { r5, r0, r6, EMIT_REMEMBERED_SET }, + // Null termination. + { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET} +}; + + +bool RecordWriteStub::IsPregenerated() { + for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime; + !entry->object.is(no_reg); + entry++) { + if (object_.is(entry->object) && + value_.is(entry->value) && + address_.is(entry->address) && + remembered_set_action_ == entry->action && + save_fp_regs_mode_ == kDontSaveFPRegs) { + return true; + } + } + return false; +} + + +bool StoreBufferOverflowStub::IsPregenerated() { + return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated(); +} + + +void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() { + StoreBufferOverflowStub stub1(kDontSaveFPRegs); + stub1.GetCode()->set_is_pregenerated(true); +} + + +void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() { + for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime; + !entry->object.is(no_reg); + entry++) { + RecordWriteStub stub(entry->object, + entry->value, + entry->address, + entry->action, + kDontSaveFPRegs); + stub.GetCode()->set_is_pregenerated(true); + } +} + + +// Takes the input in 3 registers: address_ value_ and object_. A pointer to +// the value has just been written into the object, now this stub makes sure +// we keep the GC informed. The word in the object where the value has been +// written is in the address register. +void RecordWriteStub::Generate(MacroAssembler* masm) { + Label skip_to_incremental_noncompacting; + Label skip_to_incremental_compacting; + + // The first two instructions are generated with labels so as to get the + // offset fixed up correctly by the bind(Label*) call. We patch it back and + // forth between a compare instructions (a nop in this position) and the + // real branch when we start and stop incremental heap marking. + // See RecordWriteStub::Patch for details. + __ b(&skip_to_incremental_noncompacting); + __ b(&skip_to_incremental_compacting); + + if (remembered_set_action_ == EMIT_REMEMBERED_SET) { + __ RememberedSetHelper(object_, + address_, + value_, + save_fp_regs_mode_, + MacroAssembler::kReturnAtEnd); + } + __ Ret(); + + __ bind(&skip_to_incremental_noncompacting); + GenerateIncremental(masm, INCREMENTAL); + + __ bind(&skip_to_incremental_compacting); + GenerateIncremental(masm, INCREMENTAL_COMPACTION); + + // Initial mode of the stub is expected to be STORE_BUFFER_ONLY. + // Will be checked in IncrementalMarking::ActivateGeneratedStub. 
+ ASSERT(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12)); + ASSERT(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12)); + PatchBranchIntoNop(masm, 0); + PatchBranchIntoNop(masm, Assembler::kInstrSize); +} + + +void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) { + regs_.Save(masm); + + if (remembered_set_action_ == EMIT_REMEMBERED_SET) { + Label dont_need_remembered_set; + + __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0)); + __ JumpIfNotInNewSpace(regs_.scratch0(), // Value. + regs_.scratch0(), + &dont_need_remembered_set); + + __ CheckPageFlag(regs_.object(), + regs_.scratch0(), + 1 << MemoryChunk::SCAN_ON_SCAVENGE, + ne, + &dont_need_remembered_set); + + // First notify the incremental marker if necessary, then update the + // remembered set. + CheckNeedsToInformIncrementalMarker( + masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode); + InformIncrementalMarker(masm, mode); + regs_.Restore(masm); + __ RememberedSetHelper(object_, + address_, + value_, + save_fp_regs_mode_, + MacroAssembler::kReturnAtEnd); + + __ bind(&dont_need_remembered_set); + } + + CheckNeedsToInformIncrementalMarker( + masm, kReturnOnNoNeedToInformIncrementalMarker, mode); + InformIncrementalMarker(masm, mode); + regs_.Restore(masm); + __ Ret(); +} + + +void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) { + regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_); + int argument_count = 3; + __ PrepareCallCFunction(argument_count, regs_.scratch0()); + Register address = + r0.is(regs_.address()) ? regs_.scratch0() : regs_.address(); + ASSERT(!address.is(regs_.object())); + ASSERT(!address.is(r0)); + __ Move(address, regs_.address()); + __ Move(r0, regs_.object()); + if (mode == INCREMENTAL_COMPACTION) { + __ Move(r1, address); + } else { + ASSERT(mode == INCREMENTAL); + __ ldr(r1, MemOperand(address, 0)); + } + __ mov(r2, Operand(ExternalReference::isolate_address())); + + AllowExternalCallThatCantCauseGC scope(masm); + if (mode == INCREMENTAL_COMPACTION) { + __ CallCFunction( + ExternalReference::incremental_evacuation_record_write_function( + masm->isolate()), + argument_count); + } else { + ASSERT(mode == INCREMENTAL); + __ CallCFunction( + ExternalReference::incremental_marking_record_write_function( + masm->isolate()), + argument_count); + } + regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_); +} + + +void RecordWriteStub::CheckNeedsToInformIncrementalMarker( + MacroAssembler* masm, + OnNoNeedToInformIncrementalMarker on_no_need, + Mode mode) { + Label on_black; + Label need_incremental; + Label need_incremental_pop_scratch; + + // Let's look at the color of the object: If it is not black we don't have + // to inform the incremental marker. + __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black); + + regs_.Restore(masm); + if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { + __ RememberedSetHelper(object_, + address_, + value_, + save_fp_regs_mode_, + MacroAssembler::kReturnAtEnd); + } else { + __ Ret(); + } + + __ bind(&on_black); + + // Get the value from the slot. + __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0)); + + if (mode == INCREMENTAL_COMPACTION) { + Label ensure_not_white; + + __ CheckPageFlag(regs_.scratch0(), // Contains value. + regs_.scratch1(), // Scratch. + MemoryChunk::kEvacuationCandidateMask, + eq, + &ensure_not_white); + + __ CheckPageFlag(regs_.object(), + regs_.scratch1(), // Scratch. 
+ MemoryChunk::kSkipEvacuationSlotsRecordingMask, + eq, + &need_incremental); + + __ bind(&ensure_not_white); + } + + // We need extra registers for this, so we push the object and the address + // register temporarily. + __ Push(regs_.object(), regs_.address()); + __ EnsureNotWhite(regs_.scratch0(), // The value. + regs_.scratch1(), // Scratch. + regs_.object(), // Scratch. + regs_.address(), // Scratch. + &need_incremental_pop_scratch); + __ Pop(regs_.object(), regs_.address()); + + regs_.Restore(masm); + if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { + __ RememberedSetHelper(object_, + address_, + value_, + save_fp_regs_mode_, + MacroAssembler::kReturnAtEnd); + } else { + __ Ret(); + } + + __ bind(&need_incremental_pop_scratch); + __ Pop(regs_.object(), regs_.address()); + + __ bind(&need_incremental); + + // Fall through when we need to inform the incremental marker. +} + + +void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- r0 : element value to store + // -- r1 : array literal + // -- r2 : map of array literal + // -- r3 : element index as smi + // -- r4 : array literal index in function as smi + // ----------------------------------- + + Label element_done; + Label double_elements; + Label smi_element; + Label slow_elements; + Label fast_elements; + + __ CheckFastElements(r2, r5, &double_elements); + // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS + __ JumpIfSmi(r0, &smi_element); + __ CheckFastSmiOnlyElements(r2, r5, &fast_elements); + + // Store into the array literal requires a elements transition. Call into + // the runtime. + __ bind(&slow_elements); + // call. + __ Push(r1, r3, r0); + __ ldr(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); + __ ldr(r5, FieldMemOperand(r5, JSFunction::kLiteralsOffset)); + __ Push(r5, r4); + __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1); + + // Array literal has ElementsKind of FAST_ELEMENTS and value is an object. + __ bind(&fast_elements); + __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset)); + __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ add(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ str(r0, MemOperand(r6, 0)); + // Update the write barrier for the array store. + __ RecordWrite(r5, r6, r0, kLRHasNotBeenSaved, kDontSaveFPRegs, + EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); + __ Ret(); + + // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or + // FAST_ELEMENTS, and value is Smi. + __ bind(&smi_element); + __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset)); + __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ str(r0, FieldMemOperand(r6, FixedArray::kHeaderSize)); + __ Ret(); + + // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS. + __ bind(&double_elements); + __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset)); + __ StoreNumberToDoubleElements(r0, r3, r1, r5, r6, r7, r9, r2, + &slow_elements); + __ Ret(); +} + #undef __ } } // namespace v8::internal |
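
Note on the smi arithmetic used above. The SubStringStub changes lean on V8's smi (small integer) tagging on 32-bit ARM: a smi is the integer shifted left by one with a zero low bit, so "mov(r2, Operand(r2, ASR, 1), SetCC)" untags the value while the shifted-out tag bit lands in the carry flag and the sign of the result sets the N flag, letting one instruction both untag and validate an argument. A minimal stand-alone sketch of that arithmetic (plain C++; the helper names are illustrative, not V8's):

#include <cassert>
#include <cstdint>

// Sketch of 32-bit smi tagging: the payload lives in the upper 31 bits and
// the low bit is the tag (0 for smis, 1 for heap object pointers).
const int kSmiTagSize = 1;
const intptr_t kSmiTagMask = 1;

static intptr_t SmiTag(int32_t value) {
  // Tagging is a left shift by one; the tag bit is left as zero.
  return static_cast<intptr_t>(value) << kSmiTagSize;
}

static int32_t SmiUntag(intptr_t tagged) {
  // An arithmetic shift right by one drops the tag and keeps the sign,
  // which is why the stub can untag with a single ASR #1.
  return static_cast<int32_t>(tagged >> kSmiTagSize);
}

static bool IsSmi(intptr_t tagged) { return (tagged & kSmiTagMask) == 0; }

int main() {
  intptr_t to = SmiTag(7);
  intptr_t from = SmiTag(2);
  assert(IsSmi(to) && IsSmi(from));
  // The stub computes the substring length on the untagged values and
  // bails out to the runtime if "from" is negative or from > to.
  int32_t length = SmiUntag(to) - SmiUntag(from);
  assert(length == 5);
  return 0;
}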
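
Note on the string unpacking in SubStringStub. Both the new StringCharLoadGenerator path and the rewritten SubStringStub first reduce the receiver to its underlying string: a flattened cons string is replaced by its first part, a sliced string by its parent with the slice offset added to the start index, and an external string by its resource data, while anything else bails out to the runtime. The sketch below models that unpacking with invented C++ types; it is a simplification, not V8's class hierarchy:

#include <cstddef>
#include <string>

// Invented model of the string shapes the stub distinguishes; the real code
// reads the same information from the instance-type byte of the map.
enum Shape { kSequential, kCons, kSliced, kExternal };

struct Str {
  Shape shape;
  std::string payload;   // characters of a sequential/external string
  const Str* first;      // cons: first part
  const Str* second;     // cons: second part (empty string once flattened)
  const Str* parent;     // sliced: underlying string
  size_t offset;         // sliced: start offset into the parent
};

// Mirrors the "underlying_unpacked" path: returns the underlying string and
// adjusts the character index, or null where the stub bails to the runtime.
static const Str* Unpack(const Str* s, size_t* index) {
  if (s->shape == kCons) {
    // Only flattened cons strings (empty second part) are handled inline.
    if (!s->second->payload.empty()) return nullptr;
    s = s->first;
  } else if (s->shape == kSliced) {
    *index += s->offset;  // correct the start index by the slice offset
    s = s->parent;
  }
  // By V8's invariants the result is now sequential or external; the stub
  // re-reads the instance type to pick between those two cases.
  return s;
}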
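
Note on the flat-versus-cons decision in StringAddStub. The hunks above rename string_add_runtime to call_runtime and switch the flatness test to ConsString::kMinLength, which is the core policy: a two-character result is looked up in the symbol table, a result shorter than ConsString::kMinLength is copied into a fresh flat string, and anything longer becomes a cons string that records the two halves without copying. A rough model of that decision, with an illustrative threshold standing in for ConsString::kMinLength:

#include <cstddef>
#include <string>

// Illustrative threshold standing in for ConsString::kMinLength.
const size_t kConsMinLength = 13;

struct Concat {
  bool use_cons;      // true: build a cons string pointing at both halves
  std::string flat;   // false: the eagerly copied flat result
};

// Mirrors the "longer_than_two" / "string_add_flat_result" split: short
// results are copied now, long results become a lazy cons string.
static Concat Add(const std::string& a, const std::string& b) {
  if (a.size() + b.size() < kConsMinLength) {
    return Concat{false, a + b};
  }
  return Concat{true, std::string()};
}

The cons representation makes repeated concatenation cheap per step; the characters are only copied later, when something needs a flat string.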
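
Note on ICCompareStub::GenerateKnownObjects. After checking both operands against known_map_, the stub simply returns r0 minus r1: for objects, equality is identity, so the subtraction yields zero exactly when both tagged pointers refer to the same object. A tiny illustration of that convention, with plain integers standing in for tagged pointers:

#include <cassert>
#include <cstdint>

// For known-map objects the comparison IC returns lhs - rhs on the tagged
// pointers: zero means "same object", anything else means "not equal".
static intptr_t CompareKnownObjects(intptr_t lhs, intptr_t rhs) {
  return lhs - rhs;
}

int main() {
  intptr_t a = 0x12341;  // stand-ins for tagged heap pointers
  intptr_t b = 0x56785;
  assert(CompareKnownObjects(a, a) == 0);
  assert(CompareKnownObjects(a, b) != 0);
  return 0;
}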
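
Note on the dictionary probing in StringDictionaryLookupStub. GenerateNegativeLookup probes the property dictionary a fixed number of times inline before falling back to the full stub: each probe starts from the name's hash, adds a growing probe offset, masks by the table capacity, and stops either at an undefined slot (the name is definitely absent) or at the name itself. The sketch below shows that loop over a plain vector; the quadratic probe offset i*(i+1)/2 is assumed to match V8's hash-table probing and should be read as illustrative:

#include <cstdint>
#include <string>
#include <vector>

struct Entry {
  bool used;
  std::string key;
};

// Open-addressed probing over a power-of-two table, as in the lookup stub.
// The probe offset i*(i+1)/2 is an assumption about V8's probe sequence.
static int FindEntry(const std::vector<Entry>& table,
                     const std::string& name,
                     uint32_t hash) {
  uint32_t mask = static_cast<uint32_t>(table.size()) - 1;
  for (uint32_t i = 0; i < table.size(); i++) {
    uint32_t index = (hash + i * (i + 1) / 2) & mask;
    const Entry& entry = table[index];
    if (!entry.used) return -1;  // hole: the name is definitely not present
    if (entry.key == name) return static_cast<int>(index);
  }
  return -1;  // every probe hit some other key; the stub bails to the runtime
}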
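
Note on the RecordWriteStub machinery. The pregenerated kAheadOfTime list, the patchable leading branches, and CheckNeedsToInformIncrementalMarker together implement the post-store write barrier for incremental and compacting GC: record the slot in the remembered set when the stored value may be in new space, and notify the incremental marker when the host object is already marked black. The following is only a schematic of that decision, with invented field names in place of the page-flag and mark-bit tests the generated code performs:

// Schematic of the decision flow in RecordWriteStub::Generate and
// CheckNeedsToInformIncrementalMarker. The field names are invented for
// this sketch; the stub reads the same facts from page flags and mark bits.
struct StoreSite {
  bool incremental_marking_active;  // stub patched from nop to real branch
  bool object_is_black;             // host object already fully marked
  bool value_in_new_space;          // stored value may be young
  bool page_scan_on_scavenge;       // slot's page is rescanned anyway
};

enum Action {
  kDoNothing,
  kRecordInRememberedSet,
  kInformIncrementalMarker,
  kBoth
};

static Action WriteBarrierAction(const StoreSite& s) {
  bool remember = s.value_in_new_space && !s.page_scan_on_scavenge;
  bool inform = s.incremental_marking_active && s.object_is_black;
  if (remember && inform) return kBoth;
  if (remember) return kRecordInRememberedSet;
  if (inform) return kInformIncrementalMarker;
  return kDoNothing;
}

Keeping the stub's first two instructions as patchable branches (nops while marking is off) is what lets IncrementalMarking turn the extra work on and off without regenerating every caller.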
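
Note on StoreArrayLiteralElementStub. The stub dispatches on the literal's ElementsKind: a heap object stored into FAST_ELEMENTS needs a write barrier, a smi can be stored directly, a number stored into FAST_DOUBLE_ELEMENTS goes through StoreNumberToDoubleElements, and any store that would require an elements-kind transition is punted to Runtime::kStoreArrayLiteralElement. A compact, simplified model of that dispatch (std::variant stands in for tagged values; this is not the stub's actual data layout):

#include <cstddef>
#include <cstdint>
#include <variant>
#include <vector>

enum ElementsKind { FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS, FAST_DOUBLE_ELEMENTS };

struct HeapRef { const void* ptr; };  // stand-in for a tagged heap object
using Value = std::variant<int32_t, double, HeapRef>;

// Returns false where the stub calls Runtime::kStoreArrayLiteralElement,
// i.e. when the store would require an elements-kind transition.
static bool StoreLiteralElement(ElementsKind kind,
                                std::vector<Value>* elements,
                                size_t index,
                                const Value& value) {
  switch (kind) {
    case FAST_SMI_ONLY_ELEMENTS:
      if (!std::holds_alternative<int32_t>(value)) return false;
      break;
    case FAST_ELEMENTS:
      break;  // smis and heap objects are both acceptable
    case FAST_DOUBLE_ELEMENTS:
      if (std::holds_alternative<HeapRef>(value)) return false;
      break;
  }
  // The generated code also emits a write barrier when a heap object is
  // stored into a FAST_ELEMENTS backing store.
  (*elements)[index] = value;
  return true;
}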