Diffstat (limited to 'deps/v8/src/arm')
31 files changed, 1232 insertions, 1949 deletions
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index d966380c1e..f5612e463c 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -222,7 +222,7 @@ void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
 }
 
 
-static const int kNoCodeAgeSequenceLength = 3;
+static const int kNoCodeAgeSequenceLength = 3 * Assembler::kInstrSize;
 
 
 Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
@@ -234,15 +234,15 @@ Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
 Code* RelocInfo::code_age_stub() {
   ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
   return Code::GetCodeFromTargetAddress(
-      Memory::Address_at(pc_ + Assembler::kInstrSize *
-                         (kNoCodeAgeSequenceLength - 1)));
+      Memory::Address_at(pc_ +
+                         (kNoCodeAgeSequenceLength - Assembler::kInstrSize)));
 }
 
 
 void RelocInfo::set_code_age_stub(Code* stub) {
   ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
-  Memory::Address_at(pc_ + Assembler::kInstrSize *
-                     (kNoCodeAgeSequenceLength - 1)) =
+  Memory::Address_at(pc_ +
+                     (kNoCodeAgeSequenceLength - Assembler::kInstrSize)) =
       stub->instruction_start();
 }
 
@@ -323,14 +323,12 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
     visitor->VisitExternalReference(this);
   } else if (RelocInfo::IsCodeAgeSequence(mode)) {
     visitor->VisitCodeAgeSequence(this);
-#ifdef ENABLE_DEBUGGER_SUPPORT
   } else if (((RelocInfo::IsJSReturn(mode) &&
               IsPatchedReturnSequence()) ||
              (RelocInfo::IsDebugBreakSlot(mode) &&
               IsPatchedDebugBreakSlotSequence())) &&
              isolate->debug()->has_break_points()) {
     visitor->VisitDebugTarget(this);
-#endif
   } else if (RelocInfo::IsRuntimeEntry(mode)) {
     visitor->VisitRuntimeEntry(this);
   }
@@ -350,14 +348,12 @@ void RelocInfo::Visit(Heap* heap) {
     StaticVisitor::VisitExternalReference(this);
   } else if (RelocInfo::IsCodeAgeSequence(mode)) {
     StaticVisitor::VisitCodeAgeSequence(heap, this);
-#ifdef ENABLE_DEBUGGER_SUPPORT
   } else if (heap->isolate()->debug()->has_break_points() &&
             ((RelocInfo::IsJSReturn(mode) &&
               IsPatchedReturnSequence()) ||
              (RelocInfo::IsDebugBreakSlot(mode) &&
               IsPatchedDebugBreakSlotSequence()))) {
     StaticVisitor::VisitDebugTarget(heap, this);
-#endif
   } else if (RelocInfo::IsRuntimeEntry(mode)) {
     StaticVisitor::VisitRuntimeEntry(this);
   }
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 297cdcc039..74fd61979b 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -100,10 +100,11 @@ const char* DwVfpRegister::AllocationIndexToString(int index) {
 }
 
 
-void CpuFeatures::Probe() {
+void CpuFeatures::Probe(bool serializer_enabled) {
   uint64_t standard_features = static_cast<unsigned>(
       OS::CpuFeaturesImpliedByPlatform()) | CpuFeaturesImpliedByCompiler();
-  ASSERT(supported_ == 0 || supported_ == standard_features);
+  ASSERT(supported_ == 0 ||
+         (supported_ & standard_features) == standard_features);
 #ifdef DEBUG
   initialized_ = true;
 #endif
@@ -113,10 +114,8 @@
   // snapshot.
   supported_ |= standard_features;
 
-  if (Serializer::enabled()) {
+  if (serializer_enabled) {
     // No probing for features if we might serialize (generate snapshot).
-    printf(" ");
-    PrintFeatures();
     return;
   }
 
@@ -1077,15 +1076,11 @@ static bool fits_shifter(uint32_t imm32,
 // if they can be encoded in the ARM's 12 bits of immediate-offset instruction
 // space. There is no guarantee that the relocated location can be similarly
 // encoded.
-bool Operand::must_output_reloc_info(const Assembler* assembler) const {
+bool Operand::must_output_reloc_info(Isolate* isolate,
+                                     const Assembler* assembler) const {
   if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
-#ifdef DEBUG
-    if (!Serializer::enabled()) {
-      Serializer::TooLateToEnableNow();
-    }
-#endif  // def DEBUG
     if (assembler != NULL && assembler->predictable_code_size()) return true;
-    return Serializer::enabled();
+    return Serializer::enabled(isolate);
   } else if (RelocInfo::IsNone(rmode_)) {
     return false;
   }
@@ -1093,7 +1088,8 @@ bool Operand::must_output_reloc_info(const Assembler* assembler) const {
 }
 
 
-static bool use_mov_immediate_load(const Operand& x,
+static bool use_mov_immediate_load(Isolate* isolate,
+                                   const Operand& x,
                                    const Assembler* assembler) {
   if (assembler != NULL && !assembler->can_use_constant_pool()) {
     // If there is no constant pool available, we must use an mov immediate.
@@ -1104,7 +1100,7 @@ static bool use_mov_immediate_load(const Operand& x,
       (assembler == NULL || !assembler->predictable_code_size())) {
     // Prefer movw / movt to constant pool if it is more efficient on the CPU.
     return true;
-  } else if (x.must_output_reloc_info(assembler)) {
+  } else if (x.must_output_reloc_info(isolate, assembler)) {
     // Prefer constant pool if data is likely to be patched.
     return false;
   } else {
@@ -1114,17 +1110,18 @@ static bool use_mov_immediate_load(const Operand& x,
 }
 
 
-bool Operand::is_single_instruction(const Assembler* assembler,
+bool Operand::is_single_instruction(Isolate* isolate,
+                                    const Assembler* assembler,
                                     Instr instr) const {
   if (rm_.is_valid()) return true;
   uint32_t dummy1, dummy2;
-  if (must_output_reloc_info(assembler) ||
+  if (must_output_reloc_info(isolate, assembler) ||
       !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
     // The immediate operand cannot be encoded as a shifter operand, or use of
     // constant pool is required. For a mov instruction not setting the
     // condition code additional instruction conventions can be used.
     if ((instr & ~kCondMask) == 13*B21) {  // mov, S not set
-      return !use_mov_immediate_load(*this, assembler);
+      return !use_mov_immediate_load(isolate, *this, assembler);
    } else {
      // If this is not a mov or mvn instruction there will always an additional
      // instructions - either mov or ldr. The mov might actually be two
@@ -1144,15 +1141,16 @@ void Assembler::move_32_bit_immediate(Register rd,
                                       const Operand& x,
                                       Condition cond) {
   RelocInfo rinfo(pc_, x.rmode_, x.imm32_, NULL);
-  if (x.must_output_reloc_info(this)) {
+  if (x.must_output_reloc_info(isolate(), this)) {
     RecordRelocInfo(rinfo);
   }
 
-  if (use_mov_immediate_load(x, this)) {
+  if (use_mov_immediate_load(isolate(), x, this)) {
     Register target = rd.code() == pc.code() ? ip : rd;
     // TODO(rmcilroy): add ARMv6 support for immediate loads.
     ASSERT(CpuFeatures::IsSupported(ARMv7));
-    if (!FLAG_enable_ool_constant_pool && x.must_output_reloc_info(this)) {
+    if (!FLAG_enable_ool_constant_pool &&
+        x.must_output_reloc_info(isolate(), this)) {
       // Make sure the movw/movt doesn't get separated.
       BlockConstPoolFor(2);
     }
@@ -1180,7 +1178,7 @@ void Assembler::addrmod1(Instr instr,
     // Immediate.
     uint32_t rotate_imm;
     uint32_t immed_8;
-    if (x.must_output_reloc_info(this) ||
+    if (x.must_output_reloc_info(isolate(), this) ||
         !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
       // The immediate operand cannot be encoded as a shifter operand, so load
       // it first to register ip and change the original instruction to use ip.
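
The hunks above thread an Isolate through must_output_reloc_info() and use_mov_immediate_load(), but whether an operand needs more than one instruction still comes down to fits_shifter(): can the 32-bit immediate be encoded as an ARM shifter operand, i.e. an 8-bit value rotated right by an even amount? A minimal standalone sketch of that encoding rule follows, with illustrative names rather than V8's (it omits the mov/mvn and cmp/cmn instruction rewriting that V8's fits_shifter also attempts):

#include <cstdint>
#include <cstdio>

// Rotate a 32-bit value left by n bits (n in [0, 31]).
static uint32_t RotateLeft(uint32_t value, int n) {
  return n == 0 ? value : (value << n) | (value >> (32 - n));
}

// True if imm32 is representable as ror(imm8, rot) with imm8 < 256 and rot
// even, which is all a single ARM data-processing immediate field can hold.
static bool FitsShifterOperand(uint32_t imm32, int* rot_out,
                               uint32_t* imm8_out) {
  for (int rot = 0; rot < 32; rot += 2) {
    uint32_t v = RotateLeft(imm32, rot);  // undoes a rotate-right by rot
    if (v <= 0xff) {
      *rot_out = rot;
      *imm8_out = v;
      return true;
    }
  }
  return false;
}

int main() {
  int rot;
  uint32_t imm8;
  printf("%d\n", FitsShifterOperand(0x3FC00u, &rot, &imm8));  // 1: 0xff ror 22
  printf("%d\n", FitsShifterOperand(0x101u, &rot, &imm8));    // 0: bits span > 8
  return 0;
}
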
@@ -1862,7 +1860,7 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
     // Immediate.
     uint32_t rotate_imm;
     uint32_t immed_8;
-    if (src.must_output_reloc_info(this) ||
+    if (src.must_output_reloc_info(isolate(), this) ||
         !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
       // Immediate operand cannot be encoded, load it first to register ip.
       move_32_bit_immediate(ip, src);
@@ -2827,8 +2825,9 @@ void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
   ASSERT(CpuFeatures::IsSupported(VFP3));
   int vd, d;
   dst.split_code(&vd, &d);
-  int i = ((32 - fraction_bits) >> 4) & 1;
-  int imm4 = (32 - fraction_bits) & 0xf;
+  int imm5 = 32 - fraction_bits;
+  int i = imm5 & 1;
+  int imm4 = (imm5 >> 1) & 0xf;
   emit(cond | 0xE*B24 | B23 | d*B22 | 0x3*B20 | B19 | 0x2*B16 |
        vd*B12 | 0x5*B9 | B8 | B7 | B6 | i*B5 | imm4);
 }
@@ -3161,9 +3160,7 @@ void Assembler::RecordComment(const char* msg) {
 void Assembler::RecordConstPool(int size) {
   // We only need this for debugger support, to correctly compute offsets in the
   // code.
-#ifdef ENABLE_DEBUGGER_SUPPORT
   RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
-#endif
 }
 
 
@@ -3266,12 +3263,7 @@ void Assembler::RecordRelocInfo(const RelocInfo& rinfo) {
   if (!RelocInfo::IsNone(rinfo.rmode())) {
     // Don't record external references unless the heap will be serialized.
     if (rinfo.rmode() == RelocInfo::EXTERNAL_REFERENCE) {
-#ifdef DEBUG
-      if (!Serializer::enabled()) {
-        Serializer::TooLateToEnableNow();
-      }
-#endif
-      if (!Serializer::enabled() && !emit_debug_code()) {
+      if (!Serializer::enabled(isolate()) && !emit_debug_code()) {
         return;
       }
     }
@@ -3502,7 +3494,8 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
       // data
       bool found = false;
-      if (!Serializer::enabled() && (rinfo.rmode() >= RelocInfo::CELL)) {
+      if (!Serializer::enabled(isolate()) &&
+          (rinfo.rmode() >= RelocInfo::CELL)) {
         for (int j = 0; j < i; j++) {
           RelocInfo& rinfo2 = pending_32_bit_reloc_info_[j];
@@ -3547,14 +3540,15 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
 }
 
 
-MaybeObject* Assembler::AllocateConstantPool(Heap* heap) {
-  ASSERT(FLAG_enable_ool_constant_pool);
-  return constant_pool_builder_.Allocate(heap);
+Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
+  if (!FLAG_enable_ool_constant_pool) {
+    return isolate->factory()->empty_constant_pool_array();
+  }
+  return constant_pool_builder_.New(isolate);
 }
 
 
 void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
-  ASSERT(FLAG_enable_ool_constant_pool);
   constant_pool_builder_.Populate(this, constant_pool);
 }
 
@@ -3605,7 +3599,7 @@ void ConstantPoolBuilder::AddEntry(Assembler* assm,
   // Try to merge entries which won't be patched.
   int merged_index = -1;
   if (RelocInfo::IsNone(rmode) ||
-      (!Serializer::enabled() && (rmode >= RelocInfo::CELL))) {
+      (!Serializer::enabled(assm->isolate()) && (rmode >= RelocInfo::CELL))) {
     size_t i;
     std::vector<RelocInfo>::const_iterator it;
     for (it = entries_.begin(), i = 0; it != entries_.end(); it++, i++) {
@@ -3654,12 +3648,14 @@ void ConstantPoolBuilder::Relocate(int pc_delta) {
 }
 
 
-MaybeObject* ConstantPoolBuilder::Allocate(Heap* heap) {
+Handle<ConstantPoolArray> ConstantPoolBuilder::New(Isolate* isolate) {
   if (IsEmpty()) {
-    return heap->empty_constant_pool_array();
+    return isolate->factory()->empty_constant_pool_array();
   } else {
-    return heap->AllocateConstantPoolArray(count_of_64bit_, count_of_code_ptr_,
-                                           count_of_heap_ptr_, count_of_32bit_);
+    return isolate->factory()->NewConstantPoolArray(count_of_64bit_,
+                                                    count_of_code_ptr_,
+                                                    count_of_heap_ptr_,
+                                                    count_of_32bit_);
   }
 }
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index 727b054211..1c6a7f04f8 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -56,7 +56,7 @@ class CpuFeatures : public AllStatic {
  public:
   // Detect features of the target CPU. Set safe defaults if the serializer
   // is enabled (snapshots must be portable).
-  static void Probe();
+  static void Probe(bool serializer_enabled);
 
   // Display target use when compiling.
   static void PrintTarget();
@@ -70,15 +70,11 @@ class CpuFeatures : public AllStatic {
     return Check(f, supported_);
   }
 
-  static bool IsFoundByRuntimeProbingOnly(CpuFeature f) {
-    ASSERT(initialized_);
-    return Check(f, found_by_runtime_probing_only_);
-  }
-
-  static bool IsSafeForSnapshot(CpuFeature f) {
+  static bool IsSafeForSnapshot(Isolate* isolate, CpuFeature f) {
     return Check(f, cross_compile_) ||
            (IsSupported(f) &&
-            (!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f)));
+            !(Serializer::enabled(isolate) &&
+              Check(f, found_by_runtime_probing_only_)));
   }
 
   static unsigned cache_line_size() { return cache_line_size_; }
@@ -93,6 +89,8 @@ class CpuFeatures : public AllStatic {
            (cross_compile_ & mask) == mask;
   }
 
+  static bool SupportsCrankshaft() { return CpuFeatures::IsSupported(VFP3); }
+
  private:
   static bool Check(CpuFeature f, unsigned set) {
     return (set & flag2set(f)) != 0;
@@ -590,8 +588,11 @@ class Operand BASE_EMBEDDED {
   // the instruction this operand is used for is a MOV or MVN instruction the
   // actual instruction to use is required for this calculation. For other
   // instructions instr is ignored.
-  bool is_single_instruction(const Assembler* assembler, Instr instr = 0) const;
-  bool must_output_reloc_info(const Assembler* assembler) const;
+  bool is_single_instruction(Isolate* isolate,
+                             const Assembler* assembler,
+                             Instr instr = 0) const;
+  bool must_output_reloc_info(Isolate* isolate,
+                              const Assembler* assembler) const;
 
   inline int32_t immediate() const {
     ASSERT(!rm_.is_valid());
@@ -714,7 +715,7 @@ class ConstantPoolBuilder BASE_EMBEDDED {
   void AddEntry(Assembler* assm, const RelocInfo& rinfo);
   void Relocate(int pc_delta);
   bool IsEmpty();
-  MaybeObject* Allocate(Heap* heap);
+  Handle<ConstantPoolArray> New(Isolate* isolate);
   void Populate(Assembler* assm, ConstantPoolArray* constant_pool);
 
   inline int count_of_64bit() const { return count_of_64bit_; }
@@ -728,6 +729,8 @@ class ConstantPoolBuilder BASE_EMBEDDED {
   bool IsCodePtrEntry(RelocInfo::Mode rmode);
   bool IsHeapPtrEntry(RelocInfo::Mode rmode);
 
+  // TODO(rmcilroy): This should ideally be a ZoneList, however that would mean
+  // RelocInfo would need to subclass ZoneObject which it currently doesn't.
   std::vector<RelocInfo> entries_;
   std::vector<int> merged_indexes_;
   int count_of_64bit_;
@@ -1498,7 +1501,7 @@ class Assembler : public AssemblerBase {
   void CheckConstPool(bool force_emit, bool require_jump);
 
   // Allocate a constant pool of the correct size for the generated code.
-  MaybeObject* AllocateConstantPool(Heap* heap);
+  Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);
 
   // Generate the constant pool for the generated code.
   void PopulateConstantPool(ConstantPoolArray* constant_pool);
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index f138146417..2e5cc7398c 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -1,29 +1,6 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
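
Of the assembler changes above, the vcvt_f64_s32() hunk is a genuine encoding fix rather than an interface change: the five-bit fixed-point immediate imm5 = 32 - fraction_bits is stored with its low bit in the instruction's "i" field (bit 5) and its high four bits in "imm4" (bits 3:0), while the old code extracted the two fields the other way around. A small sketch of the corrected split with a worked value, using a hypothetical helper and assuming the {imm4:i} layout implied by the new code:

#include <cassert>
#include <cstdio>

// Split imm5 = 32 - fraction_bits into "i" (low bit) and "imm4" (high four
// bits), the way the corrected assembler hunk does.
static void SplitVcvtImmediate(int fraction_bits, int* i, int* imm4) {
  assert(fraction_bits > 0 && fraction_bits <= 32);
  int imm5 = 32 - fraction_bits;
  *i = imm5 & 1;
  *imm4 = (imm5 >> 1) & 0xf;
}

int main() {
  int i, imm4;
  SplitVcvtImmediate(16, &i, &imm4);
  // imm5 = 16 = 0b10000, so i = 0 and imm4 = 0b1000.
  // The old split gave i = 1, imm4 = 0, i.e. imm5 = 1, which under this
  // layout would have encoded 31 fraction bits instead of 16.
  printf("i=%d imm4=%d\n", i, imm4);
  return 0;
}
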
 #include "v8.h"
 
@@ -376,14 +353,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
     Label rt_call, allocated;
     if (FLAG_inline_new) {
       Label undo_allocation;
-#ifdef ENABLE_DEBUGGER_SUPPORT
       ExternalReference debug_step_in_fp =
           ExternalReference::debug_step_in_fp_address(isolate);
       __ mov(r2, Operand(debug_step_in_fp));
       __ ldr(r2, MemOperand(r2));
       __ tst(r2, r2);
       __ b(ne, &rt_call);
-#endif
 
       // Load the initial map and verify that it is in fact a map.
       // r1: constructor function
@@ -807,7 +782,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
   if (is_construct) {
     // No type feedback cell is available
     __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
-    CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
+    CallConstructStub stub(masm->isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
     __ CallStub(&stub);
   } else {
     ParameterCount actual(r0);
@@ -923,7 +898,7 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
   __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
 
   // Jump to point after the code-age stub.
-  __ add(r0, r0, Operand(kNoCodeAgeSequenceLength * Assembler::kInstrSize));
+  __ add(r0, r0, Operand(kNoCodeAgeSequenceLength));
   __ mov(pc, r0);
 }
 
@@ -1284,7 +1259,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
     // Out of stack space.
     __ ldr(r1, MemOperand(fp, kFunctionOffset));
     __ Push(r1, r0);
-    __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+    __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
     // End of stack check.
 
     // Push current limit and index.
@@ -1407,6 +1382,26 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
 }
 
 
+static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
+                                      Label* stack_overflow) {
+  // ----------- S t a t e -------------
+  //  -- r0 : actual number of arguments
+  //  -- r1 : function (passed through to callee)
+  //  -- r2 : expected number of arguments
+  // -----------------------------------
+  // Check the stack for overflow. We are not trying to catch
+  // interruptions (e.g. debug break and preemption) here, so the "real stack
+  // limit" is checked.
+  __ LoadRoot(r5, Heap::kRealStackLimitRootIndex);
+  // Make r5 the space we have left. The stack might already be overflowed
+  // here which will cause r5 to become negative.
+  __ sub(r5, sp, r5);
+  // Check if the arguments will overflow the stack.
+  __ cmp(r5, Operand(r2, LSL, kPointerSizeLog2));
+  __ b(le, stack_overflow);  // Signed comparison.
+}
+
+
 static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
   __ SmiTag(r0);
   __ mov(r4, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
@@ -1446,6 +1441,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   //  -- r2 : expected number of arguments
   // -----------------------------------
 
+  Label stack_overflow;
+  ArgumentAdaptorStackCheck(masm, &stack_overflow);
   Label invoke, dont_adapt_arguments;
 
   Label enough, too_few;
@@ -1545,6 +1542,14 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // -------------------------------------------
   __ bind(&dont_adapt_arguments);
   __ Jump(r3);
+
+  __ bind(&stack_overflow);
+  {
+    FrameScope frame(masm, StackFrame::MANUAL);
+    EnterArgumentsAdaptorFrame(masm);
+    __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+    __ bkpt(0);
+  }
 }
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index 832296b273..7b2935106f 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -1,29 +1,6 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
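
The new ArgumentAdaptorStackCheck above (with its stack_overflow landing pad in Generate_ArgumentsAdaptorTrampoline) hinges on one arithmetic detail: the comparison against the real stack limit is signed, so a stack that has already grown past the limit (negative headroom) also takes the overflow path. A minimal sketch of that arithmetic, with stand-in values rather than V8's types:

#include <cstdint>
#include <cstdio>

const int kPointerSizeLog2 = 2;  // assume 4-byte pointers for the example

// space_left may be negative if sp is already below the real stack limit;
// the signed <= (the stub's "ble") then still reports an overflow.
static bool AdaptorWouldOverflow(intptr_t sp, intptr_t real_stack_limit,
                                 intptr_t expected_arg_count) {
  intptr_t space_left = sp - real_stack_limit;
  intptr_t space_needed = expected_arg_count << kPointerSizeLog2;
  return space_left <= space_needed;
}

int main() {
  printf("%d\n", AdaptorWouldOverflow(0x1000 + 40, 0x1000, 4));  // 0: fits
  printf("%d\n", AdaptorWouldOverflow(0x1000 - 8, 0x1000, 0));   // 1: overflowed
  return 0;
}
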
 #include "v8.h"
 
@@ -39,7 +16,6 @@ namespace internal {
 
 
 void FastNewClosureStub::InitializeInterfaceDescriptor(
-    Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
   static Register registers[] = { r2 };
   descriptor->register_param_count_ = 1;
@@ -50,7 +26,6 @@ void FastNewClosureStub::InitializeInterfaceDescriptor(
 
 
 void FastNewContextStub::InitializeInterfaceDescriptor(
-    Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
   static Register registers[] = { r1 };
   descriptor->register_param_count_ = 1;
@@ -60,7 +35,6 @@ void FastNewContextStub::InitializeInterfaceDescriptor(
 
 
 void ToNumberStub::InitializeInterfaceDescriptor(
-    Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
   static Register registers[] = { r0 };
   descriptor->register_param_count_ = 1;
@@ -70,7 +44,6 @@ void ToNumberStub::InitializeInterfaceDescriptor(
 
 
 void NumberToStringStub::InitializeInterfaceDescriptor(
-    Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
   static Register registers[] = { r0 };
   descriptor->register_param_count_ = 1;
@@ -81,7 +54,6 @@ void NumberToStringStub::InitializeInterfaceDescriptor(
 
 
 void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
-    Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
   static Register registers[] = { r3, r2, r1 };
   descriptor->register_param_count_ = 3;
@@ -93,7 +65,6 @@ void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
 
 
 void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
-    Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
   static Register registers[] = { r3, r2, r1, r0 };
   descriptor->register_param_count_ = 4;
@@ -104,7 +75,6 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
 
 
 void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
-    Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
   static Register registers[] = { r2, r3 };
   descriptor->register_param_count_ = 2;
@@ -114,7 +84,6 @@ void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
 
 
 void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
-    Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
   static Register registers[] = { r1, r0 };
   descriptor->register_param_count_ = 2;
@@ -125,7 +94,6 @@ void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
 
 
 void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
-    Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
   static Register registers[] = { r1, r0 };
   descriptor->register_param_count_ = 2;
@@ -136,7 +104,6 @@ void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
 
 
 void RegExpConstructResultStub::InitializeInterfaceDescriptor(
-    Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
   static Register registers[] = { r2, r1, r0 };
   descriptor->register_param_count_ = 3;
@@ -147,7 +114,6 @@ void RegExpConstructResultStub::InitializeInterfaceDescriptor(
 
 
 void LoadFieldStub::InitializeInterfaceDescriptor(
-    Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
   static Register registers[] = { r0 };
   descriptor->register_param_count_ = 1;
@@ -157,7 +123,6 @@ void LoadFieldStub::InitializeInterfaceDescriptor(
 
 
 void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
-    Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
   static Register registers[] = { r1 };
   descriptor->register_param_count_ = 1;
@@ -167,7 +132,6 @@ void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
 
 
 void StringLengthStub::InitializeInterfaceDescriptor(
-    Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
   static Register registers[] = { r0, r2 };
   descriptor->register_param_count_ = 2;
@@ -177,7 +141,6 @@ void StringLengthStub::InitializeInterfaceDescriptor(
 
 
 void KeyedStringLengthStub::InitializeInterfaceDescriptor(
-    Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
   static Register registers[] = { r1, r0 };
   descriptor->register_param_count_ = 2;
@@ -187,7 +150,6 @@ void KeyedStringLengthStub::InitializeInterfaceDescriptor(
 
 
 void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
-    Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
   static Register registers[] = { r2, r1, r0 };
   descriptor->register_param_count_ = 3;
@@ -198,7 +160,6 @@ void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
 
 
 void TransitionElementsKindStub::InitializeInterfaceDescriptor(
-    Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
   static Register registers[] = { r0, r1 };
   descriptor->register_param_count_ = 2;
@@ -210,7 +171,6 @@ void TransitionElementsKindStub::InitializeInterfaceDescriptor(
 
 
 void CompareNilICStub::InitializeInterfaceDescriptor(
-    Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
   static Register registers[] = { r0 };
   descriptor->register_param_count_ = 1;
@@ -218,12 +178,11 @@ void CompareNilICStub::InitializeInterfaceDescriptor(
   descriptor->deoptimization_handler_ =
       FUNCTION_ADDR(CompareNilIC_Miss);
   descriptor->SetMissHandler(
-      ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate));
+      ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));
 }
 
 
 static void InitializeArrayConstructorDescriptor(
-    Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor,
     int constant_stack_parameter_count) {
   // register state
@@ -252,7 +211,6 @@ static void InitializeArrayConstructorDescriptor(
 
 
 static void InitializeInternalArrayConstructorDescriptor(
-    Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor,
     int constant_stack_parameter_count) {
   // register state
@@ -280,28 +238,24 @@ static void InitializeInternalArrayConstructorDescriptor(
 
 
 void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
-    Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(isolate, descriptor, 0);
+  InitializeArrayConstructorDescriptor(descriptor, 0);
 }
 
 
 void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
-    Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(isolate, descriptor, 1);
+  InitializeArrayConstructorDescriptor(descriptor, 1);
 }
 
 
 void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
-    Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(isolate, descriptor, -1);
+  InitializeArrayConstructorDescriptor(descriptor, -1);
 }
 
 
 void ToBooleanStub::InitializeInterfaceDescriptor(
-    Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
   static Register registers[] = { r0 };
   descriptor->register_param_count_ = 1;
@@ -309,33 +263,29 @@ void ToBooleanStub::InitializeInterfaceDescriptor(
   descriptor->deoptimization_handler_ =
       FUNCTION_ADDR(ToBooleanIC_Miss);
   descriptor->SetMissHandler(
-      ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate));
+      ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));
 }
 
 
 void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
-    Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0);
+  InitializeInternalArrayConstructorDescriptor(descriptor, 0);
 }
 
 
 void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
-    Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1);
+  InitializeInternalArrayConstructorDescriptor(descriptor, 1);
 }
 
 
 void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
-    Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1);
+  InitializeInternalArrayConstructorDescriptor(descriptor, -1);
 }
 
 
 void StoreGlobalStub::InitializeInterfaceDescriptor(
-    Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
   static Register registers[] = { r1, r2, r0 };
   descriptor->register_param_count_ = 3;
@@ -346,7 +296,6 @@ void StoreGlobalStub::InitializeInterfaceDescriptor(
 
 
 void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
-    Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
   static Register registers[] = { r0, r3, r1, r2 };
   descriptor->register_param_count_ = 4;
@@ -357,19 +306,17 @@ void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
 
 
 void BinaryOpICStub::InitializeInterfaceDescriptor(
-    Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
   static Register registers[] = { r1, r0 };
   descriptor->register_param_count_ = 2;
   descriptor->register_params_ = registers;
   descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
   descriptor->SetMissHandler(
-      ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
+      ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));
 }
 
 
 void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
-    Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
   static Register registers[] = { r2, r1, r0 };
   descriptor->register_param_count_ = 3;
@@ -380,7 +327,6 @@ void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
 
 
 void StringAddStub::InitializeInterfaceDescriptor(
-    Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
   static Register registers[] = { r1, r0 };
   descriptor->register_param_count_ = 2;
@@ -504,10 +450,9 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
 
 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
   // Update the static counter each time a new code stub is generated.
-  Isolate* isolate = masm->isolate();
-  isolate->counters()->code_stubs()->Increment();
+  isolate()->counters()->code_stubs()->Increment();
 
-  CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate);
+  CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor();
   int param_count = descriptor->register_param_count_;
   {
     // Call the runtime system in a fresh internal frame.
@@ -533,11 +478,13 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
 // stub so you don't have to set up the frame.
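
Every descriptor hunk above makes the same mechanical change: InitializeInterfaceDescriptor() loses its Isolate* parameter and reads isolate() instead. A sketch of the shape of that refactor with stand-in types (not V8's actual class hierarchy): the isolate is stored once in the stub's base class at construction, so members no longer need it threaded through each call.

#include <cstdio>

struct Isolate { const char* name; };

class CodeStubLike {
 public:
  explicit CodeStubLike(Isolate* isolate) : isolate_(isolate) {}
  Isolate* isolate() const { return isolate_; }
  // Previously: InitializeInterfaceDescriptor(Isolate* isolate, ...).
  void InitializeInterfaceDescriptor() {
    printf("descriptor for isolate %s\n", isolate()->name);
  }
 private:
  Isolate* isolate_;
};

int main() {
  Isolate iso = { "main" };
  CodeStubLike stub(&iso);
  stub.InitializeInterfaceDescriptor();
  return 0;
}
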
 class ConvertToDoubleStub : public PlatformCodeStub {
  public:
-  ConvertToDoubleStub(Register result_reg_1,
+  ConvertToDoubleStub(Isolate* isolate,
+                      Register result_reg_1,
                       Register result_reg_2,
                       Register source_reg,
                       Register scratch_reg)
-      : result1_(result_reg_1),
+      : PlatformCodeStub(isolate),
+        result1_(result_reg_1),
         result2_(result_reg_2),
         source_(source_reg),
         zeros_(scratch_reg) { }
@@ -726,10 +673,10 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
 
 void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
     Isolate* isolate) {
-  WriteInt32ToHeapNumberStub stub1(r1, r0, r2);
-  WriteInt32ToHeapNumberStub stub2(r2, r0, r3);
-  stub1.GetCode(isolate);
-  stub2.GetCode(isolate);
+  WriteInt32ToHeapNumberStub stub1(isolate, r1, r0, r2);
+  WriteInt32ToHeapNumberStub stub2(isolate, r2, r0, r3);
+  stub1.GetCode();
+  stub2.GetCode();
 }
 
 
@@ -1124,7 +1071,6 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
   __ bind(&both_loaded_as_doubles);
   // The arguments have been converted to doubles and stored in d6 and d7, if
   // VFP3 is supported, or in r0, r1, r2, and r3.
-  Isolate* isolate = masm->isolate();
   __ bind(&lhs_not_nan);
   Label no_nan;
   // ARMv7 VFP3 instructions to implement double precision comparison.
@@ -1187,7 +1133,8 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
 
   __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, r2, r3, &slow);
 
-  __ IncrementCounter(isolate->counters()->string_compare_native(), 1, r2, r3);
+  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r2,
+                      r3);
   if (cc == eq) {
     StringCompareStub::GenerateFlatAsciiStringEquals(masm,
                                                      lhs,
@@ -1251,9 +1198,9 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
   AllowExternalCallThatCantCauseGC scope(masm);
   __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
-  __ mov(r0, Operand(ExternalReference::isolate_address(masm->isolate())));
+  __ mov(r0, Operand(ExternalReference::isolate_address(isolate())));
   __ CallCFunction(
-      ExternalReference::store_buffer_overflow_function(masm->isolate()),
+      ExternalReference::store_buffer_overflow_function(isolate()),
       argument_count);
   if (save_doubles_ == kSaveFPRegs) {
     __ RestoreFPRegs(sp, scratch);
@@ -1373,7 +1320,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
     __ PrepareCallCFunction(0, 2, scratch);
     __ MovToFloatParameters(double_base, double_exponent);
     __ CallCFunction(
-        ExternalReference::power_double_double_function(masm->isolate()),
+        ExternalReference::power_double_double_function(isolate()),
         0, 2);
   }
   __ pop(lr);
@@ -1424,11 +1371,11 @@ void MathPowStub::Generate(MacroAssembler* masm) {
   __ vcvt_f64_s32(double_exponent, single_scratch);
 
   // Returning or bailing out.
-  Counters* counters = masm->isolate()->counters();
+  Counters* counters = isolate()->counters();
   if (exponent_type_ == ON_STACK) {
     // The arguments are still on the stack.
     __ bind(&call_runtime);
-    __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+    __ TailCallRuntime(Runtime::kHiddenMathPow, 2, 1);
 
     // The stub is called from non-optimized code, which expects the result
     // as heap number in exponent.
@@ -1447,7 +1394,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
     __ PrepareCallCFunction(0, 2, scratch);
     __ MovToFloatParameters(double_base, double_exponent);
     __ CallCFunction(
-        ExternalReference::power_double_double_function(masm->isolate()),
+        ExternalReference::power_double_double_function(isolate()),
         0, 2);
   }
   __ pop(lr);
@@ -1479,61 +1426,57 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
 
 void CodeStub::GenerateFPStubs(Isolate* isolate) {
   SaveFPRegsMode mode = kSaveFPRegs;
-  CEntryStub save_doubles(1, mode);
-  StoreBufferOverflowStub stub(mode);
+  CEntryStub save_doubles(isolate, 1, mode);
+  StoreBufferOverflowStub stub(isolate, mode);
   // These stubs might already be in the snapshot, detect that and don't
   // regenerate, which would lead to code stub initialization state being messed
   // up.
   Code* save_doubles_code;
-  if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) {
-    save_doubles_code = *save_doubles.GetCode(isolate);
+  if (!save_doubles.FindCodeInCache(&save_doubles_code)) {
+    save_doubles_code = *save_doubles.GetCode();
   }
   Code* store_buffer_overflow_code;
-  if (!stub.FindCodeInCache(&store_buffer_overflow_code, isolate)) {
-    store_buffer_overflow_code = *stub.GetCode(isolate);
+  if (!stub.FindCodeInCache(&store_buffer_overflow_code)) {
+    store_buffer_overflow_code = *stub.GetCode();
   }
   isolate->set_fp_stubs_generated(true);
 }
 
 
 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
-  CEntryStub stub(1, kDontSaveFPRegs);
-  stub.GetCode(isolate);
+  CEntryStub stub(isolate, 1, kDontSaveFPRegs);
+  stub.GetCode();
 }
 
 
-void CEntryStub::GenerateCore(MacroAssembler* masm,
-                              Label* throw_normal_exception,
-                              Label* throw_termination_exception,
-                              bool do_gc,
-                              bool always_allocate) {
-  // r0: result parameter for PerformGC, if any
-  // r4: number of arguments including receiver  (C callee-saved)
-  // r5: pointer to builtin function  (C callee-saved)
-  // r6: pointer to the first argument (C callee-saved)
-  Isolate* isolate = masm->isolate();
-
-  if (do_gc) {
-    // Passing r0.
-    __ PrepareCallCFunction(2, 0, r1);
-    __ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
-    __ CallCFunction(ExternalReference::perform_gc_function(isolate),
-        2, 0);
-  }
+void CEntryStub::Generate(MacroAssembler* masm) {
+  // Called from JavaScript; parameters are on stack as if calling JS function.
+  // r0: number of arguments including receiver
+  // r1: pointer to builtin function
+  // fp: frame pointer  (restored after C call)
+  // sp: stack pointer  (restored as callee's sp after C call)
+  // cp: current context  (C callee-saved)
 
-  ExternalReference scope_depth =
-      ExternalReference::heap_always_allocate_scope_depth(isolate);
-  if (always_allocate) {
-    __ mov(r0, Operand(scope_depth));
-    __ ldr(r1, MemOperand(r0));
-    __ add(r1, r1, Operand(1));
-    __ str(r1, MemOperand(r0));
-  }
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
 
-  // Call C built-in.
-  // r0 = argc, r1 = argv
-  __ mov(r0, Operand(r4));
-  __ mov(r1, Operand(r6));
+  __ mov(r5, Operand(r1));
+
+  // Compute the argv pointer in a callee-saved register.
+  __ add(r1, sp, Operand(r0, LSL, kPointerSizeLog2));
+  __ sub(r1, r1, Operand(kPointerSize));
+
+  // Enter the exit frame that transitions from JavaScript to C++.
+  FrameScope scope(masm, StackFrame::MANUAL);
+  __ EnterExitFrame(save_doubles_);
+
+  // Store a copy of argc in callee-saved registers for later.
+  __ mov(r4, Operand(r0));
+
+  // r0, r4: number of arguments including receiver  (C callee-saved)
+  // r1: pointer to the first argument (C callee-saved)
+  // r5: pointer to builtin function  (C callee-saved)
+
+  // Result returned in r0 or r0+r1 by default.
 
 #if V8_HOST_ARCH_ARM
   int frame_alignment = MacroAssembler::ActivationFrameAlignment();
@@ -1551,7 +1494,9 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
   }
 #endif
 
-  __ mov(r2, Operand(ExternalReference::isolate_address(isolate)));
+  // Call C built-in.
+  // r0 = argc, r1 = argv
+  __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
 
   // To let the GC traverse the return address of the exit frames, we need to
   // know where the return address is. The CEntryStub is unmovable, so
@@ -1570,132 +1515,67 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
 
   __ VFPEnsureFPSCRState(r2);
 
-  if (always_allocate) {
-    // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
-    // though (contain the result).
-    __ mov(r2, Operand(scope_depth));
-    __ ldr(r3, MemOperand(r2));
-    __ sub(r3, r3, Operand(1));
-    __ str(r3, MemOperand(r2));
+  // Runtime functions should not return 'the hole'. Allowing it to escape may
+  // lead to crashes in the IC code later.
+  if (FLAG_debug_code) {
+    Label okay;
+    __ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
+    __ b(ne, &okay);
+    __ stop("The hole escaped");
+    __ bind(&okay);
   }
 
-  // check for failure result
-  Label failure_returned;
-  STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
-  // Lower 2 bits of r2 are 0 iff r0 has failure tag.
-  __ add(r2, r0, Operand(1));
-  __ tst(r2, Operand(kFailureTagMask));
-  __ b(eq, &failure_returned);
+  // Check result for exception sentinel.
+  Label exception_returned;
+  __ CompareRoot(r0, Heap::kExceptionRootIndex);
+  __ b(eq, &exception_returned);
+
+  ExternalReference pending_exception_address(
+      Isolate::kPendingExceptionAddress, isolate());
+
+  // Check that there is no pending exception, otherwise we
+  // should have returned the exception sentinel.
+  if (FLAG_debug_code) {
+    Label okay;
+    __ mov(r2, Operand(pending_exception_address));
+    __ ldr(r2, MemOperand(r2));
+    __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
+    // Cannot use check here as it attempts to generate call into runtime.
+    __ b(eq, &okay);
+    __ stop("Unexpected pending exception");
+    __ bind(&okay);
  }
 
   // Exit C frame and return.
   // r0:r1: result
   // sp: stack pointer
   // fp: frame pointer
-  // Callee-saved register r4 still holds argc.
+  //  Callee-saved register r4 still holds argc.
   __ LeaveExitFrame(save_doubles_, r4, true);
   __ mov(pc, lr);
 
-  // check if we should retry or throw exception
-  Label retry;
-  __ bind(&failure_returned);
-  STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
-  __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
-  __ b(eq, &retry);
+  // Handling of exception.
+  __ bind(&exception_returned);
 
   // Retrieve the pending exception.
-  __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
-                                       isolate)));
-  __ ldr(r0, MemOperand(ip));
+  __ mov(r2, Operand(pending_exception_address));
+  __ ldr(r0, MemOperand(r2));
 
   // Clear the pending exception.
   __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
-  __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
-                                       isolate)));
-  __ str(r3, MemOperand(ip));
+  __ str(r3, MemOperand(r2));
 
   // Special handling of termination exceptions which are uncatchable
   // by javascript code.
-  __ LoadRoot(r3, Heap::kTerminationExceptionRootIndex);
-  __ cmp(r0, r3);
-  __ b(eq, throw_termination_exception);
-
-  // Handle normal exception.
-  __ jmp(throw_normal_exception);
-
-  __ bind(&retry);  // pass last failure (r0) as parameter (r0) when retrying
-}
-
-
-void CEntryStub::Generate(MacroAssembler* masm) {
-  // Called from JavaScript; parameters are on stack as if calling JS function
-  // r0: number of arguments including receiver
-  // r1: pointer to builtin function
-  // fp: frame pointer  (restored after C call)
-  // sp: stack pointer  (restored as callee's sp after C call)
-  // cp: current context  (C callee-saved)
-
-  ProfileEntryHookStub::MaybeCallEntryHook(masm);
-
-  // Result returned in r0 or r0+r1 by default.
-
-  // NOTE: Invocations of builtins may return failure objects
-  // instead of a proper result. The builtin entry handles
-  // this by performing a garbage collection and retrying the
-  // builtin once.
-
-  // Compute the argv pointer in a callee-saved register.
-  __ add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
-  __ sub(r6, r6, Operand(kPointerSize));
-
-  // Enter the exit frame that transitions from JavaScript to C++.
-  FrameAndConstantPoolScope scope(masm, StackFrame::MANUAL);
-  __ EnterExitFrame(save_doubles_);
-
-  // Set up argc and the builtin function in callee-saved registers.
-  __ mov(r4, Operand(r0));
-  __ mov(r5, Operand(r1));
-
-  // r4: number of arguments (C callee-saved)
-  // r5: pointer to builtin function (C callee-saved)
-  // r6: pointer to first argument (C callee-saved)
-
-  Label throw_normal_exception;
   Label throw_termination_exception;
+  __ CompareRoot(r0, Heap::kTerminationExceptionRootIndex);
+  __ b(eq, &throw_termination_exception);
 
-  // Call into the runtime system.
-  GenerateCore(masm,
-               &throw_normal_exception,
-               &throw_termination_exception,
-               false,
-               false);
-
-  // Do space-specific GC and retry runtime call.
-  GenerateCore(masm,
-               &throw_normal_exception,
-               &throw_termination_exception,
-               true,
-               false);
-
-  // Do full GC and retry runtime call one final time.
-  Failure* failure = Failure::InternalError();
-  __ mov(r0, Operand(reinterpret_cast<int32_t>(failure)));
-  GenerateCore(masm,
-               &throw_normal_exception,
-               &throw_termination_exception,
-               true,
-               true);
-
-  { FrameScope scope(masm, StackFrame::MANUAL);
-    __ PrepareCallCFunction(0, r0);
-    __ CallCFunction(
-        ExternalReference::out_of_memory_function(masm->isolate()), 0, 0);
-  }
+  // Handle normal exception.
+  __ Throw(r0);
 
   __ bind(&throw_termination_exception);
   __ ThrowUncatchable(r0);
-
-  __ bind(&throw_normal_exception);
-  __ Throw(r0);
 }
 
 
@@ -1738,15 +1618,14 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
   // r2: receiver
   // r3: argc
   // r4: argv
-  Isolate* isolate = masm->isolate();
   int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
   if (FLAG_enable_ool_constant_pool) {
-    __ mov(r8, Operand(isolate->factory()->empty_constant_pool_array()));
+    __ mov(r8, Operand(isolate()->factory()->empty_constant_pool_array()));
   }
   __ mov(r7, Operand(Smi::FromInt(marker)));
   __ mov(r6, Operand(Smi::FromInt(marker)));
   __ mov(r5,
-         Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
+         Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
   __ ldr(r5, MemOperand(r5));
   __ mov(ip, Operand(-1));  // Push a bad frame pointer to fail if it is used.
   __ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() |
@@ -1758,7 +1637,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
 
   // If this is the outermost JS call, set js_entry_sp value.
   Label non_outermost_js;
-  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
+  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
   __ mov(r5, Operand(ExternalReference(js_entry_sp)));
   __ ldr(r6, MemOperand(r5));
   __ cmp(r6, Operand::Zero());
@@ -1788,10 +1667,10 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
     // fp will be invalid because the PushTryHandler below sets it to 0 to
     // signal the existence of the JSEntry frame.
     __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
-                                         isolate)));
+                                         isolate())));
   }
   __ str(r0, MemOperand(ip));
-  __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
+  __ LoadRoot(r0, Heap::kExceptionRootIndex);
   __ b(&exit);
 
   // Invoke: Link this frame into the handler chain.  There's only one
@@ -1805,9 +1684,9 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
   // saved values before returning a failure to C.
 
   // Clear any pending exceptions.
-  __ mov(r5, Operand(isolate->factory()->the_hole_value()));
+  __ mov(r5, Operand(isolate()->factory()->the_hole_value()));
   __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
-                                       isolate)));
+                                       isolate())));
   __ str(r5, MemOperand(ip));
 
   // Invoke the function by calling through JS entry trampoline builtin.
@@ -1822,10 +1701,10 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
   // r4: argv
   if (is_construct) {
     ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
-                                      isolate);
+                                      isolate());
     __ mov(ip, Operand(construct_entry));
   } else {
-    ExternalReference entry(Builtins::kJSEntryTrampoline, isolate);
+    ExternalReference entry(Builtins::kJSEntryTrampoline, isolate());
     __ mov(ip, Operand(entry));
   }
   __ ldr(ip, MemOperand(ip));  // deref address
@@ -1851,7 +1730,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
   // Restore the top frame descriptors from the stack.
   __ pop(r3);
   __ mov(ip,
-         Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
+         Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
   __ str(r3, MemOperand(ip));
 
   // Reset the stack to the callee saved registers.
@@ -2010,7 +1889,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
   __ b(ne, &slow);
 
   // Null is not instance of anything.
-  __ cmp(scratch, Operand(masm->isolate()->factory()->null_value()));
+  __ cmp(scratch, Operand(isolate()->factory()->null_value()));
   __ b(ne, &object_not_null);
   __ mov(r0, Operand(Smi::FromInt(1)));
   __ Ret(HasArgsInRegisters() ? 0 : 2);
@@ -2057,7 +1936,7 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
     //  -- r0 : key
     //  -- r1 : receiver
     // -----------------------------------
-    __ cmp(r0, Operand(masm->isolate()->factory()->prototype_string()));
+    __ cmp(r0, Operand(isolate()->factory()->prototype_string()));
     __ b(ne, &miss);
     receiver = r1;
   } else {
@@ -2487,11 +2366,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   Register last_match_info_elements = no_reg;  // will be r6;
 
   // Ensure that a RegExp stack is allocated.
-  Isolate* isolate = masm->isolate();
   ExternalReference address_of_regexp_stack_memory_address =
-      ExternalReference::address_of_regexp_stack_memory_address(isolate);
+      ExternalReference::address_of_regexp_stack_memory_address(isolate());
   ExternalReference address_of_regexp_stack_memory_size =
-      ExternalReference::address_of_regexp_stack_memory_size(isolate);
+      ExternalReference::address_of_regexp_stack_memory_size(isolate());
   __ mov(r0, Operand(address_of_regexp_stack_memory_size));
   __ ldr(r0, MemOperand(r0, 0));
   __ cmp(r0, Operand::Zero());
@@ -2633,7 +2511,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   // subject: Subject string
   // regexp_data: RegExp data (FixedArray)
   // All checks done. Now push arguments for native regexp code.
-  __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2);
+  __ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1, r0, r2);
 
   // Isolates: note we add an additional parameter here (isolate pointer).
   const int kRegExpExecuteArguments = 9;
@@ -2644,7 +2522,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   // Arguments are before that on the stack or in registers.
 
   // Argument 9 (sp[20]): Pass current isolate address.
-  __ mov(r0, Operand(ExternalReference::isolate_address(isolate)));
+  __ mov(r0, Operand(ExternalReference::isolate_address(isolate())));
   __ str(r0, MemOperand(sp, 5 * kPointerSize));
 
   // Argument 8 (sp[16]): Indicate that this is a direct call from JavaScript.
@@ -2666,7 +2544,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
 
   // Argument 5 (sp[4]): static offsets vector buffer.
   __ mov(r0,
-         Operand(ExternalReference::address_of_static_offsets_vector(isolate)));
+         Operand(ExternalReference::address_of_static_offsets_vector(
+             isolate())));
   __ str(r0, MemOperand(sp, 1 * kPointerSize));
 
   // For arguments 4 and 3 get string length, calculate start of string data and
@@ -2697,7 +2576,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
 
   // Locate the code entry and call it.
   __ add(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
-  DirectCEntryStub stub;
+  DirectCEntryStub stub(isolate());
   stub.GenerateCall(masm, r6);
 
   __ LeaveExitFrame(false, no_reg, true);
@@ -2724,9 +2603,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   // stack overflow (on the backtrack stack) was detected in RegExp code but
   // haven't created the exception yet. Handle that in the runtime system.
   // TODO(592): Rerunning the RegExp to get the stack overflow exception.
-  __ mov(r1, Operand(isolate->factory()->the_hole_value()));
+  __ mov(r1, Operand(isolate()->factory()->the_hole_value()));
   __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
-                                       isolate)));
+                                       isolate())));
   __ ldr(r0, MemOperand(r2, 0));
   __ cmp(r0, r1);
   __ b(eq, &runtime);
@@ -2746,7 +2625,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
 
   __ bind(&failure);
   // For failure and exception return null.
-  __ mov(r0, Operand(masm->isolate()->factory()->null_value()));
+  __ mov(r0, Operand(isolate()->factory()->null_value()));
   __ add(sp, sp, Operand(4 * kPointerSize));
   __ Ret();
 
@@ -2808,7 +2687,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   // Get the static offsets vector filled by the native regexp code.
   ExternalReference address_of_static_offsets_vector =
-      ExternalReference::address_of_static_offsets_vector(isolate);
+      ExternalReference::address_of_static_offsets_vector(isolate());
   __ mov(r2, Operand(address_of_static_offsets_vector));
 
   // r1: number of capture registers
@@ -2953,7 +2832,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
     __ SmiTag(r0);
     __ Push(r3, r2, r1, r0);
 
-    CreateAllocationSiteStub create_stub;
+    CreateAllocationSiteStub create_stub(masm->isolate());
     __ CallStub(&create_stub);
 
     __ Pop(r3, r2, r1, r0);
@@ -2977,11 +2856,62 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
 }
 
 
+static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
+  // Do not transform the receiver for strict mode functions.
+  __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+  __ ldr(r4, FieldMemOperand(r3, SharedFunctionInfo::kCompilerHintsOffset));
+  __ tst(r4, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+                           kSmiTagSize)));
+  __ b(ne, cont);
+
+  // Do not transform the receiver for native (Compilerhints already in r3).
+  __ tst(r4, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+  __ b(ne, cont);
+}
+
+
+static void EmitSlowCase(MacroAssembler* masm,
+                         int argc,
+                         Label* non_function) {
+  // Check for function proxy.
+  __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
+  __ b(ne, non_function);
+  __ push(r1);  // put proxy as additional argument
+  __ mov(r0, Operand(argc + 1, RelocInfo::NONE32));
+  __ mov(r2, Operand::Zero());
+  __ GetBuiltinFunction(r1, Builtins::CALL_FUNCTION_PROXY);
+  {
+    Handle<Code> adaptor =
+        masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+    __ Jump(adaptor, RelocInfo::CODE_TARGET);
+  }
+
+  // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
+  // of the original receiver from the call site).
+  __ bind(non_function);
+  __ str(r1, MemOperand(sp, argc * kPointerSize));
+  __ mov(r0, Operand(argc));  // Set up the number of arguments.
+  __ mov(r2, Operand::Zero());
+  __ GetBuiltinFunction(r1, Builtins::CALL_NON_FUNCTION);
+  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+          RelocInfo::CODE_TARGET);
+}
+
+
+static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
+  // Wrap the receiver and patch it back onto the stack.
+  { FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
+    __ Push(r1, r3);
+    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+    __ pop(r1);
+  }
+  __ str(r0, MemOperand(sp, argc * kPointerSize));
+  __ jmp(cont);
+}
+
+
 void CallFunctionStub::Generate(MacroAssembler* masm) {
   // r1 : the function to call
-  // r2 : feedback vector
-  // r3 : (only if r2 is not the megamorphic symbol) slot in feedback
-  //      vector (Smi)
   Label slow, non_function, wrap, cont;
 
   if (NeedsChecks()) {
@@ -2992,36 +2922,20 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
     // Goto slow case if we do not have a function.
     __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
     __ b(ne, &slow);
-
-    if (RecordCallTarget()) {
-      GenerateRecordCallTarget(masm);
-      // Type information was updated. Because we may call Array, which
-      // expects either undefined or an AllocationSite in ebx we need
-      // to set ebx to undefined.
-      __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
-    }
   }
 
   // Fast-case: Invoke the function now.
   // r1: pushed function
-  ParameterCount actual(argc_);
+  int argc = argc_;
+  ParameterCount actual(argc);
 
   if (CallAsMethod()) {
     if (NeedsChecks()) {
-      // Do not transform the receiver for strict mode functions.
-      __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
-      __ ldr(r4, FieldMemOperand(r3, SharedFunctionInfo::kCompilerHintsOffset));
-      __ tst(r4, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
-                               kSmiTagSize)));
-      __ b(ne, &cont);
-
-      // Do not transform the receiver for native (Compilerhints already in r3).
-      __ tst(r4, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
-      __ b(ne, &cont);
+      EmitContinueIfStrictOrNative(masm, &cont);
     }
 
     // Compute the receiver in sloppy mode.
-    __ ldr(r3, MemOperand(sp, argc_ * kPointerSize));
+    __ ldr(r3, MemOperand(sp, argc * kPointerSize));
 
     if (NeedsChecks()) {
       __ JumpIfSmi(r3, &wrap);
@@ -3033,55 +2947,18 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
     __ bind(&cont);
   }
 
+  __ InvokeFunction(r1, actual, JUMP_FUNCTION, NullCallWrapper());
 
   if (NeedsChecks()) {
     // Slow-case: Non-function called.
     __ bind(&slow);
-    if (RecordCallTarget()) {
-      // If there is a call target cache, mark it megamorphic in the
-      // non-function case. MegamorphicSentinel is an immortal immovable
-      // object (megamorphic symbol) so no write barrier is needed.
-      ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
-                masm->isolate()->heap()->megamorphic_symbol());
-      __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
-      __ LoadRoot(ip, Heap::kMegamorphicSymbolRootIndex);
-      __ str(ip, FieldMemOperand(r5, FixedArray::kHeaderSize));
-    }
-    // Check for function proxy.
-    __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
-    __ b(ne, &non_function);
-    __ push(r1);  // put proxy as additional argument
-    __ mov(r0, Operand(argc_ + 1, RelocInfo::NONE32));
-    __ mov(r2, Operand::Zero());
-    __ GetBuiltinFunction(r1, Builtins::CALL_FUNCTION_PROXY);
-    {
-      Handle<Code> adaptor =
-          masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
-      __ Jump(adaptor, RelocInfo::CODE_TARGET);
-    }
-
-    // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
-    // of the original receiver from the call site).
-    __ bind(&non_function);
-    __ str(r1, MemOperand(sp, argc_ * kPointerSize));
-    __ mov(r0, Operand(argc_));  // Set up the number of arguments.
-    __ mov(r2, Operand::Zero());
-    __ GetBuiltinFunction(r1, Builtins::CALL_NON_FUNCTION);
-    __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
-            RelocInfo::CODE_TARGET);
+    EmitSlowCase(masm, argc, &non_function);
   }
 
   if (CallAsMethod()) {
     __ bind(&wrap);
-    // Wrap the receiver and patch it back onto the stack.
-    { FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
-      __ Push(r1, r3);
-      __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
-      __ pop(r1);
-    }
-    __ str(r0, MemOperand(sp, argc_ * kPointerSize));
-    __ jmp(&cont);
+    EmitWrapCase(masm, argc, &cont);
   }
 }
 
@@ -3145,11 +3022,114 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
   __ bind(&do_call);
   // Set expected number of arguments to zero (not changing r0).
__ mov(r2, Operand::Zero()); - __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), + __ Jump(isolate()->builtins()->ArgumentsAdaptorTrampoline(), RelocInfo::CODE_TARGET); } +static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) { + __ ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); + __ ldr(vector, FieldMemOperand(vector, + JSFunction::kSharedFunctionInfoOffset)); + __ ldr(vector, FieldMemOperand(vector, + SharedFunctionInfo::kFeedbackVectorOffset)); +} + + +void CallICStub::Generate(MacroAssembler* masm) { + // r1 - function + // r3 - slot id (Smi) + Label extra_checks_or_miss, slow_start; + Label slow, non_function, wrap, cont; + Label have_js_function; + int argc = state_.arg_count(); + ParameterCount actual(argc); + + EmitLoadTypeFeedbackVector(masm, r2); + + // The checks. First, does r1 match the recorded monomorphic target? + __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3)); + __ ldr(r4, FieldMemOperand(r4, FixedArray::kHeaderSize)); + __ cmp(r1, r4); + __ b(ne, &extra_checks_or_miss); + + __ bind(&have_js_function); + if (state_.CallAsMethod()) { + EmitContinueIfStrictOrNative(masm, &cont); + // Compute the receiver in sloppy mode. + __ ldr(r3, MemOperand(sp, argc * kPointerSize)); + + __ JumpIfSmi(r3, &wrap); + __ CompareObjectType(r3, r4, r4, FIRST_SPEC_OBJECT_TYPE); + __ b(lt, &wrap); + + __ bind(&cont); + } + + __ InvokeFunction(r1, actual, JUMP_FUNCTION, NullCallWrapper()); + + __ bind(&slow); + EmitSlowCase(masm, argc, &non_function); + + if (state_.CallAsMethod()) { + __ bind(&wrap); + EmitWrapCase(masm, argc, &cont); + } + + __ bind(&extra_checks_or_miss); + Label miss; + + __ CompareRoot(r4, Heap::kMegamorphicSymbolRootIndex); + __ b(eq, &slow_start); + __ CompareRoot(r4, Heap::kUninitializedSymbolRootIndex); + __ b(eq, &miss); + + if (!FLAG_trace_ic) { + // We are going megamorphic, and we don't want to visit the runtime. + __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3)); + __ LoadRoot(ip, Heap::kMegamorphicSymbolRootIndex); + __ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize)); + __ jmp(&slow_start); + } + + // We are here because tracing is on or we are going monomorphic. + __ bind(&miss); + GenerateMiss(masm); + + // the slow case + __ bind(&slow_start); + // Check that the function is really a JavaScript function. + // r1: pushed function (to be verified) + __ JumpIfSmi(r1, &non_function); + + // Goto slow case if we do not have a function. + __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE); + __ b(ne, &slow); + __ jmp(&have_js_function); +} + + +void CallICStub::GenerateMiss(MacroAssembler* masm) { + // Get the receiver of the function from the stack; 1 ~ return address. + __ ldr(r4, MemOperand(sp, (state_.arg_count() + 1) * kPointerSize)); + + { + FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); + + // Push the receiver and the function and feedback info. + __ Push(r4, r1, r2, r3); + + // Call the entry. + ExternalReference miss = ExternalReference(IC_Utility(IC::kCallIC_Miss), + masm->isolate()); + __ CallExternalReference(miss, 4); + + // Move result to edi and exit the internal frame. + __ mov(r1, r0); + } +} + + // StringCharCodeAtGenerator void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { Label flat_string; @@ -3512,10 +3492,8 @@ void SubStringStub::Generate(MacroAssembler* masm) { // Make sure first argument is a string. __ ldr(r0, MemOperand(sp, kStringOffset)); - // Do a JumpIfSmi, but fold its jump into the subsequent string test. 
- __ SmiTst(r0); - Condition is_string = masm->IsObjectStringType(r0, r1, ne); - ASSERT(is_string == eq); + __ JumpIfSmi(r0, &runtime); + Condition is_string = masm->IsObjectStringType(r0, r1); __ b(NegateCondition(is_string), &runtime); Label single_char; @@ -3673,7 +3651,7 @@ void SubStringStub::Generate(MacroAssembler* masm) { masm, r1, r5, r2, r3, r4, r6, r9, DEST_ALWAYS_ALIGNED); __ bind(&return_r0); - Counters* counters = masm->isolate()->counters(); + Counters* counters = isolate()->counters(); __ IncrementCounter(counters->sub_string_native(), 1, r3, r4); __ Drop(3); __ Ret(); @@ -3808,7 +3786,7 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop( void StringCompareStub::Generate(MacroAssembler* masm) { Label runtime; - Counters* counters = masm->isolate()->counters(); + Counters* counters = isolate()->counters(); // Stack frame on entry. // sp[0]: right string @@ -3842,223 +3820,17 @@ void StringCompareStub::Generate(MacroAssembler* masm) { } -void ArrayPushStub::Generate(MacroAssembler* masm) { - Register receiver = r0; - Register scratch = r1; - - int argc = arguments_count(); - - if (argc == 0) { - // Nothing to do, just return the length. - __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); - __ Drop(argc + 1); - __ Ret(); - return; - } - - Isolate* isolate = masm->isolate(); - - if (argc != 1) { - __ TailCallExternalReference( - ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1); - return; - } - - Label call_builtin, attempt_to_grow_elements, with_write_barrier; - - Register elements = r6; - Register end_elements = r5; - // Get the elements array of the object. - __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset)); - - if (IsFastSmiOrObjectElementsKind(elements_kind())) { - // Check that the elements are in fast mode and writable. - __ CheckMap(elements, - scratch, - Heap::kFixedArrayMapRootIndex, - &call_builtin, - DONT_DO_SMI_CHECK); - } - - // Get the array's length into scratch and calculate new length. - __ ldr(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset)); - __ add(scratch, scratch, Operand(Smi::FromInt(argc))); - - // Get the elements' length. - __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset)); - - // Check if we could survive without allocation. - __ cmp(scratch, r4); - - const int kEndElementsOffset = - FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize; - - if (IsFastSmiOrObjectElementsKind(elements_kind())) { - __ b(gt, &attempt_to_grow_elements); - - // Check if value is a smi. - __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize)); - __ JumpIfNotSmi(r4, &with_write_barrier); - - // Store the value. - // We may need a register containing the address end_elements below, so - // write back the value in end_elements. - __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(scratch)); - __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex)); - } else { - // Check if we could survive without allocation. - __ cmp(scratch, r4); - __ b(gt, &call_builtin); - - __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize)); - __ StoreNumberToDoubleElements(r4, scratch, elements, r5, d0, - &call_builtin, argc * kDoubleSize); - } - - // Save new length. 
- __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset)); - __ Drop(argc + 1); - __ mov(r0, scratch); - __ Ret(); - - if (IsFastDoubleElementsKind(elements_kind())) { - __ bind(&call_builtin); - __ TailCallExternalReference( - ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1); - return; - } - - __ bind(&with_write_barrier); - - if (IsFastSmiElementsKind(elements_kind())) { - if (FLAG_trace_elements_transitions) __ jmp(&call_builtin); - - __ ldr(r9, FieldMemOperand(r4, HeapObject::kMapOffset)); - __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); - __ cmp(r9, ip); - __ b(eq, &call_builtin); - - ElementsKind target_kind = IsHoleyElementsKind(elements_kind()) - ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS; - __ ldr(r3, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX)); - __ ldr(r3, FieldMemOperand(r3, GlobalObject::kNativeContextOffset)); - __ ldr(r3, ContextOperand(r3, Context::JS_ARRAY_MAPS_INDEX)); - const int header_size = FixedArrayBase::kHeaderSize; - // Verify that the object can be transitioned in place. - const int origin_offset = header_size + elements_kind() * kPointerSize; - __ ldr(r2, FieldMemOperand(receiver, origin_offset)); - __ ldr(ip, FieldMemOperand(r3, HeapObject::kMapOffset)); - __ cmp(r2, ip); - __ b(ne, &call_builtin); - - const int target_offset = header_size + target_kind * kPointerSize; - __ ldr(r3, FieldMemOperand(r3, target_offset)); - __ mov(r2, receiver); - ElementsTransitionGenerator::GenerateMapChangeElementsTransition( - masm, DONT_TRACK_ALLOCATION_SITE, NULL); - } - - // Save new length. - __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset)); - - // Store the value. - // We may need a register containing the address end_elements below, so write - // back the value in end_elements. - __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(scratch)); - __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex)); - - __ RecordWrite(elements, - end_elements, - r4, - kLRHasNotBeenSaved, - kDontSaveFPRegs, - EMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); - __ Drop(argc + 1); - __ mov(r0, scratch); - __ Ret(); - - __ bind(&attempt_to_grow_elements); - // scratch: array's length + 1. - - if (!FLAG_inline_new) { - __ bind(&call_builtin); - __ TailCallExternalReference( - ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1); - return; - } - - __ ldr(r2, MemOperand(sp, (argc - 1) * kPointerSize)); - // Growing elements that are SMI-only requires special handling in case the - // new element is non-Smi. For now, delegate to the builtin. - if (IsFastSmiElementsKind(elements_kind())) { - __ JumpIfNotSmi(r2, &call_builtin); - } - - // We could be lucky and the elements array could be at the top of new-space. - // In this case we can just grow it in place by moving the allocation pointer - // up. - ExternalReference new_space_allocation_top = - ExternalReference::new_space_allocation_top_address(isolate); - ExternalReference new_space_allocation_limit = - ExternalReference::new_space_allocation_limit_address(isolate); - - const int kAllocationDelta = 4; - ASSERT(kAllocationDelta >= argc); - // Load top and check if it is the end of elements. 
- __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(scratch)); - __ add(end_elements, end_elements, Operand(kEndElementsOffset)); - __ mov(r4, Operand(new_space_allocation_top)); - __ ldr(r3, MemOperand(r4)); - __ cmp(end_elements, r3); - __ b(ne, &call_builtin); - - __ mov(r9, Operand(new_space_allocation_limit)); - __ ldr(r9, MemOperand(r9)); - __ add(r3, r3, Operand(kAllocationDelta * kPointerSize)); - __ cmp(r3, r9); - __ b(hi, &call_builtin); - - // We fit and could grow elements. - // Update new_space_allocation_top. - __ str(r3, MemOperand(r4)); - // Push the argument. - __ str(r2, MemOperand(end_elements)); - // Fill the rest with holes. - __ LoadRoot(r3, Heap::kTheHoleValueRootIndex); - for (int i = 1; i < kAllocationDelta; i++) { - __ str(r3, MemOperand(end_elements, i * kPointerSize)); - } - - // Update elements' and array's sizes. - __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset)); - __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset)); - __ add(r4, r4, Operand(Smi::FromInt(kAllocationDelta))); - __ str(r4, FieldMemOperand(elements, FixedArray::kLengthOffset)); - - // Elements are in new space, so write barrier is not required. - __ Drop(argc + 1); - __ mov(r0, scratch); - __ Ret(); - - __ bind(&call_builtin); - __ TailCallExternalReference( - ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1); -} - - void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- r1 : left // -- r0 : right // -- lr : return address // ----------------------------------- - Isolate* isolate = masm->isolate(); // Load r2 with the allocation site. We stick an undefined dummy value here // and replace it with the real allocation site later when we instantiate this // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate(). - __ Move(r2, handle(isolate->heap()->undefined_value())); + __ Move(r2, handle(isolate()->heap()->undefined_value())); // Make sure that we actually patched the allocation site. if (FLAG_debug_code) { @@ -4074,7 +3846,7 @@ void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) { // Tail call into the stub that handles binary operations with allocation // sites. - BinaryOpWithAllocationSiteStub stub(state_); + BinaryOpWithAllocationSiteStub stub(isolate(), state_); __ TailCallStub(&stub); } @@ -4152,9 +3924,9 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { __ bind(&unordered); __ bind(&generic_stub); - ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC, + ICCompareStub stub(isolate(), op_, CompareIC::GENERIC, CompareIC::GENERIC, CompareIC::GENERIC); - __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); + __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); __ bind(&maybe_undefined1); if (Token::IsOrderedRelationalCompareOp(op_)) { @@ -4377,7 +4149,7 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) { { // Call the runtime system in a fresh internal frame. 
ExternalReference miss = - ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate()); + ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate()); FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); __ Push(r1, r0); @@ -4409,7 +4181,7 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) { void DirectCEntryStub::GenerateCall(MacroAssembler* masm, Register target) { intptr_t code = - reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location()); + reinterpret_cast<intptr_t>(GetCode().location()); __ Move(ip, target); __ mov(lr, Operand(code, RelocInfo::CODE_TARGET)); __ blx(lr); // Call the stub. @@ -4485,7 +4257,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, __ stm(db_w, sp, spill_mask); __ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); __ mov(r1, Operand(Handle<Name>(name))); - NameDictionaryLookupStub stub(NEGATIVE_LOOKUP); + NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP); __ CallStub(&stub); __ cmp(r0, Operand::Zero()); __ ldm(ia_w, sp, spill_mask); @@ -4561,7 +4333,7 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, __ Move(r0, elements); __ Move(r1, name); } - NameDictionaryLookupStub stub(POSITIVE_LOOKUP); + NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP); __ CallStub(&stub); __ cmp(r0, Operand::Zero()); __ mov(scratch2, Operand(r2)); @@ -4665,11 +4437,11 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) { void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime( Isolate* isolate) { - StoreBufferOverflowStub stub1(kDontSaveFPRegs); - stub1.GetCode(isolate); + StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs); + stub1.GetCode(); // Hydrogen code stubs need stub2 at snapshot time. - StoreBufferOverflowStub stub2(kSaveFPRegs); - stub2.GetCode(isolate); + StoreBufferOverflowStub stub2(isolate, kSaveFPRegs); + stub2.GetCode(); } @@ -4774,12 +4546,11 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) { __ Move(address, regs_.address()); __ Move(r0, regs_.object()); __ Move(r1, address); - __ mov(r2, Operand(ExternalReference::isolate_address(masm->isolate()))); + __ mov(r2, Operand(ExternalReference::isolate_address(isolate()))); AllowExternalCallThatCantCauseGC scope(masm); __ CallCFunction( - ExternalReference::incremental_marking_record_write_function( - masm->isolate()), + ExternalReference::incremental_marking_record_write_function(isolate()), argument_count); regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_); } @@ -4934,8 +4705,8 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { - CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs); - __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); + CEntryStub ces(isolate(), 1, fp_registers_ ? 
kSaveFPRegs : kDontSaveFPRegs); + __ Call(ces.GetCode(), RelocInfo::CODE_TARGET); int parameter_count_offset = StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset; __ ldr(r1, MemOperand(fp, parameter_count_offset)); @@ -4951,8 +4722,9 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { if (masm->isolate()->function_entry_hook() != NULL) { - PredictableCodeSizeScope predictable(masm, 4 * Assembler::kInstrSize); - ProfileEntryHookStub stub; + ProfileEntryHookStub stub(masm->isolate()); + int code_size = masm->CallStubSize(&stub) + 2 * Assembler::kInstrSize; + PredictableCodeSizeScope predictable(masm, code_size); __ push(lr); __ CallStub(&stub); __ pop(lr); @@ -4998,18 +4770,18 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) { #if V8_HOST_ARCH_ARM int32_t entry_hook = - reinterpret_cast<int32_t>(masm->isolate()->function_entry_hook()); + reinterpret_cast<int32_t>(isolate()->function_entry_hook()); __ mov(ip, Operand(entry_hook)); #else // Under the simulator we need to indirect the entry hook through a // trampoline function at a known address. // It additionally takes an isolate as a third parameter - __ mov(r2, Operand(ExternalReference::isolate_address(masm->isolate()))); + __ mov(r2, Operand(ExternalReference::isolate_address(isolate()))); ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline)); __ mov(ip, Operand(ExternalReference(&dispatcher, ExternalReference::BUILTIN_CALL, - masm->isolate()))); + isolate()))); #endif __ Call(ip); @@ -5027,7 +4799,7 @@ template<class T> static void CreateArrayDispatch(MacroAssembler* masm, AllocationSiteOverrideMode mode) { if (mode == DISABLE_ALLOCATION_SITES) { - T stub(GetInitialFastElementsKind(), mode); + T stub(masm->isolate(), GetInitialFastElementsKind(), mode); __ TailCallStub(&stub); } else if (mode == DONT_OVERRIDE) { int last_index = GetSequenceIndexFromFastElementsKind( @@ -5035,7 +4807,7 @@ static void CreateArrayDispatch(MacroAssembler* masm, for (int i = 0; i <= last_index; ++i) { ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); __ cmp(r3, Operand(kind)); - T stub(kind); + T stub(masm->isolate(), kind); __ TailCallStub(&stub, eq); } @@ -5077,12 +4849,14 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm, ElementsKind initial = GetInitialFastElementsKind(); ElementsKind holey_initial = GetHoleyElementsKind(initial); - ArraySingleArgumentConstructorStub stub_holey(holey_initial, + ArraySingleArgumentConstructorStub stub_holey(masm->isolate(), + holey_initial, DISABLE_ALLOCATION_SITES); __ TailCallStub(&stub_holey); __ bind(&normal_sequence); - ArraySingleArgumentConstructorStub stub(initial, + ArraySingleArgumentConstructorStub stub(masm->isolate(), + initial, DISABLE_ALLOCATION_SITES); __ TailCallStub(&stub); } else if (mode == DONT_OVERRIDE) { @@ -5110,7 +4884,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm, for (int i = 0; i <= last_index; ++i) { ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); __ cmp(r3, Operand(kind)); - ArraySingleArgumentConstructorStub stub(kind); + ArraySingleArgumentConstructorStub stub(masm->isolate(), kind); __ TailCallStub(&stub, eq); } @@ -5128,11 +4902,11 @@ static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) { TERMINAL_FAST_ELEMENTS_KIND); for (int i = 0; i <= to_index; ++i) { ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); - T stub(kind); - stub.GetCode(isolate); + T stub(isolate, kind); + 
stub.GetCode(); if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) { - T stub1(kind, DISABLE_ALLOCATION_SITES); - stub1.GetCode(isolate); + T stub1(isolate, kind, DISABLE_ALLOCATION_SITES); + stub1.GetCode(); } } } @@ -5153,12 +4927,12 @@ void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime( ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS }; for (int i = 0; i < 2; i++) { // For internal arrays we only need a few things - InternalArrayNoArgumentConstructorStub stubh1(kinds[i]); - stubh1.GetCode(isolate); - InternalArraySingleArgumentConstructorStub stubh2(kinds[i]); - stubh2.GetCode(isolate); - InternalArrayNArgumentsConstructorStub stubh3(kinds[i]); - stubh3.GetCode(isolate); + InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]); + stubh1.GetCode(); + InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]); + stubh2.GetCode(); + InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]); + stubh3.GetCode(); } } @@ -5236,10 +5010,10 @@ void InternalArrayConstructorStub::GenerateCase( MacroAssembler* masm, ElementsKind kind) { __ cmp(r0, Operand(1)); - InternalArrayNoArgumentConstructorStub stub0(kind); + InternalArrayNoArgumentConstructorStub stub0(isolate(), kind); __ TailCallStub(&stub0, lo); - InternalArrayNArgumentsConstructorStub stubN(kind); + InternalArrayNArgumentsConstructorStub stubN(isolate(), kind); __ TailCallStub(&stubN, hi); if (IsFastPackedElementsKind(kind)) { @@ -5249,11 +5023,11 @@ void InternalArrayConstructorStub::GenerateCase( __ cmp(r3, Operand::Zero()); InternalArraySingleArgumentConstructorStub - stub1_holey(GetHoleyElementsKind(kind)); + stub1_holey(isolate(), GetHoleyElementsKind(kind)); __ TailCallStub(&stub1_holey, ne); } - InternalArraySingleArgumentConstructorStub stub1(kind); + InternalArraySingleArgumentConstructorStub stub1(isolate(), kind); __ TailCallStub(&stub1); } @@ -5342,8 +5116,6 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) { STATIC_ASSERT(FCA::kHolderIndex == 0); STATIC_ASSERT(FCA::kArgsLength == 7); - Isolate* isolate = masm->isolate(); - // context save __ push(context); // load context from callee @@ -5365,7 +5137,7 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) { __ push(scratch); // isolate __ mov(scratch, - Operand(ExternalReference::isolate_address(isolate))); + Operand(ExternalReference::isolate_address(isolate()))); __ push(scratch); // holder __ push(holder); @@ -5377,7 +5149,7 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) { // it's not controlled by GC. 
const int kApiStackSpace = 4; - FrameAndConstantPoolScope frame_scope(masm, StackFrame::MANUAL); + FrameScope frame_scope(masm, StackFrame::MANUAL); __ EnterExitFrame(false, kApiStackSpace); ASSERT(!api_function_address.is(r0) && !scratch.is(r0)); @@ -5397,11 +5169,8 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) { __ str(ip, MemOperand(r0, 3 * kPointerSize)); const int kStackUnwindSpace = argc + FCA::kArgsLength + 1; - Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback); - ExternalReference::Type thunk_type = ExternalReference::PROFILING_API_CALL; - ApiFunction thunk_fun(thunk_address); - ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type, - masm->isolate()); + ExternalReference thunk_ref = + ExternalReference::invoke_function_callback(isolate()); AllowExternalCallThatCantCauseGC scope(masm); MemOperand context_restore_operand( @@ -5437,7 +5206,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) { __ add(r1, r0, Operand(1 * kPointerSize)); // r1 = PCA const int kApiStackSpace = 1; - FrameAndConstantPoolScope frame_scope(masm, StackFrame::MANUAL); + FrameScope frame_scope(masm, StackFrame::MANUAL); __ EnterExitFrame(false, kApiStackSpace); // Create PropertyAccessorInfo instance on the stack above the exit frame with @@ -5447,12 +5216,8 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) { const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1; - Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback); - ExternalReference::Type thunk_type = - ExternalReference::PROFILING_GETTER_CALL; - ApiFunction thunk_fun(thunk_address); - ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type, - masm->isolate()); + ExternalReference thunk_ref = + ExternalReference::invoke_accessor_getter_callback(isolate()); __ CallApiFunctionAndReturn(api_function_address, thunk_ref, kStackUnwindSpace, diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h index ef78802bef..3237b3af41 100644 --- a/deps/v8/src/arm/code-stubs-arm.h +++ b/deps/v8/src/arm/code-stubs-arm.h @@ -1,29 +1,6 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_ARM_CODE_STUBS_ARM_H_ #define V8_ARM_CODE_STUBS_ARM_H_ @@ -39,8 +16,8 @@ void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code); class StoreBufferOverflowStub: public PlatformCodeStub { public: - explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp) - : save_doubles_(save_fp) {} + StoreBufferOverflowStub(Isolate* isolate, SaveFPRegsMode save_fp) + : PlatformCodeStub(isolate), save_doubles_(save_fp) {} void Generate(MacroAssembler* masm); @@ -91,7 +68,7 @@ class StringHelper : public AllStatic { class SubStringStub: public PlatformCodeStub { public: - SubStringStub() {} + explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {} private: Major MajorKey() { return SubString; } @@ -104,7 +81,7 @@ class SubStringStub: public PlatformCodeStub { class StringCompareStub: public PlatformCodeStub { public: - StringCompareStub() { } + explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) { } // Compares two flat ASCII strings and returns result in r0. static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm, @@ -144,10 +121,12 @@ class StringCompareStub: public PlatformCodeStub { // so you don't have to set up the frame. 
class WriteInt32ToHeapNumberStub : public PlatformCodeStub { public: - WriteInt32ToHeapNumberStub(Register the_int, + WriteInt32ToHeapNumberStub(Isolate* isolate, + Register the_int, Register the_heap_number, Register scratch) - : the_int_(the_int), + : PlatformCodeStub(isolate), + the_int_(the_int), the_heap_number_(the_heap_number), scratch_(scratch) { } @@ -177,12 +156,14 @@ class WriteInt32ToHeapNumberStub : public PlatformCodeStub { class RecordWriteStub: public PlatformCodeStub { public: - RecordWriteStub(Register object, + RecordWriteStub(Isolate* isolate, + Register object, Register value, Register address, RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) - : object_(object), + : PlatformCodeStub(isolate), + object_(object), value_(value), address_(address), remembered_set_action_(remembered_set_action), @@ -363,7 +344,7 @@ class RecordWriteStub: public PlatformCodeStub { // moved by GC class DirectCEntryStub: public PlatformCodeStub { public: - DirectCEntryStub() {} + explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {} void Generate(MacroAssembler* masm); void GenerateCall(MacroAssembler* masm, Register target); @@ -379,7 +360,8 @@ class NameDictionaryLookupStub: public PlatformCodeStub { public: enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP }; - explicit NameDictionaryLookupStub(LookupMode mode) : mode_(mode) { } + NameDictionaryLookupStub(Isolate* isolate, LookupMode mode) + : PlatformCodeStub(isolate), mode_(mode) { } void Generate(MacroAssembler* masm); diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc index cfc9dfec4d..8a46006eb9 100644 --- a/deps/v8/src/arm/codegen-arm.cc +++ b/deps/v8/src/arm/codegen-arm.cc @@ -1,29 +1,6 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
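The constructor changes in code-stubs-arm.h above are one mechanical transformation applied throughout this diff: every stub now receives the Isolate* at construction and hands it to PlatformCodeStub, which is why call sites switch from stub.GetCode(masm->isolate()) to stub.GetCode() and from masm->isolate() to the stub's own isolate(). A minimal sketch of the resulting shape — the member and accessor below are assumptions for illustration, not copied from V8's code-stubs.h:

    // Sketch only: the isolate-threading pattern behind these hunks.
    class PlatformCodeStub {
     public:
      explicit PlatformCodeStub(Isolate* isolate) : isolate_(isolate) {}
      Isolate* isolate() const { return isolate_; }  // used by Generate()
      Handle<Code> GetCode();  // no longer takes an Isolate* argument
     private:
      Isolate* isolate_;  // assumed storage threading the isolate to GetCode()
    };

    // Call sites shrink accordingly, e.g.:
    //   before: SubStringStub stub;            stub.GetCode(masm->isolate());
    //   after:  SubStringStub stub(isolate()); stub.GetCode();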
#include "v8.h" @@ -102,13 +79,11 @@ UnaryMathFunction CreateExpFunction() { #if defined(V8_HOST_ARCH_ARM) OS::MemCopyUint8Function CreateMemCopyUint8Function( - OS::MemCopyUint8Function stub) { + OS::MemCopyUint8Function stub) { #if defined(USE_SIMULATOR) return stub; #else - if (Serializer::enabled() || !CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) { - return stub; - } + if (!CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) return stub; size_t actual_size; byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true)); if (buffer == NULL) return stub; @@ -260,13 +235,11 @@ OS::MemCopyUint8Function CreateMemCopyUint8Function( // Convert 8 to 16. The number of character to copy must be at least 8. OS::MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function( - OS::MemCopyUint16Uint8Function stub) { + OS::MemCopyUint16Uint8Function stub) { #if defined(USE_SIMULATOR) return stub; #else - if (Serializer::enabled() || !CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) { - return stub; - } + if (!CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) return stub; size_t actual_size; byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true)); if (buffer == NULL) return stub; @@ -849,47 +822,46 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm, static const uint32_t kCodeAgePatchFirstInstruction = 0xe24f0008; #endif -static byte* GetNoCodeAgeSequence(uint32_t* length) { - // The sequence of instructions that is patched out for aging code is the - // following boilerplate stack-building prologue that is found in FUNCTIONS - static bool initialized = false; - static uint32_t sequence[kNoCodeAgeSequenceLength]; - byte* byte_sequence = reinterpret_cast<byte*>(sequence); - *length = kNoCodeAgeSequenceLength * Assembler::kInstrSize; - if (!initialized) { - // Since patcher is a large object, allocate it dynamically when needed, - // to avoid overloading the stack in stress conditions. - SmartPointer<CodePatcher> - patcher(new CodePatcher(byte_sequence, kNoCodeAgeSequenceLength)); - PredictableCodeSizeScope scope(patcher->masm(), *length); - patcher->masm()->PushFixedFrame(r1); - patcher->masm()->nop(ip.code()); - patcher->masm()->add( - fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); - initialized = true; - } - return byte_sequence; +CodeAgingHelper::CodeAgingHelper() { + ASSERT(young_sequence_.length() == kNoCodeAgeSequenceLength); + // Since patcher is a large object, allocate it dynamically when needed, + // to avoid overloading the stack in stress conditions. + // DONT_FLUSH is used because the CodeAgingHelper is initialized early in + // the process, before ARM simulator ICache is setup. 
+ SmartPointer<CodePatcher> patcher( + new CodePatcher(young_sequence_.start(), + young_sequence_.length() / Assembler::kInstrSize, + CodePatcher::DONT_FLUSH)); + PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length()); + patcher->masm()->PushFixedFrame(r1); + patcher->masm()->nop(ip.code()); + patcher->masm()->add( + fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); +} + + +#ifdef DEBUG +bool CodeAgingHelper::IsOld(byte* candidate) const { + return Memory::uint32_at(candidate) == kCodeAgePatchFirstInstruction; } +#endif -bool Code::IsYoungSequence(byte* sequence) { - uint32_t young_length; - byte* young_sequence = GetNoCodeAgeSequence(&young_length); - bool result = !memcmp(sequence, young_sequence, young_length); - ASSERT(result || - Memory::uint32_at(sequence) == kCodeAgePatchFirstInstruction); +bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) { + bool result = isolate->code_aging_helper()->IsYoung(sequence); + ASSERT(result || isolate->code_aging_helper()->IsOld(sequence)); return result; } -void Code::GetCodeAgeAndParity(byte* sequence, Age* age, +void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age, MarkingParity* parity) { - if (IsYoungSequence(sequence)) { + if (IsYoungSequence(isolate, sequence)) { *age = kNoAgeCodeAge; *parity = NO_MARKING_PARITY; } else { Address target_address = Memory::Address_at( - sequence + Assembler::kInstrSize * (kNoCodeAgeSequenceLength - 1)); + sequence + (kNoCodeAgeSequenceLength - Assembler::kInstrSize)); Code* stub = GetCodeFromTargetAddress(target_address); GetCodeAgeAndParity(stub, age, parity); } @@ -900,10 +872,9 @@ void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence, Code::Age age, MarkingParity parity) { - uint32_t young_length; - byte* young_sequence = GetNoCodeAgeSequence(&young_length); + uint32_t young_length = isolate->code_aging_helper()->young_sequence_length(); if (age == kNoAgeCodeAge) { - CopyBytes(sequence, young_sequence, young_length); + isolate->code_aging_helper()->CopyYoungSequenceTo(sequence); CPU::FlushICache(sequence, young_length); } else { Code* stub = GetCodeAgeStub(isolate, age, parity); diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h index 0bf7ccadca..2fc8eb3f0a 100644 --- a/deps/v8/src/arm/codegen-arm.h +++ b/deps/v8/src/arm/codegen-arm.h @@ -1,29 +1,6 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_ARM_CODEGEN_ARM_H_ #define V8_ARM_CODEGEN_ARM_H_ diff --git a/deps/v8/src/arm/constants-arm.cc b/deps/v8/src/arm/constants-arm.cc index 7d59a84b1d..676239f829 100644 --- a/deps/v8/src/arm/constants-arm.cc +++ b/deps/v8/src/arm/constants-arm.cc @@ -1,29 +1,6 @@ // Copyright 2009 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #include "v8.h" diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h index 14f4705cbd..5bace505fa 100644 --- a/deps/v8/src/arm/constants-arm.h +++ b/deps/v8/src/arm/constants-arm.h @@ -1,29 +1,6 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_ARM_CONSTANTS_ARM_H_ #define V8_ARM_CONSTANTS_ARM_H_ diff --git a/deps/v8/src/arm/cpu-arm.cc b/deps/v8/src/arm/cpu-arm.cc index 20c6a5dcce..083d9b39eb 100644 --- a/deps/v8/src/arm/cpu-arm.cc +++ b/deps/v8/src/arm/cpu-arm.cc @@ -1,29 +1,6 @@ // Copyright 2006-2009 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. // CPU specific code for arm independent of OS goes here. #ifdef __arm__ @@ -46,16 +23,6 @@ namespace v8 { namespace internal { -void CPU::SetUp() { - CpuFeatures::Probe(); -} - - -bool CPU::SupportsCrankshaft() { - return CpuFeatures::IsSupported(VFP3); -} - - void CPU::FlushICache(void* start, size_t size) { // Nothing to do flushing no instructions. 
if (size == 0) { diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc index 12258ccad9..c3270f0bcd 100644 --- a/deps/v8/src/arm/debug-arm.cc +++ b/deps/v8/src/arm/debug-arm.cc @@ -1,29 +1,6 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #include "v8.h" @@ -35,7 +12,6 @@ namespace v8 { namespace internal { -#ifdef ENABLE_DEBUGGER_SUPPORT bool BreakLocationIterator::IsDebugBreakAtReturn() { return Debug::IsDebugBreakAtReturn(rinfo()); } @@ -56,7 +32,7 @@ void BreakLocationIterator::SetDebugBreakAtReturn() { patcher.masm()->ldr(v8::internal::ip, MemOperand(v8::internal::pc, 0)); patcher.masm()->blx(v8::internal::ip); patcher.Emit( - debug_info_->GetIsolate()->debug()->debug_break_return()->entry()); + debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry()); patcher.masm()->bkpt(0); } @@ -97,7 +73,7 @@ void BreakLocationIterator::SetDebugBreakAtSlot() { patcher.masm()->ldr(v8::internal::ip, MemOperand(v8::internal::pc, 0)); patcher.masm()->blx(v8::internal::ip); patcher.Emit( - debug_info_->GetIsolate()->debug()->debug_break_slot()->entry()); + debug_info_->GetIsolate()->builtins()->Slot_DebugBreak()->entry()); } @@ -146,7 +122,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm, __ mov(r0, Operand::Zero()); // no arguments __ mov(r1, Operand(ExternalReference::debug_break(masm->isolate()))); - CEntryStub ceb(1); + CEntryStub ceb(masm->isolate(), 1); __ CallStub(&ceb); // Restore the register values from the expression stack. 
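The debug-break patch sequences above (SetDebugBreakAtReturn and SetDebugBreakAtSlot) rely on the ARM convention that reading pc yields the current instruction's address plus 8, so ldr ip, [pc, #0] loads the literal word that sits immediately after the blx. A sketch of the shared three-word shape, wrapped in a hypothetical helper for clarity — the individual calls are the ones from the hunks above:

    // Hypothetical helper: the three-word call patch both setters emit.
    void EmitPatchedCall(CodePatcher* patcher, Address entry) {
      // word 0: pc reads as (word 0 address + 8), i.e. the address of word 2
      patcher->masm()->ldr(ip, MemOperand(pc, 0));
      // word 1: call the debug-break builtin through ip
      patcher->masm()->blx(ip);
      // word 2: literal holding the builtin's entry address, loaded by word 0
      patcher->Emit(entry);
    }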
@@ -179,6 +155,16 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm, } +void Debug::GenerateCallICStubDebugBreak(MacroAssembler* masm) { + // Register state for CallICStub + // ----------- S t a t e ------------- + // -- r1 : function + // -- r3 : slot in feedback array (smi) + // ----------------------------------- + Generate_DebugBreakCallHelper(masm, r1.bit() | r3.bit(), 0); +} + + void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) { // Calling convention for IC load (from ic-arm.cc). // ----------- S t a t e ------------- @@ -235,15 +221,6 @@ void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) { } -void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) { - // Calling convention for IC call (from ic-arm.cc) - // ----------- S t a t e ------------- - // -- r2 : name - // ----------------------------------- - Generate_DebugBreakCallHelper(masm, r2.bit(), 0); -} - - void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) { // In places other than IC call sites it is expected that r0 is TOS which // is an object - this is not generally the case so this should be used with @@ -261,17 +238,6 @@ void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) { } -void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) { - // Register state for CallFunctionStub (from code-stubs-arm.cc). - // ----------- S t a t e ------------- - // -- r1 : function - // -- r2 : feedback array - // -- r3 : slot in feedback array - // ----------------------------------- - Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit() | r3.bit(), 0); -} - - void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) { // Calling convention for CallConstructStub (from code-stubs-arm.cc) // ----------- S t a t e ------------- @@ -329,10 +295,6 @@ const bool Debug::kFrameDropperSupported = false; #undef __ - - -#endif // ENABLE_DEBUGGER_SUPPORT - } } // namespace v8::internal #endif // V8_TARGET_ARCH_ARM diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc index ef3ea275cc..aa98c8b75f 100644 --- a/deps/v8/src/arm/deoptimizer-arm.cc +++ b/deps/v8/src/arm/deoptimizer-arm.cc @@ -1,29 +1,6 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #include "v8.h" @@ -54,7 +31,7 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) { // Fail hard and early if we enter this code object again. byte* pointer = code->FindCodeAgeSequence(); if (pointer != NULL) { - pointer += kNoCodeAgeSequenceLength * Assembler::kInstrSize; + pointer += kNoCodeAgeSequenceLength; } else { pointer = code->instruction_start(); } @@ -87,7 +64,8 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) { // We need calls to have a predictable size in the unoptimized code, but // this is optimized code, so we don't have to have a predictable size. int call_size_in_bytes = - MacroAssembler::CallSizeNotPredictableCodeSize(deopt_entry, + MacroAssembler::CallSizeNotPredictableCodeSize(isolate, + deopt_entry, RelocInfo::NONE32); int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize; ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0); diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc index aa8ee22b73..0a5d5b0d39 100644 --- a/deps/v8/src/arm/disasm-arm.cc +++ b/deps/v8/src/arm/disasm-arm.cc @@ -1,29 +1,6 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
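The one-line change to PatchCodeForDeoptimization above is a unit change, not a size change: kNoCodeAgeSequenceLength is now measured in bytes rather than instructions, so the Assembler::kInstrSize multiplications disappear at every call site (Code::GetCodeAgeAndParity and CodeAgingHelper earlier in this diff make the same adjustment). A sketch of the invariant those call sites appear to assume — the assertion and helper are illustrative, not part of the patch:

    // Assumed from the call sites: the byte-sized constant covers whole
    // instructions, and the code-age stub address occupies the sequence's
    // final instruction-sized slot.
    STATIC_ASSERT(kNoCodeAgeSequenceLength % Assembler::kInstrSize == 0);

    byte* CodeAgeStubAddressSlot(byte* sequence) {
      return sequence + (kNoCodeAgeSequenceLength - Assembler::kInstrSize);
    }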
// A Disassembler object is used to disassemble a block of code instruction by // instruction. The default implementation of the NameConverter object can be @@ -1272,7 +1249,7 @@ void Decoder::DecodeTypeVFP(Instruction* instr) { } else if ((instr->Opc2Value() == 0xA) && (instr->Opc3Value() == 0x3) && (instr->Bit(8) == 1)) { // vcvt.f64.s32 Dd, Dd, #<fbits> - int fraction_bits = 32 - ((instr->Bit(5) << 4) | instr->Bits(3, 0)); + int fraction_bits = 32 - ((instr->Bits(3, 0) << 1) | instr->Bit(5)); Format(instr, "vcvt'cond.f64.s32 'Dd, 'Dd"); out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, ", #%d", fraction_bits); diff --git a/deps/v8/src/arm/frames-arm.cc b/deps/v8/src/arm/frames-arm.cc index 780b48a8ef..605f9f4223 100644 --- a/deps/v8/src/arm/frames-arm.cc +++ b/deps/v8/src/arm/frames-arm.cc @@ -1,29 +1,6 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #include "v8.h" diff --git a/deps/v8/src/arm/frames-arm.h b/deps/v8/src/arm/frames-arm.h index 29000ca3ab..6dd5186404 100644 --- a/deps/v8/src/arm/frames-arm.h +++ b/deps/v8/src/arm/frames-arm.h @@ -1,29 +1,6 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_ARM_FRAMES_ARM_H_ #define V8_ARM_FRAMES_ARM_H_ diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc index b5ec2d5fdf..c22caa4a81 100644 --- a/deps/v8/src/arm/full-codegen-arm.cc +++ b/deps/v8/src/arm/full-codegen-arm.cc @@ -1,29 +1,6 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
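The disassembler fix in disasm-arm.cc above corrects the field order when decoding vcvt.f64.s32 with fractional bits: the fraction-bit count is 32 minus the immediate imm4:i, where imm4 occupies bits 3..0 and i (bit 5) is the least significant bit; the removed line had the two fields swapped. A decoding sketch under that reading of the encoding, using the same Instruction accessors as the hunk:

    // Sketch: decode the #<fbits> operand of vcvt.f64.s32 Dd, Dd, #<fbits>.
    int DecodeVcvtFractionBits(Instruction* instr) {
      int imm4 = instr->Bits(3, 0);   // bits [3:0] of the immediate
      int i = instr->Bit(5);          // low bit of the immediate
      return 32 - ((imm4 << 1) | i);  // fbits = 32 - UInt(imm4:i)
    }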
#include "v8.h" @@ -118,14 +95,20 @@ static void EmitStackCheck(MacroAssembler* masm_, Isolate* isolate = masm_->isolate(); Label ok; ASSERT(scratch.is(sp) == (pointers == 0)); + Heap::RootListIndex index; if (pointers != 0) { __ sub(scratch, sp, Operand(pointers * kPointerSize)); + index = Heap::kRealStackLimitRootIndex; + } else { + index = Heap::kStackLimitRootIndex; } - __ LoadRoot(stack_limit_scratch, Heap::kStackLimitRootIndex); + __ LoadRoot(stack_limit_scratch, index); __ cmp(scratch, Operand(stack_limit_scratch)); __ b(hs, &ok); - PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize); - __ Call(isolate->builtins()->StackCheck(), RelocInfo::CODE_TARGET); + Handle<Code> stack_check = isolate->builtins()->StackCheck(); + PredictableCodeSizeScope predictable(masm_, + masm_->CallSize(stack_check, RelocInfo::CODE_TARGET)); + __ Call(stack_check, RelocInfo::CODE_TARGET); __ bind(&ok); } @@ -150,8 +133,6 @@ void FullCodeGenerator::Generate() { handler_table_ = isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED); - InitializeFeedbackVector(); - profiling_counter_ = isolate()->factory()->NewCell( Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate())); SetFunctionPosition(function()); @@ -236,7 +217,7 @@ void FullCodeGenerator::Generate() { __ Push(info->scope()->GetScopeInfo()); __ CallRuntime(Runtime::kHiddenNewGlobalContext, 2); } else if (heap_slots <= FastNewContextStub::kMaximumSlots) { - FastNewContextStub stub(heap_slots); + FastNewContextStub stub(isolate(), heap_slots); __ CallStub(&stub); } else { __ push(r1); @@ -297,7 +278,7 @@ void FullCodeGenerator::Generate() { } else { type = ArgumentsAccessStub::NEW_SLOPPY_FAST; } - ArgumentsAccessStub stub(type); + ArgumentsAccessStub stub(isolate(), type); __ CallStub(&stub); SetVar(arguments, r0, r1, r2); @@ -1187,12 +1168,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { Label non_proxy; __ bind(&fixed_array); - Handle<Object> feedback = Handle<Object>( - Smi::FromInt(TypeFeedbackInfo::kForInFastCaseMarker), - isolate()); - StoreFeedbackVectorSlot(slot, feedback); __ Move(r1, FeedbackVector()); - __ mov(r2, Operand(Smi::FromInt(TypeFeedbackInfo::kForInSlowCaseMarker))); + __ mov(r2, Operand(TypeFeedbackInfo::MegamorphicSentinel(isolate()))); __ str(r2, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(slot))); __ mov(r1, Operand(Smi::FromInt(1))); // Smi indicates slow check @@ -1351,7 +1328,9 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info, !pretenure && scope()->is_function_scope() && info->num_literals() == 0) { - FastNewClosureStub stub(info->strict_mode(), info->is_generator()); + FastNewClosureStub stub(isolate(), + info->strict_mode(), + info->is_generator()); __ mov(r2, Operand(info)); __ CallStub(&stub); } else { @@ -1671,13 +1650,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { : ObjectLiteral::kNoFlags; __ mov(r0, Operand(Smi::FromInt(flags))); int properties_count = constant_properties->length() / 2; - if (expr->may_store_doubles() || expr->depth() > 1 || Serializer::enabled() || - flags != ObjectLiteral::kFastElements || + if (expr->may_store_doubles() || expr->depth() > 1 || + Serializer::enabled(isolate()) || flags != ObjectLiteral::kFastElements || properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) { __ Push(r3, r2, r1, r0); __ CallRuntime(Runtime::kHiddenCreateObjectLiteral, 4); } else { - FastCloneShallowObjectStub stub(properties_count); + FastCloneShallowObjectStub stub(isolate(), 
@@ -150,8 +133,6 @@ void FullCodeGenerator::Generate() {
   handler_table_ =
       isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
 
-  InitializeFeedbackVector();
-
   profiling_counter_ = isolate()->factory()->NewCell(
       Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
   SetFunctionPosition(function());
@@ -236,7 +217,7 @@ void FullCodeGenerator::Generate() {
       __ Push(info->scope()->GetScopeInfo());
       __ CallRuntime(Runtime::kHiddenNewGlobalContext, 2);
     } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
-      FastNewContextStub stub(heap_slots);
+      FastNewContextStub stub(isolate(), heap_slots);
       __ CallStub(&stub);
     } else {
       __ push(r1);
@@ -297,7 +278,7 @@ void FullCodeGenerator::Generate() {
     } else {
       type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
     }
-    ArgumentsAccessStub stub(type);
+    ArgumentsAccessStub stub(isolate(), type);
     __ CallStub(&stub);
 
     SetVar(arguments, r0, r1, r2);
@@ -1187,12 +1168,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
   Label non_proxy;
   __ bind(&fixed_array);
 
-  Handle<Object> feedback = Handle<Object>(
-      Smi::FromInt(TypeFeedbackInfo::kForInFastCaseMarker),
-      isolate());
-  StoreFeedbackVectorSlot(slot, feedback);
   __ Move(r1, FeedbackVector());
-  __ mov(r2, Operand(Smi::FromInt(TypeFeedbackInfo::kForInSlowCaseMarker)));
+  __ mov(r2, Operand(TypeFeedbackInfo::MegamorphicSentinel(isolate())));
   __ str(r2, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(slot)));
 
   __ mov(r1, Operand(Smi::FromInt(1)));  // Smi indicates slow check
@@ -1351,7 +1328,9 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
       !pretenure &&
       scope()->is_function_scope() &&
       info->num_literals() == 0) {
-    FastNewClosureStub stub(info->strict_mode(), info->is_generator());
+    FastNewClosureStub stub(isolate(),
+                            info->strict_mode(),
+                            info->is_generator());
     __ mov(r2, Operand(info));
     __ CallStub(&stub);
   } else {
@@ -1671,13 +1650,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
       : ObjectLiteral::kNoFlags;
   __ mov(r0, Operand(Smi::FromInt(flags)));
   int properties_count = constant_properties->length() / 2;
-  if (expr->may_store_doubles() || expr->depth() > 1 || Serializer::enabled() ||
-      flags != ObjectLiteral::kFastElements ||
+  if (expr->may_store_doubles() || expr->depth() > 1 ||
+      Serializer::enabled(isolate()) || flags != ObjectLiteral::kFastElements ||
      properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
     __ Push(r3, r2, r1, r0);
     __ CallRuntime(Runtime::kHiddenCreateObjectLiteral, 4);
   } else {
-    FastCloneShallowObjectStub stub(properties_count);
+    FastCloneShallowObjectStub stub(isolate(),
+                                    properties_count);
     __ CallStub(&stub);
   }
@@ -1816,13 +1795,14 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   if (has_fast_elements && constant_elements_values->map() ==
       isolate()->heap()->fixed_cow_array_map()) {
     FastCloneShallowArrayStub stub(
+        isolate(),
         FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
         allocation_site_mode,
         length);
     __ CallStub(&stub);
     __ IncrementCounter(
         isolate()->counters()->cow_arrays_created_stub(), 1, r1, r2);
-  } else if (expr->depth() > 1 || Serializer::enabled() ||
+  } else if (expr->depth() > 1 || Serializer::enabled(isolate()) ||
              length > FastCloneShallowArrayStub::kMaximumClonedLength) {
     __ mov(r0, Operand(Smi::FromInt(flags)));
     __ Push(r3, r2, r1, r0);
@@ -1837,7 +1817,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
       mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
     }
 
-    FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
+    FastCloneShallowArrayStub stub(isolate(), mode, allocation_site_mode,
+                                   length);
     __ CallStub(&stub);
   }
@@ -1869,7 +1850,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
                          EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
     } else {
       __ mov(r3, Operand(Smi::FromInt(i)));
-      StoreArrayLiteralElementStub stub;
+      StoreArrayLiteralElementStub stub(isolate());
       __ CallStub(&stub);
     }
@@ -1886,7 +1867,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
 
 
 void FullCodeGenerator::VisitAssignment(Assignment* expr) {
-  ASSERT(expr->target()->IsValidLeftHandSide());
+  ASSERT(expr->target()->IsValidReferenceExpression());
 
   Comment cmnt(masm_, "[ Assignment");
@@ -2114,7 +2095,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
       CallIC(ic, TypeFeedbackId::None());
       __ mov(r1, r0);
       __ str(r1, MemOperand(sp, 2 * kPointerSize));
-      CallFunctionStub stub(1, CALL_AS_METHOD);
+      CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
       __ CallStub(&stub);
 
       __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2272,7 +2253,7 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
   Label gc_required;
   Label allocated;
 
-  Handle<Map> map(isolate()->native_context()->generator_result_map());
+  Handle<Map> map(isolate()->native_context()->iterator_result_map());
 
   __ Allocate(map->instance_size(), r0, r2, r3, &gc_required, TAG_OBJECT);
   __ jmp(&allocated);
@@ -2343,8 +2324,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
   patch_site.EmitJumpIfSmi(scratch1, &smi_case);
 
   __ bind(&stub_call);
-  BinaryOpICStub stub(op, mode);
-  CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
+  BinaryOpICStub stub(isolate(), op, mode);
+  CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
   patch_site.EmitPatchInfo();
   __ jmp(&done);
@@ -2419,16 +2400,16 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
                                      Token::Value op,
                                      OverwriteMode mode) {
   __ pop(r1);
-  BinaryOpICStub stub(op, mode);
+  BinaryOpICStub stub(isolate(), op, mode);
   JumpPatchSite patch_site(masm_);    // unbound, signals no inlined smi code.
-  CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
+  CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
   patch_site.EmitPatchInfo();
   context()->Plug(r0);
 }
 
 
 void FullCodeGenerator::EmitAssignment(Expression* expr) {
-  ASSERT(expr->IsValidLeftHandSide());
+  ASSERT(expr->IsValidReferenceExpression());
 
   // Left-hand side can only be a property, a global or a (parameter or local)
   // slot.
@@ -2628,14 +2609,15 @@ void FullCodeGenerator::CallIC(Handle<Code> code,
 
 
 // Code common for calls using the IC.
-void FullCodeGenerator::EmitCallWithIC(Call* expr) {
+void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
   Expression* callee = expr->expression();
-  ZoneList<Expression*>* args = expr->arguments();
-  int arg_count = args->length();
 
-  CallFunctionFlags flags;
+  CallIC::CallType call_type = callee->IsVariableProxy()
+      ? CallIC::FUNCTION
+      : CallIC::METHOD;
+
   // Get the target function.
-  if (callee->IsVariableProxy()) {
+  if (call_type == CallIC::FUNCTION) {
     { StackValueContext context(this);
       EmitVariableLoad(callee->AsVariableProxy());
       PrepareForBailout(callee, NO_REGISTERS);
@@ -2643,7 +2625,6 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr) {
     // Push undefined as receiver. This is patched in the method prologue if it
     // is a sloppy mode method.
     __ Push(isolate()->factory()->undefined_value());
-    flags = NO_CALL_FUNCTION_FLAGS;
   } else {
     // Load the function from the receiver.
     ASSERT(callee->IsProperty());
@@ -2654,40 +2635,19 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr) {
     __ ldr(ip, MemOperand(sp, 0));
     __ push(ip);
     __ str(r0, MemOperand(sp, kPointerSize));
-    flags = CALL_AS_METHOD;
   }
 
-  // Load the arguments.
-  { PreservePositionScope scope(masm()->positions_recorder());
-    for (int i = 0; i < arg_count; i++) {
-      VisitForStackValue(args->at(i));
-    }
-  }
-
-  // Record source position for debugger.
-  SetSourcePosition(expr->position());
-  CallFunctionStub stub(arg_count, flags);
-  __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
-  __ CallStub(&stub);
-
-  RecordJSReturnSite(expr);
-
-  // Restore context register.
-  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
-  context()->DropAndPlug(1, r0);
+  EmitCall(expr, call_type);
 }
 
 
 // Code common for calls using the IC.
-void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
-                                            Expression* key) {
+void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
+                                                Expression* key) {
   // Load the key.
   VisitForAccumulatorValue(key);
 
   Expression* callee = expr->expression();
-  ZoneList<Expression*>* args = expr->arguments();
-  int arg_count = args->length();
 
   // Load the function from the receiver.
   ASSERT(callee->IsProperty());
@@ -2700,28 +2660,12 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
   __ push(ip);
   __ str(r0, MemOperand(sp, kPointerSize));
 
-  { PreservePositionScope scope(masm()->positions_recorder());
-    for (int i = 0; i < arg_count; i++) {
-      VisitForStackValue(args->at(i));
-    }
-  }
-
-  // Record source position for debugger.
-  SetSourcePosition(expr->position());
-  CallFunctionStub stub(arg_count, CALL_AS_METHOD);
-  __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
-  __ CallStub(&stub);
-
-  RecordJSReturnSite(expr);
-  // Restore context register.
-  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
-  context()->DropAndPlug(1, r0);
+  EmitCall(expr, CallIC::METHOD);
 }
 
 
-void FullCodeGenerator::EmitCallWithStub(Call* expr) {
-  // Code common for calls using the call stub.
+void FullCodeGenerator::EmitCall(Call* expr, CallIC::CallType call_type) {
+  // Load the arguments.
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
   { PreservePositionScope scope(masm()->positions_recorder());
     for (int i = 0; i < arg_count; i++) {
       VisitForStackValue(args->at(i));
     }
   }
-  // Record source position for debugger.
-  SetSourcePosition(expr->position());
-  Handle<Object> uninitialized =
-      TypeFeedbackInfo::UninitializedSentinel(isolate());
-  StoreFeedbackVectorSlot(expr->CallFeedbackSlot(), uninitialized);
-  __ Move(r2, FeedbackVector());
+  // Record source position of the IC call.
+  SetSourcePosition(expr->position());
+  Handle<Code> ic = CallIC::initialize_stub(
+      isolate(), arg_count, call_type);
   __ mov(r3, Operand(Smi::FromInt(expr->CallFeedbackSlot())));
-
-  // Record call targets in unoptimized code.
-  CallFunctionStub stub(arg_count, RECORD_CALL_TARGET);
   __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
-  __ CallStub(&stub);
+  // Don't assign a type feedback id to the IC, since type feedback is provided
+  // by the vector above.
+  CallIC(ic);
+
   RecordJSReturnSite(expr);
   // Restore context register.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2816,7 +2758,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
     // Record source position for debugger.
     SetSourcePosition(expr->position());
-    CallFunctionStub stub(arg_count, NO_CALL_FUNCTION_FLAGS);
+    CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
     __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
     __ CallStub(&stub);
     RecordJSReturnSite(expr);
     // Restore context register.
     __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
     context()->DropAndPlug(1, r0);
   } else if (call_type == Call::GLOBAL_CALL) {
-    EmitCallWithIC(expr);
+    EmitCallWithLoadIC(expr);
 
   } else if (call_type == Call::LOOKUP_SLOT_CALL) {
     // Call to a lookup slot (dynamically introduced variable).
@@ -2864,16 +2806,16 @@ void FullCodeGenerator::VisitCall(Call* expr) {
 
     // The receiver is either the global receiver or an object found
     // by LoadContextSlot.
-    EmitCallWithStub(expr);
+    EmitCall(expr);
   } else if (call_type == Call::PROPERTY_CALL) {
     Property* property = callee->AsProperty();
     { PreservePositionScope scope(masm()->positions_recorder());
       VisitForStackValue(property->obj());
     }
     if (property->key()->IsPropertyName()) {
-      EmitCallWithIC(expr);
+      EmitCallWithLoadIC(expr);
     } else {
-      EmitKeyedCallWithIC(expr, property->key());
+      EmitKeyedCallWithLoadIC(expr, property->key());
     }
   } else {
     ASSERT(call_type == Call::OTHER_CALL);
@@ -2884,7 +2826,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
     __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
     __ push(r1);
     // Emit function call.
-    EmitCallWithStub(expr);
+    EmitCall(expr);
   }
 
 #ifdef DEBUG
@@ -2921,12 +2863,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
   __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
 
   // Record call targets in unoptimized code.
-  Handle<Object> uninitialized =
-      TypeFeedbackInfo::UninitializedSentinel(isolate());
-  StoreFeedbackVectorSlot(expr->CallNewFeedbackSlot(), uninitialized);
   if (FLAG_pretenuring_call_new) {
-    StoreFeedbackVectorSlot(expr->AllocationSiteFeedbackSlot(),
-                            isolate()->factory()->NewAllocationSite());
+    EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
     ASSERT(expr->AllocationSiteFeedbackSlot() ==
            expr->CallNewFeedbackSlot() + 1);
   }
@@ -2934,8 +2872,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
   __ Move(r2, FeedbackVector());
   __ mov(r3, Operand(Smi::FromInt(expr->CallNewFeedbackSlot())));
 
-  CallConstructStub stub(RECORD_CALL_TARGET);
-  __ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
+  CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
+  __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
   PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
   context()->Plug(r0);
 }
@@ -3305,7 +3243,7 @@ void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
   VisitForAccumulatorValue(args->at(0));
   __ mov(r1, r0);
   __ mov(r0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
-  ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
+  ArgumentsAccessStub stub(isolate(), ArgumentsAccessStub::READ_ELEMENT);
   __ CallStub(&stub);
   context()->Plug(r0);
 }
@@ -3391,31 +3329,9 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
 }
 
 
-void FullCodeGenerator::EmitLog(CallRuntime* expr) {
-  // Conditionally generate a log call.
-  // Args:
-  //   0 (literal string): The type of logging (corresponds to the flags).
-  //     This is used to determine whether or not to generate the log call.
-  //   1 (string): Format string.  Access the string at argument index 2
-  //     with '%2s' (see Logger::LogRuntime for all the formats).
-  //   2 (array): Arguments to the format string.
-  ZoneList<Expression*>* args = expr->arguments();
-  ASSERT_EQ(args->length(), 3);
-  if (CodeGenerator::ShouldGenerateLog(isolate(), args->at(0))) {
-    VisitForStackValue(args->at(1));
-    VisitForStackValue(args->at(2));
-    __ CallRuntime(Runtime::kHiddenLog, 2);
-  }
-
-  // Finally, we're expected to leave a value on the top of the stack.
-  __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
-  context()->Plug(r0);
-}
-
-
 void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
   // Load the arguments on the stack and call the stub.
-  SubStringStub stub;
+  SubStringStub stub(isolate());
   ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
@@ -3428,7 +3344,7 @@ void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
 
 void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
   // Load the arguments on the stack and call the stub.
-  RegExpExecStub stub;
+  RegExpExecStub stub(isolate());
   ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 4);
   VisitForStackValue(args->at(0));
@@ -3578,7 +3494,7 @@ void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
   ASSERT(args->length() == 2);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
-  MathPowStub stub(MathPowStub::ON_STACK);
+  MathPowStub stub(isolate(), MathPowStub::ON_STACK);
   __ CallStub(&stub);
   context()->Plug(r0);
 }
@@ -3618,7 +3534,7 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
   // Load the argument into r0 and call the stub.
   VisitForAccumulatorValue(args->at(0));
 
-  NumberToStringStub stub;
+  NumberToStringStub stub(isolate());
   __ CallStub(&stub);
   context()->Plug(r0);
 }
@@ -3741,7 +3657,7 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
   VisitForAccumulatorValue(args->at(1));
 
   __ pop(r1);
-  StringAddStub stub(STRING_ADD_CHECK_BOTH, NOT_TENURED);
+  StringAddStub stub(isolate(), STRING_ADD_CHECK_BOTH, NOT_TENURED);
   __ CallStub(&stub);
   context()->Plug(r0);
 }
@@ -3753,32 +3669,12 @@ void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
 
-  StringCompareStub stub;
+  StringCompareStub stub(isolate());
   __ CallStub(&stub);
   context()->Plug(r0);
 }
 
 
-void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
-  // Load the argument on the stack and call the runtime function.
-  ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
-  VisitForStackValue(args->at(0));
-  __ CallRuntime(Runtime::kMath_log, 1);
-  context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
-  // Load the argument on the stack and call the runtime function.
-  ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
-  VisitForStackValue(args->at(0));
-  __ CallRuntime(Runtime::kMath_sqrt, 1);
-  context()->Plug(r0);
-}
-
-
 void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() >= 2);
@@ -3812,7 +3708,7 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
 
 
 void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
-  RegExpConstructResultStub stub;
+  RegExpConstructResultStub stub(isolate());
   ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
@@ -4178,7 +4074,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
 
     // Record source position of the IC call.
     SetSourcePosition(expr->position());
-    CallFunctionStub stub(arg_count, NO_CALL_FUNCTION_FLAGS);
+    CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
     __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
     __ CallStub(&stub);
@@ -4310,7 +4206,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
 
 
 void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
-  ASSERT(expr->expression()->IsValidLeftHandSide());
+  ASSERT(expr->expression()->IsValidReferenceExpression());
 
   Comment cmnt(masm_, "[ CountOperation");
   SetSourcePosition(expr->position());
@@ -4396,7 +4292,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
       __ jmp(&stub_call);
       __ bind(&slow);
     }
-    ToNumberStub convert_stub;
+    ToNumberStub convert_stub(isolate());
    __ CallStub(&convert_stub);
 
     // Save result for postfix expressions.
@@ -4427,8 +4323,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
     // Record position before stub call.
     SetSourcePosition(expr->position());
-    BinaryOpICStub stub(Token::ADD, NO_OVERWRITE);
-    CallIC(stub.GetCode(isolate()), expr->CountBinOpFeedbackId());
+    BinaryOpICStub stub(isolate(), Token::ADD, NO_OVERWRITE);
+    CallIC(stub.GetCode(), expr->CountBinOpFeedbackId());
     patch_site.EmitPatchInfo();
     __ bind(&done);
@@ -4539,13 +4435,14 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
   }
   PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
 
-  if (check->Equals(isolate()->heap()->number_string())) {
+  Factory* factory = isolate()->factory();
+  if (String::Equals(check, factory->number_string())) {
     __ JumpIfSmi(r0, if_true);
     __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
     __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
     __ cmp(r0, ip);
     Split(eq, if_true, if_false, fall_through);
-  } else if (check->Equals(isolate()->heap()->string_string())) {
+  } else if (String::Equals(check, factory->string_string())) {
     __ JumpIfSmi(r0, if_false);
     // Check for undetectable objects => false.
     __ CompareObjectType(r0, r0, r1, FIRST_NONSTRING_TYPE);
@@ -4553,20 +4450,20 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
     __ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset));
     __ tst(r1, Operand(1 << Map::kIsUndetectable));
     Split(eq, if_true, if_false, fall_through);
-  } else if (check->Equals(isolate()->heap()->symbol_string())) {
+  } else if (String::Equals(check, factory->symbol_string())) {
     __ JumpIfSmi(r0, if_false);
     __ CompareObjectType(r0, r0, r1, SYMBOL_TYPE);
     Split(eq, if_true, if_false, fall_through);
-  } else if (check->Equals(isolate()->heap()->boolean_string())) {
+  } else if (String::Equals(check, factory->boolean_string())) {
     __ CompareRoot(r0, Heap::kTrueValueRootIndex);
     __ b(eq, if_true);
     __ CompareRoot(r0, Heap::kFalseValueRootIndex);
     Split(eq, if_true, if_false, fall_through);
   } else if (FLAG_harmony_typeof &&
-             check->Equals(isolate()->heap()->null_string())) {
+             String::Equals(check, factory->null_string())) {
     __ CompareRoot(r0, Heap::kNullValueRootIndex);
     Split(eq, if_true, if_false, fall_through);
-  } else if (check->Equals(isolate()->heap()->undefined_string())) {
+  } else if (String::Equals(check, factory->undefined_string())) {
     __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
     __ b(eq, if_true);
     __ JumpIfSmi(r0, if_false);
@@ -4576,14 +4473,14 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
     __ tst(r1, Operand(1 << Map::kIsUndetectable));
     Split(ne, if_true, if_false, fall_through);
 
-  } else if (check->Equals(isolate()->heap()->function_string())) {
+  } else if (String::Equals(check, factory->function_string())) {
     __ JumpIfSmi(r0, if_false);
     STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
     __ CompareObjectType(r0, r0, r1, JS_FUNCTION_TYPE);
     __ b(eq, if_true);
     __ cmp(r1, Operand(JS_FUNCTION_PROXY_TYPE));
     Split(eq, if_true, if_false, fall_through);
-  } else if (check->Equals(isolate()->heap()->object_string())) {
+  } else if (String::Equals(check, factory->object_string())) {
     __ JumpIfSmi(r0, if_false);
     if (!FLAG_harmony_typeof) {
       __ CompareRoot(r0, Heap::kNullValueRootIndex);
@@ -4636,7 +4533,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
 
     case Token::INSTANCEOF: {
       VisitForStackValue(expr->right());
-      InstanceofStub stub(InstanceofStub::kNoFlags);
+      InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
       __ CallStub(&stub);
       PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
       // The stub returns 0 for true.
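The typeof hunk above swaps raw heap-string comparisons for handle-safe String::Equals calls; the dispatch itself stays a literal-by-literal chain of type tests. A plain C++ sketch of that shape, using a hypothetical Value type rather than the V8 object model (the real code emits branches instead of returning bools, and the function/object cases need map checks that are elided here):

    #include <string>

    enum class Type { kSmi, kHeapNumber, kString, kSymbol, kBoolean, kOther };
    struct Value { Type type; };

    // Each typeof literal gets its own dedicated type test.
    bool TypeofEquals(const Value& v, const std::string& check) {
      if (check == "number")
        return v.type == Type::kSmi || v.type == Type::kHeapNumber;
      if (check == "string") return v.type == Type::kString;
      if (check == "symbol") return v.type == Type::kSymbol;
      if (check == "boolean") return v.type == Type::kBoolean;
      return false;  // undefined/function/object cases elided
    }
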
@@ -4780,7 +4677,8 @@ void FullCodeGenerator::EnterFinallyBlock() {
   ExternalReference has_pending_message =
       ExternalReference::address_of_has_pending_message(isolate());
   __ mov(ip, Operand(has_pending_message));
-  __ ldr(r1, MemOperand(ip));
+  STATIC_ASSERT(sizeof(bool) == 1);  // NOLINT(runtime/sizeof)
+  __ ldrb(r1, MemOperand(ip));
   __ SmiTag(r1);
   __ push(r1);
 
@@ -4806,7 +4704,8 @@ void FullCodeGenerator::ExitFinallyBlock() {
   ExternalReference has_pending_message =
       ExternalReference::address_of_has_pending_message(isolate());
   __ mov(ip, Operand(has_pending_message));
-  __ str(r1, MemOperand(ip));
+  STATIC_ASSERT(sizeof(bool) == 1);  // NOLINT(runtime/sizeof)
+  __ strb(r1, MemOperand(ip));
 
   __ pop(r1);
   ExternalReference pending_message_obj =
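The two hunks above switch word-sized ldr/str to byte-sized ldrb/strb because has_pending_message is a C++ bool, which is one byte; a 32-bit access would also touch the three neighbouring bytes. A standalone C++ illustration of what the word-sized load would observe:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      struct { bool flag; char neighbours[3]; } data = {true, {7, 7, 7}};
      uint32_t word;
      // What a 32-bit load from the bool's address actually reads.
      std::memcpy(&word, &data, sizeof(word));
      std::printf("byte view: %d, word view: 0x%08x\n", data.flag, word);
      return 0;
    }
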
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index 3d57105afe..4626e37516 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -1,29 +1,6 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 
 #include "v8.h"
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc
index 55705b8073..0c10a65c21 100644
--- a/deps/v8/src/arm/lithium-arm.cc
+++ b/deps/v8/src/arm/lithium-arm.cc
@@ -1,29 +1,6 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 
 #include "v8.h"
@@ -623,6 +600,8 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
       !hinstr->HasObservableSideEffects();
   if (needs_environment && !instr->HasEnvironment()) {
     instr = AssignEnvironment(instr);
+    // We can't really figure out if the environment is needed or not.
+    instr->environment()->set_has_been_used();
   }
 
   return instr;
@@ -871,7 +850,8 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
   // that it was just a plain use), so it is free to move the split child into
   // the same register that is used for the use-at-start.
   // See https://code.google.com/p/chromium/issues/detail?id=201590
-  if (!(instr->ClobbersRegisters() && instr->ClobbersDoubleRegisters())) {
+  if (!(instr->ClobbersRegisters() &&
+        instr->ClobbersDoubleRegisters(isolate()))) {
     int fixed = 0;
     int used_at_start = 0;
     for (UseIterator it(instr); !it.Done(); it.Advance()) {
@@ -931,18 +911,20 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
   if (goto_instr != NULL) return goto_instr;
 
   HValue* value = instr->value();
-  LBranch* result = new(zone()) LBranch(UseRegister(value));
-  // Tagged values that are not known smis or booleans require a
-  // deoptimization environment.  If the instruction is generic no
-  // environment is needed since all cases are handled.
-  Representation rep = value->representation();
+  Representation r = value->representation();
   HType type = value->type();
   ToBooleanStub::Types expected = instr->expected_input_types();
-  if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean() &&
-      !expected.IsGeneric()) {
-    return AssignEnvironment(result);
+  if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+
+  bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
+      type.IsJSArray() || type.IsHeapNumber() || type.IsString();
+  LInstruction* branch = new(zone()) LBranch(UseRegister(value));
+  if (!easy_case &&
+      ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+       !expected.IsGeneric())) {
+    branch = AssignEnvironment(branch);
   }
-  return result;
+  return branch;
 }
 
 
@@ -1138,8 +1120,11 @@ LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
       ? NULL
       : UseFixed(instr->context(), cp);
   LOperand* input = UseRegister(instr->value());
-  LMathAbs* result = new(zone()) LMathAbs(context, input);
-  return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+  LInstruction* result =
+      DefineAsRegister(new(zone()) LMathAbs(context, input));
+  if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result);
+  if (!r.IsDouble()) result = AssignEnvironment(result);
+  return result;
 }
 
@@ -1284,15 +1269,25 @@ LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
 }
 
 
-LInstruction* LChunkBuilder::DoDivI(HBinaryOperation* instr) {
+LInstruction* LChunkBuilder::DoDivI(HDiv* instr) {
   ASSERT(instr->representation().IsSmiOrInteger32());
   ASSERT(instr->left()->representation().Equals(instr->representation()));
   ASSERT(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegister(instr->left());
   LOperand* divisor = UseRegister(instr->right());
   LOperand* temp = CpuFeatures::IsSupported(SUDIV) ? NULL : FixedTemp(d4);
-  LDivI* div = new(zone()) LDivI(dividend, divisor, temp);
-  return AssignEnvironment(DefineAsRegister(div));
+  LInstruction* result =
+      DefineAsRegister(new(zone()) LDivI(dividend, divisor, temp));
+  if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+      instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+      (instr->CheckFlag(HValue::kCanOverflow) &&
+       (!CpuFeatures::IsSupported(SUDIV) ||
+        !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) ||
+      (!instr->IsMathFloorOfDiv() &&
+       !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
+    result = AssignEnvironment(result);
+  }
+  return result;
 }
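DoDivI above now attaches a deopt environment only when one of integer division's failure modes is actually reachable. A standalone C++ sketch of those failure modes, mirroring the hydrogen flags checked in the builder (illustrative, not the V8 API):

    #include <climits>

    // Conditions under which a 32-bit truncating division cannot just
    // return a machine result and would have to deoptimize.
    bool DivisionNeedsBailout(int dividend, int divisor,
                              bool all_uses_truncate) {
      if (divisor == 0) return true;                     // kCanBeDivByZero
      if (dividend == 0 && divisor < 0) return true;     // -0 result
      if (dividend == INT_MIN && divisor == -1) return true;  // overflow
      // A non-zero remainder only matters when a use needs the exact value.
      return !all_uses_truncate && (dividend % divisor != 0);
    }
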
@@ -1346,13 +1341,25 @@ LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
 }
 
 
+LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
+  ASSERT(instr->representation().IsSmiOrInteger32());
+  ASSERT(instr->left()->representation().Equals(instr->representation()));
+  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  LOperand* divisor = UseRegister(instr->right());
+  LOperand* temp = CpuFeatures::IsSupported(SUDIV) ? NULL : FixedTemp(d4);
+  LFlooringDivI* div = new(zone()) LFlooringDivI(dividend, divisor, temp);
+  return AssignEnvironment(DefineAsRegister(div));
+}
+
+
 LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
   if (instr->RightIsPowerOf2()) {
     return DoFlooringDivByPowerOf2I(instr);
   } else if (instr->right()->IsConstant()) {
     return DoFlooringDivByConstI(instr);
   } else {
-    return DoDivI(instr);
+    return DoFlooringDivI(instr);
   }
 }
 
@@ -1647,6 +1654,8 @@ LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
 
 LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
     HCompareNumericAndBranch* instr) {
+  LInstruction* goto_instr = CheckElideControlInstruction(instr);
+  if (goto_instr != NULL) return goto_instr;
   Representation r = instr->representation();
   if (r.IsSmiOrInteger32()) {
     ASSERT(instr->left()->representation().Equals(r));
@@ -1801,9 +1810,16 @@ LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
 
 
 LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
-  LOperand* value = UseRegisterOrConstantAtStart(instr->index());
-  LOperand* length = UseRegister(instr->length());
-  return AssignEnvironment(new(zone()) LBoundsCheck(value, length));
+  if (!FLAG_debug_code && instr->skip_check()) return NULL;
+  LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+  LOperand* length = !index->IsConstantOperand()
+      ? UseRegisterOrConstantAtStart(instr->length())
+      : UseRegisterAtStart(instr->length());
+  LInstruction* result = new(zone()) LBoundsCheck(index, length);
+  if (!FLAG_debug_code || !instr->skip_check()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
 }
 
 
@@ -1837,20 +1853,21 @@ LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
 LInstruction* LChunkBuilder::DoChange(HChange* instr) {
   Representation from = instr->from();
   Representation to = instr->to();
+  HValue* val = instr->value();
   if (from.IsSmi()) {
     if (to.IsTagged()) {
-      LOperand* value = UseRegister(instr->value());
+      LOperand* value = UseRegister(val);
       return DefineSameAsFirst(new(zone()) LDummyUse(value));
     }
     from = Representation::Tagged();
   }
   if (from.IsTagged()) {
     if (to.IsDouble()) {
-      LOperand* value = UseRegister(instr->value());
-      LNumberUntagD* res = new(zone()) LNumberUntagD(value);
-      return AssignEnvironment(DefineAsRegister(res));
+      LOperand* value = UseRegister(val);
+      LInstruction* result = DefineAsRegister(new(zone()) LNumberUntagD(value));
+      if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+      return result;
     } else if (to.IsSmi()) {
-      HValue* val = instr->value();
       LOperand* value = UseRegister(val);
       if (val->type().IsSmi()) {
         return DefineSameAsFirst(new(zone()) LDummyUse(value));
@@ -1858,66 +1875,59 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
       return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
     } else {
       ASSERT(to.IsInteger32());
-      LOperand* value = NULL;
-      LInstruction* res = NULL;
-      HValue* val = instr->value();
       if (val->type().IsSmi() || val->representation().IsSmi()) {
-        value = UseRegisterAtStart(val);
-        res = DefineAsRegister(new(zone()) LSmiUntag(value, false));
+        LOperand* value = UseRegisterAtStart(val);
+        return DefineAsRegister(new(zone()) LSmiUntag(value, false));
       } else {
-        value = UseRegister(val);
+        LOperand* value = UseRegister(val);
         LOperand* temp1 = TempRegister();
         LOperand* temp2 = FixedTemp(d11);
-        res = DefineSameAsFirst(new(zone()) LTaggedToI(value,
-                                                       temp1,
-                                                       temp2));
-        res = AssignEnvironment(res);
+        LInstruction* result =
+            DefineSameAsFirst(new(zone()) LTaggedToI(value, temp1, temp2));
+        if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+        return result;
       }
-      return res;
     }
   } else if (from.IsDouble()) {
     if (to.IsTagged()) {
       info()->MarkAsDeferredCalling();
-      LOperand* value = UseRegister(instr->value());
+      LOperand* value = UseRegister(val);
       LOperand* temp1 = TempRegister();
       LOperand* temp2 = TempRegister();
-
-      // Make sure that the temp and result_temp registers are
-      // different.
       LUnallocated* result_temp = TempRegister();
       LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2);
-      Define(result, result_temp);
-      return AssignPointerMap(result);
+      return AssignPointerMap(Define(result, result_temp));
     } else if (to.IsSmi()) {
-      LOperand* value = UseRegister(instr->value());
+      LOperand* value = UseRegister(val);
       return AssignEnvironment(
           DefineAsRegister(new(zone()) LDoubleToSmi(value)));
     } else {
       ASSERT(to.IsInteger32());
-      LOperand* value = UseRegister(instr->value());
-      LDoubleToI* res = new(zone()) LDoubleToI(value);
-      return AssignEnvironment(DefineAsRegister(res));
+      LOperand* value = UseRegister(val);
+      LInstruction* result = DefineAsRegister(new(zone()) LDoubleToI(value));
+      if (!instr->CanTruncateToInt32()) result = AssignEnvironment(result);
+      return result;
    }
   } else if (from.IsInteger32()) {
     info()->MarkAsDeferredCalling();
     if (to.IsTagged()) {
-      HValue* val = instr->value();
-      LOperand* value = UseRegisterAtStart(val);
       if (!instr->CheckFlag(HValue::kCanOverflow)) {
+        LOperand* value = UseRegisterAtStart(val);
         return DefineAsRegister(new(zone()) LSmiTag(value));
       } else if (val->CheckFlag(HInstruction::kUint32)) {
+        LOperand* value = UseRegisterAtStart(val);
        LOperand* temp1 = TempRegister();
        LOperand* temp2 = TempRegister();
        LNumberTagU* result = new(zone()) LNumberTagU(value, temp1, temp2);
-        return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+        return AssignPointerMap(DefineAsRegister(result));
      } else {
+        LOperand* value = UseRegisterAtStart(val);
        LOperand* temp1 = TempRegister();
        LOperand* temp2 = TempRegister();
        LNumberTagI* result = new(zone()) LNumberTagI(value, temp1, temp2);
-        return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+        return AssignPointerMap(DefineAsRegister(result));
      }
     } else if (to.IsSmi()) {
-      HValue* val = instr->value();
       LOperand* value = UseRegister(val);
       LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value));
       if (instr->CheckFlag(HValue::kCanOverflow)) {
@@ -1926,12 +1936,10 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
       return result;
     } else {
       ASSERT(to.IsDouble());
-      if (instr->value()->CheckFlag(HInstruction::kUint32)) {
-        return DefineAsRegister(
-            new(zone()) LUint32ToDouble(UseRegister(instr->value())));
+      if (val->CheckFlag(HInstruction::kUint32)) {
+        return DefineAsRegister(new(zone()) LUint32ToDouble(UseRegister(val)));
       } else {
-        return DefineAsRegister(
-            new(zone()) LInteger32ToDouble(Use(instr->value())));
+        return DefineAsRegister(new(zone()) LInteger32ToDouble(Use(val)));
      }
     }
   }
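Most of the DoChange cases above move values between the tagged (Smi) and raw int32 representations. For orientation, a minimal sketch of the 32-bit Smi scheme those conversions assume (illustrative, not the actual V8 macros):

    #include <cstdint>

    // 32-bit V8 stores a small integer shifted left by one; tag bit 0 is 0.
    inline int32_t SmiTag(int32_t value) { return value << 1; }   // can overflow
    inline int32_t SmiUntag(int32_t smi) { return smi >> 1; }
    inline bool IsSmi(int32_t word) { return (word & 1) == 0; }   // LCheckSmi's test
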
@@ -1942,7 +1950,9 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
 
 LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
-  return AssignEnvironment(new(zone()) LCheckNonSmi(value));
+  LInstruction* result = new(zone()) LCheckNonSmi(value);
+  if (!instr->value()->IsHeapObject()) result = AssignEnvironment(result);
+  return result;
 }
 
 
@@ -1966,15 +1976,12 @@ LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
 
 
 LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
-  LOperand* value = NULL;
-  if (!instr->CanOmitMapChecks()) {
-    value = UseRegisterAtStart(instr->value());
-    if (instr->has_migration_target()) info()->MarkAsDeferredCalling();
-  }
-  LCheckMaps* result = new(zone()) LCheckMaps(value);
-  if (!instr->CanOmitMapChecks()) {
-    AssignEnvironment(result);
-    if (instr->has_migration_target()) return AssignPointerMap(result);
+  if (instr->IsStabilityCheck()) return new(zone()) LCheckMaps;
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LInstruction* result = AssignEnvironment(new(zone()) LCheckMaps(value));
+  if (instr->HasMigrationTarget()) {
+    info()->MarkAsDeferredCalling();
+    result = AssignPointerMap(result);
   }
   return result;
 }
@@ -2072,7 +2079,10 @@ LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
   LInstruction* result =
       DefineAsRegister(new(zone()) LLoadContextSlot(context));
-  return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+  if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
 }
 
 
@@ -2087,7 +2097,10 @@ LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
     value = UseRegister(instr->value());
   }
   LInstruction* result = new(zone()) LStoreContextSlot(context, value);
-  return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+  if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
 }
 
 
@@ -2122,7 +2135,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
   ASSERT(instr->key()->representation().IsSmiOrInteger32());
   ElementsKind elements_kind = instr->elements_kind();
   LOperand* key = UseRegisterOrConstantAtStart(instr->key());
-  LLoadKeyed* result = NULL;
+  LInstruction* result = NULL;
 
   if (!instr->is_typed_elements()) {
     LOperand* obj = NULL;
@@ -2132,24 +2145,28 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
       ASSERT(instr->representation().IsSmiOrTagged());
       obj = UseRegisterAtStart(instr->elements());
     }
-    result = new(zone()) LLoadKeyed(obj, key);
+    result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key));
   } else {
     ASSERT(
         (instr->representation().IsInteger32() &&
-         !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
+         !IsDoubleOrFloatElementsKind(elements_kind)) ||
        (instr->representation().IsDouble() &&
-         IsDoubleOrFloatElementsKind(instr->elements_kind())));
+         IsDoubleOrFloatElementsKind(elements_kind)));
     LOperand* backing_store = UseRegister(instr->elements());
-    result = new(zone()) LLoadKeyed(backing_store, key);
+    result = DefineAsRegister(new(zone()) LLoadKeyed(backing_store, key));
   }
 
-  DefineAsRegister(result);
-  // An unsigned int array load might overflow and cause a deopt, make sure it
-  // has an environment.
-  bool can_deoptimize = instr->RequiresHoleCheck() ||
-      elements_kind == EXTERNAL_UINT32_ELEMENTS ||
-      elements_kind == UINT32_ELEMENTS;
-  return can_deoptimize ? AssignEnvironment(result) : result;
+  if ((instr->is_external() || instr->is_fixed_typed_array()) ?
+      // see LCodeGen::DoLoadKeyedExternalArray
+      ((elements_kind == EXTERNAL_UINT32_ELEMENTS ||
+        elements_kind == UINT32_ELEMENTS) &&
+       !instr->CheckFlag(HInstruction::kUint32)) :
+      // see LCodeGen::DoLoadKeyedFixedDoubleArray and
+      // LCodeGen::DoLoadKeyedFixedArray
+      instr->RequiresHoleCheck()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
 }
 
 
@@ -2225,17 +2242,18 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
 
 LInstruction* LChunkBuilder::DoTransitionElementsKind(
     HTransitionElementsKind* instr) {
-  LOperand* object = UseRegister(instr->object());
   if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
+    LOperand* object = UseRegister(instr->object());
     LOperand* new_map_reg = TempRegister();
     LTransitionElementsKind* result =
         new(zone()) LTransitionElementsKind(object, NULL, new_map_reg);
     return result;
   } else {
+    LOperand* object = UseFixed(instr->object(), r0);
     LOperand* context = UseFixed(instr->context(), cp);
     LTransitionElementsKind* result =
         new(zone()) LTransitionElementsKind(object, context, NULL);
-    return AssignPointerMap(result);
+    return MarkAsCall(result, instr);
   }
 }
 
@@ -2279,11 +2297,11 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
   // We need a temporary register for write barrier of the map field.
   LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
 
-  LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp);
-  if (instr->field_representation().IsHeapObject()) {
-    if (!instr->value()->type().IsHeapObject()) {
-      return AssignEnvironment(result);
-    }
+  LInstruction* result = new(zone()) LStoreNamedField(obj, val, temp);
+  if (!instr->access().IsExternalMemory() &&
+      instr->field_representation().IsHeapObject() &&
+      !instr->value()->type().IsHeapObject()) {
+    result = AssignEnvironment(result);
   }
   return result;
 }
@@ -2315,7 +2333,7 @@ LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
   LOperand* context = UseAny(instr->context());
   LStringCharCodeAt* result =
       new(zone()) LStringCharCodeAt(context, string, index);
-  return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+  return AssignPointerMap(DefineAsRegister(result));
 }
 
 
@@ -2371,7 +2389,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
   } else {
     ASSERT(info()->IsStub());
     CodeStubInterfaceDescriptor* descriptor =
-        info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
+        info()->code_stub()->GetInterfaceDescriptor();
     int index = static_cast<int>(instr->index());
     Register reg = descriptor->GetParameterRegister(index);
     return DefineFixed(result, reg);
@@ -2478,6 +2496,7 @@ LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
 
 LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
   HEnvironment* outer = current_block_->last_environment();
+  outer->set_ast_id(instr->ReturnId());
   HConstant* undefined = graph()->GetConstantUndefined();
   HEnvironment* inner = outer->CopyForInlining(instr->closure(),
                                                instr->arguments_count(),
@@ -2538,7 +2557,9 @@ LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
 LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
   LOperand* object = UseRegister(instr->object());
   LOperand* index = UseRegister(instr->index());
-  return DefineAsRegister(new(zone()) LLoadFieldByIndex(object, index));
+  LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
+  LInstruction* result = DefineSameAsFirst(load);
+  return AssignPointerMap(result);
 }
 
 } }  // namespace v8::internal
diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h
index 34eb510177..1a90eb638b 100644
--- a/deps/v8/src/arm/lithium-arm.h
+++ b/deps/v8/src/arm/lithium-arm.h
@@ -1,29 +1,6 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 
 #ifndef V8_ARM_LITHIUM_ARM_H_
 #define V8_ARM_LITHIUM_ARM_H_
@@ -97,6 +74,7 @@ class LCodeGen;
   V(DummyUse)                                   \
   V(FlooringDivByConstI)                        \
   V(FlooringDivByPowerOf2I)                     \
+  V(FlooringDivI)                               \
   V(ForInCacheArray)                            \
   V(ForInPrepareMap)                            \
   V(FunctionLiteral)                            \
@@ -261,7 +239,9 @@ class LInstruction : public ZoneObject {
   // Interface to the register allocator and iterators.
   bool ClobbersTemps() const { return IsCall(); }
   bool ClobbersRegisters() const { return IsCall(); }
-  virtual bool ClobbersDoubleRegisters() const { return IsCall(); }
+  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const {
+    return IsCall();
+  }
 
   // Interface to the register allocator and iterators.
   bool IsMarkedAsCall() const { return IsCall(); }
@@ -713,14 +693,14 @@ class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
 
 class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
-  LDivI(LOperand* left, LOperand* right, LOperand* temp) {
-    inputs_[0] = left;
-    inputs_[1] = right;
+  LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
+    inputs_[0] = dividend;
+    inputs_[1] = divisor;
     temps_[0] = temp;
   }
 
-  LOperand* left() { return inputs_[0]; }
-  LOperand* right() { return inputs_[1]; }
+  LOperand* dividend() { return inputs_[0]; }
+  LOperand* divisor() { return inputs_[1]; }
   LOperand* temp() { return temps_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
@@ -767,6 +747,23 @@ class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
 };
 
 
+class LFlooringDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LFlooringDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
+    inputs_[0] = dividend;
+    inputs_[1] = divisor;
+    temps_[0] = temp;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  LOperand* divisor() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i")
+  DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+};
+
+
 class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LMulI(LOperand* left, LOperand* right) {
@@ -1968,7 +1965,7 @@ class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
   DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
   DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
 
-  virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE {
+  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
     return save_doubles() == kDontSaveFPRegs;
   }
 
@@ -2164,7 +2161,6 @@ class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 1> {
 
   virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
 
-  Handle<Map> transition() const { return hydrogen()->transition_map(); }
   Representation representation() const {
     return hydrogen()->field_representation();
   }
@@ -2379,7 +2375,7 @@ class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 0> {
 
 class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> {
  public:
-  explicit LCheckMaps(LOperand* value) {
+  explicit LCheckMaps(LOperand* value = NULL) {
     inputs_[0] = value;
   }
 
@@ -2696,6 +2692,8 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
         next_block_(NULL),
         allocator_(allocator) { }
 
+  Isolate* isolate() const { return graph_->isolate(); }
+
   // Build the sequence for the graph.
   LPlatformChunk* Build();
 
@@ -2722,12 +2720,13 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
   LInstruction* DoMathClz32(HUnaryMathOperation* instr);
   LInstruction* DoDivByPowerOf2I(HDiv* instr);
   LInstruction* DoDivByConstI(HDiv* instr);
-  LInstruction* DoDivI(HBinaryOperation* instr);
+  LInstruction* DoDivI(HDiv* instr);
   LInstruction* DoModByPowerOf2I(HMod* instr);
   LInstruction* DoModByConstI(HMod* instr);
   LInstruction* DoModI(HMod* instr);
   LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
   LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
+  LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
 
  private:
   enum Status {
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index 7152ba21cc..5a01d3bc84 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -1,29 +1,6 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 
 #include "v8.h"
@@ -86,13 +63,6 @@ void LCodeGen::FinishCode(Handle<Code> code) {
   code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
   if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
   PopulateDeoptimizationData(code);
-  info()->CommitDependencies(code);
-}
-
-
-void LCodeGen::Abort(BailoutReason reason) {
-  info()->set_bailout_reason(reason);
-  status_ = ABORTED;
 }
 
@@ -207,7 +177,7 @@ bool LCodeGen::GeneratePrologue() {
     Comment(";;; Allocate local context");
     // Argument to NewContext is the function, which is in r1.
     if (heap_slots <= FastNewContextStub::kMaximumSlots) {
-      FastNewContextStub stub(heap_slots);
+      FastNewContextStub stub(isolate(), heap_slots);
       __ CallStub(&stub);
     } else {
       __ push(r1);
@@ -714,6 +684,16 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
 }
 
 
+int LCodeGen::CallCodeSize(Handle<Code> code, RelocInfo::Mode mode) {
+  int size = masm()->CallSize(code, mode);
+  if (code->kind() == Code::BINARY_OP_IC ||
+      code->kind() == Code::COMPARE_IC) {
+    size += Assembler::kInstrSize;  // extra nop() added in CallCodeGeneric.
+  }
+  return size;
+}
+
+
 void LCodeGen::CallCode(Handle<Code> code,
                         RelocInfo::Mode mode,
                         LInstruction* instr,
@@ -783,6 +763,7 @@ void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
 
 void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                     Safepoint::DeoptMode mode) {
+  environment->set_has_been_used();
   if (!environment->HasBeenRegistered()) {
     // Physical stack frame layout:
     //   -x ............. -4  0 ..................................... y
@@ -906,7 +887,7 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
   int length = deoptimizations_.length();
   if (length == 0) return;
   Handle<DeoptimizationInputData> data =
-      factory()->NewDeoptimizationInputData(length, TENURED);
+      DeoptimizationInputData::New(isolate(), length, TENURED);
 
   Handle<ByteArray> translations =
       translations_.CreateByteArray(isolate()->factory());
@@ -1095,18 +1076,18 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
   ASSERT(ToRegister(instr->result()).is(r0));
   switch (instr->hydrogen()->major_key()) {
     case CodeStub::RegExpExec: {
-      RegExpExecStub stub;
-      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+      RegExpExecStub stub(isolate());
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
       break;
     }
     case CodeStub::SubString: {
-      SubStringStub stub;
-      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+      SubStringStub stub(isolate());
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
-      StringCompareStub stub;
-      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+      StringCompareStub stub(isolate());
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
@@ -1293,7 +1274,7 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
   Register dividend = ToRegister(instr->dividend());
   int32_t divisor = instr->divisor();
   Register result = ToRegister(instr->result());
-  ASSERT(divisor == kMinInt || (divisor != 0 && IsPowerOf2(Abs(divisor))));
+  ASSERT(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
   ASSERT(!result.is(dividend));
 
   // Check for (0 / -x) that will produce negative zero.
@@ -1363,15 +1344,16 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
 }
 
 
+// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
 void LCodeGen::DoDivI(LDivI* instr) {
   HBinaryOperation* hdiv = instr->hydrogen();
-  Register left = ToRegister(instr->left());
-  Register right = ToRegister(instr->right());
+  Register dividend = ToRegister(instr->dividend());
+  Register divisor = ToRegister(instr->divisor());
   Register result = ToRegister(instr->result());
 
   // Check for x / 0.
   if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
-    __ cmp(right, Operand::Zero());
+    __ cmp(divisor, Operand::Zero());
     DeoptimizeIf(eq, instr->environment());
   }
 
@@ -1380,10 +1362,10 @@ void LCodeGen::DoDivI(LDivI* instr) {
     Label positive;
     if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
       // Do the test only if it hadn't been done above.
-      __ cmp(right, Operand::Zero());
+      __ cmp(divisor, Operand::Zero());
     }
     __ b(pl, &positive);
-    __ cmp(left, Operand::Zero());
+    __ cmp(dividend, Operand::Zero());
     DeoptimizeIf(eq, instr->environment());
     __ bind(&positive);
   }
 
@@ -1394,39 +1376,30 @@ void LCodeGen::DoDivI(LDivI* instr) {
       !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
     // We don't need to check for overflow when truncating with sdiv
     // support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
- __ cmp(left, Operand(kMinInt)); - __ cmp(right, Operand(-1), eq); + __ cmp(dividend, Operand(kMinInt)); + __ cmp(divisor, Operand(-1), eq); DeoptimizeIf(eq, instr->environment()); } if (CpuFeatures::IsSupported(SUDIV)) { CpuFeatureScope scope(masm(), SUDIV); - __ sdiv(result, left, right); + __ sdiv(result, dividend, divisor); } else { DoubleRegister vleft = ToDoubleRegister(instr->temp()); DoubleRegister vright = double_scratch0(); - __ vmov(double_scratch0().low(), left); + __ vmov(double_scratch0().low(), dividend); __ vcvt_f64_s32(vleft, double_scratch0().low()); - __ vmov(double_scratch0().low(), right); + __ vmov(double_scratch0().low(), divisor); __ vcvt_f64_s32(vright, double_scratch0().low()); __ vdiv(vleft, vleft, vright); // vleft now contains the result. __ vcvt_s32_f64(double_scratch0().low(), vleft); __ vmov(result, double_scratch0().low()); } - if (hdiv->IsMathFloorOfDiv()) { - Label done; - Register remainder = scratch0(); - __ mls(remainder, result, right, left); - __ cmp(remainder, Operand::Zero()); - __ b(eq, &done); - __ eor(remainder, remainder, Operand(right)); - __ add(result, result, Operand(remainder, ASR, 31)); - __ bind(&done); - } else if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { + if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { // Compute remainder and deopt if it's not zero. Register remainder = scratch0(); - __ mls(remainder, result, right, left); + __ mls(remainder, result, divisor, dividend); __ cmp(remainder, Operand::Zero()); DeoptimizeIf(ne, instr->environment()); } @@ -1476,19 +1449,21 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) { if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { DeoptimizeIf(eq, instr->environment()); } - if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { - // Note that we could emit branch-free code, but that would need one more - // register. - if (divisor == -1) { - DeoptimizeIf(vs, instr->environment()); - __ mov(result, Operand(dividend, ASR, shift)); - } else { - __ mov(result, Operand(kMinInt / divisor), LeaveCC, vs); - __ mov(result, Operand(dividend, ASR, shift), LeaveCC, vc); - } - } else { + + // If the negation could not overflow, simply shifting is OK. + if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { __ mov(result, Operand(dividend, ASR, shift)); + return; + } + + // Dividing by -1 is basically negation, unless we overflow. + if (divisor == -1) { + DeoptimizeIf(vs, instr->environment()); + return; } + + __ mov(result, Operand(kMinInt / divisor), LeaveCC, vs); + __ mov(result, Operand(dividend, ASR, shift), LeaveCC, vc); } @@ -1538,6 +1513,69 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { } +// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI. +void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) { + HBinaryOperation* hdiv = instr->hydrogen(); + Register left = ToRegister(instr->dividend()); + Register right = ToRegister(instr->divisor()); + Register result = ToRegister(instr->result()); + + // Check for x / 0. + if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { + __ cmp(right, Operand::Zero()); + DeoptimizeIf(eq, instr->environment()); + } + + // Check for (0 / -x) that will produce negative zero. + if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { + Label positive; + if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) { + // Do the test only if it hadn't been done above. 
+ __ cmp(right, Operand::Zero()); + } + __ b(pl, &positive); + __ cmp(left, Operand::Zero()); + DeoptimizeIf(eq, instr->environment()); + __ bind(&positive); + } + + // Check for (kMinInt / -1). + if (hdiv->CheckFlag(HValue::kCanOverflow) && + (!CpuFeatures::IsSupported(SUDIV) || + !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) { + // We don't need to check for overflow when truncating with sdiv + // support because, on ARM, sdiv kMinInt, -1 -> kMinInt. + __ cmp(left, Operand(kMinInt)); + __ cmp(right, Operand(-1), eq); + DeoptimizeIf(eq, instr->environment()); + } + + if (CpuFeatures::IsSupported(SUDIV)) { + CpuFeatureScope scope(masm(), SUDIV); + __ sdiv(result, left, right); + } else { + DoubleRegister vleft = ToDoubleRegister(instr->temp()); + DoubleRegister vright = double_scratch0(); + __ vmov(double_scratch0().low(), left); + __ vcvt_f64_s32(vleft, double_scratch0().low()); + __ vmov(double_scratch0().low(), right); + __ vcvt_f64_s32(vright, double_scratch0().low()); + __ vdiv(vleft, vleft, vright); // vleft now contains the result. + __ vcvt_s32_f64(double_scratch0().low(), vleft); + __ vmov(result, double_scratch0().low()); + } + + Label done; + Register remainder = scratch0(); + __ mls(remainder, result, right, left); + __ cmp(remainder, Operand::Zero()); + __ b(eq, &done); + __ eor(remainder, remainder, Operand(right)); + __ add(result, result, Operand(remainder, ASR, 31)); + __ bind(&done); +} + + void LCodeGen::DoMulI(LMulI* instr) { Register result = ToRegister(instr->result()); // Note that result may alias left. @@ -1835,9 +1873,16 @@ void LCodeGen::DoConstantE(LConstantE* instr) { void LCodeGen::DoConstantT(LConstantT* instr) { - Handle<Object> value = instr->value(isolate()); + Handle<Object> object = instr->value(isolate()); AllowDeferredHandleDereference smi_check; - __ Move(ToRegister(instr->result()), value); + if (instr->hydrogen()->HasObjectMap()) { + Handle<Map> object_map = instr->hydrogen()->ObjectMap().handle(); + ASSERT(object->IsHeapObject()); + ASSERT(!object_map->is_stable() || + *object_map == Handle<HeapObject>::cast(object)->map()); + USE(object_map); + } + __ Move(ToRegister(instr->result()), object); } @@ -2091,11 +2136,11 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) { ASSERT(ToRegister(instr->right()).is(r0)); ASSERT(ToRegister(instr->result()).is(r0)); - BinaryOpICStub stub(instr->op(), NO_OVERWRITE); + BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE); // Block literal pool emission to ensure nop indicating no inlined smi code // is in the correct position. Assembler::BlockConstPoolScope block_const_pool(masm()); - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); } @@ -2686,8 +2731,8 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) { ASSERT(ToRegister(instr->left()).is(r0)); // Object is in r0. ASSERT(ToRegister(instr->right()).is(r1)); // Function is in r1. 
- InstanceofStub stub(InstanceofStub::kArgsInRegisters); - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); __ cmp(r0, Operand::Zero()); __ mov(r0, Operand(factory()->false_value()), LeaveCC, ne); @@ -2783,7 +2828,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, flags | InstanceofStub::kCallSiteInlineCheck); flags = static_cast<InstanceofStub::Flags>( flags | InstanceofStub::kReturnTrueFalseObject); - InstanceofStub stub(flags); + InstanceofStub stub(isolate(), flags); PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); LoadContextFromDeferred(instr->context()); @@ -2805,7 +2850,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, ASSERT_EQ(1, masm_->InstructionsGeneratedSince(&before_push_delta)); __ nop(); } - CallCodeGeneric(stub.GetCode(isolate()), + CallCodeGeneric(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); @@ -3309,7 +3354,8 @@ MemOperand LCodeGen::PrepareKeyedOperand(Register key, __ add(scratch0(), scratch0(), Operand(key, LSL, shift_size)); } else { ASSERT_EQ(-1, shift_size); - __ add(scratch0(), scratch0(), Operand(key, LSR, 1)); + // key can be negative, so using ASR here. + __ add(scratch0(), scratch0(), Operand(key, ASR, 1)); } return MemOperand(base, scratch0()); } @@ -3801,7 +3847,7 @@ void LCodeGen::DoPower(LPower* instr) { ASSERT(ToDoubleRegister(instr->result()).is(d2)); if (exponent_type.IsSmi()) { - MathPowStub stub(MathPowStub::TAGGED); + MathPowStub stub(isolate(), MathPowStub::TAGGED); __ CallStub(&stub); } else if (exponent_type.IsTagged()) { Label no_deopt; @@ -3811,14 +3857,14 @@ void LCodeGen::DoPower(LPower* instr) { __ cmp(r6, Operand(ip)); DeoptimizeIf(ne, instr->environment()); __ bind(&no_deopt); - MathPowStub stub(MathPowStub::TAGGED); + MathPowStub stub(isolate(), MathPowStub::TAGGED); __ CallStub(&stub); } else if (exponent_type.IsInteger32()) { - MathPowStub stub(MathPowStub::INTEGER); + MathPowStub stub(isolate(), MathPowStub::INTEGER); __ CallStub(&stub); } else { ASSERT(exponent_type.IsDouble()); - MathPowStub stub(MathPowStub::DOUBLE); + MathPowStub stub(isolate(), MathPowStub::DOUBLE); __ CallStub(&stub); } } @@ -3925,8 +3971,8 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) { ASSERT(ToRegister(instr->result()).is(r0)); int arity = instr->arity(); - CallFunctionStub stub(arity, instr->hydrogen()->function_flags()); - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags()); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); } @@ -3938,8 +3984,8 @@ void LCodeGen::DoCallNew(LCallNew* instr) { __ mov(r0, Operand(instr->arity())); // No cell in r2 for construct type feedback in optimized code __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); - CallConstructStub stub(NO_CALL_FUNCTION_FLAGS); - CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); + CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS); + CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); } @@ -3957,8 +4003,8 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) { : DONT_OVERRIDE; if (instr->arity() == 0) { - ArrayNoArgumentConstructorStub stub(kind, override_mode); - CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); + ArrayNoArgumentConstructorStub stub(isolate(), 
kind, override_mode); + CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); } else if (instr->arity() == 1) { Label done; if (IsFastPackedElementsKind(kind)) { @@ -3970,18 +4016,20 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) { __ b(eq, &packed_case); ElementsKind holey_kind = GetHoleyElementsKind(kind); - ArraySingleArgumentConstructorStub stub(holey_kind, override_mode); - CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); + ArraySingleArgumentConstructorStub stub(isolate(), + holey_kind, + override_mode); + CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); __ jmp(&done); __ bind(&packed_case); } - ArraySingleArgumentConstructorStub stub(kind, override_mode); - CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); + ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode); + CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); __ bind(&done); } else { - ArrayNArgumentsConstructorStub stub(kind, override_mode); - CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); + ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode); + CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); } } @@ -4028,7 +4076,6 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { return; } - Handle<Map> transition = instr->transition(); SmiCheck check_needed = instr->hydrogen()->value()->IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; @@ -4042,19 +4089,21 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { __ SmiTst(value); DeoptimizeIf(eq, instr->environment()); - // We know that value is a smi now, so we can omit the check below. + // We know now that value is not a smi, so we can omit the check below. check_needed = OMIT_SMI_CHECK; } } else if (representation.IsDouble()) { - ASSERT(transition.is_null()); ASSERT(access.IsInobject()); + ASSERT(!instr->hydrogen()->has_transition()); ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); DwVfpRegister value = ToDoubleRegister(instr->value()); __ vstr(value, FieldMemOperand(object, offset)); return; } - if (!transition.is_null()) { + if (instr->hydrogen()->has_transition()) { + Handle<Map> transition = instr->hydrogen()->transition_map(); + AddDeprecationDependency(transition); __ mov(scratch, Operand(transition)); __ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); if (instr->hydrogen()->NeedsWriteBarrierForMap()) { @@ -4119,38 +4168,29 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { } -void LCodeGen::ApplyCheckIf(Condition condition, LBoundsCheck* check) { - if (FLAG_debug_code && check->hydrogen()->skip_check()) { +void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { + Condition cc = instr->hydrogen()->allow_equality() ? 
hi : hs; + if (instr->index()->IsConstantOperand()) { + Operand index = ToOperand(instr->index()); + Register length = ToRegister(instr->length()); + __ cmp(length, index); + cc = ReverseCondition(cc); + } else { + Register index = ToRegister(instr->index()); + Operand length = ToOperand(instr->length()); + __ cmp(index, length); + } + if (FLAG_debug_code && instr->hydrogen()->skip_check()) { Label done; - __ b(NegateCondition(condition), &done); + __ b(NegateCondition(cc), &done); __ stop("eliminated bounds check failed"); __ bind(&done); } else { - DeoptimizeIf(condition, check->environment()); + DeoptimizeIf(cc, instr->environment()); } } -void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { - if (instr->hydrogen()->skip_check()) return; - - if (instr->index()->IsConstantOperand()) { - int constant_index = - ToInteger32(LConstantOperand::cast(instr->index())); - if (instr->hydrogen()->length()->representation().IsSmi()) { - __ mov(ip, Operand(Smi::FromInt(constant_index))); - } else { - __ mov(ip, Operand(constant_index)); - } - __ cmp(ip, ToRegister(instr->length())); - } else { - __ cmp(ToRegister(instr->index()), ToRegister(instr->length())); - } - Condition condition = instr->hydrogen()->allow_equality() ? hi : hs; - ApplyCheckIf(condition, instr); -} - - void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { Register external_pointer = ToRegister(instr->elements()); Register key = no_reg; @@ -4381,15 +4421,15 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { scratch, GetLinkRegisterState(), kDontSaveFPRegs); } else { ASSERT(ToRegister(instr->context()).is(cp)); + ASSERT(object_reg.is(r0)); PushSafepointRegistersScope scope( this, Safepoint::kWithRegistersAndDoubles); - __ Move(r0, object_reg); __ Move(r1, to_map); bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE; - TransitionElementsKindStub stub(from_kind, to_kind, is_js_array); + TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array); __ CallStub(&stub); RecordSafepointWithRegistersAndDoubles( - instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); + instr->pointer_map(), 0, Safepoint::kLazyDeopt); } __ bind(¬_applicable); } @@ -4409,9 +4449,10 @@ void LCodeGen::DoStringAdd(LStringAdd* instr) { ASSERT(ToRegister(instr->context()).is(cp)); ASSERT(ToRegister(instr->left()).is(r1)); ASSERT(ToRegister(instr->right()).is(r0)); - StringAddStub stub(instr->hydrogen()->flags(), + StringAddStub stub(isolate(), + instr->hydrogen()->flags(), instr->hydrogen()->pretenure_flag()); - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); } @@ -5106,7 +5147,14 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) { Register object_; }; - if (instr->hydrogen()->CanOmitMapChecks()) return; + if (instr->hydrogen()->IsStabilityCheck()) { + const UniqueSet<Map>* maps = instr->hydrogen()->maps(); + for (int i = 0; i < maps->size(); ++i) { + AddStabilityDependency(maps->at(i).handle()); + } + return; + } + Register map_reg = scratch0(); LOperand* input = instr->value(); @@ -5116,22 +5164,22 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) { __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset)); DeferredCheckMaps* deferred = NULL; - if (instr->hydrogen()->has_migration_target()) { + if (instr->hydrogen()->HasMigrationTarget()) { deferred = new(zone()) DeferredCheckMaps(this, instr, reg); __ bind(deferred->check_maps()); } - UniqueSet<Map> map_set = instr->hydrogen()->map_set(); + const UniqueSet<Map>* maps = 
instr->hydrogen()->maps(); Label success; - for (int i = 0; i < map_set.size() - 1; i++) { - Handle<Map> map = map_set.at(i).handle(); + for (int i = 0; i < maps->size() - 1; i++) { + Handle<Map> map = maps->at(i).handle(); __ CompareMap(map_reg, map, &success); __ b(eq, &success); } - Handle<Map> map = map_set.at(map_set.size() - 1).handle(); + Handle<Map> map = maps->at(maps->size() - 1).handle(); __ CompareMap(map_reg, map, &success); - if (instr->hydrogen()->has_migration_target()) { + if (instr->hydrogen()->HasMigrationTarget()) { __ b(ne, deferred->entry()); } else { DeoptimizeIf(ne, instr->environment()); @@ -5301,7 +5349,13 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) { __ push(size); } else { int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); - __ Push(Smi::FromInt(size)); + if (size >= 0 && size <= Smi::kMaxValue) { + __ Push(Smi::FromInt(size)); + } else { + // We should never get here at runtime => abort + __ stop("invalid allocation size"); + return; + } } int flags = AllocateDoubleAlignFlag::encode( @@ -5381,10 +5435,11 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { // space for nested functions that don't need literals cloning. bool pretenure = instr->hydrogen()->pretenure(); if (!pretenure && instr->hydrogen()->has_no_literals()) { - FastNewClosureStub stub(instr->hydrogen()->strict_mode(), + FastNewClosureStub stub(isolate(), + instr->hydrogen()->strict_mode(), instr->hydrogen()->is_generator()); __ mov(r2, Operand(instr->hydrogen()->shared_info())); - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); } else { __ mov(r2, Operand(instr->hydrogen()->shared_info())); __ mov(r1, Operand(pretenure ? factory()->true_value() @@ -5421,13 +5476,14 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, Handle<String> type_name) { Condition final_branch_condition = kNoCondition; Register scratch = scratch0(); - if (type_name->Equals(heap()->number_string())) { + Factory* factory = isolate()->factory(); + if (String::Equals(type_name, factory->number_string())) { __ JumpIfSmi(input, true_label); __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex); final_branch_condition = eq; - } else if (type_name->Equals(heap()->string_string())) { + } else if (String::Equals(type_name, factory->string_string())) { __ JumpIfSmi(input, false_label); __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE); __ b(ge, false_label); @@ -5435,22 +5491,23 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, __ tst(scratch, Operand(1 << Map::kIsUndetectable)); final_branch_condition = eq; - } else if (type_name->Equals(heap()->symbol_string())) { + } else if (String::Equals(type_name, factory->symbol_string())) { __ JumpIfSmi(input, false_label); __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE); final_branch_condition = eq; - } else if (type_name->Equals(heap()->boolean_string())) { + } else if (String::Equals(type_name, factory->boolean_string())) { __ CompareRoot(input, Heap::kTrueValueRootIndex); __ b(eq, true_label); __ CompareRoot(input, Heap::kFalseValueRootIndex); final_branch_condition = eq; - } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) { + } else if (FLAG_harmony_typeof && + String::Equals(type_name, factory->null_string())) { __ CompareRoot(input, Heap::kNullValueRootIndex); final_branch_condition = eq; - } else if (type_name->Equals(heap()->undefined_string())) { 
+ } else if (String::Equals(type_name, factory->undefined_string())) { __ CompareRoot(input, Heap::kUndefinedValueRootIndex); __ b(eq, true_label); __ JumpIfSmi(input, false_label); @@ -5460,7 +5517,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, __ tst(scratch, Operand(1 << Map::kIsUndetectable)); final_branch_condition = ne; - } else if (type_name->Equals(heap()->function_string())) { + } else if (String::Equals(type_name, factory->function_string())) { STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); Register type_reg = scratch; __ JumpIfSmi(input, false_label); @@ -5469,7 +5526,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, __ cmp(type_reg, Operand(JS_FUNCTION_PROXY_TYPE)); final_branch_condition = eq; - } else if (type_name->Equals(heap()->object_string())) { + } else if (String::Equals(type_name, factory->object_string())) { Register map = scratch; __ JumpIfSmi(input, false_label); if (!FLAG_harmony_typeof) { @@ -5607,12 +5664,12 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) { __ LoadRoot(ip, Heap::kStackLimitRootIndex); __ cmp(sp, Operand(ip)); __ b(hs, &done); - PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize); + Handle<Code> stack_check = isolate()->builtins()->StackCheck(); + PredictableCodeSizeScope predictable(masm(), + CallCodeSize(stack_check, RelocInfo::CODE_TARGET)); ASSERT(instr->context()->IsRegister()); ASSERT(ToRegister(instr->context()).is(cp)); - CallCode(isolate()->builtins()->StackCheck(), - RelocInfo::CODE_TARGET, - instr); + CallCode(stack_check, RelocInfo::CODE_TARGET, instr); __ bind(&done); } else { ASSERT(instr->hydrogen()->is_backwards_branch()); @@ -5716,13 +5773,61 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { } +void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, + Register result, + Register object, + Register index) { + PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); + __ Push(object); + __ Push(index); + __ mov(cp, Operand::Zero()); + __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble); + RecordSafepointWithRegisters( + instr->pointer_map(), 2, Safepoint::kNoLazyDeopt); + __ StoreToSafepointRegisterSlot(r0, result); +} + + void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) { + class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode { + public: + DeferredLoadMutableDouble(LCodeGen* codegen, + LLoadFieldByIndex* instr, + Register result, + Register object, + Register index) + : LDeferredCode(codegen), + instr_(instr), + result_(result), + object_(object), + index_(index) { + } + virtual void Generate() V8_OVERRIDE { + codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_); + } + virtual LInstruction* instr() V8_OVERRIDE { return instr_; } + private: + LLoadFieldByIndex* instr_; + Register result_; + Register object_; + Register index_; + }; + Register object = ToRegister(instr->object()); Register index = ToRegister(instr->index()); Register result = ToRegister(instr->result()); Register scratch = scratch0(); + DeferredLoadMutableDouble* deferred; + deferred = new(zone()) DeferredLoadMutableDouble( + this, instr, result, object, index); + Label out_of_object, done; + + __ tst(index, Operand(Smi::FromInt(1))); + __ b(ne, deferred->entry()); + __ mov(index, Operand(index, ASR, 1)); + __ cmp(index, Operand::Zero()); __ b(lt, &out_of_object); @@ -5738,6 +5843,7 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) { __ sub(scratch, result, Operand::PointerOffsetFromSmiKey(index)); __ ldr(result, 
FieldMemOperand(scratch, FixedArray::kHeaderSize - kPointerSize)); + __ bind(deferred->exit()); __ bind(&done); } diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h index 21da500d01..3e05c328cb 100644 --- a/deps/v8/src/arm/lithium-codegen-arm.h +++ b/deps/v8/src/arm/lithium-codegen-arm.h @@ -1,29 +1,6 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #ifndef V8_ARM_LITHIUM_CODEGEN_ARM_H_ #define V8_ARM_LITHIUM_CODEGEN_ARM_H_ @@ -35,7 +12,7 @@ #include "lithium-codegen.h" #include "safepoint-table.h" #include "scopes.h" -#include "v8utils.h" +#include "utils.h" namespace v8 { namespace internal { @@ -141,6 +118,10 @@ class LCodeGen: public LCodeGenBase { void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, Label* map_check); void DoDeferredInstanceMigration(LCheckMaps* instr, Register object); + void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, + Register result, + Register object, + Register index); // Parallel move support. 
void DoParallelMove(LParallelMove* move); @@ -182,8 +163,6 @@ class LCodeGen: public LCodeGenBase { int GetStackSlotCount() const { return chunk()->spill_slot_count(); } - void Abort(BailoutReason reason); - void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); } void SaveCallerDoubles(); @@ -205,6 +184,8 @@ class LCodeGen: public LCodeGenBase { RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS }; + int CallCodeSize(Handle<Code> code, RelocInfo::Mode mode); + void CallCode( Handle<Code> code, RelocInfo::Mode mode, @@ -258,7 +239,6 @@ class LCodeGen: public LCodeGenBase { LEnvironment* environment, Deoptimizer::BailoutType bailout_type); void DeoptimizeIf(Condition condition, LEnvironment* environment); - void ApplyCheckIf(Condition condition, LBoundsCheck* check); void AddToTranslation(LEnvironment* environment, Translation* translation, diff --git a/deps/v8/src/arm/lithium-gap-resolver-arm.cc b/deps/v8/src/arm/lithium-gap-resolver-arm.cc index 0c6b2adadf..fe0ef144ab 100644 --- a/deps/v8/src/arm/lithium-gap-resolver-arm.cc +++ b/deps/v8/src/arm/lithium-gap-resolver-arm.cc @@ -1,29 +1,6 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #include "v8.h" @@ -33,11 +10,22 @@ namespace v8 { namespace internal { -static const Register kSavedValueRegister = { 9 }; +// We use the root register to spill a value while breaking a cycle in parallel +// moves. We don't need access to roots while resolving the move list and using +// the root register has two advantages: +// - It is not in crankshaft allocatable registers list, so it can't interfere +// with any of the moves we are resolving. +// - We don't need to push it on the stack, as we can reload it with its value +// once we have resolved a cycle. 
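A two-register swap is the smallest cycle the resolver has to break, and it shows why a single saved value suffices. A minimal sketch, not part of the patch, with plain ints standing in for registers and for kSavedValueRegister:

    // The parallel move {a <- b, b <- a} has no valid left-to-right ordering
    // without a spare location. BreakCycle saves one endpoint up front and
    // RestoreValue writes it to its destination after the remaining moves.
    void ResolveSwap(int& a, int& b) {
      int saved = a;  // BreakCycle: save the value that must end up in b
      a = b;          // the rest of the cycle can now be emitted normally
      b = saved;      // RestoreValue: finish the move that was broken
    }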
+#define kSavedValueRegister kRootRegister + LGapResolver::LGapResolver(LCodeGen* owner) : cgen_(owner), moves_(32, owner->zone()), root_index_(0), in_cycle_(false), - saved_destination_(NULL) { } + saved_destination_(NULL), need_to_restore_root_(false) { } + + +#define __ ACCESS_MASM(cgen_->masm()) void LGapResolver::Resolve(LParallelMove* parallel_move) { @@ -67,6 +55,12 @@ void LGapResolver::Resolve(LParallelMove* parallel_move) { } } + if (need_to_restore_root_) { + ASSERT(kSavedValueRegister.is(kRootRegister)); + __ InitializeRootRegister(); + need_to_restore_root_ = false; + } + moves_.Rewind(0); } @@ -155,20 +149,21 @@ void LGapResolver::Verify() { #endif } -#define __ ACCESS_MASM(cgen_->masm()) void LGapResolver::BreakCycle(int index) { - // We save in a register the value that should end up in the source of - // moves_[root_index]. After performing all moves in the tree rooted - // in that move, we save the value to that source. + // We save in a register the source of that move and we remember its + // destination. Then we mark this move as resolved so the cycle is + // broken and we can perform the other moves. ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source())); ASSERT(!in_cycle_); in_cycle_ = true; LOperand* source = moves_[index].source(); saved_destination_ = moves_[index].destination(); if (source->IsRegister()) { + need_to_restore_root_ = true; __ mov(kSavedValueRegister, cgen_->ToRegister(source)); } else if (source->IsStackSlot()) { + need_to_restore_root_ = true; __ ldr(kSavedValueRegister, cgen_->ToMemOperand(source)); } else if (source->IsDoubleRegister()) { __ vmov(kScratchDoubleReg, cgen_->ToDoubleRegister(source)); @@ -186,7 +181,6 @@ void LGapResolver::RestoreValue() { ASSERT(in_cycle_); ASSERT(saved_destination_ != NULL); - // Spilled value is in kSavedValueRegister or kSavedDoubleValueRegister. if (saved_destination_->IsRegister()) { __ mov(cgen_->ToRegister(saved_destination_), kSavedValueRegister); } else if (saved_destination_->IsStackSlot()) { @@ -226,20 +220,15 @@ void LGapResolver::EmitMove(int index) { } else { ASSERT(destination->IsStackSlot()); MemOperand destination_operand = cgen_->ToMemOperand(destination); - if (in_cycle_) { - if (!destination_operand.OffsetIsUint12Encodable()) { - // ip is overwritten while saving the value to the destination. - // Therefore we can't use ip. It is OK if the read from the source - // destroys ip, since that happens before the value is read. - __ vldr(kScratchDoubleReg.low(), source_operand); - __ vstr(kScratchDoubleReg.low(), destination_operand); - } else { - __ ldr(ip, source_operand); - __ str(ip, destination_operand); - } + if (!destination_operand.OffsetIsUint12Encodable()) { + // ip is overwritten while saving the value to the destination. + // Therefore we can't use ip. It is OK if the read from the source + // destroys ip, since that happens before the value is read. + __ vldr(kScratchDoubleReg.low(), source_operand); + __ vstr(kScratchDoubleReg.low(), destination_operand); } else { - __ ldr(kSavedValueRegister, source_operand); - __ str(kSavedValueRegister, destination_operand); + __ ldr(ip, source_operand); + __ str(ip, destination_operand); } } @@ -261,14 +250,14 @@ void LGapResolver::EmitMove(int index) { } else { ASSERT(destination->IsStackSlot()); ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone. + need_to_restore_root_ = true; Representation r = cgen_->IsSmi(constant_source) ? 
Representation::Smi() : Representation::Integer32(); if (cgen_->IsInteger32(constant_source)) { __ mov(kSavedValueRegister, Operand(cgen_->ToRepresentation(constant_source, r))); } else { - __ Move(kSavedValueRegister, - cgen_->ToHandle(constant_source)); + __ Move(kSavedValueRegister, cgen_->ToHandle(constant_source)); } __ str(kSavedValueRegister, cgen_->ToMemOperand(destination)); } @@ -290,16 +279,11 @@ void LGapResolver::EmitMove(int index) { ASSERT(destination->IsDoubleStackSlot()); MemOperand destination_operand = cgen_->ToMemOperand(destination); if (in_cycle_) { - // kSavedDoubleValueRegister was used to break the cycle, - // but kSavedValueRegister is free. - MemOperand source_high_operand = - cgen_->ToHighMemOperand(source); - MemOperand destination_high_operand = - cgen_->ToHighMemOperand(destination); - __ ldr(kSavedValueRegister, source_operand); - __ str(kSavedValueRegister, destination_operand); - __ ldr(kSavedValueRegister, source_high_operand); - __ str(kSavedValueRegister, destination_high_operand); + // kScratchDoubleReg was used to break the cycle. + __ vstm(db_w, sp, kScratchDoubleReg, kScratchDoubleReg); + __ vldr(kScratchDoubleReg, source_operand); + __ vstr(kScratchDoubleReg, destination_operand); + __ vldm(ia_w, sp, kScratchDoubleReg, kScratchDoubleReg); } else { __ vldr(kScratchDoubleReg, source_operand); __ vstr(kScratchDoubleReg, destination_operand); diff --git a/deps/v8/src/arm/lithium-gap-resolver-arm.h b/deps/v8/src/arm/lithium-gap-resolver-arm.h index 044c2864a4..73914e4daf 100644 --- a/deps/v8/src/arm/lithium-gap-resolver-arm.h +++ b/deps/v8/src/arm/lithium-gap-resolver-arm.h @@ -1,29 +1,6 @@ // Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
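In the in-cycle double-to-double-stack-slot path above, spilling kScratchDoubleReg around its own use keeps the 64-bit copy as one vldr/vstr pair and, presumably, avoids routing the two halves through kSavedValueRegister, which is now the root register and would then need restoring. An annotated recap of the sequence (the comments are editorial, not from the patch):

    __ vstm(db_w, sp, kScratchDoubleReg, kScratchDoubleReg);  // push the cycle's saved value
    __ vldr(kScratchDoubleReg, source_operand);               // perform the double move
    __ vstr(kScratchDoubleReg, destination_operand);
    __ vldm(ia_w, sp, kScratchDoubleReg, kScratchDoubleReg);  // pop the saved value back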
#ifndef V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_ #define V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_ @@ -76,6 +53,10 @@ class LGapResolver V8_FINAL BASE_EMBEDDED { int root_index_; bool in_cycle_; LOperand* saved_destination_; + + // We use the root register as a scratch in a few places. When that happens, + // this flag is set to indicate that it needs to be restored. + bool need_to_restore_root_; }; } } // namespace v8::internal diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc index 2bfe09f768..9752622447 100644 --- a/deps/v8/src/arm/macro-assembler-arm.cc +++ b/deps/v8/src/arm/macro-assembler-arm.cc @@ -1,29 +1,6 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #include <limits.h> // For LONG_MIN, LONG_MAX. 
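The CallSize hunks just below size the ARM call sequence (a mov of the target into ip followed by blx ip) before it is emitted, adding one instruction when the immediate cannot be materialized by a single mov. A minimal sketch of that arithmetic, not part of the patch; the function name and boolean parameter are invented for illustration:

    // kInstrSize is 4 on ARM; a movw/movt split costs one extra instruction.
    int PredictedCallSize(bool target_fits_single_mov) {
      const int kInstrSize = 4;
      int size = 2 * kInstrSize;  // mov ip, #target ; blx ip
      if (!target_fits_single_mov) size += kInstrSize;
      return size;
    }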
@@ -100,19 +77,31 @@ int MacroAssembler::CallSize( int size = 2 * kInstrSize; Instr mov_instr = cond | MOV | LeaveCC; intptr_t immediate = reinterpret_cast<intptr_t>(target); - if (!Operand(immediate, rmode).is_single_instruction(this, mov_instr)) { + if (!Operand(immediate, rmode).is_single_instruction(isolate(), + this, + mov_instr)) { size += kInstrSize; } return size; } -int MacroAssembler::CallSizeNotPredictableCodeSize( - Address target, RelocInfo::Mode rmode, Condition cond) { +int MacroAssembler::CallStubSize( + CodeStub* stub, TypeFeedbackId ast_id, Condition cond) { + return CallSize(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond); +} + + +int MacroAssembler::CallSizeNotPredictableCodeSize(Isolate* isolate, + Address target, + RelocInfo::Mode rmode, + Condition cond) { int size = 2 * kInstrSize; Instr mov_instr = cond | MOV | LeaveCC; intptr_t immediate = reinterpret_cast<intptr_t>(target); - if (!Operand(immediate, rmode).is_single_instruction(NULL, mov_instr)) { + if (!Operand(immediate, rmode).is_single_instruction(isolate, + NULL, + mov_instr)) { size += kInstrSize; } return size; @@ -272,11 +261,11 @@ void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) { void MacroAssembler::And(Register dst, Register src1, const Operand& src2, Condition cond) { if (!src2.is_reg() && - !src2.must_output_reloc_info(this) && + !src2.must_output_reloc_info(isolate(), this) && src2.immediate() == 0) { mov(dst, Operand::Zero(), LeaveCC, cond); - } else if (!src2.is_single_instruction(this) && - !src2.must_output_reloc_info(this) && + } else if (!src2.is_single_instruction(isolate(), this) && + !src2.must_output_reloc_info(isolate(), this) && CpuFeatures::IsSupported(ARMv7) && IsPowerOf2(src2.immediate() + 1)) { ubfx(dst, src1, 0, @@ -549,7 +538,8 @@ void MacroAssembler::RecordWrite(Register object, if (lr_status == kLRHasNotBeenSaved) { push(lr); } - RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode); + RecordWriteStub stub(isolate(), object, value, address, remembered_set_action, + fp_mode); CallStub(&stub); if (lr_status == kLRHasNotBeenSaved) { pop(lr); @@ -598,7 +588,7 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests. } push(lr); StoreBufferOverflowStub store_buffer_overflow = - StoreBufferOverflowStub(fp_mode); + StoreBufferOverflowStub(isolate(), fp_mode); CallStub(&store_buffer_overflow); pop(lr); bind(&done); @@ -650,7 +640,7 @@ void MacroAssembler::PopSafepointRegisters() { void MacroAssembler::PushSafepointRegistersAndDoubles() { // Number of d-regs not known at snapshot time. - ASSERT(!Serializer::enabled()); + ASSERT(!Serializer::enabled(isolate())); PushSafepointRegisters(); // Only save allocatable registers. ASSERT(kScratchDoubleReg.is(d15) && kDoubleRegZero.is(d14)); @@ -664,7 +654,7 @@ void MacroAssembler::PushSafepointRegistersAndDoubles() { void MacroAssembler::PopSafepointRegistersAndDoubles() { // Number of d-regs not known at snapshot time. - ASSERT(!Serializer::enabled()); + ASSERT(!Serializer::enabled(isolate())); // Only save allocatable registers. ASSERT(kScratchDoubleReg.is(d15) && kDoubleRegZero.is(d14)); ASSERT(DwVfpRegister::NumReservedRegisters() == 2); @@ -706,7 +696,7 @@ MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) { MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) { // Number of d-regs not known at snapshot time. - ASSERT(!Serializer::enabled()); + ASSERT(!Serializer::enabled(isolate())); // General purpose registers are pushed last on the stack. 
int doubles_size = DwVfpRegister::NumAllocatableRegisters() * kDoubleSize; int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize; @@ -789,6 +779,14 @@ void MacroAssembler::VFPEnsureFPSCRState(Register scratch) { // If needed, restore wanted bits of FPSCR. Label fpscr_done; vmrs(scratch); + if (emit_debug_code()) { + Label rounding_mode_correct; + tst(scratch, Operand(kVFPRoundingModeMask)); + b(eq, &rounding_mode_correct); + // Don't call Assert here, since Runtime_Abort could re-enter here. + stop("Default rounding mode not set"); + bind(&rounding_mode_correct); + } tst(scratch, Operand(kVFPDefaultNaNModeControlBit)); b(ne, &fpscr_done); orr(scratch, scratch, Operand(kVFPDefaultNaNModeControlBit)); @@ -912,7 +910,7 @@ void MacroAssembler::Prologue(PrologueFrameMode frame_mode) { add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); } else { PredictableCodeSizeScope predictible_code_size_scope( - this, kNoCodeAgeSequenceLength * Assembler::kInstrSize); + this, kNoCodeAgeSequenceLength); // The following three instructions must remain together and unmodified // for code aging to work properly. if (isolate()->IsCodePreAgingActive()) { @@ -989,7 +987,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) { } if (FLAG_enable_ool_constant_pool) { str(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset)); - LoadConstantPoolPointerRegister(); } mov(ip, Operand(CodeObject())); str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset)); @@ -1321,15 +1318,13 @@ void MacroAssembler::IsObjectNameType(Register object, } -#ifdef ENABLE_DEBUGGER_SUPPORT void MacroAssembler::DebugBreak() { mov(r0, Operand::Zero()); mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate()))); - CEntryStub ces(1); + CEntryStub ces(isolate(), 1); ASSERT(AllowThisStubCall(&ces)); - Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK); + Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); } -#endif void MacroAssembler::PushTryHandler(StackHandler::Kind kind, @@ -1755,7 +1750,7 @@ void MacroAssembler::Allocate(int object_size, object_size -= bits; shift += 8; Operand bits_operand(bits); - ASSERT(bits_operand.is_single_instruction(this)); + ASSERT(bits_operand.is_single_instruction(isolate(), this)); add(scratch2, source, bits_operand, SetCC, cond); source = scratch2; cond = cc; @@ -2305,12 +2300,12 @@ void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id, Condition cond) { ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs. - Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id, cond); + Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond); } void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) { - Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, cond); + Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond); } @@ -2339,10 +2334,7 @@ void MacroAssembler::CallApiFunctionAndReturn( Label profiler_disabled; Label end_profiler_check; - bool* is_profiling_flag = - isolate()->cpu_profiler()->is_profiling_address(); - STATIC_ASSERT(sizeof(*is_profiling_flag) == 1); - mov(r9, Operand(reinterpret_cast<int32_t>(is_profiling_flag))); + mov(r9, Operand(ExternalReference::is_profiling_address(isolate()))); ldrb(r9, MemOperand(r9, 0)); cmp(r9, Operand(0)); b(eq, &profiler_disabled); @@ -2375,7 +2367,7 @@ void MacroAssembler::CallApiFunctionAndReturn( // Native call returns to the DirectCEntry stub which redirects to the // return address pushed on stack (could have moved after GC). 
// DirectCEntry stub itself is generated early and never moves. - DirectCEntryStub stub; + DirectCEntryStub stub(isolate()); stub.GenerateCall(this, r3); if (FLAG_log_timer_events) { @@ -2455,14 +2447,6 @@ bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { } -void MacroAssembler::IllegalOperation(int num_arguments) { - if (num_arguments > 0) { - add(sp, sp, Operand(num_arguments * kPointerSize)); - } - LoadRoot(r0, Heap::kUndefinedValueRootIndex); -} - - void MacroAssembler::IndexFromHash(Register hash, Register index) { // If the hash field contains an array index pick it out. The assert checks // that the constants for the maximum number of digits for an array index @@ -2580,7 +2564,7 @@ void MacroAssembler::TruncateDoubleToI(Register result, sub(sp, sp, Operand(kDoubleSize)); // Put input on stack. vstr(double_input, MemOperand(sp, 0)); - DoubleToIStub stub(sp, result, 0, true, true); + DoubleToIStub stub(isolate(), sp, result, 0, true, true); CallStub(&stub); add(sp, sp, Operand(kDoubleSize)); @@ -2602,7 +2586,8 @@ void MacroAssembler::TruncateHeapNumberToI(Register result, // If we fell through then inline version didn't succeed - call stub instead. push(lr); - DoubleToIStub stub(object, + DoubleToIStub stub(isolate(), + object, result, HeapNumber::kValueOffset - kHeapObjectTag, true, @@ -2657,10 +2642,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, // If the expected number of arguments of the runtime function is // constant, we check that the actual number of arguments match the // expectation. - if (f->nargs >= 0 && f->nargs != num_arguments) { - IllegalOperation(num_arguments); - return; - } + CHECK(f->nargs < 0 || f->nargs == num_arguments); // TODO(1236192): Most runtime routines don't need the number of // arguments passed in because it is constant. At some point we @@ -2668,7 +2650,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, // smarter. mov(r0, Operand(num_arguments)); mov(r1, Operand(ExternalReference(f, isolate()))); - CEntryStub stub(1, save_doubles); + CEntryStub stub(isolate(), 1, save_doubles); CallStub(&stub); } @@ -2678,7 +2660,7 @@ void MacroAssembler::CallExternalReference(const ExternalReference& ext, mov(r0, Operand(num_arguments)); mov(r1, Operand(ext)); - CEntryStub stub(1); + CEntryStub stub(isolate(), 1); CallStub(&stub); } @@ -2710,8 +2692,8 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) { ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1); #endif mov(r1, Operand(builtin)); - CEntryStub stub(1); - Jump(stub.GetCode(isolate()), RelocInfo::CODE_TARGET); + CEntryStub stub(isolate(), 1); + Jump(stub.GetCode(), RelocInfo::CODE_TARGET); } @@ -3794,36 +3776,19 @@ void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) { void MacroAssembler::ClampDoubleToUint8(Register result_reg, DwVfpRegister input_reg, LowDwVfpRegister double_scratch) { - Label above_zero; Label done; - Label in_bounds; - - VFPCompareAndSetFlags(input_reg, 0.0); - b(gt, &above_zero); - // Double value is less than zero, NaN or Inf, return 0. - mov(result_reg, Operand::Zero()); - b(al, &done); - - // Double value is >= 255, return 255. - bind(&above_zero); + // Handle inputs >= 255 (including +infinity). Vmov(double_scratch, 255.0, result_reg); - VFPCompareAndSetFlags(input_reg, double_scratch); - b(le, &in_bounds); mov(result_reg, Operand(255)); - b(al, &done); - - // In 0-255 range, round and truncate. - bind(&in_bounds); - // Save FPSCR. 
- vmrs(ip); - // Set rounding mode to round to the nearest integer by clearing bits[23:22]. - bic(result_reg, ip, Operand(kVFPRoundingModeMask)); - vmsr(result_reg); - vcvt_s32_f64(double_scratch.low(), input_reg, kFPSCRRounding); + VFPCompareAndSetFlags(input_reg, double_scratch); + b(ge, &done); + + // For inputs < 255 (including negative) vcvt_u32_f64 with round-to-nearest + // rounding mode will provide the correct result. + vcvt_u32_f64(double_scratch.low(), input_reg, kFPSCRRounding); vmov(result_reg, double_scratch.low()); - // Restore FPSCR. - vmsr(ip); + bind(&done); } diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h index 6b6ecd32da..ba6f82571d 100644 --- a/deps/v8/src/arm/macro-assembler-arm.h +++ b/deps/v8/src/arm/macro-assembler-arm.h @@ -1,29 +1,6 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
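The ClampDoubleToUint8 rewrite above leans on two facts: vcvt_u32_f64 saturates negative inputs and NaN to 0, and FPSCR is in round-to-nearest mode, which the new VFPEnsureFPSCRState debug check asserts; only the >= 255 side then needs an explicit compare. A portable C++ reference of the intended mapping, not part of the patch and assuming the default round-to-nearest rounding mode:

    #include <cmath>
    #include <cstdint>

    uint8_t ClampDoubleToUint8(double value) {
      if (value >= 255.0) return 255;           // false for NaN, so NaN falls through
      double rounded = std::nearbyint(value);   // round to nearest, ties to even
      if (!(rounded > 0.0)) return 0;           // negatives, -0.0 and NaN clamp to 0
      return static_cast<uint8_t>(rounded);     // remaining values lie in [1, 255]
    }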
#ifndef V8_ARM_MACRO_ASSEMBLER_ARM_H_ #define V8_ARM_MACRO_ASSEMBLER_ARM_H_ @@ -102,7 +79,11 @@ class MacroAssembler: public Assembler { static int CallSize(Register target, Condition cond = al); void Call(Register target, Condition cond = al); int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al); - static int CallSizeNotPredictableCodeSize(Address target, + int CallStubSize(CodeStub* stub, + TypeFeedbackId ast_id = TypeFeedbackId::None(), + Condition cond = al); + static int CallSizeNotPredictableCodeSize(Isolate* isolate, + Address target, RelocInfo::Mode rmode, Condition cond = al); void Call(Address target, RelocInfo::Mode rmode, @@ -627,12 +608,10 @@ class MacroAssembler: public Assembler { Register scratch, Label* fail); -#ifdef ENABLE_DEBUGGER_SUPPORT // --------------------------------------------------------------------------- // Debugger Support void DebugBreak(); -#endif // --------------------------------------------------------------------------- // Exception handling @@ -951,10 +930,6 @@ class MacroAssembler: public Assembler { } - // Generates code for reporting that an illegal operation has - // occurred. - void IllegalOperation(int num_arguments); - // Picks out an array index from the hash field. // Register use: // hash - holds the index's hash. Clobbered. @@ -1524,11 +1499,12 @@ class FrameAndConstantPoolScope { type_(type), old_has_frame_(masm->has_frame()), old_constant_pool_available_(masm->is_constant_pool_available()) { + // We only want to enable constant pool access for non-manual frame scopes + // to ensure the constant pool pointer is valid throughout the scope. + ASSERT(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE); masm->set_has_frame(true); masm->set_constant_pool_available(true); - if (type_ != StackFrame::MANUAL && type_ != StackFrame::NONE) { - masm->EnterFrame(type, !old_constant_pool_available_); - } + masm->EnterFrame(type, !old_constant_pool_available_); } ~FrameAndConstantPoolScope() { diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc index cbc34e10b9..e511554eff 100644 --- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc +++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc @@ -1,29 +1,6 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. #include "v8.h" @@ -1043,7 +1020,7 @@ void RegExpMacroAssemblerARM::CallCheckStackGuardState(Register scratch) { ExternalReference stack_guard_check = ExternalReference::re_check_stack_guard_state(isolate()); __ mov(ip, Operand(stack_guard_check)); - DirectCEntryStub stub; + DirectCEntryStub stub(isolate()); stub.GenerateCall(masm_, ip); // Drop the return address from the stack. @@ -1094,7 +1071,7 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address, ASSERT(*return_address <= re_code->instruction_start() + re_code->instruction_size()); - MaybeObject* result = Execution::HandleStackGuardInterrupt(isolate); + Object* result = Execution::HandleStackGuardInterrupt(isolate); if (*code_handle != re_code) { // Return address no longer valid int delta = code_handle->address() - re_code->address(); diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.h b/deps/v8/src/arm/regexp-macro-assembler-arm.h index 8d9d515c76..4b18b274d7 100644 --- a/deps/v8/src/arm/regexp-macro-assembler-arm.h +++ b/deps/v8/src/arm/regexp-macro-assembler-arm.h @@ -1,29 +1,6 @@ // Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
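The CheckStackGuardState hunk above sits next to a subtlety worth spelling out: handling the interrupt can trigger a GC that moves the regexp code object, after which the saved return address points into the old copy. The patch context ends at the delta computation; in the surrounding source that delta is then applied to the saved address, along the lines of:

    int delta = code_handle->address() - re_code->address();
    *return_address += delta;  // re-target the same instruction in the moved code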
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.h b/deps/v8/src/arm/regexp-macro-assembler-arm.h
index 8d9d515c76..4b18b274d7 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.h
+++ b/deps/v8/src/arm/regexp-macro-assembler-arm.h
@@ -1,29 +1,6 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 
 #ifndef V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
 #define V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index 8f7c1e8bb2..80b46e04df 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -1,29 +1,6 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 
 #include <stdarg.h>
 #include <stdlib.h>
@@ -773,8 +750,8 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
   z_flag_FPSCR_ = false;
   c_flag_FPSCR_ = false;
   v_flag_FPSCR_ = false;
-  FPSCR_rounding_mode_ = RZ;
-  FPSCR_default_NaN_mode_ = true;
+  FPSCR_rounding_mode_ = RN;
+  FPSCR_default_NaN_mode_ = false;
 
   inv_op_vfp_flag_ = false;
   div_zero_vfp_flag_ = false;
@@ -2936,7 +2913,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
     } else if ((instr->Opc2Value() == 0xA) && (instr->Opc3Value() == 0x3) &&
                (instr->Bit(8) == 1)) {
       // vcvt.f64.s32 Dd, Dd, #<fbits>
-      int fraction_bits = 32 - ((instr->Bit(5) << 4) | instr->Bits(3, 0));
+      int fraction_bits = 32 - ((instr->Bits(3, 0) << 1) | instr->Bit(5));
       int fixed_value = get_sinteger_from_s_register(vd * 2);
       double divide = 1 << fraction_bits;
       set_d_register_from_double(vd, fixed_value / divide);
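
The vcvt.f64.s32 hunk above fixes how the simulator assembles the 5-bit fixed-point immediate: the fraction-bit count is 32 minus imm4:i (bits 3..0 shifted left by one, ORed with bit 5), where the old code had built i:imm4 instead. A standalone sketch of the corrected conversion, assuming the two fields are already extracted (names are illustrative):

#include <cmath>
#include <cstdint>
#include <cstdio>

// fraction_bits = 32 - (imm4:i), matching the corrected simulator decode.
double VcvtFixedToDouble(int32_t fixed_value, uint32_t imm4, uint32_t i_bit) {
  int fraction_bits = 32 - static_cast<int>((imm4 << 1) | i_bit);
  // std::ldexp(1.0, n) computes 2^n without the undefined shift that a
  // literal 1 << 32 would be when fraction_bits reaches 32.
  return fixed_value / std::ldexp(1.0, fraction_bits);
}

int main() {
  // imm4 = 8, i = 0 encodes 16 fraction bits, so 0x00010000 is 1.0 in Q16.
  std::printf("%f\n", VcvtFixedToDouble(0x00010000, 8, 0));  // prints 1.000000
  return 0;
}
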
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h
index 24d7fe58c4..bbe87bcbe2 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/arm/simulator-arm.h
@@ -1,29 +1,6 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 
 // Declares a Simulator for ARM instructions if we are not generating a native
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index c595e42745..fd53b9782d 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -1,29 +1,6 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 
 #include "v8.h"
 
@@ -431,6 +408,22 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
     __ JumpIfNotSmi(value_reg, miss_label);
   } else if (representation.IsHeapObject()) {
     __ JumpIfSmi(value_reg, miss_label);
+    HeapType* field_type = descriptors->GetFieldType(descriptor);
+    HeapType::Iterator<Map> it = field_type->Classes();
+    if (!it.Done()) {
+      __ ldr(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
+      Label do_store;
+      while (true) {
+        __ CompareMap(scratch1, it.Current(), &do_store);
+        it.Advance();
+        if (it.Done()) {
+          __ b(ne, miss_label);
+          break;
+        }
+        __ b(eq, &do_store);
+      }
+      __ bind(&do_store);
+    }
   } else if (representation.IsDouble()) {
     Label do_store, heap_number;
     __ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex);
@@ -593,6 +586,22 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
     __ JumpIfNotSmi(value_reg, miss_label);
   } else if (representation.IsHeapObject()) {
     __ JumpIfSmi(value_reg, miss_label);
+    HeapType* field_type = lookup->GetFieldType();
+    HeapType::Iterator<Map> it = field_type->Classes();
+    if (!it.Done()) {
+      __ ldr(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
+      Label do_store;
+      while (true) {
+        __ CompareMap(scratch1, it.Current(), &do_store);
+        it.Advance();
+        if (it.Done()) {
+          __ b(ne, miss_label);
+          break;
+        }
+        __ b(eq, &do_store);
+      }
+      __ bind(&do_store);
+    }
   } else if (representation.IsDouble()) {
     // Load the double storage.
     if (index < 0) {
@@ -801,7 +810,7 @@ void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
   __ mov(api_function_address, Operand(ref));
 
   // Jump to stub.
-  CallApiFunctionStub stub(is_store, call_data_undefined, argc);
+  CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
   __ TailCallStub(&stub);
 }
@@ -836,7 +845,9 @@ Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
   int depth = 0;
 
   Handle<JSObject> current = Handle<JSObject>::null();
-  if (type->IsConstant()) current = Handle<JSObject>::cast(type->AsConstant());
+  if (type->IsConstant()) {
+    current = Handle<JSObject>::cast(type->AsConstant()->Value());
+  }
   Handle<JSObject> prototype = Handle<JSObject>::null();
   Handle<Map> current_map = receiver_map;
   Handle<Map> holder_map(holder->map());
@@ -859,7 +870,7 @@ Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
         name = factory()->InternalizeString(Handle<String>::cast(name));
       }
       ASSERT(current.is_null() ||
-             current->property_dictionary()->FindEntry(*name) ==
+             current->property_dictionary()->FindEntry(name) ==
              NameDictionary::kNotFound);
 
       GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
@@ -999,15 +1010,17 @@ void LoadStubCompiler::GenerateLoadField(Register reg,
                                          Representation representation) {
   if (!reg.is(receiver())) __ mov(receiver(), reg);
   if (kind() == Code::LOAD_IC) {
-    LoadFieldStub stub(field.is_inobject(holder),
+    LoadFieldStub stub(isolate(),
+                       field.is_inobject(holder),
                        field.translate(holder),
                        representation);
-    GenerateTailCall(masm(), stub.GetCode(isolate()));
+    GenerateTailCall(masm(), stub.GetCode());
   } else {
-    KeyedLoadFieldStub stub(field.is_inobject(holder),
+    KeyedLoadFieldStub stub(isolate(),
+                            field.is_inobject(holder),
                             field.translate(holder),
                             representation);
-    GenerateTailCall(masm(), stub.GetCode(isolate()));
+    GenerateTailCall(masm(), stub.GetCode());
   }
 }
@@ -1061,7 +1074,7 @@ void LoadStubCompiler::GenerateLoadCallback(
   ExternalReference ref = ExternalReference(&fun, type, isolate());
   __ mov(getter_address_reg, Operand(ref));
 
-  CallApiGetterStub stub;
+  CallApiGetterStub stub(isolate());
   __ TailCallStub(&stub);
 }
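
The two GenerateStore* hunks above emit a compare chain over the maps admitted by the field's HeapType: each matching map branches to do_store, and only the failed compare against the last candidate falls through to miss_label; when the iterator starts out Done the chain is skipped entirely, since the type then places no constraint on the map. The decision logic, as a standalone C++ sketch (illustrative types, not the emitted assembly):

#include <vector>

struct Map;  // opaque stand-in for the map pointer being compared

// True means "branch to do_store", false means "branch to miss_label" --
// the same outcome as the emitted CompareMap chain, one compare per
// candidate map.
bool FieldTypeAdmits(const Map* value_map,
                     const std::vector<const Map*>& admitted) {
  for (const Map* candidate : admitted) {
    if (value_map == candidate) return true;  // __ b(eq, &do_store)
  }
  return false;  // __ b(ne, miss_label) after the last compare
}
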
@@ -1154,19 +1167,6 @@ void LoadStubCompiler::GenerateLoadInterceptor(
 }
 
 
-void StubCompiler::GenerateBooleanCheck(Register object, Label* miss) {
-  Label success;
-  // Check that the object is a boolean.
-  __ LoadRoot(ip, Heap::kTrueValueRootIndex);
-  __ cmp(object, ip);
-  __ b(eq, &success);
-  __ LoadRoot(ip, Heap::kFalseValueRootIndex);
-  __ cmp(object, ip);
-  __ b(ne, miss);
-  __ bind(&success);
-}
-
-
 Handle<Code> StoreStubCompiler::CompileStoreCallback(
     Handle<JSObject> object,
     Handle<JSObject> holder,