author    | Michaël Zasso <targos@protonmail.com> | 2017-03-21 10:16:54 +0100
committer | Michaël Zasso <targos@protonmail.com> | 2017-03-25 09:44:10 +0100
commit    | c459d8ea5d402c702948c860d9497b2230ff7e8a
tree      | 56c282fc4d40e5cb613b47cf7be3ea0526ed5b6f /deps/v8/src/ia32
parent    | e0bc5a7361b1d29c3ed034155fd779ce6f44fb13
download  | node-new-c459d8ea5d402c702948c860d9497b2230ff7e8a.tar.gz
deps: update V8 to 5.7.492.69
PR-URL: https://github.com/nodejs/node/pull/11752
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: Franziska Hinkelmann <franziska.hinkelmann@gmail.com>
Diffstat (limited to 'deps/v8/src/ia32')
-rw-r--r-- | deps/v8/src/ia32/assembler-ia32.cc             |   9
-rw-r--r-- | deps/v8/src/ia32/assembler-ia32.h              |   3
-rw-r--r-- | deps/v8/src/ia32/code-stubs-ia32.cc            | 568
-rw-r--r-- | deps/v8/src/ia32/code-stubs-ia32.h             |  18
-rw-r--r-- | deps/v8/src/ia32/codegen-ia32.cc               | 331
-rw-r--r-- | deps/v8/src/ia32/deoptimizer-ia32.cc           |   3
-rw-r--r-- | deps/v8/src/ia32/interface-descriptors-ia32.cc |   9
-rw-r--r-- | deps/v8/src/ia32/macro-assembler-ia32.cc       | 292
-rw-r--r-- | deps/v8/src/ia32/macro-assembler-ia32.h        |  81
9 files changed, 41 insertions, 1273 deletions
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc index d4de79ef59..021177478d 100644 --- a/deps/v8/src/ia32/assembler-ia32.cc +++ b/deps/v8/src/ia32/assembler-ia32.cc @@ -201,13 +201,18 @@ uint32_t RelocInfo::wasm_memory_size_reference() { return Memory::uint32_at(pc_); } +uint32_t RelocInfo::wasm_function_table_size_reference() { + DCHECK(IsWasmFunctionTableSizeReference(rmode_)); + return Memory::uint32_at(pc_); +} + void RelocInfo::unchecked_update_wasm_memory_reference( Address address, ICacheFlushMode flush_mode) { Memory::Address_at(pc_) = address; } -void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size, - ICacheFlushMode flush_mode) { +void RelocInfo::unchecked_update_wasm_size(uint32_t size, + ICacheFlushMode flush_mode) { Memory::uint32_at(pc_) = size; } diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h index 79f4125354..ddee696162 100644 --- a/deps/v8/src/ia32/assembler-ia32.h +++ b/deps/v8/src/ia32/assembler-ia32.h @@ -1434,9 +1434,6 @@ class Assembler : public AssemblerBase { return pc_offset() - label->pos(); } - // Mark generator continuation. - void RecordGeneratorContinuation(); - // Mark address of a debug break slot. void RecordDebugBreakSlot(RelocInfo::Mode mode); diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc index 9b2c51e99b..6afd1c4945 100644 --- a/deps/v8/src/ia32/code-stubs-ia32.cc +++ b/deps/v8/src/ia32/code-stubs-ia32.cc @@ -34,17 +34,6 @@ void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) { __ TailCallRuntime(Runtime::kNewArray); } -void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) { - Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry; - descriptor->Initialize(eax, deopt_handler, -1, JS_FUNCTION_STUB_MODE); -} - -void FastFunctionBindStub::InitializeDescriptor( - CodeStubDescriptor* descriptor) { - Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry; - descriptor->Initialize(eax, deopt_handler, -1, JS_FUNCTION_STUB_MODE); -} - void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm, ExternalReference miss) { // Update the static counter each time a new code stub is generated. @@ -1225,9 +1214,11 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) { if (cc == equal) { { FrameScope scope(masm, StackFrame::INTERNAL); - __ Push(edx); - __ Push(eax); - __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual); + __ Push(esi); + __ Call(strict() ? isolate()->builtins()->StrictEqual() + : isolate()->builtins()->Equal(), + RelocInfo::CODE_TARGET); + __ Pop(esi); } // Turn true into 0 and false into some non-zero value. STATIC_ASSERT(EQUAL == 0); @@ -1631,7 +1622,6 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { void CodeStub::GenerateFPStubs(Isolate* isolate) { // Generate if not already in cache. CEntryStub(isolate, 1, kSaveFPRegs).GetCode(); - isolate->set_fp_stubs_generated(true); } @@ -2030,40 +2020,6 @@ void StringCharFromCodeGenerator::GenerateSlow( __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase); } - -void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, - Register dest, - Register src, - Register count, - Register scratch, - String::Encoding encoding) { - DCHECK(!scratch.is(dest)); - DCHECK(!scratch.is(src)); - DCHECK(!scratch.is(count)); - - // Nothing to do for zero characters. - Label done; - __ test(count, count); - __ j(zero, &done); - - // Make count the number of bytes to copy. 
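Note: the removed StringHelper::GenerateCopyCharacters above emitted a plain byte-wise copy loop, doubling the count for two-byte strings. A minimal standalone C++ sketch of the same logic, using ordinary pointers rather than V8's MacroAssembler (names here are illustrative, not V8 API); the rest of the removed loop follows in the diff:

#include <cstddef>
#include <cstdint>

enum class Encoding { kOneByte, kTwoByte };

// Copy `count` characters from `src` to `dest`, byte by byte.
// For two-byte strings the byte count is twice the character count.
inline void CopyCharacters(uint8_t* dest, const uint8_t* src,
                           size_t count, Encoding encoding) {
  if (count == 0) return;                            // nothing to do for zero characters
  if (encoding == Encoding::kTwoByte) count <<= 1;   // characters -> bytes
  for (; count > 0; --count) *dest++ = *src++;       // forward copy; overlap unsupported
}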
- if (encoding == String::TWO_BYTE_ENCODING) { - __ shl(count, 1); - } - - Label loop; - __ bind(&loop); - __ mov_b(scratch, Operand(src, 0)); - __ mov_b(Operand(dest, 0), scratch); - __ inc(src); - __ inc(dest); - __ dec(count); - __ j(not_zero, &loop); - - __ bind(&done); -} - - void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm, Register left, Register right, @@ -2679,67 +2635,6 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, __ jmp(done); } - -// Probe the name dictionary in the |elements| register. Jump to the -// |done| label if a property with the given name is found leaving the -// index into the dictionary in |r0|. Jump to the |miss| label -// otherwise. -void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, - Label* miss, - Label* done, - Register elements, - Register name, - Register r0, - Register r1) { - DCHECK(!elements.is(r0)); - DCHECK(!elements.is(r1)); - DCHECK(!name.is(r0)); - DCHECK(!name.is(r1)); - - __ AssertName(name); - - __ mov(r1, FieldOperand(elements, kCapacityOffset)); - __ shr(r1, kSmiTagSize); // convert smi to int - __ dec(r1); - - // Generate an unrolled loop that performs a few probes before - // giving up. Measurements done on Gmail indicate that 2 probes - // cover ~93% of loads from dictionaries. - for (int i = 0; i < kInlinedProbes; i++) { - // Compute the masked index: (hash + i + i * i) & mask. - __ mov(r0, FieldOperand(name, Name::kHashFieldOffset)); - __ shr(r0, Name::kHashShift); - if (i > 0) { - __ add(r0, Immediate(NameDictionary::GetProbeOffset(i))); - } - __ and_(r0, r1); - - // Scale the index by multiplying by the entry size. - STATIC_ASSERT(NameDictionary::kEntrySize == 3); - __ lea(r0, Operand(r0, r0, times_2, 0)); // r0 = r0 * 3 - - // Check if the key is identical to the name. - __ cmp(name, Operand(elements, - r0, - times_4, - kElementsStartOffset - kHeapObjectTag)); - __ j(equal, done); - } - - NameDictionaryLookupStub stub(masm->isolate(), elements, r1, r0, - POSITIVE_LOOKUP); - __ push(name); - __ mov(r0, FieldOperand(name, Name::kHashFieldOffset)); - __ shr(r0, Name::kHashShift); - __ push(r0); - __ CallStub(&stub); - - __ test(r1, r1); - __ j(zero, miss); - __ jmp(done); -} - - void NameDictionaryLookupStub::Generate(MacroAssembler* masm) { // This stub overrides SometimesSetsUpAFrame() to return false. That means // we cannot call anything that could cause a GC from this stub. @@ -3016,329 +2911,6 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { __ jmp(ecx); // Return to IC Miss stub, continuation still on stack. } -void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) { - __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister()); - KeyedStoreICStub stub(isolate(), state()); - stub.GenerateForTrampoline(masm); -} - -// value is on the stack already. -static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register receiver, - Register key, Register vector, - Register slot, Register feedback, - bool is_polymorphic, Label* miss) { - // feedback initially contains the feedback array - Label next, next_loop, prepare_next; - Label load_smi_map, compare_map; - Label start_polymorphic; - Label pop_and_miss; - - __ push(receiver); - // Value, vector and slot are passed on the stack, so no need to save/restore - // them. - - Register receiver_map = receiver; - Register cached_map = vector; - - // Receiver might not be a heap object. 
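The removed NameDictionaryLookupStub::GeneratePositiveLookup above unrolls the first two probes of the dictionary lookup; per its comments, probe i checks index (hash + i + i*i) & mask, with each entry taking three slots. A standalone sketch of that probing (hypothetical Entry type, not V8's slot layout):

#include <cstddef>
#include <cstdint>

struct Entry { const void* key; const void* value; uint32_t details; };

// Probe i of the dictionary: (hash + i + i*i) & mask, mask = capacity - 1.
// V8 scales this index by the entry size of 3 because its table is a flat
// slot array; using a struct per entry makes that scaling implicit here.
inline size_t ProbeIndex(uint32_t hash, uint32_t capacity, uint32_t i) {
  return (hash + i + i * i) & (capacity - 1);   // capacity is a power of two
}

// Return the matching entry or nullptr after `max_probes` inline probes; the
// stub unrolls two, which its comment says covers ~93% of dictionary loads.
inline const Entry* Lookup(const Entry* entries, uint32_t capacity,
                           const void* name, uint32_t hash,
                           uint32_t max_probes = 2) {
  for (uint32_t i = 0; i < max_probes; ++i) {
    const Entry* e = &entries[ProbeIndex(hash, capacity, i)];
    if (e->key == name) return e;
  }
  return nullptr;  // caller falls back to the out-of-line stub lookup
}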
- __ JumpIfSmi(receiver, &load_smi_map); - __ mov(receiver_map, FieldOperand(receiver, 0)); - __ bind(&compare_map); - __ mov(cached_map, FieldOperand(feedback, FixedArray::OffsetOfElementAt(0))); - - // A named keyed store might have a 2 element array, all other cases can count - // on an array with at least 2 {map, handler} pairs, so they can go right - // into polymorphic array handling. - __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset)); - __ j(not_equal, &start_polymorphic); - - // found, now call handler. - Register handler = feedback; - DCHECK(handler.is(StoreWithVectorDescriptor::ValueRegister())); - __ mov(handler, FieldOperand(feedback, FixedArray::OffsetOfElementAt(1))); - __ pop(receiver); - __ lea(handler, FieldOperand(handler, Code::kHeaderSize)); - __ jmp(handler); - - // Polymorphic, we have to loop from 2 to N - __ bind(&start_polymorphic); - __ push(key); - Register counter = key; - __ mov(counter, Immediate(Smi::FromInt(2))); - - if (!is_polymorphic) { - // If is_polymorphic is false, we may only have a two element array. - // Check against length now in that case. - __ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset)); - __ j(greater_equal, &pop_and_miss); - } - - __ bind(&next_loop); - __ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size, - FixedArray::kHeaderSize)); - __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset)); - __ j(not_equal, &prepare_next); - __ mov(handler, FieldOperand(feedback, counter, times_half_pointer_size, - FixedArray::kHeaderSize + kPointerSize)); - __ lea(handler, FieldOperand(handler, Code::kHeaderSize)); - __ pop(key); - __ pop(receiver); - __ jmp(handler); - - __ bind(&prepare_next); - __ add(counter, Immediate(Smi::FromInt(2))); - __ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset)); - __ j(less, &next_loop); - - // We exhausted our array of map handler pairs. - __ bind(&pop_and_miss); - __ pop(key); - __ pop(receiver); - __ jmp(miss); - - __ bind(&load_smi_map); - __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex); - __ jmp(&compare_map); -} - - -static void HandleMonomorphicStoreCase(MacroAssembler* masm, Register receiver, - Register key, Register vector, - Register slot, Register weak_cell, - Label* miss) { - // The store ic value is on the stack. - DCHECK(weak_cell.is(StoreWithVectorDescriptor::ValueRegister())); - - // feedback initially contains the feedback array - Label compare_smi_map; - - // Move the weak map into the weak_cell register. - Register ic_map = weak_cell; - __ mov(ic_map, FieldOperand(weak_cell, WeakCell::kValueOffset)); - - // Receiver might not be a heap object. - __ JumpIfSmi(receiver, &compare_smi_map); - __ cmp(ic_map, FieldOperand(receiver, 0)); - __ j(not_equal, miss); - __ mov(weak_cell, FieldOperand(vector, slot, times_half_pointer_size, - FixedArray::kHeaderSize + kPointerSize)); - __ lea(weak_cell, FieldOperand(weak_cell, Code::kHeaderSize)); - // jump to the handler. - __ jmp(weak_cell); - - // In microbenchmarks, it made sense to unroll this code so that the call to - // the handler is duplicated for a HeapObject receiver and a Smi receiver. - __ bind(&compare_smi_map); - __ CompareRoot(ic_map, Heap::kHeapNumberMapRootIndex); - __ j(not_equal, miss); - __ mov(weak_cell, FieldOperand(vector, slot, times_half_pointer_size, - FixedArray::kHeaderSize + kPointerSize)); - __ lea(weak_cell, FieldOperand(weak_cell, Code::kHeaderSize)); - // jump to the handler. 
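The removed monomorphic/polymorphic store helpers above scan a feedback array of {map, handler} pairs, comparing the receiver's map against each weak-cell map and tail-calling the matching handler. A standalone model of that scan (hypothetical types; the real code also maps Smi receivers to the heap-number map before comparing):

#include <cstddef>

struct Map;
struct Handler;
struct FeedbackPair { const Map* map; Handler* handler; };

// Scan (map, handler) pairs from `start_pair` onward and return the handler
// whose map matches the receiver map, or nullptr to signal a miss.
inline Handler* FindStoreHandler(const FeedbackPair* pairs, size_t pair_count,
                                 const Map* receiver_map, size_t start_pair = 0) {
  for (size_t i = start_pair; i < pair_count; ++i) {
    if (pairs[i].map == receiver_map) return pairs[i].handler;
  }
  return nullptr;  // exhausted the array: fall through to the miss path
}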
- __ jmp(weak_cell); -} - -void KeyedStoreICStub::Generate(MacroAssembler* masm) { - GenerateImpl(masm, false); -} - -void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) { - GenerateImpl(masm, true); -} - - -static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm, - Register receiver, Register key, - Register vector, Register slot, - Register feedback, Label* miss) { - // feedback initially contains the feedback array - Label next, next_loop, prepare_next; - Label load_smi_map, compare_map; - Label transition_call; - Label pop_and_miss; - - __ push(receiver); - // Value, vector and slot are passed on the stack, so no need to save/restore - // them. - - Register receiver_map = receiver; - Register cached_map = vector; - - // Receiver might not be a heap object. - __ JumpIfSmi(receiver, &load_smi_map); - __ mov(receiver_map, FieldOperand(receiver, 0)); - __ bind(&compare_map); - - // Polymorphic, we have to loop from 0 to N - 1 - __ push(key); - // Current stack layout: - // - esp[0] -- key - // - esp[4] -- receiver - // - esp[8] -- return address - // - esp[12] -- vector - // - esp[16] -- slot - // - esp[20] -- value - // - // Required stack layout for handler call (see StoreWithVectorDescriptor): - // - esp[0] -- return address - // - esp[4] -- vector - // - esp[8] -- slot - // - esp[12] -- value - // - receiver, key, handler in registers. - Register counter = key; - __ mov(counter, Immediate(Smi::kZero)); - __ bind(&next_loop); - __ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size, - FixedArray::kHeaderSize)); - __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset)); - __ j(not_equal, &prepare_next); - __ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size, - FixedArray::kHeaderSize + kPointerSize)); - __ CompareRoot(cached_map, Heap::kUndefinedValueRootIndex); - __ j(not_equal, &transition_call); - __ mov(feedback, FieldOperand(feedback, counter, times_half_pointer_size, - FixedArray::kHeaderSize + 2 * kPointerSize)); - __ pop(key); - __ pop(receiver); - __ lea(feedback, FieldOperand(feedback, Code::kHeaderSize)); - __ jmp(feedback); - - __ bind(&transition_call); - // Current stack layout: - // - esp[0] -- key - // - esp[4] -- receiver - // - esp[8] -- return address - // - esp[12] -- vector - // - esp[16] -- slot - // - esp[20] -- value - // - // Required stack layout for handler call (see StoreTransitionDescriptor): - // - esp[0] -- return address - // - esp[4] -- vector - // - esp[8] -- slot - // - esp[12] -- value - // - receiver, key, map, handler in registers. - __ mov(feedback, FieldOperand(feedback, counter, times_half_pointer_size, - FixedArray::kHeaderSize + 2 * kPointerSize)); - __ lea(feedback, FieldOperand(feedback, Code::kHeaderSize)); - - __ mov(cached_map, FieldOperand(cached_map, WeakCell::kValueOffset)); - // The weak cell may have been cleared. - __ JumpIfSmi(cached_map, &pop_and_miss); - DCHECK(!cached_map.is(StoreTransitionDescriptor::MapRegister())); - __ mov(StoreTransitionDescriptor::MapRegister(), cached_map); - - // Call store transition handler using StoreTransitionDescriptor calling - // convention. - __ pop(key); - __ pop(receiver); - // Ensure that the transition handler we are going to call has the same - // number of stack arguments which means that we don't have to adapt them - // before the call. 
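HandlePolymorphicKeyedStoreCase, removed above, walks the feedback array in steps of three: {map, transition map or undefined, handler}. When a transition map is present the handler is called with the StoreTransitionDescriptor convention; a cleared transition weak cell is treated as a miss. A sketch of the selection logic only (hypothetical types):

#include <cstddef>

struct Map;
struct Handler;
struct KeyedEntry { const Map* map; const Map* transition; Handler* handler; };
struct Selection { Handler* handler; const Map* transition; };  // transition may be null

// Return the handler (plus transition map, if any) for the receiver's map,
// or {nullptr, nullptr} on a miss.
inline Selection SelectKeyedStoreHandler(const KeyedEntry* entries, size_t count,
                                         const Map* receiver_map) {
  for (size_t i = 0; i < count; ++i) {
    if (entries[i].map == receiver_map) {
      return {entries[i].handler, entries[i].transition};
    }
  }
  return {nullptr, nullptr};
}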
- STATIC_ASSERT(StoreWithVectorDescriptor::kStackArgumentsCount == 3); - STATIC_ASSERT(StoreTransitionDescriptor::kStackArgumentsCount == 3); - STATIC_ASSERT(StoreWithVectorDescriptor::kParameterCount - - StoreWithVectorDescriptor::kValue == - StoreTransitionDescriptor::kParameterCount - - StoreTransitionDescriptor::kValue); - STATIC_ASSERT(StoreWithVectorDescriptor::kParameterCount - - StoreWithVectorDescriptor::kSlot == - StoreTransitionDescriptor::kParameterCount - - StoreTransitionDescriptor::kSlot); - STATIC_ASSERT(StoreWithVectorDescriptor::kParameterCount - - StoreWithVectorDescriptor::kVector == - StoreTransitionDescriptor::kParameterCount - - StoreTransitionDescriptor::kVector); - __ jmp(feedback); - - __ bind(&prepare_next); - __ add(counter, Immediate(Smi::FromInt(3))); - __ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset)); - __ j(less, &next_loop); - - // We exhausted our array of map handler pairs. - __ bind(&pop_and_miss); - __ pop(key); - __ pop(receiver); - __ jmp(miss); - - __ bind(&load_smi_map); - __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex); - __ jmp(&compare_map); -} - -void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) { - Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // edx - Register key = StoreWithVectorDescriptor::NameRegister(); // ecx - Register value = StoreWithVectorDescriptor::ValueRegister(); // eax - Register vector = StoreWithVectorDescriptor::VectorRegister(); // ebx - Register slot = StoreWithVectorDescriptor::SlotRegister(); // edi - Label miss; - - if (StoreWithVectorDescriptor::kPassLastArgsOnStack) { - // Current stack layout: - // - esp[8] -- value - // - esp[4] -- slot - // - esp[0] -- return address - STATIC_ASSERT(StoreDescriptor::kStackArgumentsCount == 2); - STATIC_ASSERT(StoreWithVectorDescriptor::kStackArgumentsCount == 3); - if (in_frame) { - __ RecordComment("[ StoreDescriptor -> StoreWithVectorDescriptor"); - // If the vector is not on the stack, then insert the vector beneath - // return address in order to prepare for calling handler with - // StoreWithVector calling convention. - __ push(Operand(esp, 0)); - __ mov(Operand(esp, 4), StoreWithVectorDescriptor::VectorRegister()); - __ RecordComment("]"); - } else { - __ mov(vector, Operand(esp, 1 * kPointerSize)); - } - __ mov(slot, Operand(esp, 2 * kPointerSize)); - } - - Register scratch = value; - __ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size, - FixedArray::kHeaderSize)); - - // Is it a weak cell? - Label try_array; - Label not_array, smi_key, key_okay; - __ CompareRoot(FieldOperand(scratch, 0), Heap::kWeakCellMapRootIndex); - __ j(not_equal, &try_array); - HandleMonomorphicStoreCase(masm, receiver, key, vector, slot, scratch, &miss); - - // Is it a fixed array? - __ bind(&try_array); - __ CompareRoot(FieldOperand(scratch, 0), Heap::kFixedArrayMapRootIndex); - __ j(not_equal, ¬_array); - HandlePolymorphicKeyedStoreCase(masm, receiver, key, vector, slot, scratch, - &miss); - - __ bind(¬_array); - Label try_poly_name; - __ CompareRoot(scratch, Heap::kmegamorphic_symbolRootIndex); - __ j(not_equal, &try_poly_name); - - Handle<Code> megamorphic_stub = - KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState()); - __ jmp(megamorphic_stub, RelocInfo::CODE_TARGET); - - __ bind(&try_poly_name); - // We might have a name in feedback, and a fixed array in the next slot. 
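KeyedStoreICStub::GenerateImpl, removed above, dispatches on the feedback slot's contents: a WeakCell means a monomorphic store, a FixedArray means polymorphic handlers, the megamorphic sentinel jumps straight to the megamorphic stub, and a Name equal to the key means a named keyed store whose {map, handler} array sits in the next slot. A compact standalone model of that dispatch (illustrative enums, not V8 types):

enum class FeedbackKind { kWeakCell, kFixedArray, kMegamorphicSymbol, kName, kOther };
enum class StorePath { kMonomorphic, kPolymorphic, kMegamorphic, kNamedKeyed, kMiss };

// Mirrors the order of checks in the removed stub: weak cell, fixed array,
// megamorphic sentinel, then name-equals-key; anything else is a miss.
inline StorePath Dispatch(FeedbackKind slot, bool name_matches_key) {
  switch (slot) {
    case FeedbackKind::kWeakCell:          return StorePath::kMonomorphic;
    case FeedbackKind::kFixedArray:        return StorePath::kPolymorphic;
    case FeedbackKind::kMegamorphicSymbol: return StorePath::kMegamorphic;
    case FeedbackKind::kName:
      return name_matches_key ? StorePath::kNamedKeyed : StorePath::kMiss;
    default:                               return StorePath::kMiss;
  }
}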
- __ cmp(key, scratch); - __ j(not_equal, &miss); - // If the name comparison succeeded, we know we have a fixed array with - // at least one map/handler pair. - __ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size, - FixedArray::kHeaderSize + kPointerSize)); - HandlePolymorphicStoreCase(masm, receiver, key, vector, slot, scratch, false, - &miss); - - __ bind(&miss); - KeyedStoreIC::GenerateMiss(masm); -} - - void CallICTrampolineStub::Generate(MacroAssembler* masm) { __ EmitLoadTypeFeedbackVector(ebx); CallICStub stub(isolate(), state()); @@ -3692,136 +3264,6 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) { GenerateCase(masm, FAST_ELEMENTS); } - -void FastNewObjectStub::Generate(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- edi : target - // -- edx : new target - // -- esi : context - // -- esp[0] : return address - // ----------------------------------- - __ AssertFunction(edi); - __ AssertReceiver(edx); - - // Verify that the new target is a JSFunction. - Label new_object; - __ CmpObjectType(edx, JS_FUNCTION_TYPE, ebx); - __ j(not_equal, &new_object); - - // Load the initial map and verify that it's in fact a map. - __ mov(ecx, FieldOperand(edx, JSFunction::kPrototypeOrInitialMapOffset)); - __ JumpIfSmi(ecx, &new_object); - __ CmpObjectType(ecx, MAP_TYPE, ebx); - __ j(not_equal, &new_object); - - // Fall back to runtime if the target differs from the new target's - // initial map constructor. - __ cmp(edi, FieldOperand(ecx, Map::kConstructorOrBackPointerOffset)); - __ j(not_equal, &new_object); - - // Allocate the JSObject on the heap. - Label allocate, done_allocate; - __ movzx_b(ebx, FieldOperand(ecx, Map::kInstanceSizeOffset)); - __ lea(ebx, Operand(ebx, times_pointer_size, 0)); - __ Allocate(ebx, eax, edi, no_reg, &allocate, NO_ALLOCATION_FLAGS); - __ bind(&done_allocate); - - // Initialize the JSObject fields. - __ mov(FieldOperand(eax, JSObject::kMapOffset), ecx); - __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), - masm->isolate()->factory()->empty_fixed_array()); - __ mov(FieldOperand(eax, JSObject::kElementsOffset), - masm->isolate()->factory()->empty_fixed_array()); - STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize); - __ lea(ebx, FieldOperand(eax, JSObject::kHeaderSize)); - - // ----------- S t a t e ------------- - // -- eax : result (tagged) - // -- ebx : result fields (untagged) - // -- edi : result end (untagged) - // -- ecx : initial map - // -- esi : context - // -- esp[0] : return address - // ----------------------------------- - - // Perform in-object slack tracking if requested. - Label slack_tracking; - STATIC_ASSERT(Map::kNoSlackTracking == 0); - __ test(FieldOperand(ecx, Map::kBitField3Offset), - Immediate(Map::ConstructionCounter::kMask)); - __ j(not_zero, &slack_tracking, Label::kNear); - { - // Initialize all in-object fields with undefined. - __ LoadRoot(edx, Heap::kUndefinedValueRootIndex); - __ InitializeFieldsWithFiller(ebx, edi, edx); - __ Ret(); - } - __ bind(&slack_tracking); - { - // Decrease generous allocation count. - STATIC_ASSERT(Map::ConstructionCounter::kNext == 32); - __ sub(FieldOperand(ecx, Map::kBitField3Offset), - Immediate(1 << Map::ConstructionCounter::kShift)); - - // Initialize the in-object fields with undefined. 
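The removed FastNewObjectStub::Generate above allocates a JSObject of map->instance_size() words, writes the map plus empty properties and elements, and then fills the in-object fields with undefined (during slack tracking, partly with a one-pointer filler instead). A standalone sketch of the size computation and field fill on a word-array stand-in for the object (values here are illustrative):

#include <cstddef>
#include <cstdint>
#include <vector>

constexpr size_t kPointerSize = 4;   // ia32
constexpr size_t kHeaderWords = 3;   // map, properties, elements

// Bytes to allocate for an object of `instance_size_in_words` words, as the
// stub computes with `lea(ebx, Operand(ebx, times_pointer_size, 0))`.
inline size_t AllocationSizeInBytes(size_t instance_size_in_words) {
  return instance_size_in_words * kPointerSize;
}

// Fill every in-object field (everything after the 3-word header) with
// `filler`, mirroring InitializeFieldsWithFiller over [fields_start, end).
inline void InitializeFields(std::vector<uintptr_t>& object_words, uintptr_t filler) {
  for (size_t i = kHeaderWords; i < object_words.size(); ++i) {
    object_words[i] = filler;  // e.g. the undefined sentinel
  }
}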
- __ movzx_b(edx, FieldOperand(ecx, Map::kUnusedPropertyFieldsOffset)); - __ neg(edx); - __ lea(edx, Operand(edi, edx, times_pointer_size, 0)); - __ LoadRoot(edi, Heap::kUndefinedValueRootIndex); - __ InitializeFieldsWithFiller(ebx, edx, edi); - - // Initialize the remaining (reserved) fields with one pointer filler map. - __ movzx_b(edx, FieldOperand(ecx, Map::kUnusedPropertyFieldsOffset)); - __ lea(edx, Operand(ebx, edx, times_pointer_size, 0)); - __ LoadRoot(edi, Heap::kOnePointerFillerMapRootIndex); - __ InitializeFieldsWithFiller(ebx, edx, edi); - - // Check if we can finalize the instance size. - Label finalize; - STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1); - __ test(FieldOperand(ecx, Map::kBitField3Offset), - Immediate(Map::ConstructionCounter::kMask)); - __ j(zero, &finalize, Label::kNear); - __ Ret(); - - // Finalize the instance size. - __ bind(&finalize); - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ Push(eax); - __ Push(ecx); - __ CallRuntime(Runtime::kFinalizeInstanceSize); - __ Pop(eax); - } - __ Ret(); - } - - // Fall back to %AllocateInNewSpace. - __ bind(&allocate); - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ SmiTag(ebx); - __ Push(ecx); - __ Push(ebx); - __ CallRuntime(Runtime::kAllocateInNewSpace); - __ Pop(ecx); - } - __ movzx_b(ebx, FieldOperand(ecx, Map::kInstanceSizeOffset)); - __ lea(edi, Operand(eax, ebx, times_pointer_size, 0)); - STATIC_ASSERT(kHeapObjectTag == 1); - __ dec(edi); - __ jmp(&done_allocate); - - // Fall back to %NewObject. - __ bind(&new_object); - __ PopReturnAddressTo(ecx); - __ Push(edi); - __ Push(edx); - __ PushReturnAddressFrom(ecx); - __ TailCallRuntime(Runtime::kNewObject); -} - - void FastNewRestParameterStub::Generate(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- edi : function diff --git a/deps/v8/src/ia32/code-stubs-ia32.h b/deps/v8/src/ia32/code-stubs-ia32.h index c1878f0207..649e2ccf16 100644 --- a/deps/v8/src/ia32/code-stubs-ia32.h +++ b/deps/v8/src/ia32/code-stubs-ia32.h @@ -16,16 +16,6 @@ void ArrayNativeCode(MacroAssembler* masm, class StringHelper : public AllStatic { public: - // Generate code for copying characters using the rep movs instruction. - // Copies ecx characters from esi to edi. Copying of overlapping regions is - // not supported. - static void GenerateCopyCharacters(MacroAssembler* masm, - Register dest, - Register src, - Register count, - Register scratch, - String::Encoding encoding); - // Compares two flat one byte strings and returns result in eax. 
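The GenerateCompareFlatOneByteStrings helper declared just below (and kept by this change) compares two flat one-byte strings, roughly: byte-wise over the common prefix, then by length. A minimal standalone equivalent for illustration (plain ints instead of the stub's register and flag protocol):

#include <cstddef>
#include <cstdint>

// Lexicographic comparison of two flat one-byte strings: negative if left
// sorts first, zero if equal, positive if right sorts first.
inline int CompareFlatOneByteStrings(const uint8_t* left, size_t left_len,
                                     const uint8_t* right, size_t right_len) {
  size_t min_len = left_len < right_len ? left_len : right_len;
  for (size_t i = 0; i < min_len; ++i) {
    if (left[i] != right[i]) return left[i] < right[i] ? -1 : 1;
  }
  if (left_len == right_len) return 0;
  return left_len < right_len ? -1 : 1;   // equal prefix: shorter string first
}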
static void GenerateCompareFlatOneByteStrings(MacroAssembler* masm, Register left, Register right, @@ -68,14 +58,6 @@ class NameDictionaryLookupStub: public PlatformCodeStub { Handle<Name> name, Register r0); - static void GeneratePositiveLookup(MacroAssembler* masm, - Label* miss, - Label* done, - Register elements, - Register name, - Register r0, - Register r1); - bool SometimesSetsUpAFrame() override { return false; } private: diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc index 18e53641e6..ccd159e299 100644 --- a/deps/v8/src/ia32/codegen-ia32.cc +++ b/deps/v8/src/ia32/codegen-ia32.cc @@ -485,309 +485,6 @@ MemMoveFunction CreateMemMoveFunction(Isolate* isolate) { #define __ ACCESS_MASM(masm) - -void ElementsTransitionGenerator::GenerateMapChangeElementsTransition( - MacroAssembler* masm, - Register receiver, - Register key, - Register value, - Register target_map, - AllocationSiteMode mode, - Label* allocation_memento_found) { - Register scratch = edi; - DCHECK(!AreAliased(receiver, key, value, target_map, scratch)); - - if (mode == TRACK_ALLOCATION_SITE) { - DCHECK(allocation_memento_found != NULL); - __ JumpIfJSArrayHasAllocationMemento( - receiver, scratch, allocation_memento_found); - } - - // Set transitioned map. - __ mov(FieldOperand(receiver, HeapObject::kMapOffset), target_map); - __ RecordWriteField(receiver, - HeapObject::kMapOffset, - target_map, - scratch, - kDontSaveFPRegs, - EMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); -} - - -void ElementsTransitionGenerator::GenerateSmiToDouble( - MacroAssembler* masm, - Register receiver, - Register key, - Register value, - Register target_map, - AllocationSiteMode mode, - Label* fail) { - // Return address is on the stack. - DCHECK(receiver.is(edx)); - DCHECK(key.is(ecx)); - DCHECK(value.is(eax)); - DCHECK(target_map.is(ebx)); - - Label loop, entry, convert_hole, gc_required, only_change_map; - - if (mode == TRACK_ALLOCATION_SITE) { - __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail); - } - - // Check for empty arrays, which only require a map transition and no changes - // to the backing store. - __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset)); - __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array())); - __ j(equal, &only_change_map); - - __ push(eax); - __ push(ebx); - __ push(esi); - - __ mov(edi, FieldOperand(edi, FixedArray::kLengthOffset)); - - // Allocate new FixedDoubleArray. - // edx: receiver - // edi: length of source FixedArray (smi-tagged) - AllocationFlags flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT); - __ Allocate(FixedDoubleArray::kHeaderSize, times_8, edi, - REGISTER_VALUE_IS_SMI, eax, ebx, no_reg, &gc_required, flags); - - // eax: destination FixedDoubleArray - // edi: number of elements - // edx: receiver - __ mov(FieldOperand(eax, HeapObject::kMapOffset), - Immediate(masm->isolate()->factory()->fixed_double_array_map())); - __ mov(FieldOperand(eax, FixedDoubleArray::kLengthOffset), edi); - __ mov(esi, FieldOperand(edx, JSObject::kElementsOffset)); - // Replace receiver's backing store with newly created FixedDoubleArray. - __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax); - __ mov(ebx, eax); - __ RecordWriteField(edx, - JSObject::kElementsOffset, - ebx, - edi, - kDontSaveFPRegs, - EMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); - - __ mov(edi, FieldOperand(esi, FixedArray::kLengthOffset)); - - // Prepare for conversion loop. 
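The removed ElementsTransitionGenerator::GenerateSmiToDouble (its conversion loop continues below) allocates a FixedDoubleArray and converts each element: a Smi is untagged and converted to double, a hole becomes the canonical hole NaN. A standalone sketch of the per-element conversion, with std::optional standing in for Smi-or-hole and an ordinary quiet NaN standing in for V8's specific hole-NaN bit pattern:

#include <cstdint>
#include <limits>
#include <optional>
#include <vector>

// Stand-in for the canonical hole NaN loaded from
// ExternalReference::address_of_the_hole_nan() in the removed code.
inline double TheHoleNaN() { return std::numeric_limits<double>::quiet_NaN(); }

// Convert Smi-or-hole elements into a double backing store.
inline std::vector<double> SmiToDouble(
    const std::vector<std::optional<int32_t>>& elements) {
  std::vector<double> out;
  out.reserve(elements.size());
  for (const auto& e : elements) {
    out.push_back(e.has_value() ? static_cast<double>(*e)  // untagged Smi -> double
                                : TheHoleNaN());           // hole -> hole NaN
  }
  return out;
}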
- ExternalReference canonical_the_hole_nan_reference = - ExternalReference::address_of_the_hole_nan(); - XMMRegister the_hole_nan = xmm1; - __ movsd(the_hole_nan, - Operand::StaticVariable(canonical_the_hole_nan_reference)); - __ jmp(&entry); - - // Call into runtime if GC is required. - __ bind(&gc_required); - - // Restore registers before jumping into runtime. - __ pop(esi); - __ pop(ebx); - __ pop(eax); - __ jmp(fail); - - // Convert and copy elements - // esi: source FixedArray - __ bind(&loop); - __ mov(ebx, FieldOperand(esi, edi, times_2, FixedArray::kHeaderSize)); - // ebx: current element from source - // edi: index of current element - __ JumpIfNotSmi(ebx, &convert_hole); - - // Normal smi, convert it to double and store. - __ SmiUntag(ebx); - __ Cvtsi2sd(xmm0, ebx); - __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize), - xmm0); - __ jmp(&entry); - - // Found hole, store hole_nan_as_double instead. - __ bind(&convert_hole); - - if (FLAG_debug_code) { - __ cmp(ebx, masm->isolate()->factory()->the_hole_value()); - __ Assert(equal, kObjectFoundInSmiOnlyArray); - } - - __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize), - the_hole_nan); - - __ bind(&entry); - __ sub(edi, Immediate(Smi::FromInt(1))); - __ j(not_sign, &loop); - - // Restore registers. - __ pop(esi); - __ pop(ebx); - __ pop(eax); - - __ bind(&only_change_map); - // eax: value - // ebx: target map - // Set transitioned map. - __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx); - __ RecordWriteField(edx, - HeapObject::kMapOffset, - ebx, - edi, - kDontSaveFPRegs, - OMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); -} - - -void ElementsTransitionGenerator::GenerateDoubleToObject( - MacroAssembler* masm, - Register receiver, - Register key, - Register value, - Register target_map, - AllocationSiteMode mode, - Label* fail) { - // Return address is on the stack. - DCHECK(receiver.is(edx)); - DCHECK(key.is(ecx)); - DCHECK(value.is(eax)); - DCHECK(target_map.is(ebx)); - - Label loop, entry, convert_hole, gc_required, only_change_map, success; - - if (mode == TRACK_ALLOCATION_SITE) { - __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail); - } - - // Check for empty arrays, which only require a map transition and no changes - // to the backing store. - __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset)); - __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array())); - __ j(equal, &only_change_map); - - __ push(esi); - __ push(eax); - __ push(edx); - __ push(ebx); - - __ mov(ebx, FieldOperand(edi, FixedDoubleArray::kLengthOffset)); - - // Allocate new FixedArray. - // ebx: length of source FixedDoubleArray (smi-tagged) - __ lea(edi, Operand(ebx, times_2, FixedArray::kHeaderSize)); - __ Allocate(edi, eax, esi, no_reg, &gc_required, NO_ALLOCATION_FLAGS); - - // eax: destination FixedArray - // ebx: number of elements - __ mov(FieldOperand(eax, HeapObject::kMapOffset), - Immediate(masm->isolate()->factory()->fixed_array_map())); - __ mov(FieldOperand(eax, FixedArray::kLengthOffset), ebx); - __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset)); - - // Allocating heap numbers in the loop below can fail and cause a jump to - // gc_required. We can't leave a partly initialized FixedArray behind, - // so pessimistically fill it with holes now. 
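GenerateDoubleToObject, also removed (the rest of its body follows), performs the inverse transition: it pre-fills the new FixedArray with the-hole because boxing heap numbers may trigger GC, then boxes every non-hole double, recognizing holes by the upper 32 bits of their bit pattern (kHoleNanUpper32). A sketch with simplified boxing (std::optional in place of heap-number-or-hole; the hole constant is a placeholder, not V8's value):

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <optional>
#include <vector>

constexpr uint32_t kHoleNanUpper32Bits = 0xFFF7FFFF;  // placeholder bit pattern

// Detect the hole by comparing the upper 32 bits of the double, as the
// removed code does with `cmp(..., Immediate(kHoleNanUpper32))`.
inline bool IsHole(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  return static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32Bits;
}

// Box each non-hole double; holes stay as empty optionals (pre-filled).
inline std::vector<std::optional<double>> DoubleToObject(
    const std::vector<double>& elements) {
  std::vector<std::optional<double>> out(elements.size());  // "filled with holes"
  for (size_t i = 0; i < elements.size(); ++i) {
    if (!IsHole(elements[i])) out[i] = elements[i];
  }
  return out;
}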
- Label initialization_loop, initialization_loop_entry; - __ jmp(&initialization_loop_entry, Label::kNear); - __ bind(&initialization_loop); - __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize), - masm->isolate()->factory()->the_hole_value()); - __ bind(&initialization_loop_entry); - __ sub(ebx, Immediate(Smi::FromInt(1))); - __ j(not_sign, &initialization_loop); - - __ mov(ebx, FieldOperand(edi, FixedDoubleArray::kLengthOffset)); - __ jmp(&entry); - - // ebx: target map - // edx: receiver - // Set transitioned map. - __ bind(&only_change_map); - __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx); - __ RecordWriteField(edx, - HeapObject::kMapOffset, - ebx, - edi, - kDontSaveFPRegs, - OMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); - __ jmp(&success); - - // Call into runtime if GC is required. - __ bind(&gc_required); - __ pop(ebx); - __ pop(edx); - __ pop(eax); - __ pop(esi); - __ jmp(fail); - - // Box doubles into heap numbers. - // edi: source FixedDoubleArray - // eax: destination FixedArray - __ bind(&loop); - // ebx: index of current element (smi-tagged) - uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32); - __ cmp(FieldOperand(edi, ebx, times_4, offset), Immediate(kHoleNanUpper32)); - __ j(equal, &convert_hole); - - // Non-hole double, copy value into a heap number. - __ AllocateHeapNumber(edx, esi, no_reg, &gc_required); - // edx: new heap number - __ movsd(xmm0, - FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize)); - __ movsd(FieldOperand(edx, HeapNumber::kValueOffset), xmm0); - __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize), edx); - __ mov(esi, ebx); - __ RecordWriteArray(eax, - edx, - esi, - kDontSaveFPRegs, - EMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); - __ jmp(&entry, Label::kNear); - - // Replace the-hole NaN with the-hole pointer. - __ bind(&convert_hole); - __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize), - masm->isolate()->factory()->the_hole_value()); - - __ bind(&entry); - __ sub(ebx, Immediate(Smi::FromInt(1))); - __ j(not_sign, &loop); - - __ pop(ebx); - __ pop(edx); - // ebx: target map - // edx: receiver - // Set transitioned map. - __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx); - __ RecordWriteField(edx, - HeapObject::kMapOffset, - ebx, - edi, - kDontSaveFPRegs, - OMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); - // Replace receiver's backing store with newly created and filled FixedArray. - __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax); - __ RecordWriteField(edx, - JSObject::kElementsOffset, - eax, - edi, - kDontSaveFPRegs, - EMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); - - // Restore registers. 
- __ pop(eax); - __ pop(esi); - - __ bind(&success); -} - - void StringCharLoadGenerator::Generate(MacroAssembler* masm, Factory* factory, Register string, @@ -919,32 +616,24 @@ bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) { return result; } +Code::Age Code::GetCodeAge(Isolate* isolate, byte* sequence) { + if (IsYoungSequence(isolate, sequence)) return kNoAgeCodeAge; -void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age, - MarkingParity* parity) { - if (IsYoungSequence(isolate, sequence)) { - *age = kNoAgeCodeAge; - *parity = NO_MARKING_PARITY; - } else { - sequence++; // Skip the kCallOpcode byte - Address target_address = sequence + *reinterpret_cast<int*>(sequence) + - Assembler::kCallTargetAddressOffset; - Code* stub = GetCodeFromTargetAddress(target_address); - GetCodeAgeAndParity(stub, age, parity); - } + sequence++; // Skip the kCallOpcode byte + Address target_address = sequence + *reinterpret_cast<int*>(sequence) + + Assembler::kCallTargetAddressOffset; + Code* stub = GetCodeFromTargetAddress(target_address); + return GetAgeOfCodeAgeStub(stub); } - -void Code::PatchPlatformCodeAge(Isolate* isolate, - byte* sequence, - Code::Age age, - MarkingParity parity) { +void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence, + Code::Age age) { uint32_t young_length = isolate->code_aging_helper()->young_sequence_length(); if (age == kNoAgeCodeAge) { isolate->code_aging_helper()->CopyYoungSequenceTo(sequence); Assembler::FlushICache(isolate, sequence, young_length); } else { - Code* stub = GetCodeAgeStub(isolate, age, parity); + Code* stub = GetCodeAgeStub(isolate, age); CodePatcher patcher(isolate, sequence, young_length); patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32); } diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc index 390f3a76a8..7410a46a61 100644 --- a/deps/v8/src/ia32/deoptimizer-ia32.cc +++ b/deps/v8/src/ia32/deoptimizer-ia32.cc @@ -164,8 +164,7 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) { // Right trim the relocation info to free up remaining space. const int delta = reloc_info->length() - new_reloc_length; if (delta > 0) { - isolate->heap()->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>( - reloc_info, delta); + isolate->heap()->RightTrimFixedArray(reloc_info, delta); } } diff --git a/deps/v8/src/ia32/interface-descriptors-ia32.cc b/deps/v8/src/ia32/interface-descriptors-ia32.cc index 8ce78720de..cef6449ca0 100644 --- a/deps/v8/src/ia32/interface-descriptors-ia32.cc +++ b/deps/v8/src/ia32/interface-descriptors-ia32.cc @@ -64,16 +64,11 @@ const Register GrowArrayElementsDescriptor::KeyRegister() { return ebx; } void FastNewClosureDescriptor::InitializePlatformSpecific( CallInterfaceDescriptorData* data) { - Register registers[] = {ebx}; + // SharedFunctionInfo, vector, slot index. 
+ Register registers[] = {ebx, ecx, edx}; data->InitializePlatformSpecific(arraysize(registers), registers, NULL); } -void FastNewObjectDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - Register registers[] = {edi, edx}; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - void FastNewRestParameterDescriptor::InitializePlatformSpecific( CallInterfaceDescriptorData* data) { Register registers[] = {edi}; diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc index 2fa9d0eda5..0c7c2203f0 100644 --- a/deps/v8/src/ia32/macro-assembler-ia32.cc +++ b/deps/v8/src/ia32/macro-assembler-ia32.cc @@ -810,67 +810,6 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) { cmpb(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type)); } -void MacroAssembler::CheckFastObjectElements(Register map, - Label* fail, - Label::Distance distance) { - STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); - STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); - STATIC_ASSERT(FAST_ELEMENTS == 2); - STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); - cmpb(FieldOperand(map, Map::kBitField2Offset), - Immediate(Map::kMaximumBitField2FastHoleySmiElementValue)); - j(below_equal, fail, distance); - cmpb(FieldOperand(map, Map::kBitField2Offset), - Immediate(Map::kMaximumBitField2FastHoleyElementValue)); - j(above, fail, distance); -} - - -void MacroAssembler::CheckFastSmiElements(Register map, - Label* fail, - Label::Distance distance) { - STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); - STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); - cmpb(FieldOperand(map, Map::kBitField2Offset), - Immediate(Map::kMaximumBitField2FastHoleySmiElementValue)); - j(above, fail, distance); -} - - -void MacroAssembler::StoreNumberToDoubleElements( - Register maybe_number, - Register elements, - Register key, - Register scratch1, - XMMRegister scratch2, - Label* fail, - int elements_offset) { - Label smi_value, done; - JumpIfSmi(maybe_number, &smi_value, Label::kNear); - - CheckMap(maybe_number, - isolate()->factory()->heap_number_map(), - fail, - DONT_DO_SMI_CHECK); - - // Double value, turn potential sNaN into qNaN. - Move(scratch2, 1.0); - mulsd(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset)); - jmp(&done, Label::kNear); - - bind(&smi_value); - // Value is a smi. Convert to a double and store. - // Preserve original value. - mov(scratch1, maybe_number); - SmiUntag(scratch1); - Cvtsi2sd(scratch2, scratch1); - bind(&done); - movsd(FieldOperand(elements, key, times_4, - FixedDoubleArray::kHeaderSize - elements_offset), - scratch2); -} - - void MacroAssembler::CompareMap(Register obj, Handle<Map> map) { cmp(FieldOperand(obj, HeapObject::kMapOffset), map); } @@ -1654,139 +1593,6 @@ void MacroAssembler::AllocateHeapNumber(Register result, mov(FieldOperand(result, HeapObject::kMapOffset), Immediate(map)); } - -void MacroAssembler::AllocateTwoByteString(Register result, - Register length, - Register scratch1, - Register scratch2, - Register scratch3, - Label* gc_required) { - // Calculate the number of bytes needed for the characters in the string while - // observing object alignment. - DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); - DCHECK(kShortSize == 2); - // scratch1 = length * 2 + kObjectAlignmentMask. - lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask)); - and_(scratch1, Immediate(~kObjectAlignmentMask)); - - // Allocate two byte string in new space. 
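The removed MacroAssembler::StoreNumberToDoubleElements above handles two value shapes before the store into a FixedDoubleArray: a Smi is untagged and converted, while a heap number is multiplied by 1.0 (mulsd) so a potential signaling NaN is canonicalized to a quiet NaN. A standalone sketch of that value path (a std::variant stands in for the tagged value):

#include <cstdint>
#include <variant>

// Either an untagged Smi or the double payload of a heap number.
using NumberValue = std::variant<int32_t, double>;

// Produce the double that would be stored into the double-element array.
inline double ToStorableDouble(const NumberValue& v) {
  if (std::holds_alternative<int32_t>(v)) {
    return static_cast<double>(std::get<int32_t>(v));  // Smi path: convert
  }
  // Heap-number path: the stub multiplies by 1.0 to quiet a signaling NaN;
  // the multiply is kept here for illustration even though a C++ compiler
  // is free to treat it as a no-op.
  return std::get<double>(v) * 1.0;
}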
- Allocate(SeqTwoByteString::kHeaderSize, times_1, scratch1, - REGISTER_VALUE_IS_INT32, result, scratch2, scratch3, gc_required, - NO_ALLOCATION_FLAGS); - - // Set the map, length and hash field. - mov(FieldOperand(result, HeapObject::kMapOffset), - Immediate(isolate()->factory()->string_map())); - mov(scratch1, length); - SmiTag(scratch1); - mov(FieldOperand(result, String::kLengthOffset), scratch1); - mov(FieldOperand(result, String::kHashFieldOffset), - Immediate(String::kEmptyHashField)); -} - - -void MacroAssembler::AllocateOneByteString(Register result, Register length, - Register scratch1, Register scratch2, - Register scratch3, - Label* gc_required) { - // Calculate the number of bytes needed for the characters in the string while - // observing object alignment. - DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); - mov(scratch1, length); - DCHECK(kCharSize == 1); - add(scratch1, Immediate(kObjectAlignmentMask)); - and_(scratch1, Immediate(~kObjectAlignmentMask)); - - // Allocate one-byte string in new space. - Allocate(SeqOneByteString::kHeaderSize, times_1, scratch1, - REGISTER_VALUE_IS_INT32, result, scratch2, scratch3, gc_required, - NO_ALLOCATION_FLAGS); - - // Set the map, length and hash field. - mov(FieldOperand(result, HeapObject::kMapOffset), - Immediate(isolate()->factory()->one_byte_string_map())); - mov(scratch1, length); - SmiTag(scratch1); - mov(FieldOperand(result, String::kLengthOffset), scratch1); - mov(FieldOperand(result, String::kHashFieldOffset), - Immediate(String::kEmptyHashField)); -} - - -void MacroAssembler::AllocateOneByteString(Register result, int length, - Register scratch1, Register scratch2, - Label* gc_required) { - DCHECK(length > 0); - - // Allocate one-byte string in new space. - Allocate(SeqOneByteString::SizeFor(length), result, scratch1, scratch2, - gc_required, NO_ALLOCATION_FLAGS); - - // Set the map, length and hash field. - mov(FieldOperand(result, HeapObject::kMapOffset), - Immediate(isolate()->factory()->one_byte_string_map())); - mov(FieldOperand(result, String::kLengthOffset), - Immediate(Smi::FromInt(length))); - mov(FieldOperand(result, String::kHashFieldOffset), - Immediate(String::kEmptyHashField)); -} - - -void MacroAssembler::AllocateTwoByteConsString(Register result, - Register scratch1, - Register scratch2, - Label* gc_required) { - // Allocate heap number in new space. - Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required, - NO_ALLOCATION_FLAGS); - - // Set the map. The other fields are left uninitialized. - mov(FieldOperand(result, HeapObject::kMapOffset), - Immediate(isolate()->factory()->cons_string_map())); -} - - -void MacroAssembler::AllocateOneByteConsString(Register result, - Register scratch1, - Register scratch2, - Label* gc_required) { - Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required, - NO_ALLOCATION_FLAGS); - - // Set the map. The other fields are left uninitialized. - mov(FieldOperand(result, HeapObject::kMapOffset), - Immediate(isolate()->factory()->cons_one_byte_string_map())); -} - - -void MacroAssembler::AllocateTwoByteSlicedString(Register result, - Register scratch1, - Register scratch2, - Label* gc_required) { - // Allocate heap number in new space. - Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required, - NO_ALLOCATION_FLAGS); - - // Set the map. The other fields are left uninitialized. 
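The removed sequential-string allocators above size the object as header plus length times character size, rounded up to the object alignment (the lea/and_ with kObjectAlignmentMask). A small standalone version of that size computation (constants are placeholders, not V8's real values):

#include <cstddef>

constexpr size_t kObjectAlignment = 8;                        // placeholder
constexpr size_t kObjectAlignmentMask = kObjectAlignment - 1;
constexpr size_t kSeqStringHeaderSize = 16;                   // placeholder, alignment-sized

// header + length * bytes_per_char, with the payload rounded up to the
// object alignment; bytes_per_char is 1 for one-byte, 2 for two-byte strings.
inline size_t SeqStringAllocationSize(size_t length, size_t bytes_per_char) {
  size_t payload = (length * bytes_per_char + kObjectAlignmentMask) &
                   ~kObjectAlignmentMask;
  return kSeqStringHeaderSize + payload;
}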
- mov(FieldOperand(result, HeapObject::kMapOffset), - Immediate(isolate()->factory()->sliced_string_map())); -} - - -void MacroAssembler::AllocateOneByteSlicedString(Register result, - Register scratch1, - Register scratch2, - Label* gc_required) { - // Allocate heap number in new space. - Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required, - NO_ALLOCATION_FLAGS); - - // Set the map. The other fields are left uninitialized. - mov(FieldOperand(result, HeapObject::kMapOffset), - Immediate(isolate()->factory()->sliced_one_byte_string_map())); -} - - void MacroAssembler::AllocateJSValue(Register result, Register constructor, Register value, Register scratch, Label* gc_required) { @@ -2131,16 +1937,14 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected, } } - -void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target, - const ParameterCount& expected, - const ParameterCount& actual) { - Label skip_flooding; - ExternalReference last_step_action = - ExternalReference::debug_last_step_action_address(isolate()); - STATIC_ASSERT(StepFrame > StepIn); - cmpb(Operand::StaticVariable(last_step_action), Immediate(StepIn)); - j(less, &skip_flooding); +void MacroAssembler::CheckDebugHook(Register fun, Register new_target, + const ParameterCount& expected, + const ParameterCount& actual) { + Label skip_hook; + ExternalReference debug_hook_active = + ExternalReference::debug_hook_on_function_call_address(isolate()); + cmpb(Operand::StaticVariable(debug_hook_active), Immediate(0)); + j(equal, &skip_hook); { FrameScope frame(this, has_frame() ? StackFrame::NONE : StackFrame::INTERNAL); @@ -2157,7 +1961,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target, } Push(fun); Push(fun); - CallRuntime(Runtime::kDebugPrepareStepInIfStepping); + CallRuntime(Runtime::kDebugOnFunctionCall); Pop(fun); if (new_target.is_valid()) { Pop(new_target); @@ -2171,7 +1975,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target, SmiUntag(expected.reg()); } } - bind(&skip_flooding); + bind(&skip_hook); } @@ -2185,8 +1989,8 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, DCHECK(function.is(edi)); DCHECK_IMPLIES(new_target.is_valid(), new_target.is(edx)); - if (call_wrapper.NeedsDebugStepCheck()) { - FloodFunctionIfStepping(function, new_target, expected, actual); + if (call_wrapper.NeedsDebugHookCheck()) { + CheckDebugHook(function, new_target, expected, actual); } // Clear the new.target register if not given. @@ -2291,28 +2095,6 @@ void MacroAssembler::LoadGlobalProxy(Register dst) { mov(dst, ContextOperand(dst, Context::GLOBAL_PROXY_INDEX)); } - -void MacroAssembler::LoadTransitionedArrayMapConditional( - ElementsKind expected_kind, - ElementsKind transitioned_kind, - Register map_in_out, - Register scratch, - Label* no_map_match) { - DCHECK(IsFastElementsKind(expected_kind)); - DCHECK(IsFastElementsKind(transitioned_kind)); - - // Check that the function's map is the same as the expected cached map. - mov(scratch, NativeContextOperand()); - cmp(map_in_out, - ContextOperand(scratch, Context::ArrayMapIndex(expected_kind))); - j(not_equal, no_map_match); - - // Use the transitioned cached map. - mov(map_in_out, - ContextOperand(scratch, Context::ArrayMapIndex(transitioned_kind))); -} - - void MacroAssembler::LoadGlobalFunction(int index, Register function) { // Load the native context from the current context. 
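This hunk replaces FloodFunctionIfStepping with CheckDebugHook: instead of comparing the debugger's last step action against StepIn, the new code tests a single debug_hook_on_function_call byte and, when it is non-zero, calls Runtime::kDebugOnFunctionCall before invoking the function. A standalone model of the new control flow (hypothetical callback types):

#include <cstdint>
#include <functional>

// Stand-in for the byte behind
// ExternalReference::debug_hook_on_function_call_address().
static uint8_t g_debug_hook_active = 0;

// Mirrors CheckDebugHook: skip the hook when the byte is zero, otherwise
// notify the debugger before performing the call.
inline void InvokeWithDebugHook(const std::function<void()>& debug_on_function_call,
                                const std::function<void()>& invoke) {
  if (g_debug_hook_active != 0) {
    debug_on_function_call();   // Runtime::kDebugOnFunctionCall in the real code
  }
  invoke();
}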
mov(function, NativeContextOperand()); @@ -2759,19 +2541,6 @@ void MacroAssembler::LoadPowerOf2(XMMRegister dst, psllq(dst, HeapNumber::kMantissaBits); } - -void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte( - Register instance_type, Register scratch, Label* failure) { - if (!scratch.is(instance_type)) { - mov(scratch, instance_type); - } - and_(scratch, - kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask); - cmp(scratch, kStringTag | kSeqStringTag | kOneByteStringTag); - j(not_equal, failure); -} - - void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register object1, Register object2, Register scratch1, @@ -3162,43 +2931,6 @@ void MacroAssembler::TestJSArrayForAllocationMemento( cmp(scratch_reg, Immediate(isolate()->factory()->allocation_memento_map())); } - -void MacroAssembler::JumpIfDictionaryInPrototypeChain( - Register object, - Register scratch0, - Register scratch1, - Label* found) { - DCHECK(!scratch1.is(scratch0)); - Factory* factory = isolate()->factory(); - Register current = scratch0; - Label loop_again, end; - - // scratch contained elements pointer. - mov(current, object); - mov(current, FieldOperand(current, HeapObject::kMapOffset)); - mov(current, FieldOperand(current, Map::kPrototypeOffset)); - cmp(current, Immediate(factory->null_value())); - j(equal, &end); - - // Loop based on the map going up the prototype chain. - bind(&loop_again); - mov(current, FieldOperand(current, HeapObject::kMapOffset)); - STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE); - STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE); - CmpInstanceType(current, JS_OBJECT_TYPE); - j(below, found); - mov(scratch1, FieldOperand(current, Map::kBitField2Offset)); - DecodeField<Map::ElementsKindBits>(scratch1); - cmp(scratch1, Immediate(DICTIONARY_ELEMENTS)); - j(equal, found); - mov(current, FieldOperand(current, Map::kPrototypeOffset)); - cmp(current, Immediate(factory->null_value())); - j(not_equal, &loop_again); - - bind(&end); -} - - void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) { DCHECK(!dividend.is(eax)); DCHECK(!dividend.is(edx)); diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h index e8ff59d41b..50ff068551 100644 --- a/deps/v8/src/ia32/macro-assembler-ia32.h +++ b/deps/v8/src/ia32/macro-assembler-ia32.h @@ -260,16 +260,6 @@ class MacroAssembler: public Assembler { // Load the global proxy from the current context. void LoadGlobalProxy(Register dst); - // Conditionally load the cached Array transitioned map of type - // transitioned_kind from the native context if the map in register - // map_in_out is the cached Array map in the native context of - // expected_kind. - void LoadTransitionedArrayMapConditional(ElementsKind expected_kind, - ElementsKind transitioned_kind, - Register map_in_out, - Register scratch, - Label* no_map_match); - // Load the global function with the given index. void LoadGlobalFunction(int index, Register function); @@ -344,9 +334,10 @@ class MacroAssembler: public Assembler { const ParameterCount& actual, InvokeFlag flag, const CallWrapper& call_wrapper); - void FloodFunctionIfStepping(Register fun, Register new_target, - const ParameterCount& expected, - const ParameterCount& actual); + // On function call, call into the debugger if necessary. + void CheckDebugHook(Register fun, Register new_target, + const ParameterCount& expected, + const ParameterCount& actual); // Invoke the JavaScript function in the given register. 
Changes the // current context to the context in the function before invoking. @@ -391,24 +382,6 @@ class MacroAssembler: public Assembler { // Compare instance type for map. void CmpInstanceType(Register map, InstanceType type); - // Check if a map for a JSObject indicates that the object can have both smi - // and HeapObject elements. Jump to the specified label if it does not. - void CheckFastObjectElements(Register map, Label* fail, - Label::Distance distance = Label::kFar); - - // Check if a map for a JSObject indicates that the object has fast smi only - // elements. Jump to the specified label if it does not. - void CheckFastSmiElements(Register map, Label* fail, - Label::Distance distance = Label::kFar); - - // Check to see if maybe_number can be stored as a double in - // FastDoubleElements. If it can, store it at the index specified by key in - // the FastDoubleElements array elements, otherwise jump to fail. - void StoreNumberToDoubleElements(Register maybe_number, Register elements, - Register key, Register scratch1, - XMMRegister scratch2, Label* fail, - int offset = 0); - // Compare an object's map with the specified map. void CompareMap(Register obj, Handle<Map> map); @@ -640,31 +613,6 @@ class MacroAssembler: public Assembler { void AllocateHeapNumber(Register result, Register scratch1, Register scratch2, Label* gc_required, MutableMode mode = IMMUTABLE); - // Allocate a sequential string. All the header fields of the string object - // are initialized. - void AllocateTwoByteString(Register result, Register length, - Register scratch1, Register scratch2, - Register scratch3, Label* gc_required); - void AllocateOneByteString(Register result, Register length, - Register scratch1, Register scratch2, - Register scratch3, Label* gc_required); - void AllocateOneByteString(Register result, int length, Register scratch1, - Register scratch2, Label* gc_required); - - // Allocate a raw cons string object. Only the map field of the result is - // initialized. - void AllocateTwoByteConsString(Register result, Register scratch1, - Register scratch2, Label* gc_required); - void AllocateOneByteConsString(Register result, Register scratch1, - Register scratch2, Label* gc_required); - - // Allocate a raw sliced string object. Only the map field of the result is - // initialized. - void AllocateTwoByteSlicedString(Register result, Register scratch1, - Register scratch2, Label* gc_required); - void AllocateOneByteSlicedString(Register result, Register scratch1, - Register scratch2, Label* gc_required); - // Allocate and initialize a JSValue wrapper with the specified {constructor} // and {value}. void AllocateJSValue(Register result, Register constructor, Register value, @@ -889,13 +837,6 @@ class MacroAssembler: public Assembler { // --------------------------------------------------------------------------- // String utilities. - // Check whether the instance type represents a flat one-byte string. Jump to - // the label if not. If the instance type can be scratched specify same - // register for both instance type and scratch. - void JumpIfInstanceTypeIsNotSequentialOneByte( - Register instance_type, Register scratch, - Label* on_not_flat_one_byte_string); - // Checks if both objects are sequential one-byte strings, and jumps to label // if either is not. 
void JumpIfNotBothSequentialOneByteStrings( @@ -943,20 +884,6 @@ class MacroAssembler: public Assembler { Register scratch_reg, Label* no_memento_found); - void JumpIfJSArrayHasAllocationMemento(Register receiver_reg, - Register scratch_reg, - Label* memento_found) { - Label no_memento_found; - TestJSArrayForAllocationMemento(receiver_reg, scratch_reg, - &no_memento_found); - j(equal, memento_found); - bind(&no_memento_found); - } - - // Jumps to found label if a prototype map has dictionary elements. - void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0, - Register scratch1, Label* found); - private: bool generating_stub_; bool has_frame_; |
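Among the helpers dropped from macro-assembler-ia32.h above is JumpIfDictionaryInPrototypeChain, which walks an object's prototype chain through its maps and branches if any prototype uses dictionary-mode elements (the assembly also branches for instance types below JS_OBJECT_TYPE). A compact standalone version of the elements-kind walk (hypothetical structs in place of V8 maps):

enum class ElementsKind { kFast, kDictionary };

struct Map {
  ElementsKind elements_kind;
  const struct JSObjectLike* prototype;  // nullptr stands in for the null prototype
};

struct JSObjectLike { const Map* map; };

// Walk the prototype chain and report whether any prototype has
// dictionary-mode elements, mirroring the removed helper's loop.
inline bool HasDictionaryElementsInPrototypeChain(const JSObjectLike* object) {
  for (const JSObjectLike* current = object->map->prototype; current != nullptr;
       current = current->map->prototype) {
    if (current->map->elements_kind == ElementsKind::kDictionary) return true;
  }
  return false;
}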