author    | Ben Noordhuis <info@bnoordhuis.nl> | 2015-01-07 18:38:38 +0100
committer | Ben Noordhuis <info@bnoordhuis.nl> | 2015-01-07 22:11:18 +0100
commit    | dad73f645cde6920e79db956e7ef82ed640d7615 (patch)
tree      | 7ba3f3fc7e0722c5f130065461b7c56f571af383 /deps/v8/src/heap/mark-compact.cc
parent    | 53ba494537259b18b346dc6150d6a100c557e08f (diff)
download  | node-new-dad73f645cde6920e79db956e7ef82ed640d7615.tar.gz
deps: upgrade v8 to 3.31.74.1
PR-URL: https://github.com/iojs/io.js/pull/243
Reviewed-By: Fedor Indutny <fedor@indutny.com>
Reviewed-By: Trevor Norris <trev.norris@gmail.com>
Diffstat (limited to 'deps/v8/src/heap/mark-compact.cc')
-rw-r--r-- | deps/v8/src/heap/mark-compact.cc | 308
1 file changed, 131 insertions, 177 deletions
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index 2cefebf980..fa127db612 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -47,9 +47,11 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
       was_marked_incrementally_(false),
       sweeping_in_progress_(false),
       pending_sweeper_jobs_semaphore_(0),
-      sequential_sweeping_(false),
+      evacuation_(false),
       migration_slots_buffer_(NULL),
       heap_(heap),
+      marking_deque_memory_(NULL),
+      marking_deque_memory_committed_(false),
       code_flusher_(NULL),
       have_code_to_deoptimize_(false) {
 }
@@ -233,7 +235,10 @@ void MarkCompactCollector::SetUp() {
 }


-void MarkCompactCollector::TearDown() { AbortCompaction(); }
+void MarkCompactCollector::TearDown() {
+  AbortCompaction();
+  delete marking_deque_memory_;
+}


 void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
@@ -304,6 +309,8 @@ void MarkCompactCollector::CollectGarbage() {

   heap_->set_encountered_weak_cells(Smi::FromInt(0));

+  isolate()->global_handles()->CollectPhantomCallbackData();
+
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
     VerifyMarking(heap_);
@@ -378,7 +385,7 @@ void MarkCompactCollector::VerifyWeakEmbeddedObjectsInCode() {
   for (HeapObject* obj = code_iterator.Next(); obj != NULL;
        obj = code_iterator.Next()) {
     Code* code = Code::cast(obj);
-    if (!code->is_optimized_code() && !code->is_weak_stub()) continue;
+    if (!code->is_optimized_code()) continue;
     if (WillBeDeoptimized(code)) continue;
     code->VerifyEmbeddedObjectsDependency();
   }
@@ -441,7 +448,7 @@ class MarkCompactCollector::SweeperTask : public v8::Task {

  private:
   // v8::Task overrides.
-  virtual void Run() OVERRIDE {
+  void Run() OVERRIDE {
     heap_->mark_compact_collector()->SweepInParallel(space_, 0);
     heap_->mark_compact_collector()->pending_sweeper_jobs_semaphore_.Signal();
   }
@@ -470,12 +477,12 @@ void MarkCompactCollector::EnsureSweepingCompleted() {

   // If sweeping is not completed or not running at all, we try to complete it
   // here.
-  if (FLAG_predictable || !IsSweepingCompleted()) {
+  if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) {
     SweepInParallel(heap()->paged_space(OLD_DATA_SPACE), 0);
     SweepInParallel(heap()->paged_space(OLD_POINTER_SPACE), 0);
   }
   // Wait twice for both jobs.
-  if (!FLAG_predictable) {
+  if (FLAG_concurrent_sweeping) {
     pending_sweeper_jobs_semaphore_.Wait();
     pending_sweeper_jobs_semaphore_.Wait();
   }
@@ -487,7 +494,7 @@ void MarkCompactCollector::EnsureSweepingCompleted() {
   heap()->paged_space(OLD_POINTER_SPACE)->ResetUnsweptFreeBytes();

 #ifdef VERIFY_HEAP
-  if (FLAG_verify_heap) {
+  if (FLAG_verify_heap && !evacuation()) {
     VerifyEvacuation(heap_);
   }
 #endif
@@ -1206,7 +1213,6 @@ static inline HeapObject* ShortCircuitConsString(Object** p) {
   // except the maps for the object and its possible substrings might be
   // marked.
   HeapObject* object = HeapObject::cast(*p);
-  if (!FLAG_clever_optimizations) return object;
   Map* map = object->map();
   InstanceType type = map->instance_type();
   if (!IsShortcutCandidate(type)) return object;
@@ -1945,11 +1951,6 @@ void MarkCompactCollector::MarkAllocationSite(AllocationSite* site) {
 }


-bool MarkCompactCollector::IsMarkingDequeEmpty() {
-  return marking_deque_.IsEmpty();
-}
-
-
 void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
   // Mark the heap roots including global variables, stack variables,
   // etc., and all objects reachable from them.
@@ -2015,13 +2016,18 @@ void MarkCompactCollector::MarkWeakObjectToCodeTable() {
 // After: the marking stack is empty, and all objects reachable from the
 // marking stack have been marked, or are overflowed in the heap.
 void MarkCompactCollector::EmptyMarkingDeque() {
+  Map* filler_map = heap_->one_pointer_filler_map();
   while (!marking_deque_.IsEmpty()) {
     HeapObject* object = marking_deque_.Pop();
+    // Explicitly skip one word fillers. Incremental markbit patterns are
+    // correct only for objects that occupy at least two words.
+    Map* map = object->map();
+    if (map == filler_map) continue;
+
     DCHECK(object->IsHeapObject());
     DCHECK(heap()->Contains(object));
-    DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+    DCHECK(!Marking::IsWhite(Marking::MarkBitFrom(object)));

-    Map* map = object->map();
     MarkBit map_mark = Marking::MarkBitFrom(map);
     MarkObject(map, map_mark);

@@ -2084,13 +2090,16 @@ void MarkCompactCollector::ProcessMarkingDeque() {

 // Mark all objects reachable (transitively) from objects on the marking
 // stack including references only considered in the atomic marking pause.
-void MarkCompactCollector::ProcessEphemeralMarking(ObjectVisitor* visitor) {
+void MarkCompactCollector::ProcessEphemeralMarking(
+    ObjectVisitor* visitor, bool only_process_harmony_weak_collections) {
   bool work_to_do = true;
-  DCHECK(marking_deque_.IsEmpty());
+  DCHECK(marking_deque_.IsEmpty() && !marking_deque_.overflowed());
   while (work_to_do) {
-    isolate()->global_handles()->IterateObjectGroups(
-        visitor, &IsUnmarkedHeapObjectWithHeap);
-    MarkImplicitRefGroups();
+    if (!only_process_harmony_weak_collections) {
+      isolate()->global_handles()->IterateObjectGroups(
+          visitor, &IsUnmarkedHeapObjectWithHeap);
+      MarkImplicitRefGroups();
+    }
     ProcessWeakCollections();
     work_to_do = !marking_deque_.IsEmpty();
     ProcessMarkingDeque();
@@ -2116,6 +2125,43 @@ void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
 }


+void MarkCompactCollector::EnsureMarkingDequeIsCommittedAndInitialize() {
+  if (marking_deque_memory_ == NULL) {
+    marking_deque_memory_ = new base::VirtualMemory(4 * MB);
+  }
+  if (!marking_deque_memory_committed_) {
+    bool success = marking_deque_memory_->Commit(
+        reinterpret_cast<Address>(marking_deque_memory_->address()),
+        marking_deque_memory_->size(),
+        false);  // Not executable.
+    CHECK(success);
+    marking_deque_memory_committed_ = true;
+    InitializeMarkingDeque();
+  }
+}
+
+
+void MarkCompactCollector::InitializeMarkingDeque() {
+  if (marking_deque_memory_committed_) {
+    Address addr = static_cast<Address>(marking_deque_memory_->address());
+    size_t size = marking_deque_memory_->size();
+    if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
+    marking_deque_.Initialize(addr, addr + size);
+  }
+}
+
+
+void MarkCompactCollector::UncommitMarkingDeque() {
+  if (marking_deque_memory_committed_) {
+    bool success = marking_deque_memory_->Uncommit(
+        reinterpret_cast<Address>(marking_deque_memory_->address()),
+        marking_deque_memory_->size());
+    CHECK(success);
+    marking_deque_memory_committed_ = false;
+  }
+}
+
+
 void MarkCompactCollector::MarkLiveObjects() {
   GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK);
   double start_time = 0.0;
@@ -2127,42 +2173,21 @@ void MarkCompactCollector::MarkLiveObjects() {
   // with the C stack limit check.
   PostponeInterruptsScope postpone(isolate());

-  bool incremental_marking_overflowed = false;
   IncrementalMarking* incremental_marking = heap_->incremental_marking();
   if (was_marked_incrementally_) {
-    // Finalize the incremental marking and check whether we had an overflow.
-    // Both markers use grey color to mark overflowed objects so
-    // non-incremental marker can deal with them as if overflow
-    // occured during normal marking.
-    // But incremental marker uses a separate marking deque
-    // so we have to explicitly copy its overflow state.
     incremental_marking->Finalize();
-    incremental_marking_overflowed = incremental_marking->marking_deque()->overflowed();
-    incremental_marking->marking_deque()->ClearOverflowed();
   } else {
     // Abort any pending incremental activities e.g. incremental sweeping.
     incremental_marking->Abort();
+    InitializeMarkingDeque();
   }

 #ifdef DEBUG
   DCHECK(state_ == PREPARE_GC);
   state_ = MARK_LIVE_OBJECTS;
 #endif

-  // The to space contains live objects, a page in from space is used as a
-  // marking stack.
-  Address marking_deque_start = heap()->new_space()->FromSpacePageLow();
-  Address marking_deque_end = heap()->new_space()->FromSpacePageHigh();
-  if (FLAG_force_marking_deque_overflows) {
-    marking_deque_end = marking_deque_start + 64 * kPointerSize;
-  }
-  marking_deque_.Initialize(marking_deque_start, marking_deque_end);
-  DCHECK(!marking_deque_.overflowed());
-
-  if (incremental_marking_overflowed) {
-    // There are overflowed objects left in the heap after incremental marking.
-    marking_deque_.SetOverflowed();
-  }
+  EnsureMarkingDequeIsCommittedAndInitialize();

   PrepareForCodeFlushing();

@@ -2199,30 +2224,35 @@ void MarkCompactCollector::MarkLiveObjects() {

   ProcessTopOptimizedFrame(&root_visitor);

-  // The objects reachable from the roots are marked, yet unreachable
-  // objects are unmarked. Mark objects reachable due to host
-  // application specific logic or through Harmony weak maps.
-  ProcessEphemeralMarking(&root_visitor);
+  {
+    GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_WEAKCLOSURE);
+
+    // The objects reachable from the roots are marked, yet unreachable
+    // objects are unmarked. Mark objects reachable due to host
+    // application specific logic or through Harmony weak maps.
+    ProcessEphemeralMarking(&root_visitor, false);
+
+    // The objects reachable from the roots, weak maps or object groups
+    // are marked. Objects pointed to only by weak global handles cannot be
+    // immediately reclaimed. Instead, we have to mark them as pending and mark
+    // objects reachable from them.
+    //
+    // First we identify nonlive weak handles and mark them as pending
+    // destruction.
+    heap()->isolate()->global_handles()->IdentifyWeakHandles(
+        &IsUnmarkedHeapObject);
+    // Then we mark the objects.
+    heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
+    ProcessMarkingDeque();

-  // The objects reachable from the roots, weak maps or object groups
-  // are marked, yet unreachable objects are unmarked. Mark objects
-  // reachable only from weak global handles.
-  //
-  // First we identify nonlive weak handles and mark them as pending
-  // destruction.
-  heap()->isolate()->global_handles()->IdentifyWeakHandles(
-      &IsUnmarkedHeapObject);
-  // Then we mark the objects and process the transitive closure.
-  heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
-  while (marking_deque_.overflowed()) {
-    RefillMarkingDeque();
-    EmptyMarkingDeque();
+    // Repeat Harmony weak maps marking to mark unmarked objects reachable from
+    // the weak roots we just marked as pending destruction.
+    //
+    // We only process harmony collections, as all object groups have been fully
+    // processed and no weakly reachable node can discover new objects groups.
+    ProcessEphemeralMarking(&root_visitor, true);
   }

-  // Repeat host application specific and Harmony weak maps marking to
-  // mark unmarked objects reachable from the weak roots.
-  ProcessEphemeralMarking(&root_visitor);
-
   AfterMarking();

   if (FLAG_print_cumulative_gc_stat) {
@@ -2232,12 +2262,6 @@ void MarkCompactCollector::MarkLiveObjects() {


 void MarkCompactCollector::AfterMarking() {
-  // Object literal map caches reference strings (cache keys) and maps
-  // (cache values). At this point still useful maps have already been
-  // marked. Mark the keys for the alive values before we process the
-  // string table.
-  ProcessMapCaches();
-
   // Prune the string table removing all strings only pointed to by the
   // string table. Cannot use string_table() here because the string
   // table is marked.
@@ -2274,57 +2298,6 @@ void MarkCompactCollector::AfterMarking() {
 }


-void MarkCompactCollector::ProcessMapCaches() {
-  Object* raw_context = heap()->native_contexts_list();
-  while (raw_context != heap()->undefined_value()) {
-    Context* context = reinterpret_cast<Context*>(raw_context);
-    if (IsMarked(context)) {
-      HeapObject* raw_map_cache =
-          HeapObject::cast(context->get(Context::MAP_CACHE_INDEX));
-      // A map cache may be reachable from the stack. In this case
-      // it's already transitively marked and it's too late to clean
-      // up its parts.
-      if (!IsMarked(raw_map_cache) &&
-          raw_map_cache != heap()->undefined_value()) {
-        MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache);
-        int existing_elements = map_cache->NumberOfElements();
-        int used_elements = 0;
-        for (int i = MapCache::kElementsStartIndex; i < map_cache->length();
-             i += MapCache::kEntrySize) {
-          Object* raw_key = map_cache->get(i);
-          if (raw_key == heap()->undefined_value() ||
-              raw_key == heap()->the_hole_value())
-            continue;
-          STATIC_ASSERT(MapCache::kEntrySize == 2);
-          Object* raw_map = map_cache->get(i + 1);
-          if (raw_map->IsHeapObject() && IsMarked(raw_map)) {
-            ++used_elements;
-          } else {
-            // Delete useless entries with unmarked maps.
-            DCHECK(raw_map->IsMap());
-            map_cache->set_the_hole(i);
-            map_cache->set_the_hole(i + 1);
-          }
-        }
-        if (used_elements == 0) {
-          context->set(Context::MAP_CACHE_INDEX, heap()->undefined_value());
-        } else {
-          // Note: we don't actually shrink the cache here to avoid
-          // extra complexity during GC. We rely on subsequent cache
-          // usages (EnsureCapacity) to do this.
-          map_cache->ElementsRemoved(existing_elements - used_elements);
-          MarkBit map_cache_markbit = Marking::MarkBitFrom(map_cache);
-          MarkObject(map_cache, map_cache_markbit);
-        }
-      }
-    }
-    // Move to next element in the list.
-    raw_context = context->get(Context::NEXT_CONTEXT_LINK);
-  }
-  ProcessMarkingDeque();
-}
-
-
 void MarkCompactCollector::ClearNonLiveReferences() {
   // Iterate over the map space, setting map transitions that go from
   // a marked map to an unmarked map to null transitions.
@@ -2572,34 +2545,12 @@ void MarkCompactCollector::TrimEnumCache(Map* map,
 }


-void MarkCompactCollector::ClearDependentICList(Object* head) {
-  Object* current = head;
-  Object* undefined = heap()->undefined_value();
-  while (current != undefined) {
-    Code* code = Code::cast(current);
-    if (IsMarked(code)) {
-      DCHECK(code->is_weak_stub());
-      IC::InvalidateMaps(code);
-    }
-    current = code->next_code_link();
-    code->set_next_code_link(undefined);
-  }
-}
-
-
 void MarkCompactCollector::ClearDependentCode(DependentCode* entries) {
   DisallowHeapAllocation no_allocation;
   DependentCode::GroupStartIndexes starts(entries);
   int number_of_entries = starts.number_of_entries();
   if (number_of_entries == 0) return;
-  int g = DependentCode::kWeakICGroup;
-  if (starts.at(g) != starts.at(g + 1)) {
-    int i = starts.at(g);
-    DCHECK(i + 1 == starts.at(g + 1));
-    Object* head = entries->object_at(i);
-    ClearDependentICList(head);
-  }
-  g = DependentCode::kWeakCodeGroup;
+  int g = DependentCode::kWeakCodeGroup;
   for (int i = starts.at(g); i < starts.at(g + 1); i++) {
     // If the entry is compilation info then the map must be alive,
     // and ClearDependentCode shouldn't be called.
@@ -2621,34 +2572,17 @@ void MarkCompactCollector::ClearDependentCode(DependentCode* entries) {
 int MarkCompactCollector::ClearNonLiveDependentCodeInGroup(
     DependentCode* entries, int group, int start, int end, int new_start) {
   int survived = 0;
-  if (group == DependentCode::kWeakICGroup) {
-    // Dependent weak IC stubs form a linked list and only the head is stored
-    // in the dependent code array.
-    if (start != end) {
-      DCHECK(start + 1 == end);
-      Object* old_head = entries->object_at(start);
-      MarkCompactWeakObjectRetainer retainer;
-      Object* head = VisitWeakList<Code>(heap(), old_head, &retainer);
-      entries->set_object_at(new_start, head);
-      Object** slot = entries->slot_at(new_start);
-      RecordSlot(slot, slot, head);
-      // We do not compact this group even if the head is undefined,
-      // more dependent ICs are likely to be added later.
-      survived = 1;
-    }
-  } else {
-    for (int i = start; i < end; i++) {
-      Object* obj = entries->object_at(i);
-      DCHECK(obj->IsCode() || IsMarked(obj));
-      if (IsMarked(obj) &&
-          (!obj->IsCode() || !WillBeDeoptimized(Code::cast(obj)))) {
-        if (new_start + survived != i) {
-          entries->set_object_at(new_start + survived, obj);
-        }
-        Object** slot = entries->slot_at(new_start + survived);
-        RecordSlot(slot, slot, obj);
-        survived++;
+  for (int i = start; i < end; i++) {
+    Object* obj = entries->object_at(i);
+    DCHECK(obj->IsCode() || IsMarked(obj));
+    if (IsMarked(obj) &&
+        (!obj->IsCode() || !WillBeDeoptimized(Code::cast(obj)))) {
+      if (new_start + survived != i) {
+        entries->set_object_at(new_start + survived, obj);
       }
+      Object** slot = entries->slot_at(new_start + survived);
+      RecordSlot(slot, slot, obj);
+      survived++;
     }
   }
   entries->set_number_of_entries(
@@ -2810,12 +2744,24 @@ void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src,
     Address dst_slot = dst_addr;
     DCHECK(IsAligned(size, kPointerSize));

+    bool may_contain_raw_values = src->MayContainRawValues();
+#if V8_DOUBLE_FIELDS_UNBOXING
+    LayoutDescriptorHelper helper(src->map());
+    bool has_only_tagged_fields = helper.all_fields_tagged();
+#endif
     for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
       Object* value = Memory::Object_at(src_slot);

       Memory::Object_at(dst_slot) = value;

-      if (!src->MayContainRawValues()) {
+#if V8_DOUBLE_FIELDS_UNBOXING
+      if (!may_contain_raw_values &&
+          (has_only_tagged_fields ||
+           helper.IsTagged(static_cast<int>(src_slot - src_addr))))
+#else
+      if (!may_contain_raw_values)
+#endif
+      {
         RecordMigratedSlot(value, dst_slot);
       }

@@ -2928,7 +2874,8 @@ class PointersUpdatingVisitor : public ObjectVisitor {
   }

   static inline void UpdateSlot(Heap* heap, Object** slot) {
-    Object* obj = *slot;
+    Object* obj = reinterpret_cast<Object*>(
+        base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));

     if (!obj->IsHeapObject()) return;

@@ -2939,7 +2886,10 @@ class PointersUpdatingVisitor : public ObjectVisitor {
       DCHECK(heap->InFromSpace(heap_obj) ||
             MarkCompactCollector::IsOnEvacuationCandidate(heap_obj));
       HeapObject* target = map_word.ToForwardingAddress();
-      *slot = target;
+      base::NoBarrier_CompareAndSwap(
+          reinterpret_cast<base::AtomicWord*>(slot),
+          reinterpret_cast<base::AtomicWord>(obj),
+          reinterpret_cast<base::AtomicWord>(target));
       DCHECK(!heap->InFromSpace(target) &&
             !MarkCompactCollector::IsOnEvacuationCandidate(target));
     }
@@ -3256,7 +3206,7 @@ static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
     }
     HeapObject* live_object = HeapObject::FromAddress(free_end);
     DCHECK(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
-    Map* map = live_object->map();
+    Map* map = live_object->synchronized_map();
     int size = live_object->SizeFromMap(map);
     if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
       live_object->IterateBody(map->instance_type(), size, v);
@@ -3436,6 +3386,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
   {
     GCTracer::Scope gc_scope(heap()->tracer(),
                              GCTracer::Scope::MC_EVACUATE_PAGES);
+    EvacuationScope evacuation_scope(this);
     EvacuatePages();
   }

@@ -4182,12 +4133,11 @@ void MarkCompactCollector::SweepSpaces() {
     GCTracer::Scope sweep_scope(heap()->tracer(),
                                 GCTracer::Scope::MC_SWEEP_OLDSPACE);
     {
-      SequentialSweepingScope scope(this);
       SweepSpace(heap()->old_pointer_space(), CONCURRENT_SWEEPING);
       SweepSpace(heap()->old_data_space(), CONCURRENT_SWEEPING);
     }
     sweeping_in_progress_ = true;
-    if (!FLAG_predictable) {
+    if (FLAG_concurrent_sweeping) {
       StartSweeperThreads();
     }
   }
@@ -4222,6 +4172,10 @@ void MarkCompactCollector::SweepSpaces() {

   // Deallocate evacuated candidate pages.
   ReleaseEvacuationCandidates();
+  CodeRange* code_range = heap()->isolate()->code_range();
+  if (code_range != NULL && code_range->valid()) {
+    code_range->ReserveEmergencyBlock();
+  }

   if (FLAG_print_cumulative_gc_stat) {
     heap_->tracer()->AddSweepingTime(base::OS::TimeCurrentMillis() -
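
Note on the PointersUpdatingVisitor::UpdateSlot hunk above: the plain load and store are replaced with base::NoBarrier_Load and base::NoBarrier_CompareAndSwap so that parallel updaters cannot clobber a forwarding address another thread has already installed. A minimal standalone sketch of that pattern in portable C++, using std::atomic in place of V8's base atomics (Slot and UpdateForwardingSlot are illustrative names, not V8's API):

#include <atomic>
#include <cstdint>

// A slot holds one pointer-sized word.
using Slot = std::atomic<std::uintptr_t>;

// Install the forwarding address only if the slot still holds the value we
// read earlier; if another thread has already updated it, leave it alone.
inline void UpdateForwardingSlot(Slot* slot, std::uintptr_t old_value,
                                 std::uintptr_t forwarded) {
  std::uintptr_t expected = old_value;
  slot->compare_exchange_strong(expected, forwarded,
                                std::memory_order_relaxed);
  // No retry loop: a competing writer installs the same forwarding address,
  // so losing the race is harmless.
}

Either the compare-and-swap succeeds or another updater has already written an equivalent forwarding address; both outcomes leave the slot pointing at the relocated object.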
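Similarly, the new evacuation_ field, the EvacuationScope added around EvacuatePages(), and the FLAG_verify_heap && !evacuation() guard follow a common RAII scope pattern: a flag on the collector is raised for the duration of evacuation and dropped automatically when the scope exits, even on early return. A rough sketch under the assumption that the scope only toggles a boolean (the class and member names below are illustrative, not V8's declarations):

// Collector carries a flag that is true exactly while evacuation runs.
class Collector {
 public:
  bool evacuation() const { return evacuation_; }
  void set_evacuation(bool value) { evacuation_ = value; }

 private:
  bool evacuation_ = false;
};

// RAII guard: the constructor raises the flag, the destructor clears it.
class EvacuationScope {
 public:
  explicit EvacuationScope(Collector* collector) : collector_(collector) {
    collector_->set_evacuation(true);
  }
  ~EvacuationScope() { collector_->set_evacuation(false); }

 private:
  Collector* const collector_;
};

void EvacuatePages(Collector* collector) {
  EvacuationScope scope(collector);  // evacuation() is true in this block.
  // ... move live objects off evacuation candidate pages ...
}

Verification code can then skip heap checks while evacuation() is true, which is what the FLAG_verify_heap && !evacuation() change in EnsureSweepingCompleted relies on.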