Diffstat (limited to 'deps/v8/src/heap/mark-compact.cc')
-rw-r--r--  deps/v8/src/heap/mark-compact.cc  969
1 file changed, 417 insertions, 552 deletions
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index 5b29c2175b..9ca06cf2e6 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -52,7 +52,7 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
migration_slots_buffer_(NULL),
heap_(heap),
marking_deque_memory_(NULL),
- marking_deque_memory_committed_(false),
+ marking_deque_memory_committed_(0),
code_flusher_(NULL),
have_code_to_deoptimize_(false) {
}
@@ -226,7 +226,8 @@ static void VerifyEvacuation(Heap* heap) {
void MarkCompactCollector::SetUp() {
free_list_old_space_.Reset(new FreeList(heap_->old_space()));
- EnsureMarkingDequeIsCommittedAndInitialize(256 * KB);
+ EnsureMarkingDequeIsReserved();
+ EnsureMarkingDequeIsCommitted(kMinMarkingDequeSize);
}
@@ -259,8 +260,7 @@ bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
CollectEvacuationCandidates(heap()->old_space());
- if (FLAG_compact_code_space && (mode == NON_INCREMENTAL_COMPACTION ||
- FLAG_incremental_code_compaction)) {
+ if (FLAG_compact_code_space) {
CollectEvacuationCandidates(heap()->code_space());
} else if (FLAG_trace_fragmentation) {
TraceFragmentation(heap()->code_space());
@@ -336,6 +336,7 @@ void MarkCompactCollector::CollectGarbage() {
DCHECK(state_ == PREPARE_GC);
MarkLiveObjects();
+
DCHECK(heap_->incremental_marking()->IsStopped());
// ClearNonLiveReferences can deoptimize code in dependent code arrays.
@@ -343,7 +344,7 @@ void MarkCompactCollector::CollectGarbage() {
// arrays are cleared or contain only live code objects.
ProcessAndClearWeakCells();
- if (FLAG_collect_maps) ClearNonLiveReferences();
+ ClearNonLiveReferences();
ClearWeakCollections();
@@ -367,7 +368,7 @@ void MarkCompactCollector::CollectGarbage() {
#ifdef VERIFY_HEAP
VerifyWeakEmbeddedObjectsInCode();
- if (FLAG_collect_maps && FLAG_omit_map_checks_for_leaf_maps) {
+ if (FLAG_omit_map_checks_for_leaf_maps) {
VerifyOmittedMapChecks();
}
#endif
@@ -556,34 +557,6 @@ void MarkCompactCollector::RefillFreeList(PagedSpace* space) {
}
-void Marking::SetAllMarkBitsInRange(MarkBit start, MarkBit end) {
- MarkBit::CellType* start_cell = start.cell();
- MarkBit::CellType* end_cell = end.cell();
- MarkBit::CellType start_mask = ~(start.mask() - 1);
- MarkBit::CellType end_mask = (end.mask() << 1) - 1;
-
- if (start_cell == end_cell) {
- *start_cell |= start_mask & end_mask;
- } else {
- *start_cell |= start_mask;
- for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) {
- *cell = ~0;
- }
- *end_cell |= end_mask;
- }
-}
-
-
-void Marking::ClearAllMarkBitsOfCellsContainedInRange(MarkBit start,
- MarkBit end) {
- MarkBit::CellType* start_cell = start.cell();
- MarkBit::CellType* end_cell = end.cell();
- for (MarkBit::CellType* cell = start_cell; cell <= end_cell; cell++) {
- *cell = 0;
- }
-}
-
-
void Marking::TransferMark(Address old_start, Address new_start) {
// This is only used when resizing an object.
DCHECK(MemoryChunk::FromAddress(old_start) ==
@@ -641,136 +614,15 @@ const char* AllocationSpaceName(AllocationSpace space) {
}
-// Returns zero for pages that have so little fragmentation that it is not
-// worth defragmenting them. Otherwise a positive integer that gives an
-// estimate of fragmentation on an arbitrary scale.
-static int FreeListFragmentation(PagedSpace* space, Page* p) {
- // If page was not swept then there are no free list items on it.
- if (!p->WasSwept()) {
- if (FLAG_trace_fragmentation_verbose) {
- PrintF("%p [%s]: %d bytes live (unswept)\n", reinterpret_cast<void*>(p),
- AllocationSpaceName(space->identity()), p->LiveBytes());
- }
- return FLAG_always_compact ? 1 : 0;
- }
-
- PagedSpace::SizeStats sizes;
- space->ObtainFreeListStatistics(p, &sizes);
-
- intptr_t ratio;
- intptr_t ratio_threshold;
- intptr_t area_size = space->AreaSize();
- if (space->identity() == CODE_SPACE) {
- ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 / area_size;
- ratio_threshold = 10;
- } else {
- ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 / area_size;
- ratio_threshold = 15;
- }
-
- if (FLAG_trace_fragmentation_verbose) {
- PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
- reinterpret_cast<void*>(p), AllocationSpaceName(space->identity()),
- static_cast<int>(sizes.small_size_),
- static_cast<double>(sizes.small_size_ * 100) / area_size,
- static_cast<int>(sizes.medium_size_),
- static_cast<double>(sizes.medium_size_ * 100) / area_size,
- static_cast<int>(sizes.large_size_),
- static_cast<double>(sizes.large_size_ * 100) / area_size,
- static_cast<int>(sizes.huge_size_),
- static_cast<double>(sizes.huge_size_ * 100) / area_size,
- (ratio > ratio_threshold) ? "[fragmented]" : "");
- }
-
- if (FLAG_always_compact && sizes.Total() != area_size) {
- return 1;
- }
-
- if (ratio <= ratio_threshold) return 0; // Not fragmented.
-
- return static_cast<int>(ratio - ratio_threshold);
-}
-
-
void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE);
- static const int kMaxMaxEvacuationCandidates = 1000;
int number_of_pages = space->CountTotalPages();
- int max_evacuation_candidates =
- static_cast<int>(std::sqrt(number_of_pages / 2.0) + 1);
-
- if (FLAG_stress_compaction || FLAG_always_compact) {
- max_evacuation_candidates = kMaxMaxEvacuationCandidates;
- }
-
- class Candidate {
- public:
- Candidate() : fragmentation_(0), page_(NULL) {}
- Candidate(int f, Page* p) : fragmentation_(f), page_(p) {}
-
- int fragmentation() { return fragmentation_; }
- Page* page() { return page_; }
-
- private:
- int fragmentation_;
- Page* page_;
- };
-
- enum CompactionMode { COMPACT_FREE_LISTS, REDUCE_MEMORY_FOOTPRINT };
+ int area_size = space->AreaSize();
- CompactionMode mode = COMPACT_FREE_LISTS;
-
- intptr_t reserved = number_of_pages * space->AreaSize();
- intptr_t over_reserved = reserved - space->SizeOfObjects();
- static const intptr_t kFreenessThreshold = 50;
-
- if (reduce_memory_footprint_ && over_reserved >= space->AreaSize()) {
- // If reduction of memory footprint was requested, we are aggressive
- // about choosing pages to free. We expect that half-empty pages
- // are easier to compact so slightly bump the limit.
- mode = REDUCE_MEMORY_FOOTPRINT;
- max_evacuation_candidates += 2;
- }
-
-
- if (over_reserved > reserved / 3 && over_reserved >= 2 * space->AreaSize()) {
- // If over-usage is very high (more than a third of the space), we
- // try to free all mostly empty pages. We expect that almost empty
- // pages are even easier to compact so bump the limit even more.
- mode = REDUCE_MEMORY_FOOTPRINT;
- max_evacuation_candidates *= 2;
- }
-
- if (FLAG_always_compact) {
- max_evacuation_candidates = kMaxMaxEvacuationCandidates;
- }
-
- if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) {
- PrintF(
- "Estimated over reserved memory: %.1f / %.1f MB (threshold %d), "
- "evacuation candidate limit: %d\n",
- static_cast<double>(over_reserved) / MB,
- static_cast<double>(reserved) / MB,
- static_cast<int>(kFreenessThreshold), max_evacuation_candidates);
- }
-
- intptr_t estimated_release = 0;
-
- if (FLAG_trace_fragmentation &&
- max_evacuation_candidates >= kMaxMaxEvacuationCandidates) {
- PrintF("Hit max page compaction limit of %d pages\n",
- kMaxMaxEvacuationCandidates);
- }
- max_evacuation_candidates =
- Min(kMaxMaxEvacuationCandidates, max_evacuation_candidates);
-
- std::vector<Candidate> candidates(max_evacuation_candidates);
-
- int count = 0;
- int fragmentation = 0;
- int page_number = 0;
- int least_index = -1;
+ // Pairs of (live_bytes_in_page, page).
+ std::vector<std::pair<int, Page*> > pages;
+ pages.reserve(number_of_pages);
PageIterator it(space);
while (it.has_next()) {
@@ -781,88 +633,107 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
p->ClearFlag(Page::POPULAR_PAGE);
continue;
}
-
// Invariant: Evacuation candidates are just created when marking is
// started. At the end of a GC all evacuation candidates are cleared and
// their slot buffers are released.
CHECK(!p->IsEvacuationCandidate());
CHECK(p->slots_buffer() == NULL);
-
- if (FLAG_stress_compaction) {
- if (FLAG_manual_evacuation_candidates_selection) {
- if (p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING)) {
- p->ClearFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
- fragmentation = 1;
- }
- } else {
- unsigned int counter = space->heap()->ms_count();
- if ((counter & 1) == (page_number & 1)) fragmentation = 1;
- page_number++;
+ DCHECK(p->area_size() == area_size);
+ int live_bytes =
+ p->WasSwept() ? p->LiveBytesFromFreeList() : p->LiveBytes();
+ pages.push_back(std::make_pair(live_bytes, p));
+ }
+
+ int candidate_count = 0;
+ int total_live_bytes = 0;
+
+ bool reduce_memory =
+ reduce_memory_footprint_ || heap()->HasLowAllocationRate();
+ if (FLAG_manual_evacuation_candidates_selection) {
+ for (size_t i = 0; i < pages.size(); i++) {
+ Page* p = pages[i].second;
+ if (p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING)) {
+ candidate_count++;
+ total_live_bytes += pages[i].first;
+ p->ClearFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
+ AddEvacuationCandidate(p);
}
- } else if (mode == REDUCE_MEMORY_FOOTPRINT && !FLAG_always_compact) {
- // Don't try to release too many pages.
- if (estimated_release >= over_reserved) {
- continue;
+ }
+ } else if (FLAG_stress_compaction) {
+ for (size_t i = 0; i < pages.size(); i++) {
+ Page* p = pages[i].second;
+ if (i % 2 == 0) {
+ candidate_count++;
+ total_live_bytes += pages[i].first;
+ AddEvacuationCandidate(p);
}
+ }
+ } else {
+ const int kTargetFragmentationPercent = 50;
+ const int kMaxEvacuatedBytes = 4 * Page::kPageSize;
- intptr_t free_bytes = 0;
-
- if (!p->WasSwept()) {
- free_bytes = (p->area_size() - p->LiveBytes());
- } else {
- PagedSpace::SizeStats sizes;
- space->ObtainFreeListStatistics(p, &sizes);
- free_bytes = sizes.Total();
- }
+ const int kTargetFragmentationPercentForReduceMemory = 20;
+ const int kMaxEvacuatedBytesForReduceMemory = 12 * Page::kPageSize;
- int free_pct = static_cast<int>(free_bytes * 100) / p->area_size();
+ int max_evacuated_bytes;
+ int target_fragmentation_percent;
- if (free_pct >= kFreenessThreshold) {
- estimated_release += free_bytes;
- fragmentation = free_pct;
- } else {
- fragmentation = 0;
+ if (reduce_memory) {
+ target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
+ max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory;
+ } else {
+ target_fragmentation_percent = kTargetFragmentationPercent;
+ max_evacuated_bytes = kMaxEvacuatedBytes;
+ }
+ intptr_t free_bytes_threshold =
+ target_fragmentation_percent * (area_size / 100);
+
+ // Sort pages from the most free to the least free, then select
+ // the first n pages for evacuation such that:
+ // - the total size of evacuated objects does not exceed the specified
+ // limit.
+ // - fragmentation of (n+1)-th page does not exceed the specified limit.
+ std::sort(pages.begin(), pages.end());
+ for (size_t i = 0; i < pages.size(); i++) {
+ int live_bytes = pages[i].first;
+ int free_bytes = area_size - live_bytes;
+ if (FLAG_always_compact ||
+ (free_bytes >= free_bytes_threshold &&
+ total_live_bytes + live_bytes <= max_evacuated_bytes)) {
+ candidate_count++;
+ total_live_bytes += live_bytes;
}
-
if (FLAG_trace_fragmentation_verbose) {
- PrintF("%p [%s]: %d (%.2f%%) free %s\n", reinterpret_cast<void*>(p),
- AllocationSpaceName(space->identity()),
- static_cast<int>(free_bytes),
- static_cast<double>(free_bytes * 100) / p->area_size(),
- (fragmentation > 0) ? "[fragmented]" : "");
+ PrintF(
+ "Page in %s: %d KB free [fragmented if this >= %d KB], "
+ "sum of live bytes in fragmented pages %d KB [max is %d KB]\n",
+ AllocationSpaceName(space->identity()),
+ static_cast<int>(free_bytes / KB),
+ static_cast<int>(free_bytes_threshold / KB),
+ static_cast<int>(total_live_bytes / KB),
+ static_cast<int>(max_evacuated_bytes / KB));
}
- } else {
- fragmentation = FreeListFragmentation(space, p);
}
-
- if (fragmentation != 0) {
- if (count < max_evacuation_candidates) {
- candidates[count++] = Candidate(fragmentation, p);
- } else {
- if (least_index == -1) {
- for (int i = 0; i < max_evacuation_candidates; i++) {
- if (least_index == -1 ||
- candidates[i].fragmentation() <
- candidates[least_index].fragmentation()) {
- least_index = i;
- }
- }
- }
- if (candidates[least_index].fragmentation() < fragmentation) {
- candidates[least_index] = Candidate(fragmentation, p);
- least_index = -1;
- }
- }
+ // How many pages we will allocate for the evacuated objects
+ // in the worst case: ceil(total_live_bytes / area_size)
+ int estimated_new_pages = (total_live_bytes + area_size - 1) / area_size;
+ DCHECK_LE(estimated_new_pages, candidate_count);
+ int estimated_released_pages = candidate_count - estimated_new_pages;
+ // Avoid (compact -> expand) cycles.
+ if (estimated_released_pages == 0 && !FLAG_always_compact)
+ candidate_count = 0;
+ for (int i = 0; i < candidate_count; i++) {
+ AddEvacuationCandidate(pages[i].second);
}
}
- for (int i = 0; i < count; i++) {
- AddEvacuationCandidate(candidates[i].page());
- }
-
- if (count > 0 && FLAG_trace_fragmentation) {
- PrintF("Collected %d evacuation candidates for space %s\n", count,
- AllocationSpaceName(space->identity()));
+ if (FLAG_trace_fragmentation) {
+ PrintF(
+ "Collected %d evacuation candidates [%d KB live] for space %s "
+ "[mode %s]\n",
+ candidate_count, static_cast<int>(total_live_bytes / KB),
+ AllocationSpaceName(space->identity()),
+ (reduce_memory ? "reduce memory footprint" : "normal"));
}
}
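
// A standalone sketch (not part of the patch) of the candidate-selection
// heuristic introduced above: pages are sorted by live bytes, the most-free
// pages are taken while they stay above a free-space threshold and under a
// total evacuated-bytes budget, and compaction is skipped when no page would
// actually be released. Plain ints stand in for V8's Page/space types; the
// constants mirror the ones in the hunk.
#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <utility>
#include <vector>

int SelectEvacuationCandidates(std::vector<std::pair<int, int> >* pages,
                               int area_size, bool reduce_memory) {
  const int target_fragmentation_percent = reduce_memory ? 20 : 50;
  const int max_evacuated_bytes = (reduce_memory ? 12 : 4) * area_size;
  const int free_bytes_threshold =
      target_fragmentation_percent * (area_size / 100);

  // Most-free pages first, i.e. fewest live bytes first.
  std::sort(pages->begin(), pages->end());

  int candidate_count = 0;
  int total_live_bytes = 0;
  for (size_t i = 0; i < pages->size(); i++) {
    int live_bytes = (*pages)[i].first;
    int free_bytes = area_size - live_bytes;
    if (free_bytes >= free_bytes_threshold &&
        total_live_bytes + live_bytes <= max_evacuated_bytes) {
      candidate_count++;
      total_live_bytes += live_bytes;
    }
  }
  // Worst-case pages needed for the evacuated objects:
  // ceil(total_live_bytes / area_size).
  int estimated_new_pages = (total_live_bytes + area_size - 1) / area_size;
  int estimated_released_pages = candidate_count - estimated_new_pages;
  // Avoid (compact -> expand) cycles: bail out if nothing would be released.
  if (estimated_released_pages == 0) candidate_count = 0;
  return candidate_count;
}

int main() {
  // (live_bytes, page_id) pairs for a 1 MB page area.
  std::vector<std::pair<int, int> > pages = {
      {900 * 1024, 0}, {100 * 1024, 1}, {300 * 1024, 2}, {50 * 1024, 3}};
  printf("candidates: %d\n",
         SelectEvacuationCandidates(&pages, 1024 * 1024, false));
  return 0;
}
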
@@ -878,7 +749,6 @@ void MarkCompactCollector::AbortCompaction() {
}
compacting_ = false;
evacuation_candidates_.Rewind(0);
- invalidated_code_.Rewind(0);
}
DCHECK_EQ(0, evacuation_candidates_.length());
}
@@ -998,6 +868,10 @@ void CodeFlusher::ProcessJSFunctionCandidates() {
shared->ShortPrint();
PrintF(" - age: %d]\n", code->GetAge());
}
+ // Always flush the optimized code map if there is one.
+ if (!shared->optimized_code_map()->IsSmi()) {
+ shared->ClearOptimizedCodeMap();
+ }
shared->set_code(lazy_compile);
candidate->set_code(lazy_compile);
} else {
@@ -1041,6 +915,10 @@ void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
candidate->ShortPrint();
PrintF(" - age: %d]\n", code->GetAge());
}
+ // Always flush the optimized code map if there is one.
+ if (!candidate->optimized_code_map()->IsSmi()) {
+ candidate->ClearOptimizedCodeMap();
+ }
candidate->set_code(lazy_compile);
}
@@ -1066,29 +944,60 @@ void CodeFlusher::ProcessOptimizedCodeMaps() {
next_holder = GetNextCodeMap(holder);
ClearNextCodeMap(holder);
+ // Process context-dependent entries in the optimized code map.
FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
int new_length = SharedFunctionInfo::kEntriesStart;
int old_length = code_map->length();
for (int i = SharedFunctionInfo::kEntriesStart; i < old_length;
i += SharedFunctionInfo::kEntryLength) {
+ // Each entry contains [ context, code, literals, ast-id ] as fields.
+ STATIC_ASSERT(SharedFunctionInfo::kEntryLength == 4);
+ Context* context =
+ Context::cast(code_map->get(i + SharedFunctionInfo::kContextOffset));
Code* code =
Code::cast(code_map->get(i + SharedFunctionInfo::kCachedCodeOffset));
+ FixedArray* literals = FixedArray::cast(
+ code_map->get(i + SharedFunctionInfo::kLiteralsOffset));
+ Smi* ast_id =
+ Smi::cast(code_map->get(i + SharedFunctionInfo::kOsrAstIdOffset));
+ if (Marking::IsWhite(Marking::MarkBitFrom(context))) continue;
+ DCHECK(Marking::IsBlack(Marking::MarkBitFrom(context)));
if (Marking::IsWhite(Marking::MarkBitFrom(code))) continue;
DCHECK(Marking::IsBlack(Marking::MarkBitFrom(code)));
- // Move every slot in the entry.
- for (int j = 0; j < SharedFunctionInfo::kEntryLength; j++) {
- int dst_index = new_length++;
- Object** slot = code_map->RawFieldOfElementAt(dst_index);
- Object* object = code_map->get(i + j);
- code_map->set(dst_index, object);
- if (j == SharedFunctionInfo::kOsrAstIdOffset) {
- DCHECK(object->IsSmi());
- } else {
- DCHECK(
- Marking::IsBlack(Marking::MarkBitFrom(HeapObject::cast(*slot))));
- isolate_->heap()->mark_compact_collector()->RecordSlot(slot, slot,
- *slot);
- }
+ if (Marking::IsWhite(Marking::MarkBitFrom(literals))) continue;
+ DCHECK(Marking::IsBlack(Marking::MarkBitFrom(literals)));
+ // Move every slot in the entry and record slots when needed.
+ code_map->set(new_length + SharedFunctionInfo::kCachedCodeOffset, code);
+ code_map->set(new_length + SharedFunctionInfo::kContextOffset, context);
+ code_map->set(new_length + SharedFunctionInfo::kLiteralsOffset, literals);
+ code_map->set(new_length + SharedFunctionInfo::kOsrAstIdOffset, ast_id);
+ Object** code_slot = code_map->RawFieldOfElementAt(
+ new_length + SharedFunctionInfo::kCachedCodeOffset);
+ isolate_->heap()->mark_compact_collector()->RecordSlot(
+ code_slot, code_slot, *code_slot);
+ Object** context_slot = code_map->RawFieldOfElementAt(
+ new_length + SharedFunctionInfo::kContextOffset);
+ isolate_->heap()->mark_compact_collector()->RecordSlot(
+ context_slot, context_slot, *context_slot);
+ Object** literals_slot = code_map->RawFieldOfElementAt(
+ new_length + SharedFunctionInfo::kLiteralsOffset);
+ isolate_->heap()->mark_compact_collector()->RecordSlot(
+ literals_slot, literals_slot, *literals_slot);
+ new_length += SharedFunctionInfo::kEntryLength;
+ }
+
+ // Process context-independent entry in the optimized code map.
+ Object* shared_object = code_map->get(SharedFunctionInfo::kSharedCodeIndex);
+ if (shared_object->IsCode()) {
+ Code* shared_code = Code::cast(shared_object);
+ if (Marking::IsWhite(Marking::MarkBitFrom(shared_code))) {
+ code_map->set_undefined(SharedFunctionInfo::kSharedCodeIndex);
+ } else {
+ DCHECK(Marking::IsBlack(Marking::MarkBitFrom(shared_code)));
+ Object** slot =
+ code_map->RawFieldOfElementAt(SharedFunctionInfo::kSharedCodeIndex);
+ isolate_->heap()->mark_compact_collector()->RecordSlot(slot, slot,
+ *slot);
}
}
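
// A standalone sketch (not part of the patch) of the entry-compaction pattern
// used in ProcessOptimizedCodeMaps above: a flat array holds fixed-width
// records, and live records are slid down in place while dead ones are
// dropped. Plain ints stand in for the heap objects, and the liveness test is
// a stand-in for the mark-bit checks on context, code and literals.
#include <cstddef>
#include <cstdio>
#include <vector>

static const int kEntryLength = 4;  // [context, code, literals, ast-id]

// Returns the new logical length after compaction.
size_t CompactEntries(std::vector<int>* code_map, bool (*is_live)(int)) {
  size_t new_length = 0;
  for (size_t i = 0; i + kEntryLength <= code_map->size(); i += kEntryLength) {
    // An entry survives only if all of its object fields are live.
    bool live = is_live((*code_map)[i]) && is_live((*code_map)[i + 1]) &&
                is_live((*code_map)[i + 2]);
    if (!live) continue;
    for (int j = 0; j < kEntryLength; j++) {
      (*code_map)[new_length + j] = (*code_map)[i + j];
    }
    new_length += kEntryLength;
  }
  code_map->resize(new_length);  // Stand-in for trimming the code map.
  return new_length;
}

static bool IsEven(int value) { return (value & 1) == 0; }

int main() {
  // Two live entries around one dead entry (its first field is odd).
  std::vector<int> code_map = {2, 4, 6, 8, 1, 4, 6, 8, 10, 12, 14, 16};
  printf("entries kept: %d\n",
         static_cast<int>(CompactEntries(&code_map, IsEven) / kEntryLength));
  return 0;
}
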
@@ -1175,11 +1084,12 @@ void CodeFlusher::EvictCandidate(JSFunction* function) {
void CodeFlusher::EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
- DCHECK(!FixedArray::cast(code_map_holder->optimized_code_map())
- ->get(SharedFunctionInfo::kNextMapIndex)
- ->IsUndefined());
+ FixedArray* code_map =
+ FixedArray::cast(code_map_holder->optimized_code_map());
+ DCHECK(!code_map->get(SharedFunctionInfo::kNextMapIndex)->IsUndefined());
// Make sure previous flushing decisions are revisited.
+ isolate_->heap()->incremental_marking()->RecordWrites(code_map);
isolate_->heap()->incremental_marking()->RecordWrites(code_map_holder);
if (FLAG_trace_code_flushing) {
@@ -1690,11 +1600,6 @@ void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
void MarkCompactCollector::PrepareForCodeFlushing() {
- // Enable code flushing for non-incremental cycles.
- if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
- EnableCodeFlushing(!was_marked_incrementally_);
- }
-
// If code flushing is disabled, there is no need to prepare for it.
if (!is_code_flushing_enabled()) return;
@@ -1941,16 +1846,8 @@ int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage(
continue;
}
- AllocationResult allocation;
-#ifndef V8_HOST_ARCH_64_BIT
- if (object->NeedsToEnsureDoubleAlignment()) {
- allocation = new_space->AllocateRawDoubleAligned(size);
- } else {
- allocation = new_space->AllocateRaw(size);
- }
-#else
- allocation = new_space->AllocateRaw(size);
-#endif
+ AllocationAlignment alignment = object->RequiredAlignment();
+ AllocationResult allocation = new_space->AllocateRaw(size, alignment);
if (allocation.IsRetry()) {
if (!new_space->AddFreshPage()) {
// Shouldn't happen. We are sweeping linearly, and to-space
@@ -1958,15 +1855,7 @@ int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage(
// has always room.
UNREACHABLE();
}
-#ifndef V8_HOST_ARCH_64_BIT
- if (object->NeedsToEnsureDoubleAlignment()) {
- allocation = new_space->AllocateRawDoubleAligned(size);
- } else {
- allocation = new_space->AllocateRaw(size);
- }
-#else
- allocation = new_space->AllocateRaw(size);
-#endif
+ allocation = new_space->AllocateRaw(size, alignment);
DCHECK(!allocation.IsRetry());
}
Object* target = allocation.ToObjectChecked();
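
// A standalone sketch (not part of the patch) of what AllocateRaw(size,
// alignment) boils down to in a bump-pointer allocator: the current top is
// padded up to the requested alignment before the object is carved out. The
// Alignment enum and AllocateRaw here are made up for illustration, not V8's
// AllocationAlignment API.
#include <cstddef>
#include <cstdint>
#include <cstdio>

enum Alignment { kWordAligned = 8, kDoubleAligned = 16 };

uintptr_t AllocateRaw(uintptr_t* top, uintptr_t limit, size_t size,
                      Alignment alignment) {
  uintptr_t align = static_cast<uintptr_t>(alignment);
  uintptr_t aligned = (*top + align - 1) & ~(align - 1);
  if (aligned + size > limit) return 0;  // Caller handles the retry path.
  *top = aligned + size;
  return aligned;
}

int main() {
  unsigned char arena[256];
  uintptr_t top = reinterpret_cast<uintptr_t>(arena) + 4;  // Misaligned top.
  uintptr_t limit = reinterpret_cast<uintptr_t>(arena) + sizeof(arena);
  uintptr_t object = AllocateRaw(&top, limit, 24, kDoubleAligned);
  printf("allocation is 16-byte aligned: %d\n",
         static_cast<int>(object % 16 == 0));
  return 0;
}
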
@@ -2258,41 +2147,46 @@ void MarkCompactCollector::RetainMaps() {
}
-void MarkCompactCollector::EnsureMarkingDequeIsCommittedAndInitialize(
- size_t max_size) {
+void MarkCompactCollector::EnsureMarkingDequeIsReserved() {
+ DCHECK(!marking_deque_.in_use());
+ if (marking_deque_memory_ == NULL) {
+ marking_deque_memory_ = new base::VirtualMemory(kMaxMarkingDequeSize);
+ marking_deque_memory_committed_ = 0;
+ }
+ if (marking_deque_memory_ == NULL) {
+ V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsReserved");
+ }
+}
+
+
+void MarkCompactCollector::EnsureMarkingDequeIsCommitted(size_t max_size) {
// If the marking deque is too small, we try to allocate a bigger one.
// If that fails, make do with a smaller one.
- for (size_t size = max_size; size >= 256 * KB; size >>= 1) {
+ CHECK(!marking_deque_.in_use());
+ for (size_t size = max_size; size >= kMinMarkingDequeSize; size >>= 1) {
base::VirtualMemory* memory = marking_deque_memory_;
- bool is_committed = marking_deque_memory_committed_;
+ size_t currently_committed = marking_deque_memory_committed_;
- if (memory == NULL || memory->size() < size) {
- // If we don't have memory or we only have small memory, then
- // try to reserve a new one.
- memory = new base::VirtualMemory(size);
- is_committed = false;
- }
- if (is_committed) return;
- if (memory->IsReserved() &&
- memory->Commit(reinterpret_cast<Address>(memory->address()),
- memory->size(),
- false)) { // Not executable.
- if (marking_deque_memory_ != NULL && marking_deque_memory_ != memory) {
- delete marking_deque_memory_;
+ if (currently_committed == size) return;
+
+ if (currently_committed > size) {
+ bool success = marking_deque_memory_->Uncommit(
+ reinterpret_cast<Address>(marking_deque_memory_->address()) + size,
+ currently_committed - size);
+ if (success) {
+ marking_deque_memory_committed_ = size;
+ return;
}
- marking_deque_memory_ = memory;
- marking_deque_memory_committed_ = true;
- InitializeMarkingDeque();
+ UNREACHABLE();
+ }
+
+ bool success = memory->Commit(
+ reinterpret_cast<Address>(memory->address()) + currently_committed,
+ size - currently_committed,
+ false); // Not executable.
+ if (success) {
+ marking_deque_memory_committed_ = size;
return;
- } else {
- // Commit failed, so we are under memory pressure. If this was the
- // previously reserved area we tried to commit, then remove references
- // to it before deleting it and unreserving it.
- if (marking_deque_memory_ == memory) {
- marking_deque_memory_ = NULL;
- marking_deque_memory_committed_ = false;
- }
- delete memory; // Will also unreserve the virtual allocation.
}
}
V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsCommitted");
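
// A standalone sketch (not part of the patch) of the commit strategy in
// EnsureMarkingDequeIsCommitted above: try to commit the requested size,
// uncommit the tail if too much is already committed, and halve the request
// on failure until a minimum size is reached. Backing and TryCommit are
// stand-ins for base::VirtualMemory and its Commit call.
#include <cstddef>
#include <cstdio>

struct Backing {
  size_t committed;
  size_t commit_limit;  // Pretend the OS refuses commits beyond this point.
  bool TryCommit(size_t offset, size_t length) {
    return offset + length <= commit_limit;
  }
};

// Returns the size that ended up committed, or 0 if even the minimum failed.
size_t EnsureCommitted(Backing* memory, size_t max_size, size_t min_size) {
  for (size_t size = max_size; size >= min_size; size >>= 1) {
    if (memory->committed == size) return size;
    if (memory->committed > size) {
      // Shrinking: give the tail back; this is expected to always succeed.
      memory->committed = size;
      return size;
    }
    // Growing: commit only the missing tail.
    if (memory->TryCommit(memory->committed, size - memory->committed)) {
      memory->committed = size;
      return size;
    }
    // Commit failed, so we are under memory pressure: retry with half.
  }
  return 0;
}

int main() {
  Backing memory = {0, 1u << 20};  // Only 1 MB can be committed in this toy.
  size_t committed = EnsureCommitted(&memory, 4u << 20, 256u << 10);
  printf("committed %d KB\n", static_cast<int>(committed / 1024));
  return 0;
}
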
@@ -2300,23 +2194,37 @@ void MarkCompactCollector::EnsureMarkingDequeIsCommittedAndInitialize(
void MarkCompactCollector::InitializeMarkingDeque() {
- if (marking_deque_memory_committed_) {
- Address addr = static_cast<Address>(marking_deque_memory_->address());
- size_t size = marking_deque_memory_->size();
- if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
- marking_deque_.Initialize(addr, addr + size);
- }
+ DCHECK(!marking_deque_.in_use());
+ DCHECK(marking_deque_memory_committed_ > 0);
+ Address addr = static_cast<Address>(marking_deque_memory_->address());
+ size_t size = marking_deque_memory_committed_;
+ if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
+ marking_deque_.Initialize(addr, addr + size);
}
-void MarkCompactCollector::UncommitMarkingDeque() {
- if (marking_deque_memory_committed_) {
- bool success = marking_deque_memory_->Uncommit(
- reinterpret_cast<Address>(marking_deque_memory_->address()),
- marking_deque_memory_->size());
- CHECK(success);
- marking_deque_memory_committed_ = false;
+void MarkingDeque::Initialize(Address low, Address high) {
+ DCHECK(!in_use_);
+ HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
+ HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
+ array_ = obj_low;
+ mask_ = base::bits::RoundDownToPowerOfTwo32(
+ static_cast<uint32_t>(obj_high - obj_low)) -
+ 1;
+ top_ = bottom_ = 0;
+ overflowed_ = false;
+ in_use_ = true;
+}
+
+
+void MarkingDeque::Uninitialize(bool aborting) {
+ if (!aborting) {
+ DCHECK(IsEmpty());
+ DCHECK(!overflowed_);
}
+ DCHECK(in_use_);
+ top_ = bottom_ = 0xdecbad;
+ in_use_ = false;
}
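
// A standalone sketch (not part of the patch) of the ring-buffer indexing set
// up in MarkingDeque::Initialize above: the usable capacity is rounded down
// to a power of two so that "index & mask_" replaces a modulo. Plain ints
// stand in for HeapObject* entries, and RoundDownToPowerOfTwo32 is open-coded
// instead of using base::bits.
#include <cstdint>
#include <cstdio>
#include <vector>

static uint32_t RoundDownToPowerOfTwo32(uint32_t value) {
  if (value == 0) return 0;
  uint32_t result = 1;
  while (result <= value / 2) result <<= 1;
  return result;
}

struct ToyDeque {
  std::vector<int> array;
  uint32_t mask, top = 0, bottom = 0;

  explicit ToyDeque(uint32_t slots)
      : array(slots), mask(RoundDownToPowerOfTwo32(slots) - 1) {}

  bool IsFull() const { return ((top + 1) & mask) == bottom; }
  bool IsEmpty() const { return top == bottom; }
  void Push(int value) {
    array[top] = value;
    top = (top + 1) & mask;  // Wraps without a division.
  }
  int Pop() {
    top = (top - 1) & mask;
    return array[top];
  }
};

int main() {
  ToyDeque deque(100);  // Only 64 slots are addressable through the mask.
  for (int i = 0; i < 10 && !deque.IsFull(); i++) deque.Push(i);
  printf("popped %d, mask 0x%x\n", deque.Pop(),
         static_cast<unsigned>(deque.mask));
  return 0;
}
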
@@ -2337,7 +2245,9 @@ void MarkCompactCollector::MarkLiveObjects() {
} else {
// Abort any pending incremental activities e.g. incremental sweeping.
incremental_marking->Abort();
- InitializeMarkingDeque();
+ if (marking_deque_.in_use()) {
+ marking_deque_.Uninitialize(true);
+ }
}
#ifdef DEBUG
@@ -2345,7 +2255,8 @@ void MarkCompactCollector::MarkLiveObjects() {
state_ = MARK_LIVE_OBJECTS;
#endif
- EnsureMarkingDequeIsCommittedAndInitialize();
+ EnsureMarkingDequeIsCommittedAndInitialize(
+ MarkCompactCollector::kMaxMarkingDequeSize);
PrepareForCodeFlushing();
@@ -2420,11 +2331,6 @@ void MarkCompactCollector::AfterMarking() {
// Flush code from collected candidates.
if (is_code_flushing_enabled()) {
code_flusher_->ProcessCandidates();
- // If incremental marker does not support code flushing, we need to
- // disable it before incremental marking steps for next cycle.
- if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
- EnableCodeFlushing(false);
- }
}
if (FLAG_track_gc_object_stats) {
@@ -2489,11 +2395,13 @@ void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
const int header = TransitionArray::kProtoTransitionHeaderSize;
int new_number_of_transitions = 0;
for (int i = 0; i < number_of_transitions; i++) {
- Object* cached_map = prototype_transitions->get(header + i);
- if (IsMarked(cached_map)) {
+ Object* cell = prototype_transitions->get(header + i);
+ if (!WeakCell::cast(cell)->cleared()) {
if (new_number_of_transitions != i) {
- prototype_transitions->set(header + new_number_of_transitions,
- cached_map, SKIP_WRITE_BARRIER);
+ prototype_transitions->set(header + new_number_of_transitions, cell);
+ Object** slot = prototype_transitions->RawFieldOfElementAt(
+ header + new_number_of_transitions);
+ RecordSlot(slot, slot, cell);
}
new_number_of_transitions++;
}
@@ -2729,7 +2637,6 @@ void MarkCompactCollector::AbortWeakCollections() {
void MarkCompactCollector::ProcessAndClearWeakCells() {
- HeapObject* undefined = heap()->undefined_value();
Object* weak_cell_obj = heap()->encountered_weak_cells();
while (weak_cell_obj != Smi::FromInt(0)) {
WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
@@ -2764,19 +2671,18 @@ void MarkCompactCollector::ProcessAndClearWeakCells() {
RecordSlot(slot, slot, *slot);
}
weak_cell_obj = weak_cell->next();
- weak_cell->set_next(undefined, SKIP_WRITE_BARRIER);
+ weak_cell->clear_next(heap());
}
heap()->set_encountered_weak_cells(Smi::FromInt(0));
}
void MarkCompactCollector::AbortWeakCells() {
- Object* undefined = heap()->undefined_value();
Object* weak_cell_obj = heap()->encountered_weak_cells();
while (weak_cell_obj != Smi::FromInt(0)) {
WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
weak_cell_obj = weak_cell->next();
- weak_cell->set_next(undefined, SKIP_WRITE_BARRIER);
+ weak_cell->clear_next(heap());
}
heap()->set_encountered_weak_cells(Smi::FromInt(0));
}
@@ -2814,37 +2720,23 @@ void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src,
DCHECK(heap()->AllowedToBeMigrated(src, dest));
DCHECK(dest != LO_SPACE && size <= Page::kMaxRegularHeapObjectSize);
if (dest == OLD_SPACE) {
- Address src_slot = src_addr;
- Address dst_slot = dst_addr;
DCHECK(IsAligned(size, kPointerSize));
+ switch (src->ContentType()) {
+ case HeapObjectContents::kTaggedValues:
+ MigrateObjectTagged(dst, src, size);
+ break;
- bool may_contain_raw_values = src->MayContainRawValues();
-#if V8_DOUBLE_FIELDS_UNBOXING
- LayoutDescriptorHelper helper(src->map());
- bool has_only_tagged_fields = helper.all_fields_tagged();
-#endif
- for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
- Object* value = Memory::Object_at(src_slot);
-
- Memory::Object_at(dst_slot) = value;
-
-#if V8_DOUBLE_FIELDS_UNBOXING
- if (!may_contain_raw_values &&
- (has_only_tagged_fields ||
- helper.IsTagged(static_cast<int>(src_slot - src_addr))))
-#else
- if (!may_contain_raw_values)
-#endif
- {
- RecordMigratedSlot(value, dst_slot);
- }
+ case HeapObjectContents::kMixedValues:
+ MigrateObjectMixed(dst, src, size);
+ break;
- src_slot += kPointerSize;
- dst_slot += kPointerSize;
+ case HeapObjectContents::kRawValues:
+ MigrateObjectRaw(dst, src, size);
+ break;
}
if (compacting_ && dst->IsJSFunction()) {
- Address code_entry_slot = dst_addr + JSFunction::kCodeEntryOffset;
+ Address code_entry_slot = dst->address() + JSFunction::kCodeEntryOffset;
Address code_entry = Memory::Address_at(code_entry_slot);
if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
@@ -2852,30 +2744,6 @@ void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src,
SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
SlotsBuffer::IGNORE_OVERFLOW);
}
- } else if (dst->IsConstantPoolArray()) {
- // We special case ConstantPoolArrays since they could contain integers
- // value entries which look like tagged pointers.
- // TODO(mstarzinger): restructure this code to avoid this special-casing.
- ConstantPoolArray* array = ConstantPoolArray::cast(dst);
- ConstantPoolArray::Iterator code_iter(array, ConstantPoolArray::CODE_PTR);
- while (!code_iter.is_finished()) {
- Address code_entry_slot =
- dst_addr + array->OffsetOfElementAt(code_iter.next_index());
- Address code_entry = Memory::Address_at(code_entry_slot);
-
- if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
- SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
- SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
- SlotsBuffer::IGNORE_OVERFLOW);
- }
- }
- ConstantPoolArray::Iterator heap_iter(array, ConstantPoolArray::HEAP_PTR);
- while (!heap_iter.is_finished()) {
- Address heap_slot =
- dst_addr + array->OffsetOfElementAt(heap_iter.next_index());
- Object* value = Memory::Object_at(heap_slot);
- RecordMigratedSlot(value, heap_slot);
- }
}
} else if (dest == CODE_SPACE) {
PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr));
@@ -2893,6 +2761,59 @@ void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src,
}
+void MarkCompactCollector::MigrateObjectTagged(HeapObject* dst, HeapObject* src,
+ int size) {
+ Address src_slot = src->address();
+ Address dst_slot = dst->address();
+ for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
+ Object* value = Memory::Object_at(src_slot);
+ Memory::Object_at(dst_slot) = value;
+ RecordMigratedSlot(value, dst_slot);
+ src_slot += kPointerSize;
+ dst_slot += kPointerSize;
+ }
+}
+
+
+void MarkCompactCollector::MigrateObjectMixed(HeapObject* dst, HeapObject* src,
+ int size) {
+ if (src->IsFixedTypedArrayBase()) {
+ heap()->MoveBlock(dst->address(), src->address(), size);
+ Address base_pointer_slot =
+ dst->address() + FixedTypedArrayBase::kBasePointerOffset;
+ RecordMigratedSlot(Memory::Object_at(base_pointer_slot), base_pointer_slot);
+ } else if (FLAG_unbox_double_fields) {
+ Address dst_addr = dst->address();
+ Address src_addr = src->address();
+ Address src_slot = src_addr;
+ Address dst_slot = dst_addr;
+
+ LayoutDescriptorHelper helper(src->map());
+ DCHECK(!helper.all_fields_tagged());
+ for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
+ Object* value = Memory::Object_at(src_slot);
+
+ Memory::Object_at(dst_slot) = value;
+
+ if (helper.IsTagged(static_cast<int>(src_slot - src_addr))) {
+ RecordMigratedSlot(value, dst_slot);
+ }
+
+ src_slot += kPointerSize;
+ dst_slot += kPointerSize;
+ }
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void MarkCompactCollector::MigrateObjectRaw(HeapObject* dst, HeapObject* src,
+ int size) {
+ heap()->MoveBlock(dst->address(), src->address(), size);
+}
+
+
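
// A standalone sketch (not part of the patch) of the word-by-word migration
// in MigrateObjectTagged / MigrateObjectMixed above: every word is copied,
// but a slot is reported to the collector only when the layout says it holds
// a tagged pointer. A bitmask stands in for V8's LayoutDescriptorHelper, and
// "recording" just collects slot indices.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

void MigrateWords(const void* src, void* dst, size_t size_in_words,
                  uint64_t tagged_mask, std::vector<size_t>* recorded_slots) {
  const uintptr_t* from = static_cast<const uintptr_t*>(src);
  uintptr_t* to = static_cast<uintptr_t*>(dst);
  for (size_t i = 0; i < size_in_words; i++) {
    to[i] = from[i];  // Raw copy, valid for tagged and untagged words alike.
    if (tagged_mask & (uint64_t{1} << i)) {
      recorded_slots->push_back(i);  // Only tagged words become slots.
    }
  }
}

int main() {
  uintptr_t object[4] = {1, 2, 3, 4};
  uintptr_t copy[4];
  std::vector<size_t> recorded;
  // Words 0 and 2 are tagged in this made-up layout, 1 and 3 are raw data.
  MigrateWords(object, copy, 4, 0x5u, &recorded);
  printf("recorded %d tagged slots\n", static_cast<int>(recorded.size()));
  return 0;
}
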
// Visitor for updating pointers from live objects in old spaces to new space.
// It does not expect to encounter pointers to dead objects.
class PointersUpdatingVisitor : public ObjectVisitor {
@@ -3119,16 +3040,8 @@ bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
OldSpace* old_space = heap()->old_space();
HeapObject* target;
- AllocationResult allocation;
-#ifndef V8_HOST_ARCH_64_BIT
- if (object->NeedsToEnsureDoubleAlignment()) {
- allocation = old_space->AllocateRawDoubleAligned(object_size);
- } else {
- allocation = old_space->AllocateRaw(object_size);
- }
-#else
- allocation = old_space->AllocateRaw(object_size);
-#endif
+ AllocationAlignment alignment = object->RequiredAlignment();
+ AllocationResult allocation = old_space->AllocateRaw(object_size, alignment);
if (allocation.To(&target)) {
MigrateObject(target, object, object_size, old_space->identity());
// If we end up needing more special cases, we should factor this out.
@@ -3145,11 +3058,18 @@ bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
bool MarkCompactCollector::IsSlotInBlackObject(Page* p, Address slot,
HeapObject** out_object) {
- // This function does not support large objects right now.
Space* owner = p->owner();
if (owner == heap_->lo_space() || owner == NULL) {
- *out_object = NULL;
- return true;
+ Object* large_object = heap_->lo_space()->FindObject(slot);
+ // This object has to exist, otherwise we would not have recorded a slot
+ // for it.
+ CHECK(large_object->IsHeapObject());
+ HeapObject* large_heap_object = HeapObject::cast(large_object);
+ if (IsMarked(large_heap_object)) {
+ *out_object = large_heap_object;
+ return true;
+ }
+ return false;
}
uint32_t mark_bit_index = p->AddressToMarkbitIndex(slot);
@@ -3266,25 +3186,37 @@ bool MarkCompactCollector::IsSlotInLiveObject(Address slot) {
return false;
}
-#if V8_DOUBLE_FIELDS_UNBOXING
- // |object| is NULL only when the slot belongs to large object space.
- DCHECK(object != NULL ||
- Page::FromAnyPointerAddress(heap_, slot)->owner() ==
- heap_->lo_space());
- // We don't need to check large objects' layout descriptor since it can't
- // contain in-object fields anyway.
- if (object != NULL) {
- // Filter out slots that happens to point to unboxed double fields.
- LayoutDescriptorHelper helper(object->map());
- bool has_only_tagged_fields = helper.all_fields_tagged();
- if (!has_only_tagged_fields &&
- !helper.IsTagged(static_cast<int>(slot - object->address()))) {
- return false;
- }
- }
-#endif
+ DCHECK(object != NULL);
- return true;
+ switch (object->ContentType()) {
+ case HeapObjectContents::kTaggedValues:
+ return true;
+
+ case HeapObjectContents::kRawValues: {
+ InstanceType type = object->map()->instance_type();
+ // Slots in maps and code can't be invalid because they are never
+ // shrunk.
+ if (type == MAP_TYPE || type == CODE_TYPE) return true;
+
+ // Consider slots in objects that contain ONLY raw data as invalid.
+ return false;
+ }
+
+ case HeapObjectContents::kMixedValues: {
+ if (object->IsFixedTypedArrayBase()) {
+ return static_cast<int>(slot - object->address()) ==
+ FixedTypedArrayBase::kBasePointerOffset;
+ } else if (FLAG_unbox_double_fields) {
+ // Filter out slots that happen to point to unboxed double fields.
+ LayoutDescriptorHelper helper(object->map());
+ DCHECK(!helper.all_fields_tagged());
+ return helper.IsTagged(static_cast<int>(slot - object->address()));
+ }
+ break;
+ }
+ }
+ UNREACHABLE();
+ return true;
}
@@ -3300,6 +3232,23 @@ void MarkCompactCollector::VerifyIsSlotInLiveObject(Address slot,
}
+void MarkCompactCollector::RemoveObjectSlots(Address start_slot,
+ Address end_slot) {
+ // Remove entries by replacing them with an old-space slot containing a smi
+ // that is located in an unmovable page.
+ int npages = evacuation_candidates_.length();
+ for (int i = 0; i < npages; i++) {
+ Page* p = evacuation_candidates_[i];
+ DCHECK(p->IsEvacuationCandidate() ||
+ p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
+ if (p->IsEvacuationCandidate()) {
+ SlotsBuffer::RemoveObjectSlots(heap_, p->slots_buffer(), start_slot,
+ end_slot);
+ }
+ }
+}
+
+
void MarkCompactCollector::EvacuateNewSpace() {
// There are soft limits in the allocation code, designed to trigger a mark
// sweep collection by failing allocations. But since we are already in
@@ -3355,14 +3304,14 @@ void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
int size = object->Size();
-
+ AllocationAlignment alignment = object->RequiredAlignment();
HeapObject* target_object;
- AllocationResult allocation = space->AllocateRaw(size);
+ AllocationResult allocation = space->AllocateRaw(size, alignment);
if (!allocation.To(&target_object)) {
// If allocation failed, use emergency memory and re-try allocation.
CHECK(space->HasEmergencyMemory());
space->UseEmergencyMemory();
- allocation = space->AllocateRaw(size);
+ allocation = space->AllocateRaw(size, alignment);
}
if (!allocation.To(&target_object)) {
// OS refused to give us memory.
@@ -3496,6 +3445,10 @@ static inline void UpdateSlot(Isolate* isolate, ObjectVisitor* v,
rinfo.Visit(isolate, v);
break;
}
+ case SlotsBuffer::OBJECT_SLOT: {
+ v->VisitPointer(reinterpret_cast<Object**>(addr));
+ break;
+ }
default:
UNREACHABLE();
break;
@@ -3612,121 +3565,18 @@ static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
}
-static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
- Page* p = Page::FromAddress(code->address());
-
- if (p->IsEvacuationCandidate() || p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
- return false;
- }
-
- Address code_start = code->address();
- Address code_end = code_start + code->Size();
-
- uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start);
- uint32_t end_index =
- MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize);
-
- // TODO(hpayer): Filter out invalidated code in
- // ClearInvalidSlotsBufferEntries.
- Bitmap* b = p->markbits();
-
- MarkBit start_mark_bit = b->MarkBitFromIndex(start_index);
- MarkBit end_mark_bit = b->MarkBitFromIndex(end_index);
-
- if (value) {
- Marking::SetAllMarkBitsInRange(start_mark_bit, end_mark_bit);
- } else {
- Marking::ClearAllMarkBitsOfCellsContainedInRange(start_mark_bit,
- end_mark_bit);
- }
-
- return true;
-}
-
-
-static bool IsOnInvalidatedCodeObject(Address addr) {
- // We did not record any slots in large objects thus
- // we can safely go to the page from the slot address.
- Page* p = Page::FromAddress(addr);
-
- // First check owner's identity because old space is swept concurrently or
- // lazily and might still have non-zero mark-bits on some pages.
- if (p->owner()->identity() != CODE_SPACE) return false;
-
- // In code space only bits on evacuation candidates (but we don't record
- // any slots on them) and under invalidated code objects are non-zero.
- MarkBit mark_bit =
- p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr));
-
- return Marking::IsBlackOrGrey(mark_bit);
-}
-
-
-void MarkCompactCollector::InvalidateCode(Code* code) {
- if (heap_->incremental_marking()->IsCompacting() &&
- !ShouldSkipEvacuationSlotRecording(code)) {
- DCHECK(compacting_);
-
- // If the object is white than no slots were recorded on it yet.
- MarkBit mark_bit = Marking::MarkBitFrom(code);
- if (Marking::IsWhite(mark_bit)) return;
-
- invalidated_code_.Add(code);
- }
-}
-
-
// Return true if the given code is deoptimized or will be deoptimized.
bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
return code->is_optimized_code() && code->marked_for_deoptimization();
}
-bool MarkCompactCollector::MarkInvalidatedCode() {
- bool code_marked = false;
-
- int length = invalidated_code_.length();
- for (int i = 0; i < length; i++) {
- Code* code = invalidated_code_[i];
-
- if (SetMarkBitsUnderInvalidatedCode(code, true)) {
- code_marked = true;
- }
- }
-
- return code_marked;
-}
-
-
-void MarkCompactCollector::RemoveDeadInvalidatedCode() {
- int length = invalidated_code_.length();
- for (int i = 0; i < length; i++) {
- if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL;
- }
-}
-
-
-void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {
- int length = invalidated_code_.length();
- for (int i = 0; i < length; i++) {
- Code* code = invalidated_code_[i];
- if (code != NULL) {
- code->Iterate(visitor);
- SetMarkBitsUnderInvalidatedCode(code, false);
- }
- }
- invalidated_code_.Rewind(0);
-}
-
-
void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
Heap::RelocationLock relocation_lock(heap());
- bool code_slots_filtering_required;
{
GCTracer::Scope gc_scope(heap()->tracer(),
GCTracer::Scope::MC_SWEEP_NEWSPACE);
- code_slots_filtering_required = MarkInvalidatedCode();
EvacuationScope evacuation_scope(this);
EvacuateNewSpace();
}
@@ -3773,8 +3623,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
{
GCTracer::Scope gc_scope(heap()->tracer(),
GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
- SlotsBuffer::UpdateSlotsRecordedIn(heap_, migration_slots_buffer_,
- code_slots_filtering_required);
+ SlotsBuffer::UpdateSlotsRecordedIn(heap_, migration_slots_buffer_);
if (FLAG_trace_fragmentation_verbose) {
PrintF(" migration slots buffer: %d\n",
SlotsBuffer::SizeOfChain(migration_slots_buffer_));
@@ -3808,8 +3657,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
if (p->IsEvacuationCandidate()) {
- SlotsBuffer::UpdateSlotsRecordedIn(heap_, p->slots_buffer(),
- code_slots_filtering_required);
+ SlotsBuffer::UpdateSlotsRecordedIn(heap_, p->slots_buffer());
if (FLAG_trace_fragmentation_verbose) {
PrintF(" page %p slots buffer: %d\n", reinterpret_cast<void*>(p),
SlotsBuffer::SizeOfChain(p->slots_buffer()));
@@ -3865,10 +3713,6 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
EvacuationWeakObjectRetainer evacuation_object_retainer;
heap()->ProcessAllWeakReferences(&evacuation_object_retainer);
- // Visit invalidated code (we ignored all slots on it) and clear mark-bits
- // under it.
- ProcessInvalidatedCode(&updating_visitor);
-
heap_->isolate()->inner_pointer_to_code_cache()->Flush();
slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_);
@@ -4458,8 +4302,6 @@ void MarkCompactCollector::SweepSpaces() {
StartSweeperThreads();
}
}
- RemoveDeadInvalidatedCode();
-
{
GCTracer::Scope sweep_scope(heap()->tracer(),
GCTracer::Scope::MC_SWEEP_CODE);
@@ -4602,12 +4444,10 @@ void SlotsBuffer::RemoveInvalidSlots(Heap* heap, SlotsBuffer* buffer) {
ObjectSlot slot = slots[slot_idx];
if (!IsTypedSlot(slot)) {
Object* object = *slot;
- if (object->IsHeapObject()) {
- if (heap->InNewSpace(object) ||
- !heap->mark_compact_collector()->IsSlotInLiveObject(
- reinterpret_cast<Address>(slot))) {
- slots[slot_idx] = kRemovedEntry;
- }
+ if ((object->IsHeapObject() && heap->InNewSpace(object)) ||
+ !heap->mark_compact_collector()->IsSlotInLiveObject(
+ reinterpret_cast<Address>(slot))) {
+ slots[slot_idx] = kRemovedEntry;
}
} else {
++slot_idx;
@@ -4619,6 +4459,41 @@ void SlotsBuffer::RemoveInvalidSlots(Heap* heap, SlotsBuffer* buffer) {
}
+void SlotsBuffer::RemoveObjectSlots(Heap* heap, SlotsBuffer* buffer,
+ Address start_slot, Address end_slot) {
+ // Remove entries by replacing them with an old-space slot containing a smi
+ // that is located in an unmovable page.
+ const ObjectSlot kRemovedEntry = HeapObject::RawField(
+ heap->empty_fixed_array(), FixedArrayBase::kLengthOffset);
+ DCHECK(Page::FromAddress(reinterpret_cast<Address>(kRemovedEntry))
+ ->NeverEvacuate());
+
+ while (buffer != NULL) {
+ SlotsBuffer::ObjectSlot* slots = buffer->slots_;
+ intptr_t slots_count = buffer->idx_;
+ bool is_typed_slot = false;
+
+ for (int slot_idx = 0; slot_idx < slots_count; ++slot_idx) {
+ ObjectSlot slot = slots[slot_idx];
+ if (!IsTypedSlot(slot)) {
+ Address slot_address = reinterpret_cast<Address>(slot);
+ if (slot_address >= start_slot && slot_address < end_slot) {
+ slots[slot_idx] = kRemovedEntry;
+ if (is_typed_slot) {
+ slots[slot_idx - 1] = kRemovedEntry;
+ }
+ }
+ is_typed_slot = false;
+ } else {
+ is_typed_slot = true;
+ DCHECK(slot_idx < slots_count);
+ }
+ }
+ buffer = buffer->next();
+ }
+}
+
+
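
// A standalone sketch (not part of the patch) of the typed-slot-aware
// filtering in SlotsBuffer::RemoveObjectSlots above: typed slots occupy two
// consecutive entries (a type marker followed by an address), so removing the
// address also has to clear the preceding marker. Plain integers stand in for
// the ObjectSlot entries; negative values play the role of IsTypedSlot().
#include <cstddef>
#include <cstdio>
#include <vector>

const long kRemovedEntry = 0;  // Stand-in for the unmovable sentinel slot.

bool IsTypedSlot(long entry) { return entry < 0; }

void RemoveObjectSlots(std::vector<long>* slots, long start, long end) {
  bool is_typed_slot = false;
  for (size_t i = 0; i < slots->size(); ++i) {
    long entry = (*slots)[i];
    if (!IsTypedSlot(entry)) {
      if (entry >= start && entry < end) {
        (*slots)[i] = kRemovedEntry;
        // The address half of a typed pair drags its type marker along.
        if (is_typed_slot) (*slots)[i - 1] = kRemovedEntry;
      }
      is_typed_slot = false;
    } else {
      is_typed_slot = true;  // Next entry is the address of this typed slot.
    }
  }
}

int main() {
  // Untyped slot 100, typed slot (-1, 200), untyped slot 300.
  std::vector<long> slots = {100, -1, 200, 300};
  RemoveObjectSlots(&slots, 150, 250);  // Removes the typed pair only.
  for (long entry : slots) printf("%ld ", entry);
  printf("\n");
  return 0;
}
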
void SlotsBuffer::VerifySlots(Heap* heap, SlotsBuffer* buffer) {
while (buffer != NULL) {
SlotsBuffer::ObjectSlot* slots = buffer->slots_;
@@ -4629,9 +4504,10 @@ void SlotsBuffer::VerifySlots(Heap* heap, SlotsBuffer* buffer) {
if (!IsTypedSlot(slot)) {
Object* object = *slot;
if (object->IsHeapObject()) {
+ HeapObject* heap_object = HeapObject::cast(object);
CHECK(!heap->InNewSpace(object));
- CHECK(heap->mark_compact_collector()->IsSlotInLiveObject(
- reinterpret_cast<Address>(slot)));
+ heap->mark_compact_collector()->VerifyIsSlotInLiveObject(
+ reinterpret_cast<Address>(slot), heap_object);
}
} else {
++slot_idx;
@@ -4666,9 +4542,20 @@ void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
if (target_page->IsEvacuationCandidate() &&
(rinfo->host() == NULL ||
!ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
+ Address addr = rinfo->pc();
+ SlotsBuffer::SlotType slot_type = SlotTypeForRMode(rmode);
+ if (rinfo->IsInConstantPool()) {
+ addr = rinfo->constant_pool_entry_address();
+ if (RelocInfo::IsCodeTarget(rmode)) {
+ slot_type = SlotsBuffer::CODE_ENTRY_SLOT;
+ } else {
+ DCHECK(RelocInfo::IsEmbeddedObject(rmode));
+ slot_type = SlotsBuffer::OBJECT_SLOT;
+ }
+ }
bool success = SlotsBuffer::AddTo(
&slots_buffer_allocator_, target_page->slots_buffer_address(),
- SlotTypeForRMode(rmode), rinfo->pc(), SlotsBuffer::FAIL_ON_OVERFLOW);
+ slot_type, addr, SlotsBuffer::FAIL_ON_OVERFLOW);
if (!success) {
EvictPopularEvacuationCandidate(target_page);
}
@@ -4751,28 +4638,6 @@ void SlotsBuffer::UpdateSlots(Heap* heap) {
}
-void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
- PointersUpdatingVisitor v(heap);
-
- for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
- ObjectSlot slot = slots_[slot_idx];
- if (!IsTypedSlot(slot)) {
- if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) {
- PointersUpdatingVisitor::UpdateSlot(heap, slot);
- }
- } else {
- ++slot_idx;
- DCHECK(slot_idx < idx_);
- Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
- if (!IsOnInvalidatedCodeObject(pc)) {
- UpdateSlot(heap->isolate(), &v, DecodeSlotType(slot),
- reinterpret_cast<Address>(slots_[slot_idx]));
- }
- }
- }
-}
-
-
SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
return new SlotsBuffer(next_buffer);
}
@@ -4792,5 +4657,5 @@ void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
}
*buffer_address = NULL;
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8