Diffstat (limited to 'deps/v8/src/heap/heap.cc')
-rw-r--r--  deps/v8/src/heap/heap.cc  557
1 file changed, 169 insertions, 388 deletions
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index e84f56e076..25cb56d20c 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -6,6 +6,7 @@
#include "src/accessors.h"
#include "src/api.h"
+#include "src/assembler-inl.h"
#include "src/ast/context-slot-cache.h"
#include "src/base/bits.h"
#include "src/base/once.h"
@@ -117,7 +118,6 @@ Heap::Heap()
#endif // DEBUG
old_generation_allocation_limit_(initial_old_generation_size_),
inline_allocation_disabled_(false),
- total_regexp_code_generated_(0),
tracer_(nullptr),
promoted_objects_size_(0),
promotion_ratio_(0),
@@ -141,8 +141,6 @@ Heap::Heap()
dead_object_stats_(nullptr),
scavenge_job_(nullptr),
idle_scavenge_observer_(nullptr),
- full_codegen_bytes_generated_(0),
- crankshaft_codegen_bytes_generated_(0),
new_space_allocation_counter_(0),
old_generation_allocation_counter_at_last_gc_(0),
old_generation_size_at_last_gc_(0),
@@ -160,8 +158,10 @@ Heap::Heap()
strong_roots_list_(NULL),
heap_iterator_depth_(0),
local_embedder_heap_tracer_(nullptr),
+ fast_promotion_mode_(false),
force_oom_(false),
- delay_sweeper_tasks_for_testing_(false) {
+ delay_sweeper_tasks_for_testing_(false),
+ pending_layout_change_object_(nullptr) {
// Allow build-time customization of the max semispace size. Building
// V8 with snapshots and a non-default max semispace size is much
// easier if you can define it as part of the build environment.
@@ -990,24 +990,6 @@ bool Heap::CollectGarbage(GarbageCollector collector,
}
}
- if (collector == MARK_COMPACTOR && FLAG_incremental_marking &&
- !ShouldFinalizeIncrementalMarking() && !ShouldAbortIncrementalMarking() &&
- !incremental_marking()->IsStopped() &&
- !incremental_marking()->should_hurry() &&
- !incremental_marking()->NeedsFinalization() &&
- !IsCloseToOutOfMemory(new_space_->Capacity())) {
- if (!incremental_marking()->IsComplete() &&
- !mark_compact_collector()->marking_deque()->IsEmpty() &&
- !FLAG_gc_global) {
- if (FLAG_trace_incremental_marking) {
- isolate()->PrintWithTimestamp(
- "[IncrementalMarking] Delaying MarkSweep.\n");
- }
- collector = YoungGenerationCollector();
- collector_reason = "incremental marking delaying mark-sweep";
- }
- }
-
bool next_gc_likely_to_collect_more = false;
size_t committed_memory_before = 0;
@@ -1360,7 +1342,17 @@ bool Heap::PerformGarbageCollection(
MinorMarkCompact();
break;
case SCAVENGER:
- Scavenge();
+ if (fast_promotion_mode_ &&
+ CanExpandOldGeneration(new_space()->Size())) {
+ tracer()->NotifyYoungGenerationHandling(
+ YoungGenerationHandling::kFastPromotionDuringScavenge);
+ EvacuateYoungGeneration();
+ } else {
+ tracer()->NotifyYoungGenerationHandling(
+ YoungGenerationHandling::kRegularScavenge);
+
+ Scavenge();
+ }
break;
}
@@ -1370,6 +1362,10 @@ bool Heap::PerformGarbageCollection(
UpdateSurvivalStatistics(start_new_space_size);
ConfigureInitialOldGenerationSize();
+ if (!fast_promotion_mode_ || collector == MARK_COMPACTOR) {
+ ComputeFastPromotionMode(promotion_ratio_ + semi_space_copied_rate_);
+ }
+
isolate_->counters()->objs_since_last_young()->Set(0);
gc_post_processing_depth_++;
@@ -1607,6 +1603,44 @@ class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
Heap* heap_;
};
+void Heap::EvacuateYoungGeneration() {
+ TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_EVACUATE);
+ DCHECK(fast_promotion_mode_);
+ DCHECK(CanExpandOldGeneration(new_space()->Size()));
+
+ mark_compact_collector()->sweeper().EnsureNewSpaceCompleted();
+
+ SetGCState(SCAVENGE);
+ LOG(isolate_, ResourceEvent("scavenge", "begin"));
+
+ // Move pages from new->old generation.
+ PageRange range(new_space()->bottom(), new_space()->top());
+ for (auto it = range.begin(); it != range.end();) {
+ Page* p = (*++it)->prev_page();
+ p->Unlink();
+ Page::ConvertNewToOld(p);
+ if (incremental_marking()->IsMarking())
+ mark_compact_collector()->RecordLiveSlotsOnPage(p);
+ }
+
+ // Reset new space.
+ if (!new_space()->Rebalance()) {
+ FatalProcessOutOfMemory("NewSpace::Rebalance");
+ }
+ new_space()->ResetAllocationInfo();
+ new_space()->set_age_mark(new_space()->top());
+
+ // Fix up special trackers.
+ external_string_table_.PromoteAllNewSpaceStrings();
+  // GlobalHandles are updated in PostGarbageCollectionProcessing
+
+ IncrementYoungSurvivorsCounter(new_space()->Size());
+ IncrementPromotedObjectsSize(new_space()->Size());
+ IncrementSemiSpaceCopiedObjectSize(0);
+
+ LOG(isolate_, ResourceEvent("scavenge", "end"));
+ SetGCState(NOT_IN_GC);
+}
void Heap::Scavenge() {
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE);
@@ -1671,12 +1705,14 @@ void Heap::Scavenge() {
{
// Copy objects reachable from the old generation.
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_OLD_TO_NEW_POINTERS);
- RememberedSet<OLD_TO_NEW>::Iterate(this, [this](Address addr) {
- return Scavenger::CheckAndScavengeObject(this, addr);
- });
+ RememberedSet<OLD_TO_NEW>::Iterate(
+ this, SYNCHRONIZED, [this](Address addr) {
+ return Scavenger::CheckAndScavengeObject(this, addr);
+ });
RememberedSet<OLD_TO_NEW>::IterateTyped(
- this, [this](SlotType type, Address host_addr, Address addr) {
+ this, SYNCHRONIZED,
+ [this](SlotType type, Address host_addr, Address addr) {
return UpdateTypedSlotHelper::UpdateTypedSlot(
isolate(), type, addr, [this](Object** addr) {
// We expect that objects referenced by code are long living.
@@ -1748,6 +1784,19 @@ void Heap::Scavenge() {
SetGCState(NOT_IN_GC);
}
+void Heap::ComputeFastPromotionMode(double survival_rate) {
+ const size_t survived_in_new_space =
+ survived_last_scavenge_ * 100 / new_space_->Capacity();
+ fast_promotion_mode_ =
+ !FLAG_optimize_for_size && FLAG_fast_promotion_new_space &&
+ !ShouldReduceMemory() && new_space_->IsAtMaximumCapacity() &&
+ survived_in_new_space >= kMinPromotedPercentForFastPromotionMode;
+ if (FLAG_trace_gc_verbose) {
+ PrintIsolate(
+ isolate(), "Fast promotion mode: %s survival rate: %" PRIuS "%%\n",
+ fast_promotion_mode_ ? "true" : "false", survived_in_new_space);
+ }
+}
String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
Object** p) {
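
Note: the ComputeFastPromotionMode hunk above gates fast promotion on the share of new space that survived the last scavenge. As a rough, standalone illustration of that heuristic only — the flag values and the exact kMinPromotedPercentForFastPromotionMode threshold below are assumptions, the real definitions live elsewhere in V8:

#include <cstddef>
#include <cstdio>

// Assumed threshold; the real kMinPromotedPercentForFastPromotionMode is
// defined in V8's heap.h and may differ.
constexpr std::size_t kMinPromotedPercentForFastPromotionMode = 90;

// Mirrors the shape of Heap::ComputeFastPromotionMode: fast promotion only
// pays off when new space is maxed out and nearly everything survives, so
// copying survivors one by one during a scavenge is mostly wasted work.
bool ShouldUseFastPromotion(std::size_t survived_last_scavenge,
                            std::size_t new_space_capacity,
                            bool optimize_for_size,
                            bool fast_promotion_flag,
                            bool should_reduce_memory,
                            bool new_space_at_maximum_capacity) {
  const std::size_t survived_percent =
      survived_last_scavenge * 100 / new_space_capacity;
  return !optimize_for_size && fast_promotion_flag && !should_reduce_memory &&
         new_space_at_maximum_capacity &&
         survived_percent >= kMinPromotedPercentForFastPromotionMode;
}

int main() {
  // ~15.5 MB survived out of a 16 MB new space (~96%): the mode switches on.
  const bool fast = ShouldUseFastPromotion(
      15872u * 1024, 16384u * 1024, /*optimize_for_size=*/false,
      /*fast_promotion_flag=*/true, /*should_reduce_memory=*/false,
      /*new_space_at_maximum_capacity=*/true);
  std::printf("fast promotion mode: %s\n", fast ? "true" : "false");
  return 0;
}

When the mode is on and the old generation can still expand, the SCAVENGER case in PerformGarbageCollection (earlier hunk) takes EvacuateYoungGeneration instead of a regular Scavenge.
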
@@ -1755,12 +1804,21 @@ String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
if (!first_word.IsForwardingAddress()) {
// Unreachable external string can be finalized.
- heap->FinalizeExternalString(String::cast(*p));
+ String* string = String::cast(*p);
+ if (!string->IsExternalString()) {
+ // Original external string has been internalized.
+ DCHECK(string->IsThinString());
+ return NULL;
+ }
+ heap->FinalizeExternalString(string);
return NULL;
}
// String is still reachable.
- return String::cast(first_word.ToForwardingAddress());
+ String* string = String::cast(first_word.ToForwardingAddress());
+ if (string->IsThinString()) string = ThinString::cast(string)->actual();
+ // Internalization can replace external strings with non-external strings.
+ return string->IsExternalString() ? string : nullptr;
}
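
Note: the hunk above (and the matching ExternalStringTable cleanup near the end of this diff) has to cope with strings that were internalized into ThinStrings during the scavenge: the table entry is unwrapped and kept only if it still points at an external string. A simplified, self-contained model of that filtering step, using hypothetical stand-in types rather than V8's String hierarchy:

#include <cstdio>
#include <vector>

// Hypothetical stand-ins for V8's string representations.
struct Str {
  bool is_external;  // ExternalString?
  bool is_thin;      // ThinString wrapper created by internalization?
  Str* actual;       // Target of the ThinString when is_thin is true.
};

// Unwrap a ThinString and decide whether an external string table entry
// should be kept, mirroring the pattern used in the hunk above.
Str* KeepIfStillExternal(Str* s) {
  if (s->is_thin) {
    s = s->actual;                        // Follow to the internalized string.
    if (!s->is_external) return nullptr;  // No longer external: drop it.
  }
  return s->is_external ? s : nullptr;
}

int main() {
  Str external{true, false, nullptr};
  Str sequential{false, false, nullptr};
  Str thin_to_external{false, true, &external};
  Str thin_to_sequential{false, true, &sequential};

  std::vector<Str*> table = {&external, &thin_to_external, &thin_to_sequential};
  int kept = 0;
  for (Str* s : table) {
    if (KeepIfStillExternal(s) != nullptr) ++kept;
  }
  std::printf("kept %d of %zu entries\n", kept, table.size());
  return 0;
}
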
@@ -1964,8 +2022,6 @@ int Heap::GetMaximumFillToAlign(AllocationAlignment alignment) {
case kDoubleAligned:
case kDoubleUnaligned:
return kDoubleSize - kPointerSize;
- case kSimd128Unaligned:
- return kSimd128Size - kPointerSize;
default:
UNREACHABLE();
}
@@ -1979,10 +2035,6 @@ int Heap::GetFillToAlign(Address address, AllocationAlignment alignment) {
return kPointerSize;
if (alignment == kDoubleUnaligned && (offset & kDoubleAlignmentMask) == 0)
return kDoubleSize - kPointerSize; // No fill if double is always aligned.
- if (alignment == kSimd128Unaligned) {
- return (kSimd128Size - (static_cast<int>(offset) + kPointerSize)) &
- kSimd128AlignmentMask;
- }
return 0;
}
@@ -2276,11 +2328,6 @@ bool Heap::CreateInitialMaps() {
mutable_heap_number)
ALLOCATE_PRIMITIVE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol,
Context::SYMBOL_FUNCTION_INDEX)
-#define ALLOCATE_SIMD128_MAP(TYPE, Type, type, lane_count, lane_type) \
- ALLOCATE_PRIMITIVE_MAP(SIMD128_VALUE_TYPE, Type::kSize, type, \
- Context::TYPE##_FUNCTION_INDEX)
- SIMD128_TYPES(ALLOCATE_SIMD128_MAP)
-#undef ALLOCATE_SIMD128_MAP
ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
ALLOCATE_PRIMITIVE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean,
@@ -2339,6 +2386,9 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell)
ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
ALLOCATE_MAP(WEAK_CELL_TYPE, WeakCell::kSize, weak_cell)
+ ALLOCATE_MAP(CELL_TYPE, Cell::kSize, no_closures_cell)
+ ALLOCATE_MAP(CELL_TYPE, Cell::kSize, one_closure_cell)
+ ALLOCATE_MAP(CELL_TYPE, Cell::kSize, many_closures_cell)
ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)
@@ -2423,8 +2473,7 @@ bool Heap::CreateInitialMaps() {
return true;
}
-
-AllocationResult Heap::AllocateHeapNumber(double value, MutableMode mode,
+AllocationResult Heap::AllocateHeapNumber(MutableMode mode,
PretenureFlag pretenure) {
// Statically ensure that it is safe to allocate heap numbers in paged
// spaces.
@@ -2441,36 +2490,9 @@ AllocationResult Heap::AllocateHeapNumber(double value, MutableMode mode,
Map* map = mode == MUTABLE ? mutable_heap_number_map() : heap_number_map();
HeapObject::cast(result)->set_map_no_write_barrier(map);
- HeapNumber::cast(result)->set_value(value);
return result;
}
-#define SIMD_ALLOCATE_DEFINITION(TYPE, Type, type, lane_count, lane_type) \
- AllocationResult Heap::Allocate##Type(lane_type lanes[lane_count], \
- PretenureFlag pretenure) { \
- int size = Type::kSize; \
- STATIC_ASSERT(Type::kSize <= kMaxRegularHeapObjectSize); \
- \
- AllocationSpace space = SelectSpace(pretenure); \
- \
- HeapObject* result = nullptr; \
- { \
- AllocationResult allocation = \
- AllocateRaw(size, space, kSimd128Unaligned); \
- if (!allocation.To(&result)) return allocation; \
- } \
- \
- result->set_map_no_write_barrier(type##_map()); \
- Type* instance = Type::cast(result); \
- for (int i = 0; i < lane_count; i++) { \
- instance->set_lane(i, lanes[i]); \
- } \
- return result; \
- }
-SIMD128_TYPES(SIMD_ALLOCATE_DEFINITION)
-#undef SIMD_ALLOCATE_DEFINITION
-
-
AllocationResult Heap::AllocateCell(Object* value) {
int size = Cell::kSize;
STATIC_ASSERT(Cell::kSize <= kMaxRegularHeapObjectSize);
@@ -2608,8 +2630,8 @@ void Heap::CreateInitialObjects() {
set_nan_value(*factory->NewHeapNumber(
std::numeric_limits<double>::quiet_NaN(), IMMUTABLE, TENURED));
- set_hole_nan_value(*factory->NewHeapNumber(bit_cast<double>(kHoleNanInt64),
- IMMUTABLE, TENURED));
+ set_hole_nan_value(
+ *factory->NewHeapNumberFromBits(kHoleNanInt64, IMMUTABLE, TENURED));
set_infinity_value(*factory->NewHeapNumber(V8_INFINITY, IMMUTABLE, TENURED));
set_minus_infinity_value(
*factory->NewHeapNumber(-V8_INFINITY, IMMUTABLE, TENURED));
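
Note: the switch from NewHeapNumber(bit_cast<double>(kHoleNanInt64), ...) to NewHeapNumberFromBits(kHoleNanInt64, ...) above avoids round-tripping the hole NaN through a C++ double value, whose payload FP hardware can silently quiet or canonicalize on some platforms. A standalone sketch of the "from bits" idea; the kHoleNanInt64 value below is an assumption, the real constant is defined in V8:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Assumed bit pattern of the hole NaN; V8 defines the real kHoleNanInt64.
constexpr std::uint64_t kHoleNanInt64 = 0xFFF7FFFFFFF7FFFFull;

// Toy heap number: the payload is written as raw bits, never handled as an
// arithmetic double value, so the NaN payload cannot be altered in transit.
struct ToyHeapNumber {
  unsigned char storage[sizeof(double)];

  void set_value_from_bits(std::uint64_t bits) {
    std::memcpy(storage, &bits, sizeof storage);
  }
  std::uint64_t value_as_bits() const {
    std::uint64_t bits;
    std::memcpy(&bits, storage, sizeof bits);
    return bits;
  }
};

int main() {
  ToyHeapNumber hole_nan;
  hole_nan.set_value_from_bits(kHoleNanInt64);
  std::printf("0x%016llX\n",
              static_cast<unsigned long long>(hole_nan.value_as_bits()));
  return 0;
}
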
@@ -2764,61 +2786,6 @@ void Heap::CreateInitialObjects() {
set_microtask_queue(empty_fixed_array());
{
- StaticFeedbackVectorSpec spec;
- FeedbackVectorSlot slot = spec.AddLoadICSlot();
- DCHECK_EQ(slot, FeedbackVectorSlot(FeedbackVector::kDummyLoadICSlot));
-
- slot = spec.AddKeyedLoadICSlot();
- DCHECK_EQ(slot,
- FeedbackVectorSlot(FeedbackVector::kDummyKeyedLoadICSlot));
-
- slot = spec.AddStoreICSlot();
- DCHECK_EQ(slot, FeedbackVectorSlot(FeedbackVector::kDummyStoreICSlot));
-
- slot = spec.AddKeyedStoreICSlot();
- DCHECK_EQ(slot,
- FeedbackVectorSlot(FeedbackVector::kDummyKeyedStoreICSlot));
-
- Handle<FeedbackMetadata> dummy_metadata =
- FeedbackMetadata::New(isolate(), &spec);
- Handle<FeedbackVector> dummy_vector =
- FeedbackVector::New(isolate(), dummy_metadata);
-
- set_dummy_vector(*dummy_vector);
-
- // Now initialize dummy vector's entries.
- LoadICNexus(isolate()).ConfigureMegamorphic();
- StoreICNexus(isolate()).ConfigureMegamorphic();
- KeyedLoadICNexus(isolate()).ConfigureMegamorphicKeyed(PROPERTY);
- KeyedStoreICNexus(isolate()).ConfigureMegamorphicKeyed(PROPERTY);
- }
-
- {
- // Create a canonical empty FeedbackVector, which is shared by all
- // functions that don't need actual type feedback slots. Note however
- // that all these functions will share the same invocation count, but
- // that shouldn't matter since we only use the invocation count to
- // relativize the absolute call counts, but we can only have call counts
- // if we have actual feedback slots.
- Handle<FixedArray> empty_feedback_vector = factory->NewFixedArray(
- FeedbackVector::kReservedIndexCount, TENURED);
- empty_feedback_vector->set(FeedbackVector::kMetadataIndex,
- empty_fixed_array());
- empty_feedback_vector->set(FeedbackVector::kInvocationCountIndex,
- Smi::kZero);
- empty_feedback_vector->set_map(feedback_vector_map());
- set_empty_feedback_vector(*empty_feedback_vector);
-
- // We use a canonical empty LiteralsArray for all functions that neither
- // have literals nor need a FeedbackVector (besides the invocation
- // count special slot).
- Handle<FixedArray> empty_literals_array =
- factory->NewFixedArray(1, TENURED);
- empty_literals_array->set(0, *empty_feedback_vector);
- set_empty_literals_array(*empty_literals_array);
- }
-
- {
Handle<FixedArray> empty_sloppy_arguments_elements =
factory->NewFixedArray(2, TENURED);
empty_sloppy_arguments_elements->set_map(sloppy_arguments_elements_map());
@@ -2842,6 +2809,8 @@ void Heap::CreateInitialObjects() {
ArrayList::cast(*(factory->NewFixedArray(16, TENURED))));
weak_new_space_object_to_code_list()->SetLength(0);
+ set_code_coverage_list(undefined_value());
+
set_script_list(Smi::kZero);
Handle<SeededNumberDictionary> slow_element_dictionary =
@@ -2870,7 +2839,7 @@ void Heap::CreateInitialObjects() {
cell = factory->NewPropertyCell();
cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
- set_has_instance_protector(*cell);
+ set_array_iterator_protector(*cell);
Handle<Cell> is_concat_spreadable_cell = factory->NewCell(
handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
@@ -2888,10 +2857,6 @@ void Heap::CreateInitialObjects() {
handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
set_fast_array_iteration_protector(*fast_array_iteration_cell);
- Handle<Cell> array_iterator_cell = factory->NewCell(
- handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
- set_array_iterator_protector(*array_iterator_cell);
-
cell = factory->NewPropertyCell();
cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
set_array_buffer_neutering_protector(*cell);
@@ -2964,6 +2929,7 @@ bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
case kWeakObjectToCodeTableRootIndex:
case kWeakNewSpaceObjectToCodeListRootIndex:
case kRetainedMapsRootIndex:
+ case kCodeCoverageListRootIndex:
case kNoScriptSharedFunctionInfosRootIndex:
case kWeakStackTraceListRootIndex:
case kSerializedTemplatesRootIndex:
@@ -2984,7 +2950,6 @@ bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
}
}
-
bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
return !RootCanBeWrittenAfterInitialization(root_index) &&
!InNewSpace(root(root_index));
@@ -3191,7 +3156,7 @@ void Heap::AdjustLiveBytes(HeapObject* object, int by) {
lo_space()->AdjustLiveBytes(by);
} else if (!in_heap_iterator() &&
!mark_compact_collector()->sweeping_in_progress() &&
- Marking::IsBlack(ObjectMarking::MarkBitFrom(object))) {
+ ObjectMarking::IsBlack(object)) {
DCHECK(MemoryChunk::FromAddress(object->address())->SweepingDone());
MemoryChunk::IncrementLiveBytes(object, by);
}
@@ -3201,6 +3166,7 @@ void Heap::AdjustLiveBytes(HeapObject* object, int by) {
FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
int elements_to_trim) {
CHECK_NOT_NULL(object);
+ DCHECK(CanMoveObjectStart(object));
DCHECK(!object->IsFixedTypedArrayBase());
DCHECK(!object->IsByteArray());
const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
@@ -3251,7 +3217,6 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
// Initialize header of the trimmed array. Since left trimming is only
// performed on pages which are not concurrently swept creating a filler
// object does not require synchronization.
- DCHECK(CanMoveObjectStart(object));
Object** former_start = HeapObject::RawField(object, 0);
int new_start_index = elements_to_trim * (element_size / kPointerSize);
former_start[new_start_index] = map;
@@ -3322,7 +3287,7 @@ void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
// Clear the mark bits of the black area that belongs now to the filler.
// This is an optimization. The sweeper will release black fillers anyway.
if (incremental_marking()->black_allocation() &&
- Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(filler))) {
+ ObjectMarking::IsBlackOrGrey(filler)) {
Page* page = Page::FromAddress(new_end);
page->markbits()->ClearRange(
page->AddressToMarkbitIndex(new_end),
@@ -4131,21 +4096,8 @@ AllocationResult Heap::AllocateStruct(InstanceType type) {
}
-bool Heap::IsHeapIterable() {
- // TODO(hpayer): This function is not correct. Allocation folding in old
- // space breaks the iterability.
- return new_space_top_after_last_gc_ == new_space()->top();
-}
-
-
void Heap::MakeHeapIterable() {
- DCHECK(AllowHeapAllocation::IsAllowed());
- if (!IsHeapIterable()) {
- CollectAllGarbage(kMakeHeapIterableMask,
- GarbageCollectionReason::kMakeHeapIterable);
- }
mark_compact_collector()->EnsureSweepingCompleted();
- DCHECK(IsHeapIterable());
}
@@ -4313,28 +4265,21 @@ void Heap::RegisterReservationsForBlackAllocation(Reservation* reservations) {
// for marking. We just have to execute the special visiting side effect
// code that adds objects to global data structures, e.g. for array buffers.
- // Code space, map space, and large object space do not use black pages.
- // Hence we have to color all objects of the reservation first black to avoid
- // unnecessary marking deque load.
if (incremental_marking()->black_allocation()) {
+ // Iterate black objects in old space, code space, map space, and large
+ // object space for side effects.
for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) {
const Heap::Reservation& res = reservations[i];
for (auto& chunk : res) {
Address addr = chunk.start;
while (addr < chunk.end) {
HeapObject* obj = HeapObject::FromAddress(addr);
- Marking::MarkBlack(ObjectMarking::MarkBitFrom(obj));
- addr += obj->Size();
- }
- }
- }
- for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) {
- const Heap::Reservation& res = reservations[i];
- for (auto& chunk : res) {
- Address addr = chunk.start;
- while (addr < chunk.end) {
- HeapObject* obj = HeapObject::FromAddress(addr);
- incremental_marking()->IterateBlackObject(obj);
+ // There might be grey objects due to black to grey transitions in
+ // incremental marking. E.g. see VisitNativeContextIncremental.
+ DCHECK(ObjectMarking::IsBlackOrGrey(obj));
+ if (ObjectMarking::IsBlack(obj)) {
+ incremental_marking()->IterateBlackObject(obj);
+ }
addr += obj->Size();
}
}
@@ -4342,6 +4287,29 @@ void Heap::RegisterReservationsForBlackAllocation(Reservation* reservations) {
}
}
+void Heap::NotifyObjectLayoutChange(HeapObject* object,
+ const DisallowHeapAllocation&) {
+ if (FLAG_incremental_marking && incremental_marking()->IsMarking()) {
+ incremental_marking()->MarkGrey(this, object);
+ }
+#ifdef VERIFY_HEAP
+ DCHECK(pending_layout_change_object_ == nullptr);
+ pending_layout_change_object_ = object;
+#endif
+}
+
+#ifdef VERIFY_HEAP
+void Heap::VerifyObjectLayoutChange(HeapObject* object, Map* new_map) {
+ if (pending_layout_change_object_ == nullptr) {
+ DCHECK(!object->IsJSObject() ||
+ !object->map()->TransitionRequiresSynchronizationWithGC(new_map));
+ } else {
+ DCHECK_EQ(pending_layout_change_object_, object);
+ pending_layout_change_object_ = nullptr;
+ }
+}
+#endif
+
GCIdleTimeHeapState Heap::ComputeHeapState() {
GCIdleTimeHeapState heap_state;
heap_state.contexts_disposed = contexts_disposed_;
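
Note: NotifyObjectLayoutChange / VerifyObjectLayoutChange above introduce a small protocol: callers announce an in-place layout change while allocation is disallowed, and (under VERIFY_HEAP) the next map transition is checked against that announcement. A minimal standalone model of the bookkeeping, with placeholder types in place of V8's HeapObject and Map; the real version also greys the object for the incremental marker, which is omitted here:

#include <cassert>
#include <cstdio>

// Placeholder types standing in for V8's HeapObject and Map.
struct Map {};
struct HeapObject { Map* map; };

// Minimal model of the pending-layout-change bookkeeping added above.
class HeapModel {
 public:
  // Caller announces it is about to change 'object' in place.
  void NotifyObjectLayoutChange(HeapObject* object) {
    assert(pending_layout_change_object_ == nullptr);
    pending_layout_change_object_ = object;
  }

  // Called when 'object' actually gets its new map: either the change was
  // announced, or it must be one that needs no synchronization with the GC.
  void VerifyObjectLayoutChange(HeapObject* object, Map* new_map,
                                bool transition_requires_gc_sync) {
    (void)new_map;
    if (pending_layout_change_object_ == nullptr) {
      assert(!transition_requires_gc_sync);
    } else {
      assert(pending_layout_change_object_ == object);
      pending_layout_change_object_ = nullptr;
    }
  }

 private:
  HeapObject* pending_layout_change_object_ = nullptr;
};

int main() {
  HeapModel heap;
  Map old_map, new_map;
  HeapObject obj{&old_map};

  heap.NotifyObjectLayoutChange(&obj);                  // Announce the change.
  heap.VerifyObjectLayoutChange(&obj, &new_map, true);  // Checked and cleared.
  obj.map = &new_map;
  std::printf("layout change verified\n");
  return 0;
}
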
@@ -4907,8 +4875,7 @@ void Heap::IterateAndScavengePromotedObject(HeapObject* target, int size,
// it would be a violation of the invariant to record its slots.
bool record_slots = false;
if (incremental_marking()->IsCompacting()) {
- MarkBit mark_bit = ObjectMarking::MarkBitFrom(target);
- record_slots = Marking::IsBlack(mark_bit);
+ record_slots = ObjectMarking::IsBlack(target);
}
IterateAndScavengePromotedObjectsVisitor visitor(this, target, record_slots);
@@ -5279,7 +5246,6 @@ const double Heap::kMaxHeapGrowingFactorIdle = 1.5;
const double Heap::kConservativeHeapGrowingFactor = 1.3;
const double Heap::kTargetMutatorUtilization = 0.97;
-
// Given GC speed in bytes per ms, the allocation throughput in bytes per ms
// (mutator speed), this function returns the heap growing factor that will
// achieve the kTargetMutatorUtilisation if the GC speed and the mutator speed
@@ -5697,8 +5663,7 @@ void Heap::RegisterExternallyReferencedObject(Object** object) {
IncrementalMarking::MarkGrey(this, heap_object);
} else {
DCHECK(mark_compact_collector()->in_use());
- MarkBit mark_bit = ObjectMarking::MarkBitFrom(heap_object);
- mark_compact_collector()->MarkObject(heap_object, mark_bit);
+ mark_compact_collector()->MarkObject(heap_object);
}
}
@@ -5711,22 +5676,6 @@ void Heap::TearDown() {
UpdateMaximumCommitted();
- if (FLAG_print_max_heap_committed) {
- PrintF("\n");
- PrintF("maximum_committed_by_heap=%" PRIuS " ", MaximumCommittedMemory());
- PrintF("maximum_committed_by_new_space=%" PRIuS " ",
- new_space_->MaximumCommittedMemory());
- PrintF("maximum_committed_by_old_space=%" PRIuS " ",
- old_space_->MaximumCommittedMemory());
- PrintF("maximum_committed_by_code_space=%" PRIuS " ",
- code_space_->MaximumCommittedMemory());
- PrintF("maximum_committed_by_map_space=%" PRIuS " ",
- map_space_->MaximumCommittedMemory());
- PrintF("maximum_committed_by_lo_space=%" PRIuS " ",
- lo_space_->MaximumCommittedMemory());
- PrintF("\n\n");
- }
-
if (FLAG_verify_predictable) {
PrintAlloctionsHash();
}
@@ -6146,8 +6095,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
bool SkipObject(HeapObject* object) {
if (object->IsFiller()) return true;
- MarkBit mark_bit = ObjectMarking::MarkBitFrom(object);
- return Marking::IsWhite(mark_bit);
+ return ObjectMarking::IsWhite(object);
}
private:
@@ -6159,6 +6107,8 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
for (Object** p = start; p < end; p++) {
if (!(*p)->IsHeapObject()) continue;
HeapObject* obj = HeapObject::cast(*p);
+ // Use Marking instead of ObjectMarking to avoid adjusting live bytes
+ // counter.
MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
if (Marking::IsWhite(mark_bit)) {
Marking::WhiteToBlack(mark_bit);
@@ -6188,16 +6138,15 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
DisallowHeapAllocation no_allocation_;
};
-
HeapIterator::HeapIterator(Heap* heap,
HeapIterator::HeapObjectsFiltering filtering)
- : make_heap_iterable_helper_(heap),
- no_heap_allocation_(),
+ : no_heap_allocation_(),
heap_(heap),
filtering_(filtering),
filter_(nullptr),
space_iterator_(nullptr),
object_iterator_(nullptr) {
+ heap_->MakeHeapIterable();
heap_->heap_iterator_start();
// Start the iteration.
space_iterator_ = new SpaceIterator(heap_);
@@ -6257,194 +6206,6 @@ HeapObject* HeapIterator::NextObject() {
}
-#ifdef DEBUG
-
-Object* const PathTracer::kAnyGlobalObject = NULL;
-
-class PathTracer::MarkVisitor : public ObjectVisitor {
- public:
- explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
-
- void VisitPointers(Object** start, Object** end) override {
- // Scan all HeapObject pointers in [start, end)
- for (Object** p = start; !tracer_->found() && (p < end); p++) {
- if ((*p)->IsHeapObject()) tracer_->MarkRecursively(p, this);
- }
- }
-
- private:
- PathTracer* tracer_;
-};
-
-
-class PathTracer::UnmarkVisitor : public ObjectVisitor {
- public:
- explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
-
- void VisitPointers(Object** start, Object** end) override {
- // Scan all HeapObject pointers in [start, end)
- for (Object** p = start; p < end; p++) {
- if ((*p)->IsHeapObject()) tracer_->UnmarkRecursively(p, this);
- }
- }
-
- private:
- PathTracer* tracer_;
-};
-
-
-void PathTracer::VisitPointers(Object** start, Object** end) {
- bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
- // Visit all HeapObject pointers in [start, end)
- for (Object** p = start; !done && (p < end); p++) {
- if ((*p)->IsHeapObject()) {
- TracePathFrom(p);
- done = ((what_to_find_ == FIND_FIRST) && found_target_);
- }
- }
-}
-
-
-void PathTracer::Reset() {
- found_target_ = false;
- object_stack_.Clear();
-}
-
-
-void PathTracer::TracePathFrom(Object** root) {
- DCHECK((search_target_ == kAnyGlobalObject) ||
- search_target_->IsHeapObject());
- found_target_in_trace_ = false;
- Reset();
-
- MarkVisitor mark_visitor(this);
- MarkRecursively(root, &mark_visitor);
-
- UnmarkVisitor unmark_visitor(this);
- UnmarkRecursively(root, &unmark_visitor);
-
- ProcessResults();
-}
-
-
-static bool SafeIsNativeContext(HeapObject* obj) {
- return obj->map() == obj->GetHeap()->root(Heap::kNativeContextMapRootIndex);
-}
-
-
-void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
- if (!(*p)->IsHeapObject()) return;
-
- HeapObject* obj = HeapObject::cast(*p);
-
- MapWord map_word = obj->map_word();
- if (!map_word.ToMap()->IsHeapObject()) return; // visited before
-
- if (found_target_in_trace_) return; // stop if target found
- object_stack_.Add(obj);
- if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
- (obj == search_target_)) {
- found_target_in_trace_ = true;
- found_target_ = true;
- return;
- }
-
- bool is_native_context = SafeIsNativeContext(obj);
-
- // not visited yet
- Map* map = Map::cast(map_word.ToMap());
-
- MapWord marked_map_word =
- MapWord::FromRawValue(obj->map_word().ToRawValue() + kMarkTag);
- obj->set_map_word(marked_map_word);
-
- // Scan the object body.
- if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
- // This is specialized to scan Context's properly.
- Object** start =
- reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize);
- Object** end =
- reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize +
- Context::FIRST_WEAK_SLOT * kPointerSize);
- mark_visitor->VisitPointers(start, end);
- } else {
- obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), mark_visitor);
- }
-
- // Scan the map after the body because the body is a lot more interesting
- // when doing leak detection.
- MarkRecursively(reinterpret_cast<Object**>(&map), mark_visitor);
-
- if (!found_target_in_trace_) { // don't pop if found the target
- object_stack_.RemoveLast();
- }
-}
-
-
-void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
- if (!(*p)->IsHeapObject()) return;
-
- HeapObject* obj = HeapObject::cast(*p);
-
- MapWord map_word = obj->map_word();
- if (map_word.ToMap()->IsHeapObject()) return; // unmarked already
-
- MapWord unmarked_map_word =
- MapWord::FromRawValue(map_word.ToRawValue() - kMarkTag);
- obj->set_map_word(unmarked_map_word);
-
- Map* map = Map::cast(unmarked_map_word.ToMap());
-
- UnmarkRecursively(reinterpret_cast<Object**>(&map), unmark_visitor);
-
- obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), unmark_visitor);
-}
-
-
-void PathTracer::ProcessResults() {
- if (found_target_) {
- OFStream os(stdout);
- os << "=====================================\n"
- << "==== Path to object ====\n"
- << "=====================================\n\n";
-
- DCHECK(!object_stack_.is_empty());
- for (int i = 0; i < object_stack_.length(); i++) {
- if (i > 0) os << "\n |\n |\n V\n\n";
- object_stack_[i]->Print(os);
- }
- os << "=====================================\n";
- }
-}
-
-
-// Triggers a depth-first traversal of reachable objects from one
-// given root object and finds a path to a specific heap object and
-// prints it.
-void Heap::TracePathToObjectFrom(Object* target, Object* root) {
- PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
- tracer.VisitPointer(&root);
-}
-
-
-// Triggers a depth-first traversal of reachable objects from roots
-// and finds a path to a specific heap object and prints it.
-void Heap::TracePathToObject(Object* target) {
- PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
- IterateRoots(&tracer, VISIT_ONLY_STRONG);
-}
-
-
-// Triggers a depth-first traversal of reachable objects from roots
-// and finds a path to any global object and prints it. Useful for
-// determining the source for leaks of global objects.
-void Heap::TracePathToGlobal() {
- PathTracer tracer(PathTracer::kAnyGlobalObject, PathTracer::FIND_ALL,
- VISIT_ALL);
- IterateRoots(&tracer, VISIT_ONLY_STRONG);
-}
-#endif
-
void Heap::UpdateTotalGCTime(double duration) {
if (FLAG_trace_gc_verbose) {
total_gc_time_ms_ += duration;
@@ -6455,14 +6216,19 @@ void Heap::ExternalStringTable::CleanUpNewSpaceStrings() {
int last = 0;
Isolate* isolate = heap_->isolate();
for (int i = 0; i < new_space_strings_.length(); ++i) {
- if (new_space_strings_[i]->IsTheHole(isolate)) {
+ Object* o = new_space_strings_[i];
+ if (o->IsTheHole(isolate)) {
continue;
}
- DCHECK(new_space_strings_[i]->IsExternalString());
- if (heap_->InNewSpace(new_space_strings_[i])) {
- new_space_strings_[last++] = new_space_strings_[i];
+ if (o->IsThinString()) {
+ o = ThinString::cast(o)->actual();
+ if (!o->IsExternalString()) continue;
+ }
+ DCHECK(o->IsExternalString());
+ if (heap_->InNewSpace(o)) {
+ new_space_strings_[last++] = o;
} else {
- old_space_strings_.Add(new_space_strings_[i]);
+ old_space_strings_.Add(o);
}
}
new_space_strings_.Rewind(last);
@@ -6474,12 +6240,17 @@ void Heap::ExternalStringTable::CleanUpAll() {
int last = 0;
Isolate* isolate = heap_->isolate();
for (int i = 0; i < old_space_strings_.length(); ++i) {
- if (old_space_strings_[i]->IsTheHole(isolate)) {
+ Object* o = old_space_strings_[i];
+ if (o->IsTheHole(isolate)) {
continue;
}
- DCHECK(old_space_strings_[i]->IsExternalString());
- DCHECK(!heap_->InNewSpace(old_space_strings_[i]));
- old_space_strings_[last++] = old_space_strings_[i];
+ if (o->IsThinString()) {
+ o = ThinString::cast(o)->actual();
+ if (!o->IsExternalString()) continue;
+ }
+ DCHECK(o->IsExternalString());
+ DCHECK(!heap_->InNewSpace(o));
+ old_space_strings_[last++] = o;
}
old_space_strings_.Rewind(last);
old_space_strings_.Trim();
@@ -6492,11 +6263,21 @@ void Heap::ExternalStringTable::CleanUpAll() {
void Heap::ExternalStringTable::TearDown() {
for (int i = 0; i < new_space_strings_.length(); ++i) {
- heap_->FinalizeExternalString(ExternalString::cast(new_space_strings_[i]));
+ Object* o = new_space_strings_[i];
+ if (o->IsThinString()) {
+ o = ThinString::cast(o)->actual();
+ if (!o->IsExternalString()) continue;
+ }
+ heap_->FinalizeExternalString(ExternalString::cast(o));
}
new_space_strings_.Free();
for (int i = 0; i < old_space_strings_.length(); ++i) {
- heap_->FinalizeExternalString(ExternalString::cast(old_space_strings_[i]));
+ Object* o = old_space_strings_[i];
+ if (o->IsThinString()) {
+ o = ThinString::cast(o)->actual();
+ if (!o->IsExternalString()) continue;
+ }
+ heap_->FinalizeExternalString(ExternalString::cast(o));
}
old_space_strings_.Free();
}