Diffstat (limited to 'deps/v8/src/heap.h')
 deps/v8/src/heap.h | 904 ++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 599 insertions(+), 305 deletions(-)
diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h
index b1948a933c..83e9b61e84 100644
--- a/deps/v8/src/heap.h
+++ b/deps/v8/src/heap.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -32,46 +32,57 @@
 #include "allocation.h"
 #include "globals.h"
+#include "incremental-marking.h"
 #include "list.h"
 #include "mark-compact.h"
+#include "objects-visiting.h"
 #include "spaces.h"
 #include "splay-tree-inl.h"
+#include "store-buffer.h"
 #include "v8-counters.h"
+#include "v8globals.h"
 
 namespace v8 {
 namespace internal {
 
-// TODO(isolates): remove HEAP here
-#define HEAP (_inline_get_heap_())
-class Heap;
-inline Heap* _inline_get_heap_();
-
-
 // Defines all the roots in Heap.
-#define STRONG_ROOT_LIST(V) \
-  /* Put the byte array map early.  We need it to be in place by the time */ \
-  /* the deserializer hits the next page, since it wants to put a byte */ \
-  /* array in the unused space at the end of the page. */ \
+#define STRONG_ROOT_LIST(V) \
   V(Map, byte_array_map, ByteArrayMap) \
+  V(Map, free_space_map, FreeSpaceMap) \
   V(Map, one_pointer_filler_map, OnePointerFillerMap) \
   V(Map, two_pointer_filler_map, TwoPointerFillerMap) \
   /* Cluster the most popular ones in a few cache lines here at the top. */ \
-  V(Object, undefined_value, UndefinedValue) \
-  V(Object, the_hole_value, TheHoleValue) \
-  V(Object, null_value, NullValue) \
-  V(Object, true_value, TrueValue) \
-  V(Object, false_value, FalseValue) \
-  V(Object, arguments_marker, ArgumentsMarker) \
+  V(Smi, store_buffer_top, StoreBufferTop) \
+  V(Oddball, undefined_value, UndefinedValue) \
+  V(Oddball, the_hole_value, TheHoleValue) \
+  V(Oddball, null_value, NullValue) \
+  V(Oddball, true_value, TrueValue) \
+  V(Oddball, false_value, FalseValue) \
+  V(Map, global_property_cell_map, GlobalPropertyCellMap) \
+  V(Map, shared_function_info_map, SharedFunctionInfoMap) \
+  V(Map, meta_map, MetaMap) \
+  V(Map, ascii_symbol_map, AsciiSymbolMap) \
+  V(Map, ascii_string_map, AsciiStringMap) \
   V(Map, heap_number_map, HeapNumberMap) \
   V(Map, global_context_map, GlobalContextMap) \
   V(Map, fixed_array_map, FixedArrayMap) \
-  V(Map, serialized_scope_info_map, SerializedScopeInfoMap) \
+  V(Map, code_map, CodeMap) \
+  V(Map, scope_info_map, ScopeInfoMap) \
   V(Map, fixed_cow_array_map, FixedCOWArrayMap) \
   V(Map, fixed_double_array_map, FixedDoubleArrayMap) \
   V(Object, no_interceptor_result_sentinel, NoInterceptorResultSentinel) \
-  V(Map, meta_map, MetaMap) \
   V(Map, hash_table_map, HashTableMap) \
+  V(FixedArray, empty_fixed_array, EmptyFixedArray) \
+  V(ByteArray, empty_byte_array, EmptyByteArray) \
+  V(FixedDoubleArray, empty_fixed_double_array, EmptyFixedDoubleArray) \
+  V(String, empty_string, EmptyString) \
+  V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
   V(Smi, stack_limit, StackLimit) \
+  V(Oddball, frame_alignment_marker, FrameAlignmentMarker) \
+  V(Oddball, arguments_marker, ArgumentsMarker) \
+  /* The first 32 roots above this line should be boring from a GC point of */ \
+  /* view.  This means they are never in new space and never on a page that */ \
+  /* is being compacted. */ \
   V(FixedArray, number_string_cache, NumberStringCache) \
   V(Object, instanceof_cache_function, InstanceofCacheFunction) \
   V(Object, instanceof_cache_map, InstanceofCacheMap) \
@@ -80,19 +91,12 @@ inline Heap* _inline_get_heap_();
   V(FixedArray, string_split_cache, StringSplitCache) \
   V(Object, termination_exception, TerminationException) \
   V(Smi, hash_seed, HashSeed) \
-  V(FixedArray, empty_fixed_array, EmptyFixedArray) \
-  V(ByteArray, empty_byte_array, EmptyByteArray) \
-  V(FixedDoubleArray, empty_fixed_double_array, EmptyFixedDoubleArray) \
-  V(String, empty_string, EmptyString) \
-  V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
   V(Map, string_map, StringMap) \
-  V(Map, ascii_string_map, AsciiStringMap) \
   V(Map, symbol_map, SymbolMap) \
   V(Map, cons_string_map, ConsStringMap) \
   V(Map, cons_ascii_string_map, ConsAsciiStringMap) \
   V(Map, sliced_string_map, SlicedStringMap) \
   V(Map, sliced_ascii_string_map, SlicedAsciiStringMap) \
-  V(Map, ascii_symbol_map, AsciiSymbolMap) \
   V(Map, cons_symbol_map, ConsSymbolMap) \
   V(Map, cons_ascii_symbol_map, ConsAsciiSymbolMap) \
   V(Map, external_symbol_map, ExternalSymbolMap) \
@@ -101,6 +105,16 @@ inline Heap* _inline_get_heap_();
   V(Map, external_string_map, ExternalStringMap) \
   V(Map, external_string_with_ascii_data_map, ExternalStringWithAsciiDataMap) \
   V(Map, external_ascii_string_map, ExternalAsciiStringMap) \
+  V(Map, short_external_symbol_map, ShortExternalSymbolMap) \
+  V(Map, \
+    short_external_symbol_with_ascii_data_map, \
+    ShortExternalSymbolWithAsciiDataMap) \
+  V(Map, short_external_ascii_symbol_map, ShortExternalAsciiSymbolMap) \
+  V(Map, short_external_string_map, ShortExternalStringMap) \
+  V(Map, \
+    short_external_string_with_ascii_data_map, \
+    ShortExternalStringWithAsciiDataMap) \
+  V(Map, short_external_ascii_string_map, ShortExternalAsciiStringMap) \
   V(Map, undetectable_string_map, UndetectableStringMap) \
   V(Map, undetectable_ascii_string_map, UndetectableAsciiStringMap) \
   V(Map, external_pixel_array_map, ExternalPixelArrayMap) \
@@ -117,14 +131,12 @@ inline Heap* _inline_get_heap_();
   V(Map, catch_context_map, CatchContextMap) \
   V(Map, with_context_map, WithContextMap) \
   V(Map, block_context_map, BlockContextMap) \
-  V(Map, code_map, CodeMap) \
   V(Map, oddball_map, OddballMap) \
-  V(Map, global_property_cell_map, GlobalPropertyCellMap) \
-  V(Map, shared_function_info_map, SharedFunctionInfoMap) \
   V(Map, message_object_map, JSMessageObjectMap) \
   V(Map, foreign_map, ForeignMap) \
-  V(Object, nan_value, NanValue) \
-  V(Object, minus_zero_value, MinusZeroValue) \
+  V(HeapNumber, nan_value, NanValue) \
+  V(HeapNumber, infinity_value, InfinityValue) \
+  V(HeapNumber, minus_zero_value, MinusZeroValue) \
   V(Map, neander_map, NeanderMap) \
   V(JSObject, message_listeners, MessageListeners) \
   V(Foreign, prototype_accessors, PrototypeAccessors) \
@@ -138,6 +150,7 @@ inline Heap* _inline_get_heap_();
   V(Script, empty_script, EmptyScript) \
   V(Smi, real_stack_limit, RealStackLimit) \
   V(StringDictionary, intrinsic_function_names, IntrinsicFunctionNames) \
+  V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset)
 
 #define ROOT_LIST(V) \
   STRONG_ROOT_LIST(V) \
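
Each V(type, name, CamelName) entry in these lists is consumed by X-macro
definitions elsewhere in this header, such as the ROOT_ACCESSOR getter shown
further down. As a sketch, the new free_space_map entry expands to roughly:

    // What ROOT_ACCESSOR generates for V(Map, free_space_map, FreeSpaceMap)
    // (see the #define ROOT_ACCESSOR later in this file):
    Map* free_space_map() {
      return Map::cast(roots_[kFreeSpaceMapRootIndex]);
    }
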
"-Infinity") \ + V(hidden_stack_trace_symbol, "v8::hidden_stack_trace") // Forward declarations. class GCTracer; @@ -239,10 +256,26 @@ class WeakObjectRetainer; typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap, Object** pointer); -typedef bool (*DirtyRegionCallback)(Heap* heap, - Address start, - Address end, - ObjectSlotCallback copy_object_func); +class StoreBufferRebuilder { + public: + explicit StoreBufferRebuilder(StoreBuffer* store_buffer) + : store_buffer_(store_buffer) { + } + + void Callback(MemoryChunk* page, StoreBufferEvent event); + + private: + StoreBuffer* store_buffer_; + + // We record in this variable how full the store buffer was when we started + // iterating over the current page, finding pointers to new space. If the + // store buffer overflows again we can exempt the page from the store buffer + // by rewinding to this point instead of having to search the store buffer. + Object*** start_of_current_page_; + // The current page we are scanning in the store buffer iterator. + MemoryChunk* current_page_; +}; + // The all static Heap captures the interface to the global object heap. @@ -257,32 +290,103 @@ class HeapDebugUtils; // by it's size to avoid dereferencing a map pointer for scanning. class PromotionQueue { public: - PromotionQueue() : front_(NULL), rear_(NULL) { } + explicit PromotionQueue(Heap* heap) + : front_(NULL), + rear_(NULL), + limit_(NULL), + emergency_stack_(0), + heap_(heap) { } + + void Initialize(); + + void Destroy() { + ASSERT(is_empty()); + delete emergency_stack_; + emergency_stack_ = NULL; + } + + inline void ActivateGuardIfOnTheSamePage(); + + Page* GetHeadPage() { + return Page::FromAllocationTop(reinterpret_cast<Address>(rear_)); + } + + void SetNewLimit(Address limit) { + if (!guard_) { + return; + } - void Initialize(Address start_address) { - front_ = rear_ = reinterpret_cast<intptr_t*>(start_address); + ASSERT(GetHeadPage() == Page::FromAllocationTop(limit)); + limit_ = reinterpret_cast<intptr_t*>(limit); + + if (limit_ <= rear_) { + return; + } + + RelocateQueueHead(); } - bool is_empty() { return front_ <= rear_; } + bool is_empty() { + return (front_ == rear_) && + (emergency_stack_ == NULL || emergency_stack_->length() == 0); + } inline void insert(HeapObject* target, int size); void remove(HeapObject** target, int* size) { + ASSERT(!is_empty()); + if (front_ == rear_) { + Entry e = emergency_stack_->RemoveLast(); + *target = e.obj_; + *size = e.size_; + return; + } + + if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(front_))) { + NewSpacePage* front_page = + NewSpacePage::FromAddress(reinterpret_cast<Address>(front_)); + ASSERT(!front_page->prev_page()->is_anchor()); + front_ = + reinterpret_cast<intptr_t*>(front_page->prev_page()->body_limit()); + } *target = reinterpret_cast<HeapObject*>(*(--front_)); *size = static_cast<int>(*(--front_)); // Assert no underflow. - ASSERT(front_ >= rear_); + SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_), + reinterpret_cast<Address>(front_)); } private: - // The front of the queue is higher in memory than the rear. + // The front of the queue is higher in the memory page chain than the rear. 
@@ -257,32 +290,103 @@ class HeapDebugUtils;
 // by it's size to avoid dereferencing a map pointer for scanning.
 class PromotionQueue {
  public:
-  PromotionQueue() : front_(NULL), rear_(NULL) { }
+  explicit PromotionQueue(Heap* heap)
+      : front_(NULL),
+        rear_(NULL),
+        limit_(NULL),
+        emergency_stack_(0),
+        heap_(heap) { }
+
+  void Initialize();
+
+  void Destroy() {
+    ASSERT(is_empty());
+    delete emergency_stack_;
+    emergency_stack_ = NULL;
+  }
+
+  inline void ActivateGuardIfOnTheSamePage();
+
+  Page* GetHeadPage() {
+    return Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
+  }
+
+  void SetNewLimit(Address limit) {
+    if (!guard_) {
+      return;
+    }
 
-  void Initialize(Address start_address) {
-    front_ = rear_ = reinterpret_cast<intptr_t*>(start_address);
+    ASSERT(GetHeadPage() == Page::FromAllocationTop(limit));
+    limit_ = reinterpret_cast<intptr_t*>(limit);
+
+    if (limit_ <= rear_) {
+      return;
+    }
+
+    RelocateQueueHead();
   }
 
-  bool is_empty() { return front_ <= rear_; }
+  bool is_empty() {
+    return (front_ == rear_) &&
+        (emergency_stack_ == NULL || emergency_stack_->length() == 0);
+  }
 
   inline void insert(HeapObject* target, int size);
 
   void remove(HeapObject** target, int* size) {
+    ASSERT(!is_empty());
+    if (front_ == rear_) {
+      Entry e = emergency_stack_->RemoveLast();
+      *target = e.obj_;
+      *size = e.size_;
+      return;
+    }
+
+    if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(front_))) {
+      NewSpacePage* front_page =
+          NewSpacePage::FromAddress(reinterpret_cast<Address>(front_));
+      ASSERT(!front_page->prev_page()->is_anchor());
+      front_ =
+          reinterpret_cast<intptr_t*>(front_page->prev_page()->body_limit());
+    }
     *target = reinterpret_cast<HeapObject*>(*(--front_));
     *size = static_cast<int>(*(--front_));
     // Assert no underflow.
-    ASSERT(front_ >= rear_);
+    SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_),
+                                reinterpret_cast<Address>(front_));
   }
 
  private:
-  // The front of the queue is higher in memory than the rear.
+  // The front of the queue is higher in the memory page chain than the rear.
   intptr_t* front_;
   intptr_t* rear_;
+  intptr_t* limit_;
+
+  bool guard_;
+
+  static const int kEntrySizeInWords = 2;
+
+  struct Entry {
+    Entry(HeapObject* obj, int size) : obj_(obj), size_(size) { }
+
+    HeapObject* obj_;
+    int size_;
+  };
+  List<Entry>* emergency_stack_;
+
+  Heap* heap_;
+
+  void RelocateQueueHead();
 
   DISALLOW_COPY_AND_ASSIGN(PromotionQueue);
 };
 
 
+typedef void (*ScavengingCallback)(Map* map,
+                                   HeapObject** slot,
+                                   HeapObject* object);
+
+
 // External strings table is a place where all external strings are
 // registered.  We need to keep track of such strings to properly
 // finalize them.
@@ -323,19 +427,24 @@ class ExternalStringTable {
 };
 
 
+enum ArrayStorageAllocationMode {
+  DONT_INITIALIZE_ARRAY_ELEMENTS,
+  INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
+};
+
 class Heap {
  public:
   // Configure heap size before setup. Return false if the heap has been
-  // setup already.
+  // set up already.
   bool ConfigureHeap(int max_semispace_size,
-                     int max_old_gen_size,
-                     int max_executable_size);
+                     intptr_t max_old_gen_size,
+                     intptr_t max_executable_size);
   bool ConfigureHeapDefault();
 
   // Initializes the global object heap. If create_heap_objects is true,
   // also creates the basic non-mutable objects.
   // Returns whether it succeeded.
-  bool Setup(bool create_heap_objects);
+  bool SetUp(bool create_heap_objects);
 
   // Destroys all memory allocated by the heap.
   void TearDown();
@@ -345,8 +454,8 @@ class Heap {
   // jslimit_/real_jslimit_ variable in the StackGuard.
   void SetStackLimits();
 
-  // Returns whether Setup has been called.
-  bool HasBeenSetup();
+  // Returns whether SetUp has been called.
+  bool HasBeenSetUp();
 
   // Returns the maximum amount of memory reserved for the heap.  For
   // the young generation, we reserve 4 times the amount needed for a
@@ -424,6 +533,30 @@ class Heap {
   MUST_USE_RESULT MaybeObject* AllocateJSObject(
       JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED);
 
+  // Allocate a JSArray with no elements
+  MUST_USE_RESULT MaybeObject* AllocateEmptyJSArray(
+      ElementsKind elements_kind,
+      PretenureFlag pretenure = NOT_TENURED) {
+    return AllocateJSArrayAndStorage(elements_kind, 0, 0,
+                                     DONT_INITIALIZE_ARRAY_ELEMENTS,
+                                     pretenure);
+  }
+
+  // Allocate a JSArray with a specified length but elements that are left
+  // uninitialized.
+  MUST_USE_RESULT MaybeObject* AllocateJSArrayAndStorage(
+      ElementsKind elements_kind,
+      int length,
+      int capacity,
+      ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS,
+      PretenureFlag pretenure = NOT_TENURED);
+
+  // Allocate a JSArray with the given elements.
+  MUST_USE_RESULT MaybeObject* AllocateJSArrayWithElements(
+      FixedArrayBase* array_base,
+      ElementsKind elements_kind,
+      PretenureFlag pretenure = NOT_TENURED);
+
   // Allocates and initializes a new global object based on a constructor.
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
   // failed.
@@ -457,6 +590,7 @@ class Heap {
   // size, but keeping the original prototype.  The receiver must have at least
   // the size of the new object.  The object is reinitialized and behaves as an
   // object that has been freshly allocated.
+  // Returns failure if an error occurred, otherwise object.
   MUST_USE_RESULT MaybeObject* ReinitializeJSReceiver(JSReceiver* object,
                                                       InstanceType type,
                                                       int size);
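
All of these allocators return MaybeObject* rather than throwing; failure is
encoded as Failure::RetryAfterGC. The call-site pattern used throughout V8 of
this vintage looks roughly like this (sketch; `heap` is a Heap*):

    Object* array;
    { MaybeObject* maybe_array =
          heap->AllocateJSArrayAndStorage(FAST_ELEMENTS, 0, 4);
      // On failure, propagate so the caller can trigger a GC and retry.
      if (!maybe_array->ToObject(&array)) return maybe_array;
    }
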
@@ -485,8 +619,10 @@ class Heap {
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
   // failed.
   // Please note this function does not perform a garbage collection.
-  MUST_USE_RESULT MaybeObject* AllocateMap(InstanceType instance_type,
-                                           int instance_size);
+  MUST_USE_RESULT MaybeObject* AllocateMap(
+      InstanceType instance_type,
+      int instance_size,
+      ElementsKind elements_kind = FAST_ELEMENTS);
 
   // Allocates a partial map for bootstrapping.
   MUST_USE_RESULT MaybeObject* AllocatePartialMap(InstanceType instance_type,
@@ -499,11 +635,14 @@ class Heap {
   MUST_USE_RESULT MaybeObject* AllocateCodeCache();
 
   // Allocates a serialized scope info.
-  MUST_USE_RESULT MaybeObject* AllocateSerializedScopeInfo(int length);
+  MUST_USE_RESULT MaybeObject* AllocateScopeInfo(int length);
 
   // Allocates an empty PolymorphicCodeCache.
   MUST_USE_RESULT MaybeObject* AllocatePolymorphicCodeCache();
 
+  // Allocates a pre-tenured empty AccessorPair.
+  MUST_USE_RESULT MaybeObject* AllocateAccessorPair();
+
   // Clear the Instanceof cache (used when a prototype changes).
   inline void ClearInstanceofCache();
 
@@ -576,7 +715,7 @@ class Heap {
       PretenureFlag pretenure = NOT_TENURED);
 
   // Computes a single character string where the character has code.
-  // A cache is used for ascii codes.
+  // A cache is used for ASCII codes.
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
   // failed. Please note this does not perform a garbage collection.
   MUST_USE_RESULT MaybeObject* LookupSingleCharacterStringFromCode(
@@ -664,6 +803,13 @@ class Heap {
       int length,
       PretenureFlag pretenure = NOT_TENURED);
 
+  // Allocates a fixed double array with hole values. Returns
+  // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
+  // Please note this does not perform a garbage collection.
+  MUST_USE_RESULT MaybeObject* AllocateFixedDoubleArrayWithHoles(
+      int length,
+      PretenureFlag pretenure = NOT_TENURED);
+
   // AllocateHashTable is identical to AllocateFixedArray except
   // that the resulting object has hash_table_map as map.
   MUST_USE_RESULT MaybeObject* AllocateHashTable(
@@ -689,7 +835,7 @@ class Heap {
   // Allocate a block context.
   MUST_USE_RESULT MaybeObject* AllocateBlockContext(JSFunction* function,
                                                     Context* previous,
-                                                    SerializedScopeInfo* info);
+                                                    ScopeInfo* info);
 
   // Allocates a new utility object in the old generation.
   MUST_USE_RESULT MaybeObject* AllocateStruct(InstanceType type);
@@ -738,13 +884,15 @@ class Heap {
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
   // failed.
   // Please note this does not perform a garbage collection.
-  MUST_USE_RESULT inline MaybeObject* NumberFromInt32(int32_t value);
+  MUST_USE_RESULT inline MaybeObject* NumberFromInt32(
+      int32_t value, PretenureFlag pretenure = NOT_TENURED);
 
   // Converts an int into either a Smi or a HeapNumber object.
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
   // failed.
   // Please note this does not perform a garbage collection.
-  MUST_USE_RESULT inline MaybeObject* NumberFromUint32(uint32_t value);
+  MUST_USE_RESULT inline MaybeObject* NumberFromUint32(
+      uint32_t value, PretenureFlag pretenure = NOT_TENURED);
 
   // Allocates a new foreign object.
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
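
NumberFromInt32 and NumberFromUint32 only allocate when the value does not fit
in a Smi, which is why a PretenureFlag now makes sense on them. A simplified
sketch of the uint32 case (the real bodies live in heap-inl.h):

    MaybeObject* NumberFromUint32(uint32_t value, PretenureFlag pretenure) {
      if (static_cast<int32_t>(value) >= 0 &&
          Smi::IsValid(static_cast<int32_t>(value))) {
        return Smi::FromInt(static_cast<int32_t>(value));  // no allocation
      }
      // Only this path can fail, and only it is affected by pretenuring.
      return AllocateHeapNumber(FastUI2D(value), pretenure);
    }
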
@@ -797,9 +945,9 @@ class Heap {
   // failed.
   // Please note this does not perform a garbage collection.
   MUST_USE_RESULT MaybeObject* AllocateExternalStringFromAscii(
-      ExternalAsciiString::Resource* resource);
+      const ExternalAsciiString::Resource* resource);
   MUST_USE_RESULT MaybeObject* AllocateExternalStringFromTwoByte(
-      ExternalTwoByteString::Resource* resource);
+      const ExternalTwoByteString::Resource* resource);
 
   // Finalizes an external string by deleting the associated external
   // data and clearing the resource pointer.
@@ -878,19 +1026,35 @@ class Heap {
   // Performs garbage collection operation.
   // Returns whether there is a chance that another major GC could
   // collect more garbage.
-  bool CollectGarbage(AllocationSpace space, GarbageCollector collector);
+  bool CollectGarbage(AllocationSpace space,
+                      GarbageCollector collector,
+                      const char* gc_reason,
+                      const char* collector_reason);
 
   // Performs garbage collection operation.
   // Returns whether there is a chance that another major GC could
   // collect more garbage.
-  inline bool CollectGarbage(AllocationSpace space);
+  inline bool CollectGarbage(AllocationSpace space,
+                             const char* gc_reason = NULL);
+
+  static const int kNoGCFlags = 0;
+  static const int kMakeHeapIterableMask = 1;
+  static const int kReduceMemoryFootprintMask = 2;
 
-  // Performs a full garbage collection. Force compaction if the
-  // parameter is true.
-  void CollectAllGarbage(bool force_compaction);
+  // Performs a full garbage collection.  If (flags & kMakeHeapIterableMask) is
+  // non-zero, then the slower precise sweeper is used, which leaves the heap
+  // in a state where we can iterate over the heap visiting all objects.
+  void CollectAllGarbage(int flags, const char* gc_reason = NULL);
 
   // Last hope GC, should try to squeeze as much as possible.
-  void CollectAllAvailableGarbage();
+  void CollectAllAvailableGarbage(const char* gc_reason = NULL);
+
+  // Check whether the heap is currently iterable.
+  bool IsHeapIterable();
+
+  // Ensure that we have swept all spaces in such a way that we can iterate
+  // over all objects.  May cause a GC.
+  void EnsureHeapIsIterable();
 
   // Notify the heap that a context has been disposed.
   int NotifyContextDisposed() { return ++contexts_disposed_; }
@@ -899,6 +1063,20 @@ class Heap {
   // ensure correct callback for weak global handles.
   void PerformScavenge();
 
+  inline void increment_scan_on_scavenge_pages() {
+    scan_on_scavenge_pages_++;
+    if (FLAG_gc_verbose) {
+      PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
+    }
+  }
+
+  inline void decrement_scan_on_scavenge_pages() {
+    scan_on_scavenge_pages_--;
+    if (FLAG_gc_verbose) {
+      PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
+    }
+  }
+
   PromotionQueue* promotion_queue() { return &promotion_queue_; }
 
 #ifdef DEBUG
@@ -925,6 +1103,8 @@ class Heap {
 
   // Heap root getters.  We have versions with and without type::cast() here.
   // You can't use type::cast during GC because the assert fails.
+  // TODO(1490): Try removing the unchecked accessors, now that GC marking does
+  // not corrupt the map.
 #define ROOT_ACCESSOR(type, name, camel_name) \
   type* name() { \
     return type::cast(roots_[k##camel_name##RootIndex]); \
@@ -958,6 +1138,9 @@ class Heap {
   }
   Object* global_contexts_list() { return global_contexts_list_; }
 
+  // Number of mark-sweeps.
+  int ms_count() { return ms_count_; }
+
   // Iterates over all roots in the heap.
   void IterateRoots(ObjectVisitor* v, VisitMode mode);
   // Iterates over all strong roots in the heap.
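
With the new signatures every collection carries a flags mask plus a
human-readable reason that ends up in --trace_gc output. Typical call sites
look like the following (the reason strings are illustrative):

    // Full GC with precise sweeping, so the heap can be iterated afterwards.
    heap->CollectAllGarbage(Heap::kMakeHeapIterableMask, "heap iteration");
    // Ordinary full GC, no special flags.
    heap->CollectAllGarbage(Heap::kNoGCFlags, "low memory notification");
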
@@ -965,60 +1148,16 @@ class Heap {
   // Iterates over all the other roots in the heap.
   void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
 
-  enum ExpectedPageWatermarkState {
-    WATERMARK_SHOULD_BE_VALID,
-    WATERMARK_CAN_BE_INVALID
-  };
-
-  // For each dirty region on a page in use from an old space call
-  // visit_dirty_region callback.
-  // If either visit_dirty_region or callback can cause an allocation
-  // in old space and changes in allocation watermark then
-  // can_preallocate_during_iteration should be set to true.
-  // All pages will be marked as having invalid watermark upon
-  // iteration completion.
-  void IterateDirtyRegions(
-      PagedSpace* space,
-      DirtyRegionCallback visit_dirty_region,
-      ObjectSlotCallback callback,
-      ExpectedPageWatermarkState expected_page_watermark_state);
-
-  // Interpret marks as a bitvector of dirty marks for regions of size
-  // Page::kRegionSize aligned by Page::kRegionAlignmentMask and covering
-  // memory interval from start to top. For each dirty region call a
-  // visit_dirty_region callback. Return updated bitvector of dirty marks.
-  uint32_t IterateDirtyRegions(uint32_t marks,
-                               Address start,
-                               Address end,
-                               DirtyRegionCallback visit_dirty_region,
-                               ObjectSlotCallback callback);
-
   // Iterate pointers to from semispace of new space found in memory interval
   // from start to end.
-  // Update dirty marks for page containing start address.
   void IterateAndMarkPointersToFromSpace(Address start,
                                          Address end,
                                          ObjectSlotCallback callback);
 
-  // Iterate pointers to new space found in memory interval from start to end.
-  // Return true if pointers to new space was found.
-  static bool IteratePointersInDirtyRegion(Heap* heap,
-                                           Address start,
-                                           Address end,
-                                           ObjectSlotCallback callback);
-
-
-  // Iterate pointers to new space found in memory interval from start to end.
-  // This interval is considered to belong to the map space.
-  // Return true if pointers to new space was found.
-  static bool IteratePointersInDirtyMapsRegion(Heap* heap,
-                                               Address start,
-                                               Address end,
-                                               ObjectSlotCallback callback);
-
-
   // Returns whether the object resides in new space.
   inline bool InNewSpace(Object* object);
+  inline bool InNewSpace(Address addr);
+  inline bool InNewSpacePage(Address addr);
   inline bool InFromSpace(Object* object);
   inline bool InToSpace(Object* object);
 
@@ -1057,11 +1196,19 @@ class Heap {
     roots_[kEmptyScriptRootIndex] = script;
   }
 
+  void public_set_store_buffer_top(Address* top) {
+    roots_[kStoreBufferTopRootIndex] = reinterpret_cast<Smi*>(top);
+  }
+
   // Update the next script id.
   inline void SetLastScriptId(Object* last_script_id);
 
   // Generated code can embed this address to get access to the roots.
-  Object** roots_address() { return roots_; }
+  Object** roots_array_start() { return roots_; }
+
+  Address* store_buffer_top_address() {
+    return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]);
+  }
 
   // Get address of global contexts list for serialization support.
   Object** global_contexts_list_address() {
@@ -1075,6 +1222,10 @@ class Heap {
   // Verify the heap is in its normal state before or after a GC.
   void Verify();
 
+  void OldPointerSpaceCheckStoreBuffer();
+  void MapSpaceCheckStoreBuffer();
+  void LargeObjectSpaceCheckStoreBuffer();
+
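
Note the trick behind public_set_store_buffer_top(): the store buffer's top
pointer is stored in the root array cast to Smi*, so generated code can reach
it at a fixed offset from roots_array_start() like any other root. A sketch of
the round trip (the NULL placeholder stands in for the real top pointer):

    Address* top = /* the StoreBuffer's current top pointer */ NULL;
    heap->public_set_store_buffer_top(top);  // stashed as a fake Smi root
    Address* slot = heap->store_buffer_top_address();
    // slot == &roots_[kStoreBufferTopRootIndex]; generated code loads and
    // stores *slot directly, needing only the roots pointer.
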
   // Report heap statistics.
   void ReportHeapStatistics(const char* title);
   void ReportCodeStatistics(const char* title);
@@ -1170,26 +1321,59 @@ class Heap {
   MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int length,
                                                      PretenureFlag pretenure);
 
+  inline intptr_t PromotedTotalSize() {
+    return PromotedSpaceSize() + PromotedExternalMemorySize();
+  }
+
   // True if we have reached the allocation limit in the old generation that
   // should force the next GC (caused normally) to be a full one.
-  bool OldGenerationPromotionLimitReached() {
-    return (PromotedSpaceSize() + PromotedExternalMemorySize())
-           > old_gen_promotion_limit_;
+  inline bool OldGenerationPromotionLimitReached() {
+    return PromotedTotalSize() > old_gen_promotion_limit_;
   }
 
-  intptr_t OldGenerationSpaceAvailable() {
-    return old_gen_allocation_limit_ -
-           (PromotedSpaceSize() + PromotedExternalMemorySize());
+  inline intptr_t OldGenerationSpaceAvailable() {
+    return old_gen_allocation_limit_ - PromotedTotalSize();
   }
 
-  // True if we have reached the allocation limit in the old generation that
-  // should artificially cause a GC right now.
-  bool OldGenerationAllocationLimitReached() {
-    return OldGenerationSpaceAvailable() < 0;
+  static const intptr_t kMinimumPromotionLimit = 5 * Page::kPageSize;
+  static const intptr_t kMinimumAllocationLimit =
+      8 * (Page::kPageSize > MB ? Page::kPageSize : MB);
+
+  // When we sweep lazily we initially guess that there is no garbage on the
+  // heap and set the limits for the next GC accordingly.  As we sweep we find
+  // out that some of the pages contained garbage and we have to adjust
+  // downwards the size of the heap.  This means the limits that control the
+  // timing of the next GC also need to be adjusted downwards.
+  void LowerOldGenLimits(intptr_t adjustment) {
+    size_of_old_gen_at_last_old_space_gc_ -= adjustment;
+    old_gen_promotion_limit_ =
+        OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
+    old_gen_allocation_limit_ =
+        OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
   }
 
-  // Can be called when the embedding application is idle.
-  bool IdleNotification();
+  intptr_t OldGenPromotionLimit(intptr_t old_gen_size) {
+    const int divisor = FLAG_stress_compaction ? 10 : 3;
+    intptr_t limit =
+        Max(old_gen_size + old_gen_size / divisor, kMinimumPromotionLimit);
+    limit += new_space_.Capacity();
+    limit *= old_gen_limit_factor_;
+    intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
+    return Min(limit, halfway_to_the_max);
+  }
+
+  intptr_t OldGenAllocationLimit(intptr_t old_gen_size) {
+    const int divisor = FLAG_stress_compaction ? 8 : 2;
+    intptr_t limit =
+        Max(old_gen_size + old_gen_size / divisor, kMinimumAllocationLimit);
+    limit += new_space_.Capacity();
+    limit *= old_gen_limit_factor_;
+    intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
+    return Min(limit, halfway_to_the_max);
+  }
+
+  // Implements the corresponding V8 API function.
+  bool IdleNotification(int hint);
 
   // Declare all the root indices.
   enum RootListIndex {
@@ -1213,6 +1397,8 @@ class Heap {
   MUST_USE_RESULT MaybeObject* NumberToString(
       Object* number, bool check_number_string_cache = true);
+  MUST_USE_RESULT MaybeObject* Uint32ToString(
+      uint32_t value, bool check_number_string_cache = true);
 
   Map* MapForExternalArrayType(ExternalArrayType array_type);
   RootListIndex RootIndexForExternalArrayType(
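
Plugging sample values into OldGenPromotionLimit() makes the heuristic
concrete. Assume a 100 MB old generation, no --stress-compaction (divisor 3),
an 8 MB new-space capacity, old_gen_limit_factor_ of 1, and a 700 MB maximum
old generation (all values illustrative):

    intptr_t old_gen_size = 100 * MB;
    intptr_t limit = Max(old_gen_size + old_gen_size / 3,  // ~133.3 MB
                         kMinimumPromotionLimit);          // a few pages, tiny
    limit += 8 * MB;   // new_space_.Capacity()  -> ~141.3 MB
    limit *= 1;        // old_gen_limit_factor_
    intptr_t halfway = (100 * MB + 700 * MB) / 2;          // 400 MB cap
    // Min(~141.3 MB, 400 MB): the next full GC is forced once roughly
    // 141 MB has been promoted since the last one.
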
@@ -1224,31 +1410,48 @@ class Heap {
   // by pointer size.
   static inline void CopyBlock(Address dst, Address src, int byte_size);
 
-  inline void CopyBlockToOldSpaceAndUpdateRegionMarks(Address dst,
-                                                      Address src,
-                                                      int byte_size);
-
   // Optimized version of memmove for blocks with pointer size aligned sizes
   // and pointer size aligned addresses.
   static inline void MoveBlock(Address dst, Address src, int byte_size);
 
-  inline void MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst,
-                                                      Address src,
-                                                      int byte_size);
-
   // Check new space expansion criteria and expand semispaces if it was hit.
   void CheckNewSpaceExpansionCriteria();
 
   inline void IncrementYoungSurvivorsCounter(int survived) {
+    ASSERT(survived >= 0);
     young_survivors_after_last_gc_ = survived;
     survived_since_last_expansion_ += survived;
   }
 
+  inline bool NextGCIsLikelyToBeFull() {
+    if (FLAG_gc_global) return true;
+
+    intptr_t total_promoted = PromotedTotalSize();
+
+    intptr_t adjusted_promotion_limit =
+        old_gen_promotion_limit_ - new_space_.Capacity();
+
+    if (total_promoted >= adjusted_promotion_limit) return true;
+
+    intptr_t adjusted_allocation_limit =
+        old_gen_allocation_limit_ - new_space_.Capacity() / 5;
+
+    if (PromotedSpaceSize() >= adjusted_allocation_limit) return true;
+
+    return false;
+  }
+
   void UpdateNewSpaceReferencesInExternalStringTable(
       ExternalStringTableUpdaterCallback updater_func);
 
+  void UpdateReferencesInExternalStringTable(
+      ExternalStringTableUpdaterCallback updater_func);
+
   void ProcessWeakReferences(WeakObjectRetainer* retainer);
 
+  void VisitExternalResources(v8::ExternalResourceVisitor* visitor);
+
   // Helper function that governs the promotion policy from new space to
   // old.  If the object's old address lies below the new space's age
   // mark or if we've already filled the bottom 1/16th of the to space,
@@ -1263,6 +1466,10 @@ class Heap {
 
   GCTracer* tracer() { return tracer_; }
 
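
VisitExternalResources() walks the external string table with a visitor
supplied through the public API. An embedder-side sketch, assuming this era's
v8::ExternalResourceVisitor interface with its single VisitExternalString
hook:

    class ExternalStringCounter : public v8::ExternalResourceVisitor {
     public:
      ExternalStringCounter() : count_(0) {}
      virtual void VisitExternalString(v8::Handle<v8::String> string) {
        count_++;  // e.g. tally or measure each externalized string
      }
      int count_;
    };

    // Usage:  ExternalStringCounter counter;
    //         v8::V8::VisitExternalResources(&counter);
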
+  // Returns the size of objects residing in non new spaces.
+  intptr_t PromotedSpaceSize();
+  intptr_t PromotedSpaceSizeOfObjects();
+
   double total_regexp_code_generated() { return total_regexp_code_generated_; }
   void IncreaseTotalRegexpCodeGenerated(int size) {
     total_regexp_code_generated_ += size;
@@ -1281,6 +1488,29 @@ class Heap {
     return &mark_compact_collector_;
   }
 
+  StoreBuffer* store_buffer() {
+    return &store_buffer_;
+  }
+
+  Marking* marking() {
+    return &marking_;
+  }
+
+  IncrementalMarking* incremental_marking() {
+    return &incremental_marking_;
+  }
+
+  bool IsSweepingComplete() {
+    return old_data_space()->IsSweepingComplete() &&
+           old_pointer_space()->IsSweepingComplete();
+  }
+
+  bool AdvanceSweepers(int step_size) {
+    bool sweeping_complete = old_data_space()->AdvanceSweeper(step_size);
+    sweeping_complete &= old_pointer_space()->AdvanceSweeper(step_size);
+    return sweeping_complete;
+  }
+
   ExternalStringTable* external_string_table() {
     return &external_string_table_;
   }
@@ -1291,22 +1521,46 @@ class Heap {
   }
 
   inline Isolate* isolate();
-  bool is_safe_to_read_maps() { return is_safe_to_read_maps_; }
 
-  void CallGlobalGCPrologueCallback() {
+  inline void CallGlobalGCPrologueCallback() {
     if (global_gc_prologue_callback_ != NULL) global_gc_prologue_callback_();
   }
 
-  void CallGlobalGCEpilogueCallback() {
+  inline void CallGlobalGCEpilogueCallback() {
     if (global_gc_epilogue_callback_ != NULL) global_gc_epilogue_callback_();
   }
 
+  inline bool OldGenerationAllocationLimitReached();
+
+  inline void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
+    scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
+  }
+
+  void QueueMemoryChunkForFree(MemoryChunk* chunk);
+  void FreeQueuedChunks();
+
+  // Completely clear the Instanceof cache (to stop it keeping objects alive
+  // around a GC).
+  inline void CompletelyClearInstanceofCache();
+
+  // The roots that have an index less than this are always in old space.
+  static const int kOldSpaceRoots = 0x20;
+
+  bool idle_notification_will_schedule_next_gc() {
+    return idle_notification_will_schedule_next_gc_;
+  }
+
   uint32_t HashSeed() {
     uint32_t seed = static_cast<uint32_t>(hash_seed()->value());
     ASSERT(FLAG_randomize_hashes || seed == 0);
     return seed;
   }
 
+  void SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
+    ASSERT(arguments_adaptor_deopt_pc_offset() == Smi::FromInt(0));
+    set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
+  }
+
  private:
   Heap();
 
@@ -1314,12 +1568,12 @@ class Heap {
   // more expedient to get at the isolate directly from within Heap methods.
   Isolate* isolate_;
 
+  intptr_t code_range_size_;
   int reserved_semispace_size_;
   int max_semispace_size_;
   int initial_semispace_size_;
   intptr_t max_old_generation_size_;
   intptr_t max_executable_size_;
-  intptr_t code_range_size_;
 
   // For keeping track of how much data has survived
   // scavenge since last new space expansion.
@@ -1334,6 +1588,8 @@ class Heap {
   // For keeping track of context disposals.
   int contexts_disposed_;
 
+  int scan_on_scavenge_pages_;
+
 #if defined(V8_TARGET_ARCH_X64)
   static const int kMaxObjectSizeInNewSpace = 1024*KB;
 #else
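
IsSweepingComplete() and AdvanceSweepers() expose lazy sweeping as an
incremental task driven by a per-step budget. A sketch of a caller draining
the sweepers (the budget value is made up):

    static const int kSweepStepSize = 32 * 1024;  // illustrative budget
    while (!heap->IsSweepingComplete()) {
      // Advances both old spaces; true once both report completion.
      if (heap->AdvanceSweepers(kSweepStepSize)) break;
    }
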
@@ -1350,13 +1606,9 @@ class Heap {
   HeapState gc_state_;
   int gc_post_processing_depth_;
 
-  // Returns the size of object residing in non new spaces.
-  intptr_t PromotedSpaceSize();
-
   // Returns the amount of external memory registered since last global gc.
   int PromotedExternalMemorySize();
 
-  int mc_count_;  // how many mark-compact collections happened
   int ms_count_;  // how many mark-sweep collections happened
   unsigned int gc_count_;  // how many gc happened
 
@@ -1364,7 +1616,10 @@ class Heap {
   int unflattened_strings_length_;
 
 #define ROOT_ACCESSOR(type, name, camel_name) \
-  inline void set_##name(type* value) { \
+  inline void set_##name(type* value) { \
+    /* The deserializer makes use of the fact that these common roots are */ \
+    /* never in new space and never on a page that is being compacted. */ \
+    ASSERT(k##camel_name##RootIndex >= kOldSpaceRoots || !InNewSpace(value)); \
     roots_[k##camel_name##RootIndex] = value; \
   }
   ROOT_LIST(ROOT_ACCESSOR)
@@ -1385,6 +1640,10 @@ class Heap {
   HeapDebugUtils* debug_utils_;
 #endif  // DEBUG
 
+  // Indicates that the new space should be kept small due to high promotion
+  // rates caused by the mutator allocating a lot of long-lived objects.
+  bool new_space_high_promotion_mode_active_;
+
   // Limit that triggers a global GC on the next (normally caused) GC.  This
   // is checked when we have already decided to do a GC to help determine
   // which collector to invoke.
@@ -1395,6 +1654,13 @@ class Heap {
   // every allocation in large object space.
   intptr_t old_gen_allocation_limit_;
 
+  // Sometimes the heuristics dictate that those limits are increased.  This
+  // variable records that fact.
+  int old_gen_limit_factor_;
+
+  // Used to adjust the limits that control the timing of the next GC.
+  intptr_t size_of_old_gen_at_last_old_space_gc_;
+
   // Limit on the amount of externally allocated memory allowed
   // between global GCs. If reached a global GC is forced.
   intptr_t external_allocation_limit_;
@@ -1414,6 +1680,8 @@ class Heap {
 
   Object* global_contexts_list_;
 
+  StoreBufferRebuilder store_buffer_rebuilder_;
+
   struct StringTypeTable {
     InstanceType type;
     int size;
@@ -1471,17 +1739,16 @@ class Heap {
   // Support for computing object sizes during GC.
   HeapObjectCallback gc_safe_size_of_old_object_;
   static int GcSafeSizeOfOldObject(HeapObject* object);
-  static int GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object);
 
   // Update the GC state. Called from the mark-compact collector.
   void MarkMapPointersAsEncoded(bool encoded) {
-    gc_safe_size_of_old_object_ = encoded
-        ? &GcSafeSizeOfOldObjectWithEncodedMap
-        : &GcSafeSizeOfOldObject;
+    ASSERT(!encoded);
+    gc_safe_size_of_old_object_ = &GcSafeSizeOfOldObject;
   }
 
   // Checks whether a global GC is necessary
-  GarbageCollector SelectGarbageCollector(AllocationSpace space);
+  GarbageCollector SelectGarbageCollector(AllocationSpace space,
+                                          const char** reason);
 
   // Performs garbage collection
   // Returns whether there is a chance another major GC could
@@ -1489,11 +1756,10 @@ class Heap {
   bool PerformGarbageCollection(GarbageCollector collector,
                                 GCTracer* tracer);
 
-  static const intptr_t kMinimumPromotionLimit = 2 * MB;
-  static const intptr_t kMinimumAllocationLimit = 8 * MB;
-
   inline void UpdateOldSpaceLimits();
+
   // Allocate an uninitialized object in map space.  The behavior is identical
   // to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't
   // have to test the allocation space argument and (b) can reduce code size
@@ -1522,14 +1788,17 @@ class Heap {
       Object* to_number,
       byte kind);
 
+  // Allocate a JSArray with no elements
+  MUST_USE_RESULT MaybeObject* AllocateJSArray(
+      ElementsKind elements_kind,
+      PretenureFlag pretenure = NOT_TENURED);
+
   // Allocate empty fixed array.
   MUST_USE_RESULT MaybeObject* AllocateEmptyFixedArray();
 
   // Allocate empty fixed double array.
   MUST_USE_RESULT MaybeObject* AllocateEmptyFixedDoubleArray();
 
-  void SwitchScavengingVisitorsTableIfProfilingWasEnabled();
-
   // Performs a minor collection in new generation.
   void Scavenge();
 
@@ -1538,16 +1807,15 @@ class Heap {
       Object** pointer);
 
   Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front);
+  static void ScavengeStoreBufferCallback(Heap* heap,
+                                          MemoryChunk* page,
+                                          StoreBufferEvent event);
 
   // Performs a major collection in the whole heap.
   void MarkCompact(GCTracer* tracer);
 
   // Code to be run before and after mark-compact.
-  void MarkCompactPrologue(bool is_compacting);
-
-  // Completely clear the Instanceof cache (to stop it keeping objects alive
-  // around a GC).
-  inline void CompletelyClearInstanceofCache();
+  void MarkCompactPrologue();
 
   // Record statistics before and after garbage collection.
   void ReportStatisticsBeforeGC();
@@ -1557,12 +1825,11 @@ class Heap {
   static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
 
   // Initializes a function with a shared part and prototype.
-  // Returns the function.
   // Note: this code was factored out of AllocateFunction such that
   // other parts of the VM could use it. Specifically, a function that creates
   // instances of type JS_FUNCTION_TYPE benefit from the use of this function.
   // Please note this does not perform a garbage collection.
-  MUST_USE_RESULT inline MaybeObject* InitializeFunction(
+  inline void InitializeFunction(
       JSFunction* function,
       SharedFunctionInfo* shared,
       Object* prototype);
@@ -1573,8 +1840,13 @@ class Heap {
 
   GCTracer* tracer_;
 
-  // Initializes the number to string cache based on the max semispace size.
-  MUST_USE_RESULT MaybeObject* InitializeNumberStringCache();
+  // Allocates a small number to string cache.
+  MUST_USE_RESULT MaybeObject* AllocateInitialNumberStringCache();
+  // Creates and installs the full-sized number string cache.
+  void AllocateFullSizeNumberStringCache();
+  // Get the length of the number to string cache based on the max semispace
+  // size.
+  int FullSizeNumberStringCacheLength();
   // Flush the number to string cache.
   void FlushNumberStringCache();
 
@@ -1582,11 +1854,13 @@ class Heap {
   enum SurvivalRateTrend { INCREASING, STABLE, DECREASING, FLUCTUATING };
 
-  static const int kYoungSurvivalRateThreshold = 90;
+  static const int kYoungSurvivalRateHighThreshold = 90;
+  static const int kYoungSurvivalRateLowThreshold = 10;
   static const int kYoungSurvivalRateAllowedDeviation = 15;
 
   int young_survivors_after_last_gc_;
   int high_survival_rate_period_length_;
+  int low_survival_rate_period_length_;
   double survival_rate_;
   SurvivalRateTrend previous_survival_rate_trend_;
   SurvivalRateTrend survival_rate_trend_;
@@ -1619,6 +1893,16 @@ class Heap {
     }
   }
 
+  bool IsStableOrDecreasingSurvivalTrend() {
+    switch (survival_rate_trend()) {
+      case STABLE:
+      case DECREASING:
+        return true;
+      default:
+        return false;
+    }
+  }
+
   bool IsIncreasingSurvivalTrend() {
     return survival_rate_trend() == INCREASING;
   }
@@ -1627,8 +1911,39 @@ class Heap {
     return high_survival_rate_period_length_ > 0;
   }
 
+  bool IsLowSurvivalRate() {
+    return low_survival_rate_period_length_ > 0;
+  }
+
+  void SelectScavengingVisitorsTable();
+
+  void StartIdleRound() {
+    mark_sweeps_since_idle_round_started_ = 0;
+    ms_count_at_last_idle_notification_ = ms_count_;
+  }
+
+  void FinishIdleRound() {
+    mark_sweeps_since_idle_round_started_ = kMaxMarkSweepsInIdleRound;
+    scavenges_since_last_idle_round_ = 0;
+  }
+
+  bool EnoughGarbageSinceLastIdleRound() {
+    return (scavenges_since_last_idle_round_ >= kIdleScavengeThreshold);
+  }
+
+  bool WorthStartingGCWhenIdle() {
+    if (contexts_disposed_ > 0) {
+      return true;
+    }
+    return incremental_marking()->WorthActivating();
+  }
+
+  // Returns true if no more GC work is left.
+  bool IdleGlobalGC();
+
   static const int kInitialSymbolTableSize = 2048;
   static const int kInitialEvalCacheSize = 64;
+  static const int kInitialNumberStringCacheSize = 256;
 
   // Maximum GC pause.
   int max_gc_pause_;
@@ -1646,25 +1961,37 @@ class Heap {
 
   MarkCompactCollector mark_compact_collector_;
 
-  // This field contains the meaning of the WATERMARK_INVALIDATED flag.
-  // Instead of clearing this flag from all pages we just flip
-  // its meaning at the beginning of a scavenge.
-  intptr_t page_watermark_invalidated_mark_;
+  StoreBuffer store_buffer_;
+
+  Marking marking_;
+
+  IncrementalMarking incremental_marking_;
 
   int number_idle_notifications_;
   unsigned int last_idle_notification_gc_count_;
   bool last_idle_notification_gc_count_init_;
 
+  bool idle_notification_will_schedule_next_gc_;
+  int mark_sweeps_since_idle_round_started_;
+  int ms_count_at_last_idle_notification_;
+  unsigned int gc_count_at_last_idle_gc_;
+  int scavenges_since_last_idle_round_;
+
+  static const int kMaxMarkSweepsInIdleRound = 7;
+  static const int kIdleScavengeThreshold = 5;
+
   // Shared state read by the scavenge collector and set by ScavengeObject.
   PromotionQueue promotion_queue_;
 
   // Flag is set when the heap has been configured.  The heap can be repeatedly
-  // configured through the API until it is setup.
+  // configured through the API until it is set up.
   bool configured_;
 
   ExternalStringTable external_string_table_;
 
-  bool is_safe_to_read_maps_;
+  VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;
+
+  MemoryChunk* chunks_queued_for_free_;
 
   friend class Factory;
   friend class GCTracer;
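
This idle-round bookkeeping (at most kMaxMarkSweepsInIdleRound = 7 mark-sweeps
per round, a round starting only after kIdleScavengeThreshold = 5 scavenges)
backs the public IdleNotification(hint) API: the embedder keeps calling it
during idle time until it returns true, meaning no GC work is left. An
embedder-side sketch (the hint value is illustrative):

    void OnEmbedderIdle() {
      while (!v8::V8::IdleNotification(100)) {
        // false means V8 still has incremental GC work queued; keep
        // notifying as long as the embedder remains idle.
      }
    }
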
@@ -1716,32 +2043,15 @@ class HeapStats {
 
 class AlwaysAllocateScope {
  public:
-  AlwaysAllocateScope() {
-    // We shouldn't hit any nested scopes, because that requires
-    // non-handle code to call handle code.  The code still works but
-    // performance will degrade, so we want to catch this situation
-    // in debug mode.
-    ASSERT(HEAP->always_allocate_scope_depth_ == 0);
-    HEAP->always_allocate_scope_depth_++;
-  }
-
-  ~AlwaysAllocateScope() {
-    HEAP->always_allocate_scope_depth_--;
-    ASSERT(HEAP->always_allocate_scope_depth_ == 0);
-  }
+  inline AlwaysAllocateScope();
+  inline ~AlwaysAllocateScope();
 };
 
 
 class LinearAllocationScope {
  public:
-  LinearAllocationScope() {
-    HEAP->linear_allocation_scope_depth_++;
-  }
-
-  ~LinearAllocationScope() {
-    HEAP->linear_allocation_scope_depth_--;
-    ASSERT(HEAP->linear_allocation_scope_depth_ >= 0);
-  }
+  inline LinearAllocationScope();
+  inline ~LinearAllocationScope();
 };
 
 
@@ -1753,38 +2063,7 @@ class LinearAllocationScope {
 // objects in a heap space but above the allocation pointer.
 class VerifyPointersVisitor: public ObjectVisitor {
  public:
-  void VisitPointers(Object** start, Object** end) {
-    for (Object** current = start; current < end; current++) {
-      if ((*current)->IsHeapObject()) {
-        HeapObject* object = HeapObject::cast(*current);
-        ASSERT(HEAP->Contains(object));
-        ASSERT(object->map()->IsMap());
-      }
-    }
-  }
-};
-
-
-// Visitor class to verify interior pointers in spaces that use region marks
-// to keep track of intergenerational references.
-// As VerifyPointersVisitor but also checks that dirty marks are set
-// for regions covering intergenerational references.
-class VerifyPointersAndDirtyRegionsVisitor: public ObjectVisitor {
- public:
-  void VisitPointers(Object** start, Object** end) {
-    for (Object** current = start; current < end; current++) {
-      if ((*current)->IsHeapObject()) {
-        HeapObject* object = HeapObject::cast(*current);
-        ASSERT(HEAP->Contains(object));
-        ASSERT(object->map()->IsMap());
-        if (HEAP->InNewSpace(object)) {
-          ASSERT(HEAP->InToSpace(object));
-          Address addr = reinterpret_cast<Address>(current);
-          ASSERT(Page::FromAddress(addr)->IsRegionDirty(addr));
-        }
-      }
-    }
-  }
+  inline void VisitPointers(Object** start, Object** end);
 };
 #endif
 
@@ -1860,7 +2139,6 @@ class HeapIterator BASE_EMBEDDED {
  public:
   enum HeapObjectsFiltering {
     kNoFiltering,
-    kFilterFreeListNodes,
     kFilterUnreachable
   };
 
@@ -1900,11 +2178,17 @@ class KeyedLookupCache {
   // Clear the cache.
   void Clear();
 
-  static const int kLength = 64;
+  static const int kLength = 256;
   static const int kCapacityMask = kLength - 1;
-  static const int kMapHashShift = 2;
+  static const int kMapHashShift = 5;
+  static const int kHashMask = -4;  // Zero the last two bits.
+  static const int kEntriesPerBucket = 4;
   static const int kNotFound = -1;
 
+  // kEntriesPerBucket should be a power of 2.
+  STATIC_ASSERT((kEntriesPerBucket & (kEntriesPerBucket - 1)) == 0);
+  STATIC_ASSERT(kEntriesPerBucket == -kHashMask);
+
  private:
   KeyedLookupCache() {
     for (int i = 0; i < kLength; ++i) {
@@ -2005,72 +2289,47 @@ class DescriptorLookupCache {
 };
 
 
-// A helper class to document/test C++ scopes where we do not
-// expect a GC. Usage:
-//
-// /* Allocation not allowed: we cannot handle a GC in this scope. */
-// { AssertNoAllocation nogc;
-//   ...
-// }
-
 #ifdef DEBUG
-
 class DisallowAllocationFailure {
  public:
-  DisallowAllocationFailure() {
-    old_state_ = HEAP->disallow_allocation_failure_;
-    HEAP->disallow_allocation_failure_ = true;
-  }
-  ~DisallowAllocationFailure() {
-    HEAP->disallow_allocation_failure_ = old_state_;
-  }
+  inline DisallowAllocationFailure();
+  inline ~DisallowAllocationFailure();
+
  private:
   bool old_state_;
 };
+#endif
+
+
+// A helper class to document/test C++ scopes where we do not
+// expect a GC. Usage:
+//
+// /* Allocation not allowed: we cannot handle a GC in this scope. */
+// { AssertNoAllocation nogc;
+//   ...
+// }
 
 class AssertNoAllocation {
  public:
-  AssertNoAllocation() {
-    old_state_ = HEAP->allow_allocation(false);
-  }
-
-  ~AssertNoAllocation() {
-    HEAP->allow_allocation(old_state_);
-  }
+  inline AssertNoAllocation();
+  inline ~AssertNoAllocation();
 
+#ifdef DEBUG
  private:
   bool old_state_;
+#endif
 };
 
+
 class DisableAssertNoAllocation {
  public:
-  DisableAssertNoAllocation() {
-    old_state_ = HEAP->allow_allocation(true);
-  }
-
-  ~DisableAssertNoAllocation() {
-    HEAP->allow_allocation(old_state_);
-  }
+  inline DisableAssertNoAllocation();
+  inline ~DisableAssertNoAllocation();
 
+#ifdef DEBUG
  private:
   bool old_state_;
-};
-
-#else  // ndef DEBUG
-
-class AssertNoAllocation {
- public:
-  AssertNoAllocation() { }
-  ~AssertNoAllocation() { }
-};
-
-class DisableAssertNoAllocation {
- public:
-  DisableAssertNoAllocation() { }
-  ~DisableAssertNoAllocation() { }
-};
-
 #endif
+};
 
 
 // GCTracer collects and prints ONE line after each garbage collector
 // invocation IFF --trace_gc is used.
@@ -2084,7 +2343,13 @@ class GCTracer BASE_EMBEDDED {
       MC_MARK,
       MC_SWEEP,
       MC_SWEEP_NEWSPACE,
-      MC_COMPACT,
+      MC_EVACUATE_PAGES,
+      MC_UPDATE_NEW_TO_NEW_POINTERS,
+      MC_UPDATE_ROOT_TO_NEW_POINTERS,
+      MC_UPDATE_OLD_TO_NEW_POINTERS,
+      MC_UPDATE_POINTERS_TO_EVACUATED,
+      MC_UPDATE_POINTERS_BETWEEN_EVACUATED,
+      MC_UPDATE_MISC_POINTERS,
       MC_FLUSH_CODE,
       kNumberOfScopes
     };
@@ -2106,7 +2371,9 @@ class GCTracer BASE_EMBEDDED {
     double start_time_;
   };
 
-  explicit GCTracer(Heap* heap);
+  explicit GCTracer(Heap* heap,
+                    const char* gc_reason,
+                    const char* collector_reason);
   ~GCTracer();
 
   // Sets the collector.
@@ -2118,16 +2385,6 @@ class GCTracer BASE_EMBEDDED {
   // Sets the full GC count.
   void set_full_gc_count(int count) { full_gc_count_ = count; }
 
-  // Sets the flag that this is a compacting full GC.
-  void set_is_compacting() { is_compacting_ = true; }
-  bool is_compacting() const { return is_compacting_; }
-
-  // Increment and decrement the count of marked objects.
-  void increment_marked_count() { ++marked_count_; }
-  void decrement_marked_count() { --marked_count_; }
-
-  int marked_count() { return marked_count_; }
-
   void increment_promoted_objects_size(int object_size) {
     promoted_objects_size_ += object_size;
   }
@@ -2137,38 +2394,27 @@ class GCTracer BASE_EMBEDDED {
   const char* CollectorString();
 
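
GCTracer::Scope (defined above) is a stopwatch helper: construction records a
start time and the destructor adds the elapsed time to the corresponding
scopes_ slot. Typical collector-side use, assuming the usual (tracer,
scope-id) constructor:

    {
      GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK);
      // ... marking work; elapsed time is added to scopes_[MC_MARK] ...
    }   // destructor records the elapsed wall-clock time
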
   // Returns size of object in heap (in MB).
-  double SizeOfHeapObjects() {
-    return (static_cast<double>(HEAP->SizeOfObjects())) / MB;
-  }
+  inline double SizeOfHeapObjects();
+
+  // Timestamp set in the constructor.
+  double start_time_;
+
+  // Size of objects in heap set in constructor.
+  intptr_t start_object_size_;
+
+  // Size of memory allocated from OS set in constructor.
+  intptr_t start_memory_size_;
 
-  double start_time_;  // Timestamp set in the constructor.
-  intptr_t start_size_;  // Size of objects in heap set in constructor.
-  GarbageCollector collector_;  // Type of collector.
+  // Type of collector.
+  GarbageCollector collector_;
 
-  // A count (including this one, eg, the first collection is 1) of the
+  // A count (including this one, e.g. the first collection is 1) of the
   // number of garbage collections.
   unsigned int gc_count_;
 
   // A count (including this one) of the number of full garbage collections.
   int full_gc_count_;
 
-  // True if the current GC is a compacting full collection, false
-  // otherwise.
-  bool is_compacting_;
-
-  // True if the *previous* full GC cwas a compacting collection (will be
-  // false if there has not been a previous full GC).
-  bool previous_has_compacted_;
-
-  // On a full GC, a count of the number of marked objects.  Incremented
-  // when an object is marked and decremented when an object's mark bit is
-  // cleared.  Will be zero on a scavenge collection.
-  int marked_count_;
-
-  // The count from the end of the previous full GC.  Will be zero if there
-  // was no previous full GC.
-  int previous_marked_count_;
-
   // Amounts of time spent in different scopes during GC.
   double scopes_[Scope::kNumberOfScopes];
 
@@ -2187,7 +2433,17 @@ class GCTracer BASE_EMBEDDED {
   // Size of objects promoted during the current collection.
   intptr_t promoted_objects_size_;
 
+  // Incremental marking steps counters.
+  int steps_count_;
+  double steps_took_;
+  double longest_step_;
+  int steps_count_since_last_gc_;
+  double steps_took_since_last_gc_;
+
   Heap* heap_;
+
+  const char* gc_reason_;
+  const char* collector_reason_;
 };
 
 
@@ -2298,6 +2554,46 @@ class WeakObjectRetainer {
 };
 
 
+// Intrusive object marking uses least significant bit of
+// heap object's map word to mark objects.
+// Normally all map words have least significant bit set
+// because they contain tagged map pointer.
+// If the bit is not set object is marked.
+// All objects should be unmarked before resuming
+// JavaScript execution.
+class IntrusiveMarking {
+ public:
+  static bool IsMarked(HeapObject* object) {
+    return (object->map_word().ToRawValue() & kNotMarkedBit) == 0;
+  }
+
+  static void ClearMark(HeapObject* object) {
+    uintptr_t map_word = object->map_word().ToRawValue();
+    object->set_map_word(MapWord::FromRawValue(map_word | kNotMarkedBit));
+    ASSERT(!IsMarked(object));
+  }
+
+  static void SetMark(HeapObject* object) {
+    uintptr_t map_word = object->map_word().ToRawValue();
+    object->set_map_word(MapWord::FromRawValue(map_word & ~kNotMarkedBit));
+    ASSERT(IsMarked(object));
+  }
+
+  static Map* MapOfMarkedObject(HeapObject* object) {
+    uintptr_t map_word = object->map_word().ToRawValue();
+    return MapWord::FromRawValue(map_word | kNotMarkedBit).ToMap();
+  }
+
+  static int SizeOfMarkedObject(HeapObject* object) {
+    return object->SizeFromMap(MapOfMarkedObject(object));
+  }
+
+ private:
+  static const uintptr_t kNotMarkedBit = 0x1;
+  STATIC_ASSERT((kHeapObjectTag & kNotMarkedBit) != 0);
+};
+
+
 #if defined(DEBUG) || defined(LIVE_OBJECT_LIST)
 // Helper class for tracing paths to a search target Object from all roots.
 // The TracePathFrom() method can be used to trace paths from a specific
@@ -2352,13 +2648,11 @@ class PathTracer : public ObjectVisitor {
 
   AssertNoAllocation no_alloc;  // i.e. no gc allowed.
 
+ private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer);
 };
 #endif  // DEBUG || LIVE_OBJECT_LIST
 
-
 } }  // namespace v8::internal
 
-#undef HEAP
-
 #endif  // V8_HEAP_H_
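
IntrusiveMarking leans on pointer tagging: a map word normally holds a tagged
Map pointer whose low bit (kHeapObjectTag) is always set, so clearing that bit
is a reversible mark that keeps the map recoverable. A standalone illustration
of the bit trick with plain integers:

    #include <assert.h>
    #include <stdint.h>

    int main() {
      const uintptr_t kNotMarkedBit = 0x1;  // same bit as kHeapObjectTag
      uintptr_t map_word = 0x12345679;      // tagged map pointer: low bit set
      uintptr_t marked = map_word & ~kNotMarkedBit;  // SetMark
      assert((marked & kNotMarkedBit) == 0);         // IsMarked
      uintptr_t restored = marked | kNotMarkedBit;   // ClearMark / MapOfMarkedObject
      assert(restored == map_word);                  // map fully recoverable
      return 0;
    }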