Diffstat (limited to 'deps/v8/src/heap/heap.h')
-rw-r--r--  deps/v8/src/heap/heap.h  245
1 file changed, 87 insertions(+), 158 deletions(-)
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index ef8d912bfb..29aa5aad76 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -7,7 +7,6 @@
 #include <atomic>
 #include <cmath>
-#include <map>
 #include <memory>
 #include <unordered_map>
 #include <unordered_set>
@@ -27,6 +26,8 @@
 #include "src/common/assert-scope.h"
 #include "src/common/globals.h"
 #include "src/heap/allocation-observer.h"
+#include "src/heap/allocation-result.h"
+#include "src/heap/heap-allocator.h"
 #include "src/init/heap-symbols.h"
 #include "src/objects/allocation-site.h"
 #include "src/objects/fixed-array.h"
@@ -126,16 +127,10 @@ enum ExternalBackingStoreType { kArrayBuffer, kExternalString, kNumTypes };
 
 enum class RetainingPathOption { kDefault, kTrackEphemeronPath };
 
-enum class AllocationOrigin {
-  kGeneratedCode = 0,
-  kRuntime = 1,
-  kGC = 2,
-  kFirstAllocationOrigin = kGeneratedCode,
-  kLastAllocationOrigin = kGC,
-  kNumberOfAllocationOrigins = kLastAllocationOrigin + 1
-};
-
-enum class GarbageCollectionReason {
+// These values are persisted to logs. Entries should not be renumbered and
+// numeric values should never be reused. If you add new items here, update
+// src/tools/metrics/histograms/enums.xml in chromium.
+enum class GarbageCollectionReason : int {
   kUnknown = 0,
   kAllocationFailure = 1,
   kAllocationLimit = 2,
@@ -162,11 +157,14 @@ enum class GarbageCollectionReason {
   kGlobalAllocationLimit = 23,
   kMeasureMemory = 24,
   kBackgroundAllocationFailure = 25,
-  // If you add new items here, then update the incremental_marking_reason,
-  // mark_compact_reason, and scavenge_reason counters in counters.h.
-  // Also update src/tools/metrics/histograms/enums.xml in chromium.
+
+  kLastReason = kBackgroundAllocationFailure,
 };
 
+static_assert(kGarbageCollectionReasonMaxValue ==
+                  static_cast<int>(GarbageCollectionReason::kLastReason),
+              "The value of kGarbageCollectionReasonMaxValue is inconsistent.");
+
 enum class YoungGenerationHandling {
   kRegularScavenge = 0,
   kFastPromotionDuringScavenge = 1,
@@ -208,44 +206,6 @@ class StrongRootsEntry final {
   friend class Heap;
 };
 
-class AllocationResult {
- public:
-  static inline AllocationResult Retry(AllocationSpace space) {
-    return AllocationResult(space);
-  }
-
-  // Implicit constructor from Object.
-  AllocationResult(Object object)  // NOLINT
-      : object_(object) {
-    // AllocationResults can't return Smis, which are used to represent
-    // failure and the space to retry in.
-    CHECK(!object.IsSmi());
-  }
-
-  AllocationResult() : object_(Smi::FromInt(NEW_SPACE)) {}
-
-  inline bool IsRetry() { return object_.IsSmi(); }
-  inline HeapObject ToObjectChecked();
-  inline HeapObject ToObject();
-  inline Address ToAddress();
-  inline AllocationSpace RetrySpace();
-
-  template <typename T>
-  bool To(T* obj) {
-    if (IsRetry()) return false;
-    *obj = T::cast(object_);
-    return true;
-  }
-
- private:
-  explicit AllocationResult(AllocationSpace space)
-      : object_(Smi::FromInt(static_cast<int>(space))) {}
-
-  Object object_;
-};
-
-STATIC_ASSERT(sizeof(AllocationResult) == kSystemPointerSize);
-
 #ifdef DEBUG
 struct CommentStatistic {
   const char* comment;
@@ -265,8 +225,6 @@ using EphemeronRememberedSet =
     std::unordered_map<EphemeronHashTable, std::unordered_set<int>,
                        Object::Hasher>;
 
-using CollectionEpoch = uint32_t;
-
 class Heap {
  public:
   // Stores ephemeron entries where the EphemeronHashTable is in old-space,
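
The GarbageCollectionReason hunks above replace a maintenance note ("update the
counters and enums.xml by hand") with a compile-time guard: the enum now
carries a kLastReason alias and a static_assert pins it to the histogram
bound. A minimal sketch of the pattern, with an illustrative constant standing
in for the real kGarbageCollectionReasonMaxValue that V8 declares next to its
histogram machinery:

enum class GarbageCollectionReason : int {
  kUnknown = 0,
  // ... values are persisted to logs, so they are never renumbered ...
  kBackgroundAllocationFailure = 25,
  kLastReason = kBackgroundAllocationFailure,
};

// Stand-in for the constant declared alongside the histogram definitions.
constexpr int kGarbageCollectionReasonMaxValue = 25;

static_assert(kGarbageCollectionReasonMaxValue ==
                  static_cast<int>(GarbageCollectionReason::kLastReason),
              "histogram bound and enum must stay in sync");

Adding reason 26 without bumping the bound now breaks the build instead of
silently producing a truncated histogram.
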
@@ -480,12 +438,8 @@ class Heap {
   }
 
   static inline GarbageCollector YoungGenerationCollector() {
-#if ENABLE_MINOR_MC
     return (FLAG_minor_mc) ? GarbageCollector::MINOR_MARK_COMPACTOR
                            : GarbageCollector::SCAVENGER;
-#else
-    return GarbageCollector::SCAVENGER;
-#endif  // ENABLE_MINOR_MC
   }
 
   static inline const char* CollectorName(GarbageCollector collector) {
@@ -545,8 +499,6 @@ class Heap {
   void NotifyOldGenerationExpansion(AllocationSpace space, MemoryChunk* chunk);
 
-  void UpdateCurrentEpoch(GarbageCollector collector);
-
   inline Address* NewSpaceAllocationTopAddress();
   inline Address* NewSpaceAllocationLimitAddress();
   inline Address* OldSpaceAllocationTopAddress();
@@ -647,9 +599,6 @@ class Heap {
   void CheckHandleCount();
 
-  // Number of "runtime allocations" done so far.
-  uint32_t allocations_count() { return allocations_count_; }
-
   // Print short heap statistics.
   void PrintShortHeapStatistics();
@@ -800,16 +749,9 @@ class Heap {
   V8_EXPORT_PRIVATE void AddRetainedMap(Handle<NativeContext> context,
                                         Handle<Map> map);
 
-  // This event is triggered after successful allocation of a new object made
-  // by runtime. Allocations of target space for object evacuation do not
-  // trigger the event. In order to track ALL allocations one must turn off
-  // FLAG_inline_new.
-  inline void OnAllocationEvent(HeapObject object, int size_in_bytes);
-
   // This event is triggered after object is moved to a new place.
   void OnMoveEvent(HeapObject target, HeapObject source, int size_in_bytes);
 
-  inline bool CanAllocateInReadOnlySpace();
   bool deserialization_complete() const { return deserialization_complete_; }
 
   // We can only invoke Safepoint() on the main thread local heap after
@@ -866,6 +808,9 @@ class Heap {
   // (Re-)Initialize hash seed from flag or RNG.
   void InitializeHashSeed();
 
+  // Invoked once for the process from V8::Initialize.
+  static void InitializeOncePerProcess();
+
   // Bootstraps the object heap with the core set of objects required to run.
   // Returns whether it succeeded.
   bool CreateHeapObjects();
@@ -893,6 +838,7 @@ class Heap {
   OldSpace* shared_old_space() { return shared_old_space_; }
   CodeSpace* code_space() { return code_space_; }
   MapSpace* map_space() { return map_space_; }
+  inline PagedSpace* space_for_maps();
   OldLargeObjectSpace* lo_space() { return lo_space_; }
   CodeLargeObjectSpace* code_lo_space() { return code_lo_space_; }
   NewLargeObjectSpace* new_lo_space() { return new_lo_space_; }
@@ -912,6 +858,8 @@ class Heap {
     return memory_allocator_.get();
   }
 
+  inline ConcurrentAllocator* concurrent_allocator_for_maps();
+
   inline Isolate* isolate();
 
   MarkCompactCollector* mark_compact_collector() {
@@ -919,17 +867,22 @@ class Heap {
   }
 
   MinorMarkCompactCollector* minor_mark_compact_collector() {
-    return minor_mark_compact_collector_;
+    return minor_mark_compact_collector_.get();
   }
 
   ArrayBufferSweeper* array_buffer_sweeper() {
     return array_buffer_sweeper_.get();
   }
 
+  // The potentially overreserved address space region reserved by the code
+  // range if it exists or empty region otherwise.
   const base::AddressRegion& code_region();
 
   CodeRange* code_range() { return code_range_.get(); }
 
+  // The base of the code range if it exists or null address.
+  inline Address code_range_base();
+
   LocalHeap* main_thread_local_heap() { return main_thread_local_heap_; }
 
   Heap* AsHeap() { return this; }
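
Two related changes appear above and in the member declarations further down:
minor_mark_compact_collector() now returns minor_mark_compact_collector_.get()
because, as the later member hunk shows, the field becomes a std::unique_ptr
owned by Heap. A minimal sketch of the ownership idiom, with stand-in types:

#include <memory>

class MinorMarkCompactCollector {};

class Heap {
 public:
  // Callers get a non-owning raw pointer; the lifetime stays with Heap.
  MinorMarkCompactCollector* minor_mark_compact_collector() {
    return minor_mark_compact_collector_.get();
  }

 private:
  // unique_ptr replaces a raw-pointer member, so teardown no longer needs a
  // manual delete in Heap's destructor or TearDown path.
  std::unique_ptr<MinorMarkCompactCollector> minor_mark_compact_collector_;
};
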
@@ -1001,9 +954,6 @@ class Heap {
   // Inline allocation. ========================================================
   // ===========================================================================
 
-  // Indicates whether inline bump-pointer allocation has been disabled.
-  bool inline_allocation_disabled() { return inline_allocation_disabled_; }
-
   // Switch whether inline bump-pointer allocation should be used.
   V8_EXPORT_PRIVATE void EnableInlineAllocation();
   V8_EXPORT_PRIVATE void DisableInlineAllocation();
@@ -1165,6 +1115,9 @@ class Heap {
   // - or it was communicated to GC using NotifyObjectLayoutChange.
   V8_EXPORT_PRIVATE void VerifyObjectLayoutChange(HeapObject object,
                                                   Map new_map);
+  // Checks that this is a safe map transition.
+  V8_EXPORT_PRIVATE void VerifySafeMapTransition(HeapObject object,
+                                                 Map new_map);
 #endif
 
   // ===========================================================================
@@ -1196,8 +1149,6 @@ class Heap {
   EmbedderHeapTracer* GetEmbedderHeapTracer() const;
 
   void RegisterExternallyReferencedObject(Address* location);
-  V8_EXPORT_PRIVATE void SetEmbedderStackStateForNextFinalization(
-      EmbedderHeapTracer::EmbedderStackState stack_state);
 
   EmbedderHeapTracer::TraceFlags flags_for_embedder_tracer() const;
@@ -1626,23 +1577,24 @@ class Heap {
 #endif
 
 #ifdef V8_ENABLE_ALLOCATION_TIMEOUT
-  void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; }
-#endif
+  void V8_EXPORT_PRIVATE set_allocation_timeout(int allocation_timeout);
+#endif  // V8_ENABLE_ALLOCATION_TIMEOUT
 
 #ifdef DEBUG
   void VerifyCountersAfterSweeping();
   void VerifyCountersBeforeConcurrentSweeping();
+  void VerifyCommittedPhysicalMemory();
 
   void Print();
   void PrintHandles();
 
   // Report code statistics.
   void ReportCodeStatistics(const char* title);
-#endif
+#endif  // DEBUG
   void* GetRandomMmapAddr() {
     void* result = v8::internal::GetRandomMmapAddr();
 #if V8_TARGET_ARCH_X64
-#if V8_OS_MACOSX
+#if V8_OS_DARWIN
     // The Darwin kernel [as of macOS 10.12.5] does not clean up page
    // directory entries [PDE] created from mmap or mach_vm_allocate, even
     // after the region is destroyed. Using a virtual address space that is
@@ -1652,7 +1604,7 @@ class Heap {
     // space. See crbug.com/700928.
     uintptr_t offset = reinterpret_cast<uintptr_t>(result) & kMmapRegionMask;
     result = reinterpret_cast<void*>(mmap_region_base_ + offset);
-#endif  // V8_OS_MACOSX
+#endif  // V8_OS_DARWIN
#endif  // V8_TARGET_ARCH_X64
     return result;
   }
@@ -1667,16 +1619,13 @@ class Heap {
   static Isolate* GetIsolateFromWritableObject(HeapObject object);
 
-  CollectionEpoch epoch_young() { return epoch_young_; }
-  CollectionEpoch epoch_full() { return epoch_full_; }
-
-  void UpdateEpochFull();
-
   // Ensure that we have swept all spaces in such a way that we can iterate
   // over all objects.
   void MakeHeapIterable();
 
  private:
+  class AllocationTrackerForDebugging;
+
   using ExternalStringTableUpdaterCallback = String (*)(Heap* heap,
                                                         FullObjectSlot pointer);
@@ -1811,7 +1760,8 @@ class Heap {
   // Performs garbage collection in a safepoint.
   // Returns the number of freed global handles.
   size_t PerformGarbageCollection(
-      GarbageCollector collector,
+      GarbageCollector collector, GarbageCollectionReason gc_reason,
+      const char* collector_reason,
       const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
 
   // Performs garbage collection in the shared heap.
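
The final hunk above threads the collection reason through
PerformGarbageCollection instead of having callers stash it on the Heap before
invoking the collector. A simplified sketch of the resulting call shape; the
tracer type and its method are stand-ins, not V8's real API:

#include <cstddef>

enum class GarbageCollector { SCAVENGER, MARK_COMPACTOR };
enum class GarbageCollectionReason : int { kUnknown = 0, kTesting = 21 };

struct GCTracer {
  void StartCycle(GarbageCollector, GarbageCollectionReason,
                  const char* collector_reason) {
    // Record the reason for logs and metrics at the point where the
    // collection actually starts.
  }
};

class Heap {
 public:
  size_t PerformGarbageCollection(GarbageCollector collector,
                                  GarbageCollectionReason gc_reason,
                                  const char* collector_reason) {
    tracer_.StartCycle(collector, gc_reason, collector_reason);
    // ... perform the collection in a safepoint ...
    return 0;  // number of freed global handles
  }

 private:
  GCTracer tracer_;
};
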
@@ -1887,11 +1837,6 @@ class Heap {
                                     GCIdleTimeHeapState heap_state,
                                     double start_ms, double deadline_in_ms);
 
-  int NextAllocationTimeout(int current_timeout = 0);
-  inline void UpdateAllocationsHash(HeapObject object);
-  inline void UpdateAllocationsHash(uint32_t value);
-  void PrintAllocationsHash();
-
   void PrintMaxMarkingLimitReached();
   void PrintMaxNewSpaceSizeReached();
@@ -1920,15 +1865,6 @@ class Heap {
   void InvokeIncrementalMarkingPrologueCallbacks();
   void InvokeIncrementalMarkingEpilogueCallbacks();
 
-  // Returns the timer used for a given GC type.
-  // - GCScavenger: young generation GC
-  // - GCCompactor: full GC
-  // - GCFinalzeMC: finalization of incremental full GC
-  // - GCFinalizeMCReduceMemory: finalization of incremental full GC with
-  //   memory reduction
-  TimedHistogram* GCTypeTimer(GarbageCollector collector);
-  TimedHistogram* GCTypePriorityTimer(GarbageCollector collector);
-
   // ===========================================================================
   // Pretenuring. ==============================================================
   // ===========================================================================
@@ -1947,7 +1883,8 @@ class Heap {
 
   // Code that should be run before and after each GC. Includes some
   // reporting/verification activities when compiled with DEBUG set.
-  void GarbageCollectionPrologue();
+  void GarbageCollectionPrologue(GarbageCollectionReason gc_reason,
+                                 const v8::GCCallbackFlags gc_callback_flags);
   void GarbageCollectionPrologueInSafepoint();
   void GarbageCollectionEpilogue(GarbageCollector collector);
   void GarbageCollectionEpilogueInSafepoint(GarbageCollector collector);
@@ -2087,6 +2024,8 @@ class Heap {
   // Allocation methods. =======================================================
   // ===========================================================================
 
+  HeapAllocator* allocator() { return &heap_allocator_; }
+
   // Allocates a JS Map in the heap.
   V8_WARN_UNUSED_RESULT AllocationResult
   AllocateMap(InstanceType instance_type, int instance_size,
@@ -2097,19 +2036,19 @@ class Heap {
   // hardware and OS allow. This is the single choke-point for allocations
   // performed by the runtime and should not be bypassed (to extend this to
   // inlined allocations, use the Heap::DisableInlineAllocation() support).
-  V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
-      int size_in_bytes, AllocationType allocation,
-      AllocationOrigin origin = AllocationOrigin::kRuntime,
-      AllocationAlignment alignment = kTaggedAligned);
+  V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
+  AllocateRaw(int size_in_bytes, AllocationType allocation,
+              AllocationOrigin origin = AllocationOrigin::kRuntime,
+              AllocationAlignment alignment = kTaggedAligned);
 
   // This method will try to allocate objects quickly (AllocationType::kYoung)
   // otherwise it falls back to a slower path indicated by the mode.
   enum AllocationRetryMode { kLightRetry, kRetryOrFail };
   template <AllocationRetryMode mode>
-  V8_WARN_UNUSED_RESULT inline HeapObject AllocateRawWith(
-      int size, AllocationType allocation,
-      AllocationOrigin origin = AllocationOrigin::kRuntime,
-      AllocationAlignment alignment = kTaggedAligned);
+  V8_WARN_UNUSED_RESULT V8_INLINE HeapObject
+  AllocateRawWith(int size, AllocationType allocation,
+                  AllocationOrigin origin = AllocationOrigin::kRuntime,
+                  AllocationAlignment alignment = kTaggedAligned);
 
   // Call AllocateRawWith with kRetryOrFail. Matches the method in LocalHeap.
   V8_WARN_UNUSED_RESULT inline Address AllocateRawOrFail(
@@ -2117,25 +2056,6 @@ class Heap {
       AllocationOrigin origin = AllocationOrigin::kRuntime,
       AllocationAlignment alignment = kTaggedAligned);
 
-  // This method will try to perform an allocation of a given size of a given
-  // AllocationType. If the allocation fails, a regular full garbage collection
-  // is triggered and the allocation is retried. This is performed multiple
-  // times. If after that retry procedure the allocation still fails nullptr is
-  // returned.
-  V8_WARN_UNUSED_RESULT HeapObject AllocateRawWithLightRetrySlowPath(
-      int size, AllocationType allocation, AllocationOrigin origin,
-      AllocationAlignment alignment = kTaggedAligned);
-
-  // This method will try to perform an allocation of a given size of a given
-  // AllocationType. If the allocation fails, a regular full garbage collection
-  // is triggered and the allocation is retried. This is performed multiple
-  // times. If after that retry procedure the allocation still fails a "hammer"
-  // garbage collection is triggered which tries to significantly reduce
-  // memory. If the allocation still fails after that a fatal error is thrown.
-  V8_WARN_UNUSED_RESULT HeapObject AllocateRawWithRetryOrFailSlowPath(
-      int size, AllocationType allocation, AllocationOrigin origin,
-      AllocationAlignment alignment = kTaggedAligned);
-
   // Allocates a heap object based on the map.
   V8_WARN_UNUSED_RESULT AllocationResult Allocate(Handle<Map> map,
                                                   AllocationType allocation);
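
The two removed slow-path helpers correspond to the kLightRetry and
kRetryOrFail modes of AllocateRawWith, and their logic moves behind the new
HeapAllocator included at the top of the file. A compilable sketch of that
retry policy, using malloc as a stand-in for the real allocation and GC
machinery:

#include <cstdlib>

enum AllocationRetryMode { kLightRetry, kRetryOrFail };

struct AllocationResult {
  void* ptr = nullptr;
  bool IsFailure() const { return ptr == nullptr; }
};

AllocationResult TryAllocate(int size) { return {std::malloc(size)}; }
void CollectGarbage() { /* trigger a full GC between retries */ }

template <AllocationRetryMode mode>
void* AllocateRawWith(int size) {
  // A few GC-then-retry rounds, mirroring the removed comments' description.
  for (int i = 0; i < 2; ++i) {
    AllocationResult result = TryAllocate(size);
    if (!result.IsFailure()) return result.ptr;
    CollectGarbage();
  }
  AllocationResult last = TryAllocate(size);
  if (!last.IsFailure()) return last.ptr;
  if (mode == kRetryOrFail) std::abort();  // "hammer" GC failed: fatal OOM
  return nullptr;  // kLightRetry: report failure to the caller
}
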
@@ -2179,12 +2099,16 @@ class Heap {
     return allocation_type_for_in_place_internalizable_strings_;
   }
 
+  bool IsStressingScavenge();
+
   ExternalMemoryAccounting external_memory_;
 
   // This can be calculated directly from a pointer to the heap; however, it is
   // more expedient to get at the isolate directly from within Heap methods.
   Isolate* isolate_ = nullptr;
 
+  HeapAllocator heap_allocator_;
+
   // These limits are initialized in Heap::ConfigureHeap based on the resource
   // constraints and flags.
   size_t code_range_size_ = 0;
@@ -2276,18 +2200,12 @@ class Heap {
   // Returns the amount of external memory registered since last global gc.
   V8_EXPORT_PRIVATE uint64_t AllocatedExternalMemorySinceMarkCompact();
 
-  // How many "runtime allocations" happened.
-  uint32_t allocations_count_ = 0;
-
-  // Running hash over allocations performed.
-  uint32_t raw_allocations_hash_ = 0;
-
   // Starts marking when stress_marking_percentage_% of the marking start limit
   // is reached.
-  int stress_marking_percentage_ = 0;
+  std::atomic<int> stress_marking_percentage_{0};
 
-  // Observer that causes more frequent checks for reached incremental marking
-  // limit.
+  // Observer that causes more frequent checks for reached incremental
+  // marking limit.
   AllocationObserver* stress_marking_observer_ = nullptr;
 
   // Observer that can cause early scavenge start.
@@ -2321,10 +2239,6 @@ class Heap {
   std::atomic<size_t> old_generation_allocation_limit_{0};
   size_t global_allocation_limit_ = 0;
 
-  // Indicates that inline bump-pointer allocation has been globally disabled
-  // for all spaces. This is used to disable allocations in generated code.
-  bool inline_allocation_disabled_ = false;
-
   // Weak list heads, threaded through the objects.
   // List heads are initialized lazily and contain the undefined_value at start.
   // {native_contexts_list_} is an Address instead of an Object to allow the use
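
One type change above is easy to miss: stress_marking_percentage_ becomes
std::atomic<int>, presumably because it can now be read while another thread
updates it. A minimal sketch of the accessor pattern; relaxed ordering is an
assumption that fits an independent tuning knob rather than a synchronization
point:

#include <atomic>

class Heap {
 public:
  void set_stress_marking_percentage(int percentage) {
    stress_marking_percentage_.store(percentage, std::memory_order_relaxed);
  }
  int stress_marking_percentage() const {
    return stress_marking_percentage_.load(std::memory_order_relaxed);
  }

 private:
  // atomic<int> makes concurrent read/write well-defined; a plain int here
  // would be a data race under the C++ memory model.
  std::atomic<int> stress_marking_percentage_{0};
};
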
@@ -2369,7 +2283,7 @@
   std::unique_ptr<GCTracer> tracer_;
   std::unique_ptr<MarkCompactCollector> mark_compact_collector_;
-  MinorMarkCompactCollector* minor_mark_compact_collector_ = nullptr;
+  std::unique_ptr<MinorMarkCompactCollector> minor_mark_compact_collector_;
   std::unique_ptr<ScavengerCollector> scavenger_collector_;
   std::unique_ptr<ArrayBufferSweeper> array_buffer_sweeper_;
@@ -2386,6 +2300,8 @@
   std::unique_ptr<AllocationObserver> stress_concurrent_allocation_observer_;
   std::unique_ptr<LocalEmbedderHeapTracer> local_embedder_heap_tracer_;
   std::unique_ptr<MarkingBarrier> marking_barrier_;
+  std::unique_ptr<AllocationTrackerForDebugging>
+      allocation_tracker_for_debugging_;
 
   // This object controls virtual space reserved for code on the V8 heap. This
   // is only valid for 64-bit architectures where kRequiresCodeRange.
@@ -2486,13 +2402,6 @@
   base::Mutex unprotected_memory_chunks_mutex_;
   std::unordered_set<MemoryChunk*> unprotected_memory_chunks_;
 
-#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
-  // If the --gc-interval flag is set to a positive value, this
-  // variable holds the value indicating the number of allocations
-  // remain until the next failure and garbage collection.
-  int allocation_timeout_ = 0;
-#endif  // V8_ENABLE_ALLOCATION_TIMEOUT
-
   std::unordered_map<HeapObject, HeapObject, Object::Hasher> retainer_;
   std::unordered_map<HeapObject, Root, Object::Hasher> retaining_root_;
   // If an object is retained by an ephemeron, then the retaining key of the
@@ -2509,11 +2418,6 @@
   std::unique_ptr<third_party_heap::Heap> tp_heap_;
 
-  // We need two epochs, since there can be scavenges during incremental
-  // marking.
-  CollectionEpoch epoch_young_ = 0;
-  CollectionEpoch epoch_full_ = 0;
-
   // Classes in "heap" can be friends.
   friend class AlwaysAllocateScope;
   friend class ArrayBufferCollector;
@@ -2522,6 +2426,7 @@
   friend class EvacuateVisitorBase;
   friend class GCCallbacksScope;
   friend class GCTracer;
+  friend class HeapAllocator;
   friend class HeapObjectIterator;
   friend class ScavengeTaskObserver;
   friend class IgnoreLocalGCRequests;
@@ -2611,6 +2516,7 @@ class V8_NODISCARD AlwaysAllocateScope {
   friend class AlwaysAllocateScopeForTesting;
   friend class Evacuator;
   friend class Heap;
+  friend class HeapAllocator;
   friend class Isolate;
 
   explicit inline AlwaysAllocateScope(Heap* heap);
@@ -2653,9 +2559,8 @@ class V8_NODISCARD CodeSpaceMemoryModificationScope {
   Heap* heap_;
 };
 
-// The CodePageCollectionMemoryModificationScope can only be used by the main
-// thread. It will not be enabled if a CodeSpaceMemoryModificationScope is
-// already active.
+// The CodePageCollectionMemoryModificationScope can be used by any thread. It
+// will not be enabled if a CodeSpaceMemoryModificationScope is already active.
 class V8_NODISCARD CodePageCollectionMemoryModificationScope {
  public:
   explicit inline CodePageCollectionMemoryModificationScope(Heap* heap);
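
A few hunks up, the V8_ENABLE_ALLOCATION_TIMEOUT counter leaves the Heap
class; together with the earlier set_allocation_timeout and
NextAllocationTimeout changes, this state plausibly now lives with the new
allocator. A hedged sketch of what the --gc-interval countdown does; the name
and reset behavior are illustrative, not V8's actual implementation:

class AllocationTimeout {
 public:
  explicit AllocationTimeout(int interval) : remaining_(interval) {}

  // Called on each allocation; returns true when the caller should trigger a
  // garbage collection (and typically reset the counter afterwards).
  bool ReachedTimeout() {
    if (remaining_ <= 0) return false;  // feature disabled or already fired
    return --remaining_ == 0;
  }

 private:
  int remaining_;
};
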
@@ -2867,6 +2772,30 @@ struct StrongRootBlockAllocator::rebind {
   };
 };
 
+class V8_EXPORT_PRIVATE V8_NODISCARD EmbedderStackStateScope final {
+ public:
+  enum Origin {
+    kImplicitThroughTask,
+    kExplicitInvocation,
+  };
+
+  // Only used for testing where the Origin is always an explicit invocation.
+  static EmbedderStackStateScope ExplicitScopeForTesting(
+      LocalEmbedderHeapTracer* local_tracer,
+      EmbedderHeapTracer::EmbedderStackState stack_state);
+
+  EmbedderStackStateScope(Heap* heap, Origin origin,
+                          EmbedderHeapTracer::EmbedderStackState stack_state);
+  ~EmbedderStackStateScope();
+
+ private:
+  EmbedderStackStateScope(LocalEmbedderHeapTracer* local_tracer,
+                          EmbedderHeapTracer::EmbedderStackState stack_state);
+
+  LocalEmbedderHeapTracer* const local_tracer_;
+  const EmbedderHeapTracer::EmbedderStackState old_stack_state_;
+};
+
 }  // namespace internal
 }  // namespace v8
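
The new EmbedderStackStateScope above is a classic RAII guard: the constructor
saves the tracer's current stack state and overrides it, the destructor
restores it, and the private constructor plus ExplicitScopeForTesting keep
non-test callers on the Heap/Origin path. A self-contained sketch of the
save-override-restore mechanics with simplified stand-in types:

class LocalEmbedderHeapTracer {
 public:
  enum class StackState { kMayContainHeapPointers, kNoHeapPointers };
  StackState stack_state() const { return stack_state_; }
  void set_stack_state(StackState state) { stack_state_ = state; }

 private:
  StackState stack_state_ = StackState::kMayContainHeapPointers;
};

class EmbedderStackStateScope final {
 public:
  EmbedderStackStateScope(LocalEmbedderHeapTracer* tracer,
                          LocalEmbedderHeapTracer::StackState state)
      : tracer_(tracer), old_stack_state_(tracer->stack_state()) {
    tracer_->set_stack_state(state);  // override for the scope's duration
  }
  ~EmbedderStackStateScope() {
    tracer_->set_stack_state(old_stack_state_);  // restore on exit
  }

 private:
  LocalEmbedderHeapTracer* const tracer_;
  const LocalEmbedderHeapTracer::StackState old_stack_state_;
};
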