author    | Ryan Dahl <ry@tinyclouds.org> | 2011-10-27 00:48:23 -0700
committer | Ryan Dahl <ry@tinyclouds.org> | 2011-10-27 00:48:23 -0700
commit    | 92f5a5d3caf01f382f90c235e9057590a5e76870 (patch)
tree      | edf52631145345943dc82e1b783de81ba89208af /deps/v8/src/spaces.cc
parent    | 528c28587f11b64616ffa22b5cf3f53bea831792 (diff)
download  | node-new-92f5a5d3caf01f382f90c235e9057590a5e76870.tar.gz
Upgrade V8 to 3.7.1
Diffstat (limited to 'deps/v8/src/spaces.cc')
-rw-r--r-- | deps/v8/src/spaces.cc | 39
1 file changed, 24 insertions, 15 deletions
diff --git a/deps/v8/src/spaces.cc b/deps/v8/src/spaces.cc
index 61b318118a..f467f710ce 100644
--- a/deps/v8/src/spaces.cc
+++ b/deps/v8/src/spaces.cc
@@ -95,10 +95,6 @@ void HeapObjectIterator::Initialize(PagedSpace* space,
   cur_end_ = end;
   page_mode_ = mode;
   size_func_ = size_f;
-
-#ifdef DEBUG
-  Verify();
-#endif
 }
 
 
@@ -123,13 +119,6 @@ bool HeapObjectIterator::AdvanceToNextPage() {
 }
 
 
-#ifdef DEBUG
-void HeapObjectIterator::Verify() {
-  // TODO(gc): We should do something here.
-}
-#endif
-
-
 // -----------------------------------------------------------------------------
 // CodeRange
 
@@ -1909,11 +1898,24 @@ intptr_t FreeList::SumFreeLists() {
 
 bool NewSpace::ReserveSpace(int bytes) {
   // We can't reliably unpack a partial snapshot that needs more new space
-  // space than the minimum NewSpace size.
+  // space than the minimum NewSpace size. The limit can be set lower than
+  // the end of new space either because there is more space on the next page
+  // or because we have lowered the limit in order to get periodic incremental
+  // marking. The most reliable way to ensure that there is linear space is
+  // to do the allocation, then rewind the limit.
   ASSERT(bytes <= InitialCapacity());
-  Address limit = allocation_info_.limit;
+  MaybeObject* maybe = AllocateRawInternal(bytes);
+  Object* object = NULL;
+  if (!maybe->ToObject(&object)) return false;
+  HeapObject* allocation = HeapObject::cast(object);
   Address top = allocation_info_.top;
-  return limit - top >= bytes;
+  if ((top - bytes) == allocation->address()) {
+    allocation_info_.top = allocation->address();
+    return true;
+  }
+  // There may be a borderline case here where the allocation succeeded, but
+  // the limit and top have moved on to a new page. In that case we try again.
+  return ReserveSpace(bytes);
 }
 
 
@@ -2278,8 +2280,11 @@ HeapObject* LargeObjectIterator::Next() {
 // -----------------------------------------------------------------------------
 // LargeObjectSpace
 
-LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
+LargeObjectSpace::LargeObjectSpace(Heap* heap,
+                                   intptr_t max_capacity,
+                                   AllocationSpace id)
     : Space(heap, id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
+      max_capacity_(max_capacity),
      first_page_(NULL),
      size_(0),
      page_count_(0),
@@ -2319,6 +2324,10 @@ MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
     return Failure::RetryAfterGC(identity());
   }
 
+  if (Size() + object_size > max_capacity_) {
+    return Failure::RetryAfterGC(identity());
+  }
+
   LargePage* page = heap()->isolate()->memory_allocator()->
       AllocateLargePage(object_size, executable, this);
   if (page == NULL) return Failure::RetryAfterGC(identity());
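The most substantive change in this hunk is the rewritten NewSpace::ReserveSpace: rather than comparing limit - top against the request (which can underestimate free space when the limit has been lowered for incremental marking or when the next page has room), it performs a real allocation, rewinds top if the allocation was a plain linear bump, and retries once if the allocation spilled onto a new page. The sketch below is a minimal standalone illustration of that reserve-by-allocating-then-rewinding pattern, assuming an invented two-page bump allocator; TwoPageSpace and its members are hypothetical names for this sketch and only loosely mirror the V8 functions in the diff.

```cpp
// Standalone sketch of "reserve by allocating, then rewind" -- not V8 code.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iostream>

constexpr std::size_t kPageSize = 256;

// A toy space made of two fixed pages with a bump pointer. When the current
// page cannot hold a request, allocation continues at the start of the next
// page (roughly what happens when new-space allocation crosses a page).
struct TwoPageSpace {
  std::uint8_t pages[2][kPageSize];
  int current = 0;
  std::uint8_t* top = pages[0];

  std::uint8_t* PageEnd() { return pages[current] + kPageSize; }

  // Bump allocation; may advance to the second page, or fail outright.
  std::uint8_t* AllocateRaw(std::size_t bytes) {
    if (bytes > kPageSize) return nullptr;
    if (static_cast<std::size_t>(PageEnd() - top) < bytes) {
      if (current == 1) return nullptr;  // out of pages
      current = 1;
      top = pages[1];
    }
    std::uint8_t* result = top;
    top += bytes;
    return result;
  }

  // Reserve linear space without keeping the object: do the allocation, and
  // if the result is exactly the old top (a plain linear bump), rewind top so
  // the bytes remain free but known to fit. If the allocator had to move on
  // to the next page, the reservation is not linear from the old top, so we
  // retry from the new position -- the borderline case the patch mentions.
  bool ReserveSpace(std::size_t bytes) {
    std::uint8_t* old_top = top;
    std::uint8_t* allocation = AllocateRaw(bytes);
    if (allocation == nullptr) return false;
    if (allocation == old_top) {
      top = allocation;  // rewind: the space is linearly available again
      return true;
    }
    return ReserveSpace(bytes);
  }
};

int main() {
  TwoPageSpace space;
  assert(space.AllocateRaw(200) != nullptr);  // leaves 56 bytes on page 0
  assert(space.ReserveSpace(100));            // spills to page 1, retries there
  assert(space.current == 1);
  assert(!space.ReserveSpace(300));           // larger than a page: always fails
  std::cout << "reserve-by-allocating sketch ok\n";
  return 0;
}
```

The rewind works because a successful linear bump proves the bytes fit contiguously at the old top; undoing the bump leaves that fact intact while keeping the space free for the allocation that will actually use it.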