Diffstat (limited to 'deps/v8/src/spaces.cc')
-rw-r--r-- | deps/v8/src/spaces.cc | 39
1 file changed, 15 insertions, 24 deletions
diff --git a/deps/v8/src/spaces.cc b/deps/v8/src/spaces.cc
index f467f710ce..61b318118a 100644
--- a/deps/v8/src/spaces.cc
+++ b/deps/v8/src/spaces.cc
@@ -95,6 +95,10 @@ void HeapObjectIterator::Initialize(PagedSpace* space,
   cur_end_ = end;
   page_mode_ = mode;
   size_func_ = size_f;
+
+#ifdef DEBUG
+  Verify();
+#endif
 }
 
 
@@ -119,6 +123,13 @@ bool HeapObjectIterator::AdvanceToNextPage() {
 }
 
 
+#ifdef DEBUG
+void HeapObjectIterator::Verify() {
+  // TODO(gc): We should do something here.
+}
+#endif
+
+
 // -----------------------------------------------------------------------------
 // CodeRange
 
@@ -1898,24 +1909,11 @@ intptr_t FreeList::SumFreeLists() {
 
 bool NewSpace::ReserveSpace(int bytes) {
   // We can't reliably unpack a partial snapshot that needs more new space
-  // space than the minimum NewSpace size.  The limit can be set lower than
-  // the end of new space either because there is more space on the next page
-  // or because we have lowered the limit in order to get periodic incremental
-  // marking.  The most reliable way to ensure that there is linear space is
-  // to do the allocation, then rewind the limit.
+  // space than the minimum NewSpace size.
   ASSERT(bytes <= InitialCapacity());
-  MaybeObject* maybe = AllocateRawInternal(bytes);
-  Object* object = NULL;
-  if (!maybe->ToObject(&object)) return false;
-  HeapObject* allocation = HeapObject::cast(object);
+  Address limit = allocation_info_.limit;
   Address top = allocation_info_.top;
-  if ((top - bytes) == allocation->address()) {
-    allocation_info_.top = allocation->address();
-    return true;
-  }
-  // There may be a borderline case here where the allocation succeeded, but
-  // the limit and top have moved on to a new page.  In that case we try again.
-  return ReserveSpace(bytes);
+  return limit - top >= bytes;
 }
 
 
@@ -2280,11 +2278,8 @@ HeapObject* LargeObjectIterator::Next() {
 // -----------------------------------------------------------------------------
 // LargeObjectSpace
 
-LargeObjectSpace::LargeObjectSpace(Heap* heap,
-                                   intptr_t max_capacity,
-                                   AllocationSpace id)
+LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
     : Space(heap, id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
-      max_capacity_(max_capacity),
       first_page_(NULL),
       size_(0),
       page_count_(0),
@@ -2324,10 +2319,6 @@ MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
     return Failure::RetryAfterGC(identity());
  }
 
-  if (Size() + object_size > max_capacity_) {
-    return Failure::RetryAfterGC(identity());
-  }
-
   LargePage* page = heap()->isolate()->memory_allocator()->
       AllocateLargePage(object_size, executable, this);
   if (page == NULL) return Failure::RetryAfterGC(identity());
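
Note (not part of the upstream change): the rewritten NewSpace::ReserveSpace above replaces the old allocate-then-rewind probe with a plain comparison of the remaining linear allocation area against the requested size. The standalone C++ sketch below only illustrates that check; AllocationInfo and CanReserve are hypothetical stand-in names, not V8 code.

    #include <cassert>
    #include <cstdint>

    // Hypothetical stand-in for V8's allocation_info_: a bump-pointer
    // allocation area described by its current top and its limit.
    struct AllocationInfo {
      std::uintptr_t top;    // next free address in the linear area
      std::uintptr_t limit;  // end of the usable linear area
    };

    // Mirrors the new check: the reservation succeeds iff the space left
    // between top and limit can hold the requested number of bytes.
    bool CanReserve(const AllocationInfo& info, int bytes) {
      return info.limit - info.top >= static_cast<std::uintptr_t>(bytes);
    }

    int main() {
      AllocationInfo info{0x1000, 0x2000};  // 4 KB of linear space left
      assert(CanReserve(info, 1024));       // fits comfortably
      assert(CanReserve(info, 4096));       // exactly fits
      assert(!CanReserve(info, 4097));      // one byte too many
      return 0;
    }

The simplification visible in the diff drops the trial allocation, the rewind of allocation_info_.top, and the retry path for the borderline case where top and limit had moved to a new page, in favour of a single bounds check against the current limit.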