Diffstat (limited to 'deps/v8/src/spaces-inl.h'):
 deps/v8/src/spaces-inl.h | 20 ++------------------
 1 file changed, 2 insertions(+), 18 deletions(-)
diff --git a/deps/v8/src/spaces-inl.h b/deps/v8/src/spaces-inl.h
index ed78fc7a15..3709009c9b 100644
--- a/deps/v8/src/spaces-inl.h
+++ b/deps/v8/src/spaces-inl.h
@@ -164,7 +164,7 @@ Page* Page::Initialize(Heap* heap,
                        Executability executable,
                        PagedSpace* owner) {
   Page* page = reinterpret_cast<Page*>(chunk);
-  ASSERT(chunk->size() <= static_cast<size_t>(kPageSize));
+  ASSERT(chunk->size() == static_cast<size_t>(kPageSize));
   ASSERT(chunk->owner() == owner);
   owner->IncreaseCapacity(page->area_size());
   owner->Free(page->area_start(), page->area_size());
@@ -295,27 +295,11 @@ MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
 MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) {
   Address old_top = allocation_info_.top;
-#ifdef DEBUG
-  // If we are stressing compaction we waste some memory in new space
-  // in order to get more frequent GCs.
-  if (FLAG_stress_compaction && !HEAP->linear_allocation()) {
-    if (allocation_info_.limit - old_top >= size_in_bytes * 4) {
-      int filler_size = size_in_bytes * 4;
-      for (int i = 0; i < filler_size; i += kPointerSize) {
-        *(reinterpret_cast<Object**>(old_top + i)) =
-            HEAP->one_pointer_filler_map();
-      }
-      old_top += filler_size;
-      allocation_info_.top += filler_size;
-    }
-  }
-#endif
-
   if (allocation_info_.limit - old_top < size_in_bytes) {
     return SlowAllocateRaw(size_in_bytes);
   }
 
-  Object* obj = HeapObject::FromAddress(old_top);
+  Object* obj = HeapObject::FromAddress(allocation_info_.top);
   allocation_info_.top += size_in_bytes;
   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
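
Note: the fast path kept by this change is a bump-pointer (linear) allocator: if the gap between the current top of the new-space semispace and its limit can hold the request, the object is placed at top and top is advanced by size_in_bytes; otherwise the slow path (SlowAllocateRaw) takes over. The sketch below is a minimal standalone illustration of that technique only, not V8 code; the BumpAllocator class, the malloc-backed arena, and the 1 MB size are assumptions made for the example.

// Illustrative bump-pointer allocator; names and sizes are not from V8.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

class BumpAllocator {
 public:
  explicit BumpAllocator(size_t size)
      : base_(static_cast<uint8_t*>(std::malloc(size))),
        top_(base_),
        limit_(base_ + size) {}
  ~BumpAllocator() { std::free(base_); }

  // Fast path: if the request fits below limit, return the current top and
  // bump it; otherwise report failure (where V8 would fall back to a slow path).
  void* Allocate(size_t size_in_bytes) {
    if (static_cast<size_t>(limit_ - top_) < size_in_bytes) return nullptr;
    void* result = top_;
    top_ += size_in_bytes;
    return result;
  }

 private:
  uint8_t* base_;
  uint8_t* top_;
  uint8_t* limit_;
};

int main() {
  BumpAllocator space(1 << 20);  // 1 MB arena, arbitrary for the example.
  void* a = space.Allocate(64);
  void* b = space.Allocate(128);
  std::printf("a=%p b=%p\n", a, b);
  return 0;
}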