Diffstat (limited to 'deps/v8/src/spaces.h')
-rw-r--r--  deps/v8/src/spaces.h  2577
1 file changed, 1133 insertions, 1444 deletions
diff --git a/deps/v8/src/spaces.h b/deps/v8/src/spaces.h
index ce8e382aaa..f1564967e1 100644
--- a/deps/v8/src/spaces.h
+++ b/deps/v8/src/spaces.h
@@ -49,47 +49,45 @@ class Isolate;
//
// The semispaces of the young generation are contiguous. The old and map
// spaces consist of a list of pages. A page has a page header and an object
-// area.
+// area. The page size is deliberately chosen to be 8K bytes.
+// The first word of a page is an opaque page header that holds the
+// address of the next page and its ownership information. The second word may
+// hold the allocation top address of this page. Heap objects are aligned to
+// the pointer size.
//
// There is a separate large object space for objects larger than
// Page::kMaxHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged. Pages in large object space
-// may be larger than the page size.
+// may be larger than 8K.
//
-// A store-buffer based write barrier is used to keep track of intergenerational
-// references. See store-buffer.h.
+// A card marking write barrier is used to keep track of intergenerational
+// references. Old space pages are divided into regions of Page::kRegionSize
+// size. Each region has a corresponding dirty bit in the page header which is
+// set if the region might contain pointers to new space. For details about
+// the dirty-bit encoding, see the comments in the
+// Page::GetRegionNumberForAddress() method body.
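As an illustration, here is a standalone sketch (not the V8 code itself) of how an address can be mapped to a region number and a dirty-bit mask, assuming the 8K page size and the 256-byte regions (kRegionSizeLog2 == 8) defined later in this header:

#include <stdint.h>

static const int kPageSizeBits = 13;                        // 8K pages
static const uintptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
static const int kRegionSizeLog2 = 8;                       // 256-byte regions

// 8K / 256 = 32 regions per page, one bit each in a uint32_t dirty map.
int RegionNumberForAddress(uintptr_t addr) {
  return static_cast<int>((addr & kPageAlignmentMask) >> kRegionSizeLog2);
}

uint32_t RegionMaskForAddress(uintptr_t addr) {
  return 1u << RegionNumberForAddress(addr);
}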
//
-// During scavenges and mark-sweep collections we sometimes (after a store
-// buffer overflow) iterate intergenerational pointers without decoding heap
-// object maps so if the page belongs to old pointer space or large object
-// space it is essential to guarantee that the page does not contain any
-// garbage pointers to new space: every pointer aligned word which satisfies
-// the Heap::InNewSpace() predicate must be a pointer to a live heap object in
-// new space. Thus objects in old pointer and large object spaces should have a
-// special layout (e.g. no bare integer fields). This requirement does not
-// apply to map space which is iterated in a special fashion. However we still
-// require pointer fields of dead maps to be cleaned.
+// During scavenges and mark-sweep collections we iterate intergenerational
+// pointers without decoding heap object maps so if the page belongs to old
+// pointer space or large object space it is essential to guarantee that
+// the page does not contain any garbage pointers to new space: every
+// pointer-aligned word which satisfies the Heap::InNewSpace() predicate must
+// be a pointer to a live heap object in new space. Thus objects in old pointer
+// and large object spaces should have a special layout (e.g. no bare integer
+// fields). This requirement does not apply to map space which is iterated in
+// a special fashion. However we still require pointer fields of dead maps to
+// be cleaned.
//
-// To enable lazy cleaning of old space pages we can mark chunks of the page
-// as being garbage. Garbage sections are marked with a special map. These
-// sections are skipped when scanning the page, even if we are otherwise
-// scanning without regard for object boundaries. Garbage sections are chained
-// together to form a free list after a GC. Garbage sections created outside
-// of GCs by object truncation etc. may not be in the free list chain. Very
-// small free spaces are ignored, they need only be cleaned of bogus pointers
-// into new space.
+// To enable lazy cleaning of old space pages we use the notion of an
+// allocation watermark. Every pointer below the watermark is considered to be
+// well formed. The page allocation watermark is not necessarily equal to the
+// page allocation top, but all live objects on the page must reside below the
+// allocation watermark. During a scavenge the allocation watermark might be
+// bumped and invalid pointers might appear below it. To avoid following them
+// we store a valid watermark in a special field in the page header and set
+// the page's WATERMARK_INVALIDATED flag. For details see the comments in the
+// Page::SetAllocationWatermark() method body.
//
-// Each page may have up to one special garbage section. The start of this
-// section is denoted by the top field in the space. The end of the section
-// is denoted by the limit field in the space. This special garbage section
-// is not marked with a free space map in the data. The point of this section
-// is to enable linear allocation without having to constantly update the byte
-// array every time the top field is updated and a new object is created. The
-// special garbage section is not in the chain of garbage sections.
-//
-// Since the top and limit fields are in the space, not the page, only one page
-// has a special garbage section, and if the top and limit are equal then there
-// is no special garbage section.
// Some assertion macros used in debugging mode.
@@ -116,522 +114,30 @@ class Isolate;
class PagedSpace;
class MemoryAllocator;
class AllocationInfo;
-class Space;
-class FreeList;
-class MemoryChunk;
-
-class MarkBit {
- public:
- typedef uint32_t CellType;
-
- inline MarkBit(CellType* cell, CellType mask, bool data_only)
- : cell_(cell), mask_(mask), data_only_(data_only) { }
-
- inline CellType* cell() { return cell_; }
- inline CellType mask() { return mask_; }
-
-#ifdef DEBUG
- bool operator==(const MarkBit& other) {
- return cell_ == other.cell_ && mask_ == other.mask_;
- }
-#endif
-
- inline void Set() { *cell_ |= mask_; }
- inline bool Get() { return (*cell_ & mask_) != 0; }
- inline void Clear() { *cell_ &= ~mask_; }
-
- inline bool data_only() { return data_only_; }
-
- inline MarkBit Next() {
- CellType new_mask = mask_ << 1;
- if (new_mask == 0) {
- return MarkBit(cell_ + 1, 1, data_only_);
- } else {
- return MarkBit(cell_, new_mask, data_only_);
- }
- }
-
- private:
- CellType* cell_;
- CellType mask_;
- // This boolean indicates that the object is in a data-only space with no
- // pointers. This enables some optimizations when marking.
- // It is expected that this field is inlined and turned into control flow
- // at the place where the MarkBit object is created.
- bool data_only_;
-};
-
-
-// Bitmap is a sequence of cells each containing fixed number of bits.
-class Bitmap {
- public:
- static const uint32_t kBitsPerCell = 32;
- static const uint32_t kBitsPerCellLog2 = 5;
- static const uint32_t kBitIndexMask = kBitsPerCell - 1;
- static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
- static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;
-
- static const size_t kLength =
- (1 << kPageSizeBits) >> (kPointerSizeLog2);
-
- static const size_t kSize =
- (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);
-
-
- static int CellsForLength(int length) {
- return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
- }
-
- int CellsCount() {
- return CellsForLength(kLength);
- }
-
- static int SizeFor(int cells_count) {
- return sizeof(MarkBit::CellType) * cells_count;
- }
-
- INLINE(static uint32_t IndexToCell(uint32_t index)) {
- return index >> kBitsPerCellLog2;
- }
-
- INLINE(static uint32_t CellToIndex(uint32_t index)) {
- return index << kBitsPerCellLog2;
- }
-
- INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
- return (index + kBitIndexMask) & ~kBitIndexMask;
- }
-
- INLINE(MarkBit::CellType* cells()) {
- return reinterpret_cast<MarkBit::CellType*>(this);
- }
-
- INLINE(Address address()) {
- return reinterpret_cast<Address>(this);
- }
-
- INLINE(static Bitmap* FromAddress(Address addr)) {
- return reinterpret_cast<Bitmap*>(addr);
- }
-
- inline MarkBit MarkBitFromIndex(uint32_t index, bool data_only = false) {
- MarkBit::CellType mask = 1 << (index & kBitIndexMask);
- MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
- return MarkBit(cell, mask, data_only);
- }
-
- static inline void Clear(MemoryChunk* chunk);
-
- static void PrintWord(uint32_t word, uint32_t himask = 0) {
- for (uint32_t mask = 1; mask != 0; mask <<= 1) {
- if ((mask & himask) != 0) PrintF("[");
- PrintF((mask & word) ? "1" : "0");
- if ((mask & himask) != 0) PrintF("]");
- }
- }
-
- class CellPrinter {
- public:
- CellPrinter() : seq_start(0), seq_type(0), seq_length(0) { }
-
- void Print(uint32_t pos, uint32_t cell) {
- if (cell == seq_type) {
- seq_length++;
- return;
- }
-
- Flush();
-
- if (IsSeq(cell)) {
- seq_start = pos;
- seq_length = 0;
- seq_type = cell;
- return;
- }
-
- PrintF("%d: ", pos);
- PrintWord(cell);
- PrintF("\n");
- }
-
- void Flush() {
- if (seq_length > 0) {
- PrintF("%d: %dx%d\n",
- seq_start,
- seq_type == 0 ? 0 : 1,
- seq_length * kBitsPerCell);
- seq_length = 0;
- }
- }
-
- static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }
-
- private:
- uint32_t seq_start;
- uint32_t seq_type;
- uint32_t seq_length;
- };
-
- void Print() {
- CellPrinter printer;
- for (int i = 0; i < CellsCount(); i++) {
- printer.Print(i, cells()[i]);
- }
- printer.Flush();
- PrintF("\n");
- }
-
- bool IsClean() {
- for (int i = 0; i < CellsCount(); i++) {
- if (cells()[i] != 0) return false;
- }
- return true;
- }
-};
-
-
-class SkipList;
-class SlotsBuffer;
-
-// MemoryChunk represents a memory region owned by a specific space.
-// It is divided into the header and the body. Chunk start is always
-// 1MB aligned. Start of the body is aligned so it can accommodate
-// any heap object.
-class MemoryChunk {
- public:
- // Only works if the pointer is in the first kPageSize of the MemoryChunk.
- static MemoryChunk* FromAddress(Address a) {
- return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
- }
-
- // Only works for addresses in pointer spaces, not data or code spaces.
- static inline MemoryChunk* FromAnyPointerAddress(Address addr);
-
- Address address() { return reinterpret_cast<Address>(this); }
-
- bool is_valid() { return address() != NULL; }
-
- MemoryChunk* next_chunk() const { return next_chunk_; }
- MemoryChunk* prev_chunk() const { return prev_chunk_; }
-
- void set_next_chunk(MemoryChunk* next) { next_chunk_ = next; }
- void set_prev_chunk(MemoryChunk* prev) { prev_chunk_ = prev; }
-
- Space* owner() const {
- if ((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
- kFailureTag) {
- return reinterpret_cast<Space*>(owner_ - kFailureTag);
- } else {
- return NULL;
- }
- }
-
- void set_owner(Space* space) {
- ASSERT((reinterpret_cast<intptr_t>(space) & kFailureTagMask) == 0);
- owner_ = reinterpret_cast<Address>(space) + kFailureTag;
- ASSERT((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
- kFailureTag);
- }
-
- VirtualMemory* reserved_memory() {
- return &reservation_;
- }
-
- void InitializeReservedMemory() {
- reservation_.Reset();
- }
-
- void set_reserved_memory(VirtualMemory* reservation) {
- ASSERT_NOT_NULL(reservation);
- reservation_.TakeControl(reservation);
- }
-
- bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); }
- void initialize_scan_on_scavenge(bool scan) {
- if (scan) {
- SetFlag(SCAN_ON_SCAVENGE);
- } else {
- ClearFlag(SCAN_ON_SCAVENGE);
- }
- }
- inline void set_scan_on_scavenge(bool scan);
-
- int store_buffer_counter() { return store_buffer_counter_; }
- void set_store_buffer_counter(int counter) {
- store_buffer_counter_ = counter;
- }
-
- Address body() { return address() + kObjectStartOffset; }
-
- Address body_limit() { return address() + size(); }
-
- int body_size() { return static_cast<int>(size() - kObjectStartOffset); }
-
- bool Contains(Address addr) {
- return addr >= body() && addr < address() + size();
- }
-
- // Checks whether addr can be a limit of addresses in this page.
- // It's a limit if it's in the page, or if it's just after the
- // last byte of the page.
- bool ContainsLimit(Address addr) {
- return addr >= body() && addr <= address() + size();
- }
-
- enum MemoryChunkFlags {
- IS_EXECUTABLE,
- ABOUT_TO_BE_FREED,
- POINTERS_TO_HERE_ARE_INTERESTING,
- POINTERS_FROM_HERE_ARE_INTERESTING,
- SCAN_ON_SCAVENGE,
- IN_FROM_SPACE, // Mutually exclusive with IN_TO_SPACE.
- IN_TO_SPACE, // All pages in new space have one of these two set.
- NEW_SPACE_BELOW_AGE_MARK,
- CONTAINS_ONLY_DATA,
- EVACUATION_CANDIDATE,
- RESCAN_ON_EVACUATION,
-
- // Pages swept precisely can be iterated, hitting only the live objects.
- // Whereas those swept conservatively cannot be iterated over. Both flags
- // indicate that marking bits have been cleared by the sweeper, otherwise
- // marking bits are still intact.
- WAS_SWEPT_PRECISELY,
- WAS_SWEPT_CONSERVATIVELY,
-
- // Last flag, keep at bottom.
- NUM_MEMORY_CHUNK_FLAGS
- };
-
-
- static const int kPointersToHereAreInterestingMask =
- 1 << POINTERS_TO_HERE_ARE_INTERESTING;
-
- static const int kPointersFromHereAreInterestingMask =
- 1 << POINTERS_FROM_HERE_ARE_INTERESTING;
-
- static const int kEvacuationCandidateMask =
- 1 << EVACUATION_CANDIDATE;
-
- static const int kSkipEvacuationSlotsRecordingMask =
- (1 << EVACUATION_CANDIDATE) |
- (1 << RESCAN_ON_EVACUATION) |
- (1 << IN_FROM_SPACE) |
- (1 << IN_TO_SPACE);
-
-
- void SetFlag(int flag) {
- flags_ |= static_cast<uintptr_t>(1) << flag;
- }
-
- void ClearFlag(int flag) {
- flags_ &= ~(static_cast<uintptr_t>(1) << flag);
- }
-
- void SetFlagTo(int flag, bool value) {
- if (value) {
- SetFlag(flag);
- } else {
- ClearFlag(flag);
- }
- }
-
- bool IsFlagSet(int flag) {
- return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
- }
-
- // Set or clear multiple flags at a time. The flags in the mask
- // are set to the value in "flags", the rest retain the current value
- // in flags_.
- void SetFlags(intptr_t flags, intptr_t mask) {
- flags_ = (flags_ & ~mask) | (flags & mask);
- }
-
- // Return all current flags.
- intptr_t GetFlags() { return flags_; }
-
- // Manage live byte count (count of bytes known to be live,
- // because they are marked black).
- void ResetLiveBytes() {
- if (FLAG_gc_verbose) {
- PrintF("ResetLiveBytes:%p:%x->0\n",
- static_cast<void*>(this), live_byte_count_);
- }
- live_byte_count_ = 0;
- }
- void IncrementLiveBytes(int by) {
- ASSERT_LE(static_cast<unsigned>(live_byte_count_), size_);
- if (FLAG_gc_verbose) {
- printf("UpdateLiveBytes:%p:%x%c=%x->%x\n",
- static_cast<void*>(this), live_byte_count_,
- ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by),
- live_byte_count_ + by);
- }
- live_byte_count_ += by;
- ASSERT_LE(static_cast<unsigned>(live_byte_count_), size_);
- }
- int LiveBytes() {
- ASSERT(static_cast<unsigned>(live_byte_count_) <= size_);
- return live_byte_count_;
- }
- static void IncrementLiveBytes(Address address, int by) {
- MemoryChunk::FromAddress(address)->IncrementLiveBytes(by);
- }
-
- static const intptr_t kAlignment =
- (static_cast<uintptr_t>(1) << kPageSizeBits);
-
- static const intptr_t kAlignmentMask = kAlignment - 1;
-
- static const intptr_t kSizeOffset = kPointerSize + kPointerSize;
-
- static const intptr_t kLiveBytesOffset =
- kSizeOffset + kPointerSize + kPointerSize + kPointerSize +
- kPointerSize + kPointerSize + kPointerSize + kIntSize;
-
- static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;
-
- static const size_t kHeaderSize =
- kSlotsBufferOffset + kPointerSize + kPointerSize;
-
- static const int kBodyOffset =
- CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + Bitmap::kSize));
-
- // The start offset of the object area in a page. Aligned to both maps and
- // code alignment to be suitable for both. Also aligned to 32 words because
- // the marking bitmap is arranged in 32 bit chunks.
- static const int kObjectStartAlignment = 32 * kPointerSize;
- static const int kObjectStartOffset = kBodyOffset - 1 +
- (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
-
- size_t size() const { return size_; }
-
- Executability executable() {
- return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
- }
-
- bool ContainsOnlyData() {
- return IsFlagSet(CONTAINS_ONLY_DATA);
- }
-
- bool InNewSpace() {
- return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
- }
-
- bool InToSpace() {
- return IsFlagSet(IN_TO_SPACE);
- }
-
- bool InFromSpace() {
- return IsFlagSet(IN_FROM_SPACE);
- }
-
- // ---------------------------------------------------------------------
- // Markbits support
-
- inline Bitmap* markbits() {
- return Bitmap::FromAddress(address() + kHeaderSize);
- }
-
- void PrintMarkbits() { markbits()->Print(); }
-
- inline uint32_t AddressToMarkbitIndex(Address addr) {
- return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
- }
-
- inline static uint32_t FastAddressToMarkbitIndex(Address addr) {
- const intptr_t offset =
- reinterpret_cast<intptr_t>(addr) & kAlignmentMask;
-
- return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
- }
-
- inline Address MarkbitIndexToAddress(uint32_t index) {
- return this->address() + (index << kPointerSizeLog2);
- }
-
- void InsertAfter(MemoryChunk* other);
- void Unlink();
-
- inline Heap* heap() { return heap_; }
-
- static const int kFlagsOffset = kPointerSize * 3;
-
- bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); }
-
- bool ShouldSkipEvacuationSlotRecording() {
- return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
- }
-
- inline SkipList* skip_list() {
- return skip_list_;
- }
-
- inline void set_skip_list(SkipList* skip_list) {
- skip_list_ = skip_list;
- }
-
- inline SlotsBuffer* slots_buffer() {
- return slots_buffer_;
- }
-
- inline SlotsBuffer** slots_buffer_address() {
- return &slots_buffer_;
- }
-
- void MarkEvacuationCandidate() {
- ASSERT(slots_buffer_ == NULL);
- SetFlag(EVACUATION_CANDIDATE);
- }
-
- void ClearEvacuationCandidate() {
- ASSERT(slots_buffer_ == NULL);
- ClearFlag(EVACUATION_CANDIDATE);
- }
-
-
- protected:
- MemoryChunk* next_chunk_;
- MemoryChunk* prev_chunk_;
- size_t size_;
- intptr_t flags_;
- // If the chunk needs to remember its memory reservation, it is stored here.
- VirtualMemory reservation_;
- // The identity of the owning space. This is tagged as a failure pointer, but
- // no failure can be in an object, so this can be distinguished from any entry
- // in a fixed array.
- Address owner_;
- Heap* heap_;
- // Used by the store buffer to keep track of which pages to mark scan-on-
- // scavenge.
- int store_buffer_counter_;
- // Count of bytes marked black on page.
- int live_byte_count_;
- SlotsBuffer* slots_buffer_;
- SkipList* skip_list_;
-
- static MemoryChunk* Initialize(Heap* heap,
- Address base,
- size_t size,
- Executability executable,
- Space* owner);
-
- friend class MemoryAllocator;
-};
-
-STATIC_CHECK(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
// -----------------------------------------------------------------------------
-// A page is a memory chunk of size 1MB. Large object pages may be larger.
+// A page is normally 8K bytes in size. Large object pages may be larger. A
+// page address is always aligned to the 8K page size.
+//
+// Each page starts with a header of Page::kPageHeaderSize size which contains
+// bookkeeping data.
+//
+// The mark-compact collector transforms a map pointer into a page index and a
+// page offset. The exact encoding is described in the comments for
+// class MapWord in objects.h.
//
// The only way to get a page pointer is by calling factory methods:
// Page* p = Page::FromAddress(addr); or
// Page* p = Page::FromAllocationTop(top);
-class Page : public MemoryChunk {
+class Page {
public:
// Returns the page containing a given address. The address ranges
// from [page_addr .. page_addr + kPageSize[
- // This only works if the object is in fact in a page. See also MemoryChunk::
- // FromAddress() and FromAnyAddress().
+ //
+ // Note that this function only works for addresses in normal paged
+ // spaces and addresses in the first 8K of large object pages (i.e.,
+ // the start of large objects but not necessarily derived pointers
+ // within them).
INLINE(static Page* FromAddress(Address a)) {
return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
}
@@ -646,11 +152,30 @@ class Page : public MemoryChunk {
return p;
}
- // Returns the next page in the chain of pages owned by a space.
+ // Returns the start address of this page.
+ Address address() { return reinterpret_cast<Address>(this); }
+
+ // Checks whether this is a valid page address.
+ bool is_valid() { return address() != NULL; }
+
+ // Returns the next page of this page.
inline Page* next_page();
- inline Page* prev_page();
- inline void set_next_page(Page* page);
- inline void set_prev_page(Page* page);
+
+ // Return the end of allocation in this page. Undefined for unused pages.
+ inline Address AllocationTop();
+
+ // Return the allocation watermark for the page.
+ // For old space pages it is guaranteed that the area under the watermark
+ // does not contain any garbage pointers to new space.
+ inline Address AllocationWatermark();
+
+ // Return the allocation watermark offset from the beginning of the page.
+ inline uint32_t AllocationWatermarkOffset();
+
+ inline void SetAllocationWatermark(Address allocation_watermark);
+
+ inline void SetCachedAllocationWatermark(Address allocation_watermark);
+ inline Address CachedAllocationWatermark();
// Returns the start address of the object area in this page.
Address ObjectAreaStart() { return address() + kObjectStartOffset; }
@@ -663,6 +188,22 @@ class Page : public MemoryChunk {
return 0 == (OffsetFrom(a) & kPageAlignmentMask);
}
+ // True if this page was in use before current compaction started.
+ // Result is valid only for pages owned by paged spaces and
+ // only after PagedSpace::PrepareForMarkCompact was called.
+ inline bool WasInUseBeforeMC();
+
+ inline void SetWasInUseBeforeMC(bool was_in_use);
+
+ // True if this page is a large object page.
+ inline bool IsLargeObjectPage();
+
+ inline void SetIsLargeObjectPage(bool is_large_object_page);
+
+ inline Executability PageExecutability();
+
+ inline void SetPageExecutability(Executability executable);
+
// Returns the offset of a given address to this page.
INLINE(int Offset(Address a)) {
int offset = static_cast<int>(a - address());
@@ -677,6 +218,24 @@ class Page : public MemoryChunk {
}
// ---------------------------------------------------------------------
+ // Card marking support
+
+ static const uint32_t kAllRegionsCleanMarks = 0x0;
+ static const uint32_t kAllRegionsDirtyMarks = 0xFFFFFFFF;
+
+ inline uint32_t GetRegionMarks();
+ inline void SetRegionMarks(uint32_t dirty);
+
+ inline uint32_t GetRegionMaskForAddress(Address addr);
+ inline uint32_t GetRegionMaskForSpan(Address start, int length_in_bytes);
+ inline int GetRegionNumberForAddress(Address addr);
+
+ inline void MarkRegionDirty(Address addr);
+ inline bool IsRegionDirty(Address addr);
+
+ inline void ClearRegionMarks(Address start,
+ Address end,
+ bool reaches_limit);
// Page size in bytes. This must be a multiple of the OS page size.
static const int kPageSize = 1 << kPageSizeBits;
@@ -684,69 +243,118 @@ class Page : public MemoryChunk {
// Page size mask.
static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
+ static const int kPageHeaderSize = kPointerSize + kPointerSize + kIntSize +
+ kIntSize + kPointerSize + kPointerSize;
+
+ // The start offset of the object area in a page. Aligned to both maps and
+ // code alignment to be suitable for both.
+ static const int kObjectStartOffset =
+ CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kPageHeaderSize));
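(For orientation, an observation rather than anything stated in the header: these summands correspond, in order, to the header fields declared at the bottom of this class, namely opaque_header, flags_, dirty_regions_, mc_page_index, mc_first_forwarded, and heap_, i.e., 24 bytes on a 32-bit target.)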
+
// Object area size in bytes.
static const int kObjectAreaSize = kPageSize - kObjectStartOffset;
// Maximum object size that fits in a page.
static const int kMaxHeapObjectSize = kObjectAreaSize;
- static const int kFirstUsedCell =
- (kObjectStartOffset/kPointerSize) >> Bitmap::kBitsPerCellLog2;
-
- static const int kLastUsedCell =
- ((kPageSize - kPointerSize)/kPointerSize) >>
- Bitmap::kBitsPerCellLog2;
-
- inline void ClearGCFields();
-
- static inline Page* Initialize(Heap* heap,
- MemoryChunk* chunk,
- Executability executable,
- PagedSpace* owner);
-
- void InitializeAsAnchor(PagedSpace* owner);
-
- bool WasSweptPrecisely() { return IsFlagSet(WAS_SWEPT_PRECISELY); }
- bool WasSweptConservatively() { return IsFlagSet(WAS_SWEPT_CONSERVATIVELY); }
- bool WasSwept() { return WasSweptPrecisely() || WasSweptConservatively(); }
+ static const int kDirtyFlagOffset = 2 * kPointerSize;
+ static const int kRegionSizeLog2 = 8;
+ static const int kRegionSize = 1 << kRegionSizeLog2;
+ static const intptr_t kRegionAlignmentMask = (kRegionSize - 1);
- void MarkSweptPrecisely() { SetFlag(WAS_SWEPT_PRECISELY); }
- void MarkSweptConservatively() { SetFlag(WAS_SWEPT_CONSERVATIVELY); }
+ STATIC_CHECK(kRegionSize == kPageSize / kBitsPerInt);
- void ClearSweptPrecisely() { ClearFlag(WAS_SWEPT_PRECISELY); }
- void ClearSweptConservatively() { ClearFlag(WAS_SWEPT_CONSERVATIVELY); }
+ enum PageFlag {
+ IS_NORMAL_PAGE = 0,
+ WAS_IN_USE_BEFORE_MC,
-#ifdef DEBUG
- void Print();
-#endif // DEBUG
+ // Page allocation watermark was bumped by preallocation during scavenge.
+ // The correct watermark can be retrieved via the CachedAllocationWatermark()
+ // method.
+ WATERMARK_INVALIDATED,
+ IS_EXECUTABLE,
+ NUM_PAGE_FLAGS // Must be last
+ };
+ static const int kPageFlagMask = (1 << NUM_PAGE_FLAGS) - 1;
+
+ // To avoid an additional WATERMARK_INVALIDATED flag clearing pass during
+ // scavenge we just invalidate the watermark on each old space page after
+ // processing it, and then flip the meaning of the WATERMARK_INVALIDATED
+ // flag at the beginning of the next scavenge, so that each page becomes
+ // marked as having a valid watermark.
+ //
+ // The following invariant must hold for pages in old pointer and map spaces:
+ // if a page is in use, then it is marked as having an invalid watermark at
+ // the beginning and at the end of any GC.
+ //
+ // This invariant guarantees that after flipping the flag meaning at the
+ // beginning of a scavenge all pages in use will be marked as having a valid
+ // watermark.
+ static inline void FlipMeaningOfInvalidatedWatermarkFlag(Heap* heap);
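A standalone sketch of this trick, under assumed names (the heap-global mark word is modeled by HeapStub here; bit position 2 matches WATERMARK_INVALIDATED in the PageFlag enum above):

#include <stdint.h>

static const uint32_t kWatermarkInvalidatedBit = 1u << 2;  // WATERMARK_INVALIDATED

struct HeapStub {                          // stand-in for the heap-global mark
  uint32_t watermark_invalidated_mark_;    // 0 or kWatermarkInvalidatedBit
};

void FlipMeaningOfInvalidatedWatermarkFlag(HeapStub* heap) {
  heap->watermark_invalidated_mark_ ^= kWatermarkInvalidatedBit;
}

// A page's watermark counts as invalidated only while its stored bit equals
// the current mark, so flipping the mark revalidates every page at once
// without touching any of them.
bool IsWatermarkValid(const HeapStub* heap, uint32_t page_flags) {
  return (page_flags & kWatermarkInvalidatedBit) !=
         heap->watermark_invalidated_mark_;
}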
+
+ // Returns true if the page allocation watermark was not altered during
+ // scavenge.
+ inline bool IsWatermarkValid();
- friend class MemoryAllocator;
-};
+ inline void InvalidateWatermark(bool value);
+ inline bool GetPageFlag(PageFlag flag);
+ inline void SetPageFlag(PageFlag flag, bool value);
+ inline void ClearPageFlags();
-STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize);
+ inline void ClearGCFields();
+ static const int kAllocationWatermarkOffsetShift = WATERMARK_INVALIDATED + 1;
+ static const int kAllocationWatermarkOffsetBits = kPageSizeBits + 1;
+ static const uint32_t kAllocationWatermarkOffsetMask =
+ ((1 << kAllocationWatermarkOffsetBits) - 1) <<
+ kAllocationWatermarkOffsetShift;
+
+ static const uint32_t kFlagsMask =
+ ((1 << kAllocationWatermarkOffsetShift) - 1);
+
+ STATIC_CHECK(kBitsPerInt - kAllocationWatermarkOffsetShift >=
+ kAllocationWatermarkOffsetBits);
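Read as a bit layout, the constants above say that the low bits of flags_ hold the page flags and the bits from kAllocationWatermarkOffsetShift upward hold the allocation watermark offset. A standalone sketch of the packing, with the constant values repeated here as assumptions:

#include <stdint.h>

static const int kPageSizeBits = 13;                   // 8K pages
static const int kAllocationWatermarkOffsetShift = 3;  // WATERMARK_INVALIDATED + 1
static const int kAllocationWatermarkOffsetBits = kPageSizeBits + 1;
static const uint32_t kFlagsMask =
    (1u << kAllocationWatermarkOffsetShift) - 1;
static const uint32_t kAllocationWatermarkOffsetMask =
    ((1u << kAllocationWatermarkOffsetBits) - 1) <<
    kAllocationWatermarkOffsetShift;

uint32_t PackFlagsWord(uint32_t flags, uint32_t watermark_offset) {
  return (flags & kFlagsMask) |
         (watermark_offset << kAllocationWatermarkOffsetShift);
}

uint32_t WatermarkOffset(uint32_t flags_word) {
  return (flags_word & kAllocationWatermarkOffsetMask) >>
         kAllocationWatermarkOffsetShift;
}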
+
+ //---------------------------------------------------------------------------
+ // Page header description.
+ //
+ // If a page is not in the large object space, the first word,
+ // opaque_header, encodes the next page address (aligned to kPageSize,
+ // i.e., 8K) and the chunk number (0 to 8K-1). Only MemoryAllocator should
+ // use opaque_header. The value range of the opaque_header is
+ // [0..kPageSize[, or [next_page_start, next_page_end[. It cannot point to
+ // a valid address in the current page. If a page is in the large object
+ // space, the first word *may* (if the page start and large object chunk
+ // start are the same) contain the address of the next large object chunk.
+ intptr_t opaque_header;
+
+ // If the page is not in the large object space, the low-order bit of the
+ // second word is set. If the page is in the large object space, the
+ // second word *may* (if the page start and large object chunk start are
+ // the same) contain the large object chunk size; in either case, the
+ // low-order bit is cleared for large object pages.
+ // For normal pages this word is used to store page flags and the
+ // offset of the allocation top.
+ intptr_t flags_;
-class LargePage : public MemoryChunk {
- public:
- HeapObject* GetObject() {
- return HeapObject::FromAddress(body());
- }
+ // This field contains dirty marks for regions covering the page. Only dirty
+ // regions might contain intergenerational references.
+ // Only 32 dirty marks are supported, so for large object pages several
+ // regions might be mapped to a single dirty mark.
+ uint32_t dirty_regions_;
- inline LargePage* next_page() const {
- return static_cast<LargePage*>(next_chunk());
- }
+ // The index of the page in its owner space.
+ int mc_page_index;
- inline void set_next_page(LargePage* page) {
- set_next_chunk(page);
- }
- private:
- static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);
+ // During mark-compact collections this field contains the forwarding address
+ // of the first live object in this page.
+ // During scavenge collection this field is used to store the allocation
+ // watermark if it is altered during scavenge.
+ Address mc_first_forwarded;
- friend class MemoryAllocator;
+ Heap* heap_;
};
-STATIC_CHECK(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
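A sketch of the opaque_header packing described in the header comments above (the helper names are hypothetical; in V8 only MemoryAllocator touches this word). A page-aligned next-page address has zero low bits, so the chunk number (0 to 8K-1) fits in them:

#include <stdint.h>

static const int kPageSizeBits = 13;  // 8K pages
static const uintptr_t kPageAlignmentMask = (1u << kPageSizeBits) - 1;

intptr_t PackOpaqueHeader(uintptr_t next_page_address, int chunk_id) {
  // next_page_address is 8K-aligned; chunk_id is in [0, 8K).
  return static_cast<intptr_t>(next_page_address | chunk_id);
}

uintptr_t NextPageAddress(intptr_t opaque_header) {
  return static_cast<uintptr_t>(opaque_header) & ~kPageAlignmentMask;
}

int ChunkId(intptr_t opaque_header) {
  return static_cast<int>(opaque_header & kPageAlignmentMask);
}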
// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces.
@@ -772,14 +380,6 @@ class Space : public Malloced {
// (e.g. see LargeObjectSpace).
virtual intptr_t SizeOfObjects() { return Size(); }
- virtual int RoundSizeDownToObjectAlignment(int size) {
- if (id_ == CODE_SPACE) {
- return RoundDown(size, kCodeAlignment);
- } else {
- return RoundDown(size, kPointerSize);
- }
- }
-
#ifdef DEBUG
virtual void Print() = 0;
#endif
@@ -830,9 +430,9 @@ class CodeRange {
// Allocates a chunk of memory from the large-object portion of
// the code range. On platforms with no separate code range, should
// not be called.
- MUST_USE_RESULT Address AllocateRawMemory(const size_t requested,
- size_t* allocated);
- void FreeRawMemory(Address buf, size_t length);
+ MUST_USE_RESULT void* AllocateRawMemory(const size_t requested,
+ size_t* allocated);
+ void FreeRawMemory(void* buf, size_t length);
private:
Isolate* isolate_;
@@ -843,15 +443,9 @@ class CodeRange {
class FreeBlock {
public:
FreeBlock(Address start_arg, size_t size_arg)
- : start(start_arg), size(size_arg) {
- ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
- ASSERT(size >= static_cast<size_t>(Page::kPageSize));
- }
+ : start(start_arg), size(size_arg) {}
FreeBlock(void* start_arg, size_t size_arg)
- : start(static_cast<Address>(start_arg)), size(size_arg) {
- ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
- ASSERT(size >= static_cast<size_t>(Page::kPageSize));
- }
+ : start(static_cast<Address>(start_arg)), size(size_arg) {}
Address start;
size_t size;
@@ -879,63 +473,30 @@ class CodeRange {
};
-class SkipList {
- public:
- SkipList() {
- Clear();
- }
-
- void Clear() {
- for (int idx = 0; idx < kSize; idx++) {
- starts_[idx] = reinterpret_cast<Address>(-1);
- }
- }
-
- Address StartFor(Address addr) {
- return starts_[RegionNumber(addr)];
- }
-
- void AddObject(Address addr, int size) {
- int start_region = RegionNumber(addr);
- int end_region = RegionNumber(addr + size - kPointerSize);
- for (int idx = start_region; idx <= end_region; idx++) {
- if (starts_[idx] > addr) starts_[idx] = addr;
- }
- }
-
- static inline int RegionNumber(Address addr) {
- return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
- }
-
- static void Update(Address addr, int size) {
- Page* page = Page::FromAddress(addr);
- SkipList* list = page->skip_list();
- if (list == NULL) {
- list = new SkipList();
- page->set_skip_list(list);
- }
-
- list->AddObject(addr, size);
- }
-
- private:
- static const int kRegionSizeLog2 = 13;
- static const int kRegionSize = 1 << kRegionSizeLog2;
- static const int kSize = Page::kPageSize / kRegionSize;
-
- STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);
-
- Address starts_[kSize];
-};
-
-
// ----------------------------------------------------------------------------
// A space acquires chunks of memory from the operating system. The memory
-// allocator allocates and deallocates pages for the paged heap spaces and large
-// pages for large object space.
+// allocator manages chunks for the paged heap spaces (old space and map
+// space). A paged chunk consists of pages. Pages in a chunk have contiguous
+// addresses and are linked as a list.
+//
+// The allocator keeps an initial chunk which is used for the new space. The
+// leftover regions of the initial chunk are used for the initial chunks of
+// old space and map space if they are big enough to hold at least one page.
+// The allocator assumes that there is one old space and one map space; each
+// is expanded by allocating kPagesPerChunk pages at a time, except possibly
+// for the last expansion before running out of space. The first chunk may
+// contain fewer than kPagesPerChunk pages as well.
+//
+// The memory allocator also allocates chunks for the large object space, but
+// they are managed by the space itself. The new space does not expand.
//
-// Each space has to manage its own pages.
+// The fact that pages for paged spaces are allocated and deallocated in chunks
+// induces a constraint on the order of pages in a linked list. We say that
+// pages are linked in chunk order if and only if every two consecutive pages
+// from the same chunk are consecutive in the linked list.
//
+
+
class MemoryAllocator {
public:
explicit MemoryAllocator(Isolate* isolate);
@@ -944,15 +505,91 @@ class MemoryAllocator {
// Max capacity of the total space and executable memory limit.
bool Setup(intptr_t max_capacity, intptr_t capacity_executable);
+ // Deletes valid chunks.
void TearDown();
- Page* AllocatePage(PagedSpace* owner, Executability executable);
+ // Reserves an initial address range of virtual memory to be split between
+ // the two new space semispaces, the old space, and the map space. The
+ // memory is not yet committed or assigned to spaces and split into pages.
+ // The initial chunk is unmapped when the memory allocator is torn down.
+ // This function should only be called when there is not already a reserved
+ // initial chunk (initial_chunk_ should be NULL). If successful, it returns
+ // the start address of the initial chunk, with the side effect of setting
+ // the initial chunk; otherwise it returns NULL and leaves the initial
+ // chunk NULL.
+ void* ReserveInitialChunk(const size_t requested);
+
+ // Commits pages from an as-yet-unmanaged block of virtual memory into a
+ // paged space. The block should be part of the initial chunk reserved via
+ // a call to ReserveInitialChunk. The number of pages is always returned in
+ // the output parameter num_pages. This function assumes that the start
+ // address is non-null and that the block is big enough to hold at least
+ // one page-aligned page. The call always succeeds, and num_pages is always
+ // greater than zero.
+ Page* CommitPages(Address start, size_t size, PagedSpace* owner,
+ int* num_pages);
+
+ // Commit a contiguous block of memory from the initial chunk. Assumes that
+ // the address is not NULL, the size is greater than zero, and that the
+ // block is contained in the initial chunk. Returns true if it succeeded
+ // and false otherwise.
+ bool CommitBlock(Address start, size_t size, Executability executable);
- LargePage* AllocateLargePage(intptr_t object_size,
- Executability executable,
- Space* owner);
+ // Uncommit a contiguous block of memory [start..(start+size)[.
+ // Assumes that start is not NULL, the size is greater than zero, and the
+ // block is contained in the initial chunk. Returns true if it succeeded
+ // and false otherwise.
+ bool UncommitBlock(Address start, size_t size);
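A standalone POSIX sketch (not the V8 implementation) of the reserve-then-commit protocol the functions above describe: mmap with PROT_NONE reserves address space without backing store, mprotect commits a block, and dropping access plus madvise releases it again:

#include <stddef.h>
#include <sys/mman.h>

// Reserve address space without committing backing store.
void* ReserveChunk(size_t size) {
  void* p = mmap(NULL, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  return (p == MAP_FAILED) ? NULL : p;
}

// Commit a block inside a reserved chunk, optionally executable.
bool CommitBlock(void* start, size_t size, bool executable) {
  int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
  return mprotect(start, size, prot) == 0;
}

// Uncommit: drop access and let the OS reclaim the physical pages.
bool UncommitBlock(void* start, size_t size) {
  if (mprotect(start, size, PROT_NONE) != 0) return false;
  return madvise(start, size, MADV_DONTNEED) == 0;
}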
- void Free(MemoryChunk* chunk);
+ // Zaps a contiguous block of memory [start..(start+size)[ thus
+ // filling it up with a recognizable non-NULL bit pattern.
+ void ZapBlock(Address start, size_t size);
+
+ // Attempts to allocate the requested (non-zero) number of pages from the
+ // OS. Fewer pages might be allocated than requested. If it fails to
+ // allocate memory from the OS or cannot allocate a single page, this
+ // function returns an invalid page pointer (NULL). The caller must check
+ // whether the returned page is valid (by calling Page::is_valid()). It is
+ // guaranteed that allocated pages have contiguous addresses. The actual
+ // number of allocated pages is returned in the output parameter
+ // allocated_pages. If the PagedSpace owner is executable and there is
+ // a code range, the pages are allocated from the code range.
+ Page* AllocatePages(int requested_pages, int* allocated_pages,
+ PagedSpace* owner);
+
+ // Frees pages from a given page and after. Requires pages to be
+ // linked in chunk-order (see comment for class).
+ // If 'p' is the first page of a chunk, pages from 'p' are freed
+ // and this function returns an invalid page pointer.
+ // Otherwise, the function searches a page after 'p' that is
+ // the first page of a chunk. Pages after the found page
+ // are freed and the function returns 'p'.
+ Page* FreePages(Page* p);
+
+ // Frees all pages owned by given space.
+ void FreeAllPages(PagedSpace* space);
+
+ // Allocates and frees raw memory of a certain size.
+ // These are just thin wrappers around OS::Allocate and OS::Free,
+ // but keep track of allocated bytes as part of heap.
+ // If the flag is EXECUTABLE and a code range exists, the requested
+ // memory is allocated from the code range. If a code range exists
+ // and the freed memory is in it, the code range manages the freed memory.
+ MUST_USE_RESULT void* AllocateRawMemory(const size_t requested,
+ size_t* allocated,
+ Executability executable);
+ void FreeRawMemory(void* buf,
+ size_t length,
+ Executability executable);
+ void PerformAllocationCallback(ObjectSpace space,
+ AllocationAction action,
+ size_t size);
+
+ void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
+ ObjectSpace space,
+ AllocationAction action);
+ void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
+ bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback);
// Returns the maximum available bytes of heaps.
intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
@@ -974,68 +611,67 @@ class MemoryAllocator {
return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
}
-#ifdef DEBUG
- // Reports statistic info of the space.
- void ReportStatistics();
-#endif
+ // Links two pages.
+ inline void SetNextPage(Page* prev, Page* next);
- MemoryChunk* AllocateChunk(intptr_t body_size,
- Executability executable,
- Space* space);
+ // Returns the next page of a given page.
+ inline Page* GetNextPage(Page* p);
- Address ReserveAlignedMemory(size_t requested,
- size_t alignment,
- VirtualMemory* controller);
- Address AllocateAlignedMemory(size_t requested,
- size_t alignment,
- Executability executable,
- VirtualMemory* controller);
+ // Checks whether a page belongs to a space.
+ inline bool IsPageInSpace(Page* p, PagedSpace* space);
- void FreeMemory(VirtualMemory* reservation, Executability executable);
- void FreeMemory(Address addr, size_t size, Executability executable);
+ // Returns the space that owns the given page.
+ inline PagedSpace* PageOwner(Page* page);
- // Commit a contiguous block of memory from the initial chunk. Assumes that
- // the address is not NULL, the size is greater than zero, and that the
- // block is contained in the initial chunk. Returns true if it succeeded
- // and false otherwise.
- bool CommitBlock(Address start, size_t size, Executability executable);
+ // Finds the first/last page in the same chunk as a given page.
+ Page* FindFirstPageInSameChunk(Page* p);
+ Page* FindLastPageInSameChunk(Page* p);
- // Uncommit a contiguous block of memory [start..(start+size)[.
- // start is not NULL, the size is greater than zero, and the
- // block is contained in the initial chunk. Returns true if it succeeded
- // and false otherwise.
- bool UncommitBlock(Address start, size_t size);
-
- // Zaps a contiguous block of memory [start..(start+size)[ thus
- // filling it up with a recognizable non-NULL bit pattern.
- void ZapBlock(Address start, size_t size);
-
- void PerformAllocationCallback(ObjectSpace space,
- AllocationAction action,
- size_t size);
+ // Relinks the list of pages owned by a space to make it chunk-ordered.
+ // Returns the new first and last pages of the space.
+ // Also returns the last page in the relinked list which has the
+ // WasInUseBeforeMC flag set.
+ void RelinkPageListInChunkOrder(PagedSpace* space,
+ Page** first_page,
+ Page** last_page,
+ Page** last_page_in_use);
- void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
- ObjectSpace space,
- AllocationAction action);
-
- void RemoveMemoryAllocationCallback(
- MemoryAllocationCallback callback);
+#ifdef DEBUG
+ // Reports statistic info of the space.
+ void ReportStatistics();
+#endif
- bool MemoryAllocationCallbackRegistered(
- MemoryAllocationCallback callback);
+ // Due to an encoding limitation, we can have only 8K chunks.
+ static const int kMaxNofChunks = 1 << kPageSizeBits;
+ // If a chunk has at least 16 pages, the maximum heap size is about
+ // 8K * 8K * 16 = 1G bytes.
+#ifdef V8_TARGET_ARCH_X64
+ static const int kPagesPerChunk = 32;
+ // On 64 bit the chunk table consists of 4 levels of 4096-entry tables.
+ static const int kChunkTableLevels = 4;
+ static const int kChunkTableBitsPerLevel = 12;
+#else
+ static const int kPagesPerChunk = 16;
+ // On 32 bit the chunk table consists of 2 levels of 256-entry tables.
+ static const int kChunkTableLevels = 2;
+ static const int kChunkTableBitsPerLevel = 8;
+#endif
private:
+ static const int kChunkSize = kPagesPerChunk * Page::kPageSize;
+
Isolate* isolate_;
// Maximum space size in bytes.
- size_t capacity_;
+ intptr_t capacity_;
// Maximum subset of capacity_ that can be executable
- size_t capacity_executable_;
+ intptr_t capacity_executable_;
// Allocated space size in bytes.
- size_t size_;
+ intptr_t size_;
+
// Allocated executable space size in bytes.
- size_t size_executable_;
+ intptr_t size_executable_;
struct MemoryAllocationCallbackRegistration {
MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
@@ -1047,11 +683,64 @@ class MemoryAllocator {
ObjectSpace space;
AllocationAction action;
};
-
// A list of callbacks that are triggered when memory is allocated or freed.
List<MemoryAllocationCallbackRegistration>
memory_allocation_callbacks_;
+ // The initial chunk of virtual memory.
+ VirtualMemory* initial_chunk_;
+
+ // Allocated chunk info: chunk start address, chunk size, and owning space.
+ class ChunkInfo BASE_EMBEDDED {
+ public:
+ ChunkInfo() : address_(NULL),
+ size_(0),
+ owner_(NULL),
+ executable_(NOT_EXECUTABLE),
+ owner_identity_(FIRST_SPACE) {}
+ inline void init(Address a, size_t s, PagedSpace* o);
+ Address address() { return address_; }
+ size_t size() { return size_; }
+ PagedSpace* owner() { return owner_; }
+ // We save executability of the owner to allow using it
+ // when collecting stats after the owner has been destroyed.
+ Executability executable() const { return executable_; }
+ AllocationSpace owner_identity() const { return owner_identity_; }
+
+ private:
+ Address address_;
+ size_t size_;
+ PagedSpace* owner_;
+ Executability executable_;
+ AllocationSpace owner_identity_;
+ };
+
+ // chunks_, free_chunk_ids_ and top_ act as a stack of free chunk ids.
+ List<ChunkInfo> chunks_;
+ List<int> free_chunk_ids_;
+ int max_nof_chunks_;
+ int top_;
+
+ // Push/pop a free chunk id onto/from the stack.
+ void Push(int free_chunk_id);
+ int Pop();
+ bool OutOfChunkIds() { return top_ == 0; }
+
+ // Frees a chunk.
+ void DeleteChunk(int chunk_id);
+
+ // Basic check whether a chunk id is in the valid range.
+ inline bool IsValidChunkId(int chunk_id);
+
+ // Checks whether a chunk id identifies an allocated chunk.
+ inline bool IsValidChunk(int chunk_id);
+
+ // Returns the chunk id that a page belongs to.
+ inline int GetChunkId(Page* p);
+
+ // True if the address lies in the initial chunk.
+ inline bool InInitialChunk(Address address);
+
// Initializes pages in a chunk. Returns the first page address.
// This function and GetChunkId() are provided for the mark-compact
// collector to rebuild page headers in the from space, which is
@@ -1059,7 +748,13 @@ class MemoryAllocator {
Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
PagedSpace* owner);
- DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
+ Page* RelinkPagesInChunk(int chunk_id,
+ Address chunk_start,
+ size_t chunk_size,
+ Page* prev,
+ Page** last_page_in_use);
+
+ DISALLOW_COPY_AND_ASSIGN(MemoryAllocator);
};
@@ -1082,58 +777,71 @@ class ObjectIterator : public Malloced {
// -----------------------------------------------------------------------------
// Heap object iterator in new/old/map spaces.
//
-// A HeapObjectIterator iterates objects from the bottom of the given space
-// to its top or from the bottom of the given page to its top.
+// A HeapObjectIterator iterates objects from a given address to the
+// top of a space. The given address must be below the current
+// allocation pointer (space top). There are some caveats.
+//
+// (1) If the space top changes upward during iteration (because of
+// allocating new objects), the iterator does not iterate objects
+// above the original space top. The caller must create a new
+// iterator starting from the old top in order to visit these new
+// objects.
+//
+// (2) If new objects are allocated below the original allocation top
+// (e.g., free-list allocation in paged spaces), the new objects
+// may or may not be iterated depending on their position with
+// respect to the current point of iteration.
//
-// If objects are allocated in the page during iteration the iterator may
-// or may not iterate over those objects. The caller must create a new
-// iterator in order to be sure to visit these new objects.
+// (3) The space top should not change downward during iteration,
+// otherwise the iterator will return not-necessarily-valid
+// objects.
+
class HeapObjectIterator: public ObjectIterator {
public:
- // Creates a new object iterator in a given space.
+ // Creates a new object iterator in a given space. If a start
+ // address is not given, the iterator starts from the space bottom.
// If the size function is not given, the iterator calls the default
// Object::Size().
explicit HeapObjectIterator(PagedSpace* space);
HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
+ HeapObjectIterator(PagedSpace* space, Address start);
+ HeapObjectIterator(PagedSpace* space,
+ Address start,
+ HeapObjectCallback size_func);
HeapObjectIterator(Page* page, HeapObjectCallback size_func);
- // Advance to the next object, skipping free spaces and other fillers and
- // skipping the special garbage section of which there is one per space.
- // Returns NULL when the iteration has ended.
- inline HeapObject* Next() {
- do {
- HeapObject* next_obj = FromCurrentPage();
- if (next_obj != NULL) return next_obj;
- } while (AdvanceToNextPage());
- return NULL;
+ inline HeapObject* next() {
+ return (cur_addr_ < cur_limit_) ? FromCurrentPage() : FromNextPage();
}
- virtual HeapObject* next_object() {
- return Next();
- }
+ // Implementation of ObjectIterator.
+ virtual HeapObject* next_object() { return next(); }
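Hypothetical usage of this iterator, assuming (as in the old implementation) that next() returns NULL once iteration reaches the space top:

  HeapObjectIterator it(space);
  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
    // ... visit obj ...
  }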
private:
- enum PageMode { kOnePageOnly, kAllPagesInSpace };
+ Address cur_addr_; // current iteration point
+ Address end_addr_; // end iteration point
+ Address cur_limit_; // current page limit
+ HeapObjectCallback size_func_; // size function
+ Page* end_page_; // caches the page of the end address
- Address cur_addr_; // Current iteration point.
- Address cur_end_; // End iteration point.
- HeapObjectCallback size_func_; // Size function or NULL.
- PagedSpace* space_;
- PageMode page_mode_;
+ HeapObject* FromCurrentPage() {
+ ASSERT(cur_addr_ < cur_limit_);
+
+ HeapObject* obj = HeapObject::FromAddress(cur_addr_);
+ int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
+ ASSERT_OBJECT_SIZE(obj_size);
+
+ cur_addr_ += obj_size;
+ ASSERT(cur_addr_ <= cur_limit_);
- // Fast (inlined) path of next().
- inline HeapObject* FromCurrentPage();
+ return obj;
+ }
- // Slow path of next(), goes into the next page. Returns false if the
- // iteration has ended.
- bool AdvanceToNextPage();
+ // Slow path of next(), goes into the next page.
+ HeapObject* FromNextPage();
// Initializes fields.
- inline void Initialize(PagedSpace* owner,
- Address start,
- Address end,
- PageMode mode,
- HeapObjectCallback size_func);
+ void Initialize(Address start, Address end, HeapObjectCallback size_func);
#ifdef DEBUG
// Verifies whether fields have valid values.
@@ -1144,10 +852,36 @@ class HeapObjectIterator: public ObjectIterator {
// -----------------------------------------------------------------------------
// A PageIterator iterates the pages in a paged space.
+//
+// The PageIterator class provides three modes for iterating pages in a space:
+// PAGES_IN_USE iterates pages containing allocated objects.
+// PAGES_USED_BY_MC iterates pages that hold relocated objects during a
+// mark-compact collection.
+// ALL_PAGES iterates all pages in the space.
+//
+// There are some caveats.
+//
+// (1) If the space expands during iteration, new pages will not be
+// returned by the iterator in any mode.
+//
+// (2) If new objects are allocated during iteration, they will appear
+// in pages returned by the iterator. Allocation may cause the
+// allocation pointer or MC allocation pointer in the last page to
+// change between constructing the iterator and iterating the last
+// page.
+//
+// (3) The space should not shrink during iteration, otherwise the
+// iterator will return deallocated pages.
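Hypothetical usage of the iterator and its modes, using only the API declared below:

  PageIterator it(space, PageIterator::PAGES_IN_USE);
  while (it.has_next()) {
    Page* p = it.next();
    // ... visit p ...
  }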
class PageIterator BASE_EMBEDDED {
public:
- explicit inline PageIterator(PagedSpace* space);
+ enum Mode {
+ PAGES_IN_USE,
+ PAGES_USED_BY_MC,
+ ALL_PAGES
+ };
+
+ PageIterator(PagedSpace* space, Mode mode);
inline bool has_next();
inline Page* next();
@@ -1155,25 +889,21 @@ class PageIterator BASE_EMBEDDED {
private:
PagedSpace* space_;
Page* prev_page_; // Previous page returned.
- // Next page that will be returned. Cached here so that we can use this
- // iterator for operations that deallocate pages.
- Page* next_page_;
+ Page* stop_page_; // Page to stop at (last page returned by the iterator).
};
// -----------------------------------------------------------------------------
-// A space has a circular list of pages. The next page can be accessed via
-// Page::next_page() call.
+// A space has a list of pages. The next page can be accessed via
+// Page::next_page() call. The next page of the last page is an
+// invalid page pointer. A space can expand and shrink dynamically.
// An abstraction of allocation and relocation pointers in a page-structured
// space.
class AllocationInfo {
public:
- AllocationInfo() : top(NULL), limit(NULL) {
- }
-
- Address top; // Current allocation top.
- Address limit; // Current allocation limit.
+ Address top; // current allocation top
+ Address limit; // current allocation limit
#ifdef DEBUG
bool VerifyPagedAllocation() {
@@ -1205,199 +935,70 @@ class AllocationStats BASE_EMBEDDED {
// Zero out all the allocation statistics (ie, no capacity).
void Clear() {
capacity_ = 0;
+ available_ = 0;
size_ = 0;
waste_ = 0;
}
- void ClearSizeWaste() {
- size_ = capacity_;
- waste_ = 0;
- }
-
// Reset the allocation statistics (ie, available = capacity with no
// wasted or allocated bytes).
void Reset() {
+ available_ = capacity_;
size_ = 0;
waste_ = 0;
}
// Accessors for the allocation statistics.
intptr_t Capacity() { return capacity_; }
+ intptr_t Available() { return available_; }
intptr_t Size() { return size_; }
intptr_t Waste() { return waste_; }
- // Grow the space by adding available bytes. They are initially marked as
- // being in use (part of the size), but will normally be immediately freed,
- // putting them on the free list and removing them from size_.
+ // Grow the space by adding available bytes.
void ExpandSpace(int size_in_bytes) {
capacity_ += size_in_bytes;
- size_ += size_in_bytes;
- ASSERT(size_ >= 0);
+ available_ += size_in_bytes;
}
- // Shrink the space by removing available bytes. Since shrinking is done
- // during sweeping, bytes have been marked as being in use (part of the size)
- // and are hereby freed.
+ // Shrink the space by removing available bytes.
void ShrinkSpace(int size_in_bytes) {
capacity_ -= size_in_bytes;
- size_ -= size_in_bytes;
- ASSERT(size_ >= 0);
+ available_ -= size_in_bytes;
}
// Allocate from available bytes (available -> size).
void AllocateBytes(intptr_t size_in_bytes) {
+ available_ -= size_in_bytes;
size_ += size_in_bytes;
- ASSERT(size_ >= 0);
}
// Free allocated bytes, making them available (size -> available).
void DeallocateBytes(intptr_t size_in_bytes) {
size_ -= size_in_bytes;
- ASSERT(size_ >= 0);
+ available_ += size_in_bytes;
}
// Waste free bytes (available -> waste).
void WasteBytes(int size_in_bytes) {
- size_ -= size_in_bytes;
+ available_ -= size_in_bytes;
waste_ += size_in_bytes;
- ASSERT(size_ >= 0);
+ }
+
+ // Consider the wasted bytes to be allocated, as they contain filler
+ // objects (waste -> size).
+ void FillWastedBytes(intptr_t size_in_bytes) {
+ waste_ -= size_in_bytes;
+ size_ += size_in_bytes;
}
private:
intptr_t capacity_;
+ intptr_t available_;
intptr_t size_;
intptr_t waste_;
};
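A standalone sketch of the byte accounting these methods implement; the invariant capacity == available + size + waste is an inference from the transitions above, not a check present in this header:

#include <assert.h>
#include <stdint.h>

struct Stats { intptr_t capacity, available, size, waste; };

void AllocateBytes(Stats* s, intptr_t n)   { s->available -= n; s->size += n; }
void DeallocateBytes(Stats* s, intptr_t n) { s->size -= n; s->available += n; }
void WasteBytes(Stats* s, intptr_t n)      { s->available -= n; s->waste += n; }
void FillWastedBytes(Stats* s, intptr_t n) { s->waste -= n; s->size += n; }

void CheckInvariant(const Stats* s) {
  // Each transition moves bytes between two of the three buckets, so the
  // capacity split is preserved.
  assert(s->capacity == s->available + s->size + s->waste);
}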
-// -----------------------------------------------------------------------------
-// Free lists for old object spaces
-//
-// Free-list nodes are free blocks in the heap. They look like heap objects
-// (free-list node pointers have the heap object tag, and they have a map like
-// a heap object). They have a size and a next pointer. The next pointer is
-// the raw address of the next free list node (or NULL).
-class FreeListNode: public HeapObject {
- public:
- // Obtain a free-list node from a raw address. This is not a cast because
- // it does not check nor require that the first word at the address is a map
- // pointer.
- static FreeListNode* FromAddress(Address address) {
- return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
- }
-
- static inline bool IsFreeListNode(HeapObject* object);
-
- // Set the size in bytes, which can be read with HeapObject::Size(). This
- // function also writes a map to the first word of the block so that it
- // looks like a heap object to the garbage collector and heap iteration
- // functions.
- void set_size(Heap* heap, int size_in_bytes);
-
- // Accessors for the next field.
- inline FreeListNode* next();
- inline FreeListNode** next_address();
- inline void set_next(FreeListNode* next);
-
- inline void Zap();
-
- private:
- static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
-};
-
-
-// The free list for the old space. The free list is organized in such a way
-// as to encourage objects allocated around the same time to be near each
-// other. The normal way to allocate is intended to be by bumping a 'top'
-// pointer until it hits a 'limit' pointer. When the limit is hit we need to
-// find a new space to allocate from. This is done with the free list, which
-// is divided up into rough categories to cut down on waste. Having finer
-// categories would scatter allocation more.
-
-// The old space free list is organized in categories.
-// 1-31 words: Such small free areas are discarded for efficiency reasons.
-// They can be reclaimed by the compactor. However the distance between top
-// and limit may be this small.
-// 32-255 words: There is a list of spaces this large. It is used for top and
-// limit when the object we need to allocate is 1-31 words in size. These
-// spaces are called small.
-// 256-2047 words: There is a list of spaces this large. It is used for top and
-// limit when the object we need to allocate is 32-255 words in size. These
-// spaces are called medium.
-// 2048-16383 words: There is a list of spaces this large. It is used for top
-// and limit when the object we need to allocate is 256-2047 words in size.
-// These spaces are called large.
-// At least 16384 words. This list is for objects of 2048 words or larger.
-// Empty pages are added to this list. These spaces are called huge.
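A minimal standalone sketch of the top/limit bump allocation described above (the struct mirrors the AllocationInfo class earlier in this header; the refill path, which picks a new top/limit pair off a free-list category, is left to the caller):

#include <stddef.h>
#include <stdint.h>

struct AllocationInfo {
  uintptr_t top;    // current allocation top
  uintptr_t limit;  // current allocation limit
};

// Returns NULL when the current top/limit pair is exhausted; the caller
// must then refill top/limit from one of the free-list categories.
void* BumpAllocate(AllocationInfo* info, size_t size_in_bytes) {
  if (info->limit - info->top < size_in_bytes) return NULL;
  void* result = reinterpret_cast<void*>(info->top);
  info->top += size_in_bytes;  // bump the top pointer
  return result;
}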
-class FreeList BASE_EMBEDDED {
- public:
- explicit FreeList(PagedSpace* owner);
-
- // Clear the free list.
- void Reset();
-
- // Return the number of bytes available on the free list.
- intptr_t available() { return available_; }
-
- // Place a node on the free list. The block of size 'size_in_bytes'
- // starting at 'start' is placed on the free list. The return value is the
- // number of bytes that have been lost due to internal fragmentation by
- // freeing the block. Bookkeeping information will be written to the block,
- // ie, its contents will be destroyed. The start address should be word
- // aligned, and the size should be a non-zero multiple of the word size.
- int Free(Address start, int size_in_bytes);
-
- // Allocate a block of size 'size_in_bytes' from the free list. The block
- // is uninitialized. A failure is returned if no block is available. The
- // number of bytes lost to fragmentation is returned in the output parameter
- // 'wasted_bytes'. The size should be a non-zero multiple of the word size.
- MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
-
- void MarkNodes();
-
-#ifdef DEBUG
- void Zap();
- static intptr_t SumFreeList(FreeListNode* node);
- static int FreeListLength(FreeListNode* cur);
- intptr_t SumFreeLists();
- bool IsVeryLong();
-#endif
-
- void CountFreeListItems(Page* p, intptr_t* sizes);
-
- private:
- // The size range of blocks, in bytes.
- static const int kMinBlockSize = 3 * kPointerSize;
- static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
-
- FreeListNode* PickNodeFromList(FreeListNode** list, int* node_size);
-
- FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);
-
- PagedSpace* owner_;
- Heap* heap_;
-
- // Total available bytes in all blocks on this free list.
- int available_;
-
- static const int kSmallListMin = 0x20 * kPointerSize;
- static const int kSmallListMax = 0xff * kPointerSize;
- static const int kMediumListMax = 0x7ff * kPointerSize;
- static const int kLargeListMax = 0x3fff * kPointerSize;
- static const int kSmallAllocationMax = kSmallListMin - kPointerSize;
- static const int kMediumAllocationMax = kSmallListMax;
- static const int kLargeAllocationMax = kMediumListMax;
- FreeListNode* small_list_;
- FreeListNode* medium_list_;
- FreeListNode* large_list_;
- FreeListNode* huge_list_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
-};
-
-
class PagedSpace : public Space {
public:
// Creates a space with a maximum capacity, and an id.
@@ -1412,7 +1013,7 @@ class PagedSpace : public Space {
// the memory allocator's initial chunk) if possible. If the block of
// addresses is not big enough to contain a single page-aligned page, a
// fresh chunk will be allocated.
- bool Setup();
+ bool Setup(Address start, size_t size);
// Returns true if the space has been successfully set up and not
// subsequently torn down.
@@ -1425,6 +1026,8 @@ class PagedSpace : public Space {
// Checks whether an object/address is in this space.
inline bool Contains(Address a);
bool Contains(HeapObject* o) { return Contains(o->address()); }
+ // Never crashes even if a is not a valid pointer.
+ inline bool SafeContains(Address a);
// Given an address occupied by a live object, return that object if it is
// in this space, or Failure::Exception() if it is not. The implementation
@@ -1432,91 +1035,104 @@ class PagedSpace : public Space {
// linear in the number of objects in the page. It may be slow.
MUST_USE_RESULT MaybeObject* FindObject(Address addr);
+ // Checks whether page is currently in use by this space.
+ bool IsUsed(Page* page);
+
+ void MarkAllPagesClean();
+
// Prepares for a mark-compact GC.
- virtual void PrepareForMarkCompact();
+ virtual void PrepareForMarkCompact(bool will_compact);
- // Current capacity without growing (Size() + Available()).
+ // The top of allocation in a page in this space. Undefined if the page is
+ // unused.
+ Address PageAllocationTop(Page* page) {
+ return page == TopPageOf(allocation_info_) ? top()
+ : PageAllocationLimit(page);
+ }
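
// Illustrative only, as seen from code inside PagedSpace (hypothetical pages
// P1 -> P2 with the allocation pointer currently in P2): PageAllocationTop(P1)
// is the fixed per-page limit, while PageAllocationTop(P2) is the moving
// top(). Equivalently:
//
//   ASSERT(PageAllocationTop(p) ==
//          (p == AllocationTopPage() ? top() : PageAllocationLimit(p)));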
+
+ // The limit of allocation for a page in this space.
+ virtual Address PageAllocationLimit(Page* page) = 0;
+
+ void FlushTopPageWatermark() {
+ AllocationTopPage()->SetCachedAllocationWatermark(top());
+ AllocationTopPage()->InvalidateWatermark(true);
+ }
+
+ // Current capacity without growing (Size() + Available() + Waste()).
intptr_t Capacity() { return accounting_stats_.Capacity(); }
// Total amount of memory committed for this space. For paged
// spaces this equals the capacity.
intptr_t CommittedMemory() { return Capacity(); }
- // Sets the capacity, the available space and the wasted space to zero.
- // The stats are rebuilt during sweeping by adding each page to the
- // capacity and the size when it is encountered. As free spaces are
- // discovered during the sweeping they are subtracted from the size and added
- // to the available and wasted totals.
- void ClearStats() {
- accounting_stats_.ClearSizeWaste();
- }
-
- // Available bytes without growing. These are the bytes on the free list.
- // The bytes in the linear allocation area are not included in this total
- // because updating the stats would slow down allocation. New pages are
- // immediately added to the free list so they show up here.
- intptr_t Available() { return free_list_.available(); }
+ // Available bytes without growing.
+ intptr_t Available() { return accounting_stats_.Available(); }
- // Allocated bytes in this space. Garbage bytes that were not found due to
- // lazy sweeping are counted as being allocated! The bytes in the current
- // linear allocation area (between top and limit) are also counted here.
+ // Allocated bytes in this space.
virtual intptr_t Size() { return accounting_stats_.Size(); }
- // As size, but the bytes in the current linear allocation area are not
- // included.
- virtual intptr_t SizeOfObjects() { return Size() - (limit() - top()); }
+ // Wasted bytes due to fragmentation and not recoverable until the
+ // next GC of this space.
+ intptr_t Waste() { return accounting_stats_.Waste(); }
- // Wasted bytes in this space. These are just the bytes that were thrown away
- // due to being too small to use for allocation. They do not include the
- // free bytes that were not found at all due to lazy sweeping.
- virtual intptr_t Waste() { return accounting_stats_.Waste(); }
+ // Returns the address of the first object in this space.
+ Address bottom() { return first_page_->ObjectAreaStart(); }
// Returns the allocation pointer in this space.
- Address top() {
- return allocation_info_.top;
- }
- Address limit() { return allocation_info_.limit; }
+ Address top() { return allocation_info_.top; }
// Allocate the requested number of bytes in the space if possible, return a
// failure object if not.
MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes);
+ // Allocate the requested number of bytes for relocation during mark-compact
+ // collection.
+ MUST_USE_RESULT inline MaybeObject* MCAllocateRaw(int size_in_bytes);
+
virtual bool ReserveSpace(int bytes);
- // Give a block of memory to the space's free list. It might be added to
- // the free list or accounted as waste.
- // If add_to_freelist is false then just accounting stats are updated and
- // no attempt to add area to free list is made.
- int Free(Address start, int size_in_bytes) {
- int wasted = free_list_.Free(start, size_in_bytes);
- accounting_stats_.DeallocateBytes(size_in_bytes - wasted);
- return size_in_bytes - wasted;
- }
+ // Used by ReserveSpace.
+ virtual void PutRestOfCurrentPageOnFreeList(Page* current_page) = 0;
+
+ // Free all pages in range from prev (exclusive) to last (inclusive).
+ // Freed pages are moved to the end of page list.
+ void FreePages(Page* prev, Page* last);
+
+ // Deallocates a block.
+ virtual void DeallocateBlock(Address start,
+ int size_in_bytes,
+ bool add_to_freelist) = 0;
// Set space allocation info.
- void SetTop(Address top, Address limit) {
- ASSERT(top == limit ||
- Page::FromAddress(top) == Page::FromAddress(limit - 1));
+ void SetTop(Address top) {
allocation_info_.top = top;
- allocation_info_.limit = limit;
+ allocation_info_.limit = PageAllocationLimit(Page::FromAllocationTop(top));
}
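
// Hedged usage note: the limit is now derived from the page containing the
// new top, so callers position only the top pointer:
//
//   space->SetTop(new_top);  // 'space' and 'new_top' are hypothetical
//   ASSERT(space->top() == new_top);
//   // the limit is now PageAllocationLimit(Page::FromAllocationTop(new_top))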
- void Allocate(int bytes) {
- accounting_stats_.AllocateBytes(bytes);
- }
+ // ---------------------------------------------------------------------------
+ // Mark-compact collection support functions
- void IncreaseCapacity(int size) {
- accounting_stats_.ExpandSpace(size);
+ // Set the relocation point to the beginning of the space.
+ void MCResetRelocationInfo();
+
+ // Writes relocation info to the top page.
+ void MCWriteRelocationInfoToPage() {
+ TopPageOf(mc_forwarding_info_)->
+ SetAllocationWatermark(mc_forwarding_info_.top);
}
- // Releases an unused page and shrinks the space.
- void ReleasePage(Page* page);
+ // Computes the offset of a given address in this space from the beginning
+ // of the space.
+ int MCSpaceOffsetForAddress(Address addr);
+
+ // Updates the allocation pointer to the relocation top after a mark-compact
+ // collection.
+ virtual void MCCommitRelocationInfo() = 0;
- // Releases all of the unused pages.
- void ReleaseAllUnusedPages();
+ // Releases half of unused pages.
+ void Shrink();
- // The dummy page that anchors the linked list of pages.
- Page* anchor() { return &anchor_; }
+ // Ensures that the capacity is at least 'capacity'. Returns false on failure.
+ bool EnsureCapacity(int capacity);
#ifdef DEBUG
// Print meta info and objects in this space.
@@ -1525,9 +1141,6 @@ class PagedSpace : public Space {
// Verify integrity of this space.
virtual void Verify(ObjectVisitor* visitor);
- // Reports statistics for the space
- void ReportStatistics();
-
// Overridden by subclasses to verify space-specific object
// properties (e.g., only maps or free-list nodes are in map space).
virtual void VerifyObject(HeapObject* obj) {}
@@ -1538,67 +1151,10 @@ class PagedSpace : public Space {
static void ResetCodeStatistics();
#endif
- bool was_swept_conservatively() { return was_swept_conservatively_; }
- void set_was_swept_conservatively(bool b) { was_swept_conservatively_ = b; }
-
- // Evacuation candidates are swept by evacuator. Needs to return a valid
- // result before _and_ after evacuation has finished.
- static bool ShouldBeSweptLazily(Page* p) {
- return !p->IsEvacuationCandidate() &&
- !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) &&
- !p->WasSweptPrecisely();
- }
-
- void SetPagesToSweep(Page* first, Page* last) {
- first_unswept_page_ = first;
- last_unswept_page_ = last;
- }
-
- bool AdvanceSweeper(intptr_t bytes_to_sweep);
-
- bool IsSweepingComplete() {
- return !first_unswept_page_->is_valid();
- }
-
- Page* FirstPage() { return anchor_.next_page(); }
- Page* LastPage() { return anchor_.prev_page(); }
-
- bool IsFragmented(Page* p) {
- intptr_t sizes[4];
- free_list_.CountFreeListItems(p, sizes);
-
- intptr_t ratio;
- intptr_t ratio_threshold;
- if (identity() == CODE_SPACE) {
- ratio = (sizes[1] * 10 + sizes[2] * 2) * 100 / Page::kObjectAreaSize;
- ratio_threshold = 10;
- } else {
- ratio = (sizes[0] * 5 + sizes[1]) * 100 / Page::kObjectAreaSize;
- ratio_threshold = 15;
- }
-
- if (FLAG_trace_fragmentation) {
- PrintF("%p [%d]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
- reinterpret_cast<void*>(p),
- identity(),
- static_cast<int>(sizes[0]),
- static_cast<double>(sizes[0] * 100) / Page::kObjectAreaSize,
- static_cast<int>(sizes[1]),
- static_cast<double>(sizes[1] * 100) / Page::kObjectAreaSize,
- static_cast<int>(sizes[2]),
- static_cast<double>(sizes[2] * 100) / Page::kObjectAreaSize,
- static_cast<int>(sizes[3]),
- static_cast<double>(sizes[3] * 100) / Page::kObjectAreaSize,
- (ratio > ratio_threshold) ? "[fragmented]" : "");
- }
+ // Returns the page of the allocation pointer.
+ Page* AllocationTopPage() { return TopPageOf(allocation_info_); }
- return (ratio > ratio_threshold) ||
- (FLAG_always_compact && sizes[3] != Page::kObjectAreaSize);
- }
-
- void EvictEvacuationCandidatesFromFreeLists();
-
- bool CanExpand();
+ void RelinkPageListInChunkOrder(bool deallocate_blocks);
protected:
// Maximum capacity of this space.
@@ -1607,42 +1163,80 @@ class PagedSpace : public Space {
// Accounting information for this space.
AllocationStats accounting_stats_;
- // The dummy page that anchors the double linked list of pages.
- Page anchor_;
+ // The first page in this space.
+ Page* first_page_;
- // The space's free list.
- FreeList free_list_;
+ // The last page in this space. Initially set in Setup, updated in
+ // Expand and Shrink.
+ Page* last_page_;
+
+ // True if pages owned by this space are linked in chunk-order.
+ // See comment for class MemoryAllocator for definition of chunk-order.
+ bool page_list_is_chunk_ordered_;
// Normal allocation information.
AllocationInfo allocation_info_;
+ // Relocation information during mark-compact collections.
+ AllocationInfo mc_forwarding_info_;
+
// Bytes of each page that cannot be allocated. Possibly non-zero
// for pages in spaces with only fixed-size objects. Always zero
// for pages in spaces with variable sized objects (those pages are
// padded with free-list nodes).
int page_extra_;
- bool was_swept_conservatively_;
+ // Sets allocation pointer to a page bottom.
+ static void SetAllocationInfo(AllocationInfo* alloc_info, Page* p);
- Page* first_unswept_page_;
- Page* last_unswept_page_;
+ // Returns the top page specified by an allocation info structure.
+ static Page* TopPageOf(AllocationInfo alloc_info) {
+ return Page::FromAllocationTop(alloc_info.limit);
+ }
+
+ int CountPagesToTop() {
+ Page* p = Page::FromAllocationTop(allocation_info_.top);
+ PageIterator it(this, PageIterator::ALL_PAGES);
+ int counter = 1;
+ while (it.has_next()) {
+ if (it.next() == p) return counter;
+ counter++;
+ }
+ UNREACHABLE();
+ return -1;
+ }
// Expands the space by allocating a fixed number of pages. Returns false if
- // it cannot allocate requested number of pages from OS.
- bool Expand();
+ // it cannot allocate the requested number of pages from the OS. Newly
+ // allocated pages are appended after last_page.
+ bool Expand(Page* last_page);
+
+ // Generic fast case allocation function that tries linear allocation in
+ // the top page of 'alloc_info'. Returns NULL on failure.
+ inline HeapObject* AllocateLinearly(AllocationInfo* alloc_info,
+ int size_in_bytes);
- // Generic fast case allocation function that tries linear allocation at the
- // address denoted by top in allocation_info_.
- inline HeapObject* AllocateLinearly(int size_in_bytes);
+ // During normal allocation or deserialization, roll to the next page in
+ // the space (there is assumed to be one) and allocate there. This
+ // function is space-dependent.
+ virtual HeapObject* AllocateInNextPage(Page* current_page,
+ int size_in_bytes) = 0;
// Slow path of AllocateRaw. This function is space-dependent.
- MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes);
+ MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes) = 0;
+
+ // Slow path of MCAllocateRaw.
+ MUST_USE_RESULT HeapObject* SlowMCAllocateRaw(int size_in_bytes);
#ifdef DEBUG
// Returns the number of total pages in this space.
int CountTotalPages();
#endif
+ private:
+ // Returns a pointer to the page of the relocation pointer.
+ Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); }
+
friend class PageIterator;
};
@@ -1682,113 +1276,20 @@ class HistogramInfo: public NumberAndSizeInfo {
};
-enum SemiSpaceId {
- kFromSpace = 0,
- kToSpace = 1
-};
-
-
-class SemiSpace;
-
-
-class NewSpacePage : public MemoryChunk {
- public:
- // GC related flags copied from from-space to to-space when
- // flipping semispaces.
- static const intptr_t kCopyOnFlipFlagsMask =
- (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
- (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
- (1 << MemoryChunk::SCAN_ON_SCAVENGE);
-
- inline NewSpacePage* next_page() const {
- return static_cast<NewSpacePage*>(next_chunk());
- }
-
- inline void set_next_page(NewSpacePage* page) {
- set_next_chunk(page);
- }
-
- inline NewSpacePage* prev_page() const {
- return static_cast<NewSpacePage*>(prev_chunk());
- }
-
- inline void set_prev_page(NewSpacePage* page) {
- set_prev_chunk(page);
- }
-
- SemiSpace* semi_space() {
- return reinterpret_cast<SemiSpace*>(owner());
- }
-
- bool is_anchor() { return !this->InNewSpace(); }
-
- static bool IsAtStart(Address addr) {
- return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask)
- == kObjectStartOffset;
- }
-
- static bool IsAtEnd(Address addr) {
- return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
- }
-
- Address address() {
- return reinterpret_cast<Address>(this);
- }
-
- // Finds the NewSpacePage containing the given address.
- static inline NewSpacePage* FromAddress(Address address_in_page) {
- Address page_start =
- reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &
- ~Page::kPageAlignmentMask);
- NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
- ASSERT(page->InNewSpace());
- return page;
- }
-
- // Find the page for a limit address. A limit address is either an address
- // inside a page, or the address right after the last byte of a page.
- static inline NewSpacePage* FromLimit(Address address_limit) {
- return NewSpacePage::FromAddress(address_limit - 1);
- }
-
- private:
- // Create a NewSpacePage object that is only used as anchor
- // for the doubly-linked list of real pages.
- explicit NewSpacePage(SemiSpace* owner) {
- InitializeAsAnchor(owner);
- }
-
- static NewSpacePage* Initialize(Heap* heap,
- Address start,
- SemiSpace* semi_space);
-
- // Initialize a fake NewSpacePage used as sentinel at the ends
- // of a doubly-linked list of real NewSpacePages.
- // Only uses the prev/next links, and sets flags to not be in new-space.
- void InitializeAsAnchor(SemiSpace* owner);
-
- friend class SemiSpace;
- friend class SemiSpaceIterator;
-};
-
-
// -----------------------------------------------------------------------------
// SemiSpace in young generation
//
-// A semispace is a contiguous chunk of memory holding page-like memory
-// chunks. The mark-compact collector uses the memory of the first page in
-// the from space as a marking stack when tracing live objects.
+// A semispace is a contiguous chunk of memory. The mark-compact collector
+// uses the memory in the from space as a marking stack when tracing live
+// objects.
class SemiSpace : public Space {
public:
// Constructor.
- SemiSpace(Heap* heap, SemiSpaceId semispace)
- : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
- start_(NULL),
- age_mark_(NULL),
- id_(semispace),
- anchor_(this),
- current_page_(NULL) { }
+ explicit SemiSpace(Heap* heap) : Space(heap, NEW_SPACE, NOT_EXECUTABLE) {
+ start_ = NULL;
+ age_mark_ = NULL;
+ }
// Sets up the semispace using the given chunk.
bool Setup(Address start, int initial_capacity, int maximum_capacity);
@@ -1800,9 +1301,14 @@ class SemiSpace : public Space {
// True if the space has been set up but not torn down.
bool HasBeenSetup() { return start_ != NULL; }
+ // Grow the size of the semispace by committing extra virtual memory.
+ // Assumes that the caller has checked that the semispace has not reached
+ // its maximum capacity (and thus there is space available in the reserved
+ // address range to grow).
+ bool Grow();
+
// Grow the semispace to the new capacity. The new capacity
- // requested must be larger than the current capacity and less than
- // the maximum capacity.
+ // requested must be larger than the current capacity.
bool GrowTo(int new_capacity);
// Shrinks the semispace to the new capacity. The new capacity
@@ -1810,41 +1316,14 @@ class SemiSpace : public Space {
// semispace and less than the current capacity.
bool ShrinkTo(int new_capacity);
- // Returns the start address of the first page of the space.
- Address space_start() {
- ASSERT(anchor_.next_page() != &anchor_);
- return anchor_.next_page()->body();
- }
-
- // Returns the start address of the current page of the space.
- Address page_low() {
- ASSERT(anchor_.next_page() != &anchor_);
- return current_page_->body();
- }
-
+ // Returns the start address of the space.
+ Address low() { return start_; }
// Returns one past the end address of the space.
- Address space_end() {
- return anchor_.prev_page()->body_limit();
- }
-
- // Returns one past the end address of the current page of the space.
- Address page_high() {
- return current_page_->body_limit();
- }
-
- bool AdvancePage() {
- NewSpacePage* next_page = current_page_->next_page();
- if (next_page == anchor()) return false;
- current_page_ = next_page;
- return true;
- }
-
- // Resets the space to using the first page.
- void Reset();
+ Address high() { return low() + capacity_; }
// Age mark accessors.
Address age_mark() { return age_mark_; }
- void set_age_mark(Address mark);
+ void set_age_mark(Address mark) { age_mark_ = mark; }
// True if the address is in the address range of this semispace (not
// necessarily below the allocation pointer).
@@ -1859,6 +1338,11 @@ class SemiSpace : public Space {
return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
}
+ // The offset of an address from the beginning of the space.
+ int SpaceOffsetForAddress(Address addr) {
+ return static_cast<int>(addr - low());
+ }
+
// If we don't have these here then SemiSpace will be abstract. However
// they should never be called.
virtual intptr_t Size() {
@@ -1875,19 +1359,9 @@ class SemiSpace : public Space {
bool Commit();
bool Uncommit();
- NewSpacePage* first_page() { return anchor_.next_page(); }
- NewSpacePage* current_page() { return current_page_; }
-
#ifdef DEBUG
virtual void Print();
virtual void Verify();
- // Validate a range of addresses in a SemiSpace.
- // The "from" address must be on a page prior to the "to" address,
- // in the linked page order, or it must be earlier on the same page.
- static void AssertValidRange(Address from, Address to);
-#else
- // Do nothing.
- inline static void AssertValidRange(Address from, Address to) {}
#endif
// Returns the current capacity of the semi space.
@@ -1899,17 +1373,7 @@ class SemiSpace : public Space {
// Returns the initial capacity of the semi space.
int InitialCapacity() { return initial_capacity_; }
- SemiSpaceId id() { return id_; }
-
- static void Swap(SemiSpace* from, SemiSpace* to);
-
private:
- // Flips the semispace between being from-space and to-space.
- // Copies the flags into the masked positions on all pages in the space.
- void FlipPages(intptr_t flags, intptr_t flag_mask);
-
- NewSpacePage* anchor() { return &anchor_; }
-
// The current and maximum capacity of the space.
int capacity_;
int maximum_capacity_;
@@ -1926,13 +1390,7 @@ class SemiSpace : public Space {
uintptr_t object_expected_;
bool committed_;
- SemiSpaceId id_;
- NewSpacePage anchor_;
- NewSpacePage* current_page_;
-
- friend class SemiSpaceIterator;
- friend class NewSpacePageIterator;
public:
TRACK_MEMORY("SemiSpace")
};
@@ -1948,26 +1406,12 @@ class SemiSpaceIterator : public ObjectIterator {
// Create an iterator over the objects in the given space. If no start
// address is given, the iterator starts from the bottom of the space. If
// no size function is given, the iterator calls Object::Size().
-
- // Iterate over all of allocated to-space.
explicit SemiSpaceIterator(NewSpace* space);
- // Iterate over all of allocated to-space, with a custom size function.
SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
- // Iterate over part of allocated to-space, from start to the end
- // of allocation.
SemiSpaceIterator(NewSpace* space, Address start);
- // Iterate from one address to another in the same semi-space.
- SemiSpaceIterator(Address from, Address to);
- HeapObject* Next() {
+ HeapObject* next() {
if (current_ == limit_) return NULL;
- if (NewSpacePage::IsAtEnd(current_)) {
- NewSpacePage* page = NewSpacePage::FromLimit(current_);
- page = page->next_page();
- ASSERT(!page->is_anchor());
- current_ = page->body();
- if (current_ == limit_) return NULL;
- }
HeapObject* object = HeapObject::FromAddress(current_);
int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
@@ -1977,13 +1421,14 @@ class SemiSpaceIterator : public ObjectIterator {
}
// Implementation of the ObjectIterator functions.
- virtual HeapObject* next_object() { return Next(); }
+ virtual HeapObject* next_object() { return next(); }
private:
- void Initialize(Address start,
- Address end,
+ void Initialize(NewSpace* space, Address start, Address end,
HeapObjectCallback size_func);
+ // The semispace.
+ SemiSpace* space_;
// The current iteration point.
Address current_;
// The end of iteration.
@@ -1994,34 +1439,6 @@ class SemiSpaceIterator : public ObjectIterator {
// -----------------------------------------------------------------------------
-// A PageIterator iterates the pages in a semi-space.
-class NewSpacePageIterator BASE_EMBEDDED {
- public:
- // Make an iterator that runs over all pages in to-space.
- explicit inline NewSpacePageIterator(NewSpace* space);
-
- // Make an iterator that runs over all pages in the given semispace,
- // even those not used in allocation.
- explicit inline NewSpacePageIterator(SemiSpace* space);
-
- // Make iterator that iterates from the page containing start
- // to the page that contains limit in the same semispace.
- inline NewSpacePageIterator(Address start, Address limit);
-
- inline bool has_next();
- inline NewSpacePage* next();
-
- private:
- NewSpacePage* prev_page_; // Previous page returned.
- // Next page that will be returned. Cached here so that we can use this
- // iterator for operations that deallocate pages.
- NewSpacePage* next_page_;
- // Last page returned.
- NewSpacePage* last_page_;
-};
-
-
-// -----------------------------------------------------------------------------
// The young generation space.
//
// The new space consists of a contiguous pair of semispaces. It simply
@@ -2032,13 +1449,11 @@ class NewSpace : public Space {
// Constructor.
explicit NewSpace(Heap* heap)
: Space(heap, NEW_SPACE, NOT_EXECUTABLE),
- to_space_(heap, kToSpace),
- from_space_(heap, kFromSpace),
- reservation_(),
- inline_allocation_limit_step_(0) {}
+ to_space_(heap),
+ from_space_(heap) {}
// Sets up the new space using the given chunk.
- bool Setup(int reserved_semispace_size_, int max_semispace_size);
+ bool Setup(Address start, int size);
// Tears down the space. Heap memory was not allocated by the space, so it
// is not deallocated here.
@@ -2065,30 +1480,18 @@ class NewSpace : public Space {
return (reinterpret_cast<uintptr_t>(a) & address_mask_)
== reinterpret_cast<uintptr_t>(start_);
}
-
bool Contains(Object* o) {
- Address a = reinterpret_cast<Address>(o);
- return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_;
+ return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
}
// Return the allocated bytes in the active semispace.
- virtual intptr_t Size() {
- return pages_used_ * Page::kObjectAreaSize +
- static_cast<int>(top() - to_space_.page_low());
- }
-
+ virtual intptr_t Size() { return static_cast<int>(top() - bottom()); }
// The same, but returning an int. We have to have the one that returns
// intptr_t because it is inherited, but if we know we are dealing with the
// new space, which can't get as big as the other spaces, then this is useful:
int SizeAsInt() { return static_cast<int>(Size()); }
// Return the current capacity of a semispace.
- intptr_t EffectiveCapacity() {
- ASSERT(to_space_.Capacity() == from_space_.Capacity());
- return (to_space_.Capacity() / Page::kPageSize) * Page::kObjectAreaSize;
- }
-
- // Return the current capacity of a semispace.
intptr_t Capacity() {
ASSERT(to_space_.Capacity() == from_space_.Capacity());
return to_space_.Capacity();
@@ -2100,11 +1503,8 @@ class NewSpace : public Space {
return Capacity();
}
- // Return the available bytes without growing or switching page in the
- // active semispace.
- intptr_t Available() {
- return allocation_info_.limit - allocation_info_.top;
- }
+ // Return the available bytes without growing in the active semispace.
+ intptr_t Available() { return Capacity() - Size(); }
// Return the maximum capacity of a semispace.
int MaximumCapacity() {
@@ -2119,12 +1519,9 @@ class NewSpace : public Space {
}
// Return the address of the allocation pointer in the active semispace.
- Address top() {
- ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.top));
- return allocation_info_.top;
- }
+ Address top() { return allocation_info_.top; }
// Return the address of the first object in the active semispace.
- Address bottom() { return to_space_.space_start(); }
+ Address bottom() { return to_space_.low(); }
// Get the age mark of the inactive semispace.
Address age_mark() { return from_space_.age_mark(); }
@@ -2136,70 +1533,54 @@ class NewSpace : public Space {
Address start() { return start_; }
uintptr_t mask() { return address_mask_; }
- INLINE(uint32_t AddressToMarkbitIndex(Address addr)) {
- ASSERT(Contains(addr));
- ASSERT(IsAligned(OffsetFrom(addr), kPointerSize) ||
- IsAligned(OffsetFrom(addr) - 1, kPointerSize));
- return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2;
- }
-
- INLINE(Address MarkbitIndexToAddress(uint32_t index)) {
- return reinterpret_cast<Address>(index << kPointerSizeLog2);
- }
-
// The allocation top and limit addresses.
Address* allocation_top_address() { return &allocation_info_.top; }
Address* allocation_limit_address() { return &allocation_info_.limit; }
MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes) {
- return AllocateRawInternal(size_in_bytes);
+ return AllocateRawInternal(size_in_bytes, &allocation_info_);
+ }
+
+ // Allocate the requested number of bytes for relocation during mark-compact
+ // collection.
+ MUST_USE_RESULT MaybeObject* MCAllocateRaw(int size_in_bytes) {
+ return AllocateRawInternal(size_in_bytes, &mc_forwarding_info_);
}
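
// A hedged caller sketch using the MaybeObject protocol from this codebase
// ('heap' and 'size_in_bytes' are assumed to be in scope):
//
//   Object* result;
//   MaybeObject* maybe = heap->new_space()->AllocateRaw(size_in_bytes);
//   if (!maybe->ToObject(&result)) return maybe;  // propagate the failure
//   HeapObject* object = HeapObject::cast(result);
//
// MCAllocateRaw() has the same shape but bumps mc_forwarding_info_ instead.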
// Reset the allocation pointer to the beginning of the active semispace.
void ResetAllocationInfo();
+ // Reset the relocation pointer to the bottom of the inactive semispace in
+ // preparation for mark-compact collection.
+ void MCResetRelocationInfo();
+ // Update the allocation pointer in the active semispace after a
+ // mark-compact collection.
+ void MCCommitRelocationInfo();
- void LowerInlineAllocationLimit(intptr_t step) {
- inline_allocation_limit_step_ = step;
- if (step == 0) {
- allocation_info_.limit = to_space_.page_high();
- } else {
- allocation_info_.limit = Min(
- allocation_info_.top + inline_allocation_limit_step_,
- allocation_info_.limit);
- }
- top_on_previous_step_ = allocation_info_.top;
- }
-
- // Get the extent of the inactive semispace (for use as a marking stack,
- // or to zap it). Notice: space-addresses are not necessarily on the
- // same page, so FromSpaceStart() might be above FromSpaceEnd().
- Address FromSpacePageLow() { return from_space_.page_low(); }
- Address FromSpacePageHigh() { return from_space_.page_high(); }
- Address FromSpaceStart() { return from_space_.space_start(); }
- Address FromSpaceEnd() { return from_space_.space_end(); }
+ // Get the extent of the inactive semispace (for use as a marking stack).
+ Address FromSpaceLow() { return from_space_.low(); }
+ Address FromSpaceHigh() { return from_space_.high(); }
- // Get the extent of the active semispace's pages' memory.
- Address ToSpaceStart() { return to_space_.space_start(); }
- Address ToSpaceEnd() { return to_space_.space_end(); }
+ // Get the extent of the active semispace (to sweep newly copied objects
+ // during a scavenge collection).
+ Address ToSpaceLow() { return to_space_.low(); }
+ Address ToSpaceHigh() { return to_space_.high(); }
- inline bool ToSpaceContains(Address address) {
- return to_space_.Contains(address);
+ // Offsets from the beginning of the semispaces.
+ int ToSpaceOffsetForAddress(Address a) {
+ return to_space_.SpaceOffsetForAddress(a);
}
- inline bool FromSpaceContains(Address address) {
- return from_space_.Contains(address);
+ int FromSpaceOffsetForAddress(Address a) {
+ return from_space_.SpaceOffsetForAddress(a);
}
// True if the object is a heap object in the address range of the
// respective semispace (not necessarily below the allocation pointer of the
// semispace).
- inline bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
- inline bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
+ bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
+ bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
- // Try to switch the active semispace to a new, empty, page.
- // Returns false if this isn't possible or reasonable (i.e., there
- // are no pages, or the current page is already empty), or true
- // if successful.
- bool AddFreshPage();
+ bool ToSpaceContains(Address a) { return to_space_.Contains(a); }
+ bool FromSpaceContains(Address a) { return from_space_.Contains(a); }
virtual bool ReserveSpace(int bytes);
@@ -2239,24 +1620,10 @@ class NewSpace : public Space {
return from_space_.Uncommit();
}
- inline intptr_t inline_allocation_limit_step() {
- return inline_allocation_limit_step_;
- }
-
- SemiSpace* active_space() { return &to_space_; }
-
private:
- // Update allocation info to match the current to-space page.
- void UpdateAllocationInfo();
-
- Address chunk_base_;
- uintptr_t chunk_size_;
-
// The semispaces.
SemiSpace to_space_;
SemiSpace from_space_;
- VirtualMemory reservation_;
- int pages_used_;
// Start address and bit mask for containment testing.
Address start_;
@@ -2267,20 +1634,15 @@ class NewSpace : public Space {
// Allocation pointer and limit for normal allocation and allocation during
// mark-compact collection.
AllocationInfo allocation_info_;
-
- // When incremental marking is active we will set allocation_info_.limit
- // to be lower than actual limit and then will gradually increase it
- // in steps to guarantee that we do incremental marking steps even
- // when all allocation is performed from inlined generated code.
- intptr_t inline_allocation_limit_step_;
-
- Address top_on_previous_step_;
+ AllocationInfo mc_forwarding_info_;
HistogramInfo* allocated_histogram_;
HistogramInfo* promoted_histogram_;
- // Implementation of AllocateRaw.
- MUST_USE_RESULT inline MaybeObject* AllocateRawInternal(int size_in_bytes);
+ // Implementation of AllocateRaw and MCAllocateRaw.
+ MUST_USE_RESULT inline MaybeObject* AllocateRawInternal(
+ int size_in_bytes,
+ AllocationInfo* alloc_info);
friend class SemiSpaceIterator;
@@ -2290,6 +1652,193 @@ class NewSpace : public Space {
// -----------------------------------------------------------------------------
+// Free lists for old object spaces
+//
+// Free-list nodes are free blocks in the heap. They look like heap objects
+// (free-list node pointers have the heap object tag, and they have a map like
+// a heap object). They have a size and a next pointer. The next pointer is
+// the raw address of the next free list node (or NULL).
+class FreeListNode: public HeapObject {
+ public:
+ // Obtain a free-list node from a raw address. This is not a cast because
+ // it does not check nor require that the first word at the address is a map
+ // pointer.
+ static FreeListNode* FromAddress(Address address) {
+ return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
+ }
+
+ static inline bool IsFreeListNode(HeapObject* object);
+
+ // Set the size in bytes, which can be read with HeapObject::Size(). This
+ // function also writes a map to the first word of the block so that it
+ // looks like a heap object to the garbage collector and heap iteration
+ // functions.
+ void set_size(Heap* heap, int size_in_bytes);
+
+ // Accessors for the next field.
+ inline Address next(Heap* heap);
+ inline void set_next(Heap* heap, Address next);
+
+ private:
+ static const int kNextOffset = POINTER_SIZE_ALIGN(ByteArray::kHeaderSize);
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
+};
+
+
+// The free list for the old space.
+class OldSpaceFreeList BASE_EMBEDDED {
+ public:
+ OldSpaceFreeList(Heap* heap, AllocationSpace owner);
+
+ // Clear the free list.
+ void Reset();
+
+ // Return the number of bytes available on the free list.
+ intptr_t available() { return available_; }
+
+ // Place a node on the free list. The block of size 'size_in_bytes'
+ // starting at 'start' is placed on the free list. The return value is the
+ // number of bytes that have been lost due to internal fragmentation by
+ // freeing the block. Bookkeeping information will be written to the block,
+ // i.e., its contents will be destroyed. The start address should be word
+ // aligned, and the size should be a non-zero multiple of the word size.
+ int Free(Address start, int size_in_bytes);
+
+ // Allocate a block of size 'size_in_bytes' from the free list. The block
+ // is uninitialized. A failure is returned if no block is available. The
+ // number of bytes lost to fragmentation is returned in the output parameter
+ // 'wasted_bytes'. The size should be a non-zero multiple of the word size.
+ MUST_USE_RESULT MaybeObject* Allocate(int size_in_bytes, int* wasted_bytes);
+
+ void MarkNodes();
+
+ private:
+ // The size range of blocks, in bytes. (Smaller allocations are allowed, but
+ // will always result in waste.)
+ static const int kMinBlockSize = 2 * kPointerSize;
+ static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
+
+ Heap* heap_;
+
+ // The identity of the owning space, for building allocation Failure
+ // objects.
+ AllocationSpace owner_;
+
+ // Total available bytes in all blocks on this free list.
+ int available_;
+
+ // Blocks are put on exact free lists in an array, indexed by size in words.
+ // The available sizes are kept in an increasingly ordered list. Entries
+ // corresponding to sizes < kMinBlockSize always have an empty free list
+ // (but index kHead is used for the head of the size list).
+ struct SizeNode {
+ // Address of the head FreeListNode of the implied block size or NULL.
+ Address head_node_;
+ // Size (words) of the next larger available size if head_node_ != NULL.
+ int next_size_;
+ };
+ static const int kFreeListsLength = kMaxBlockSize / kPointerSize + 1;
+ SizeNode free_[kFreeListsLength];
+
+ // Sentinel elements for the size list. Real elements are in ]kHead..kEnd[.
+ static const int kHead = kMinBlockSize / kPointerSize - 1;
+ static const int kEnd = kMaxInt;
+
+ // We keep a "finger" in the size list to speed up a common pattern:
+ // repeated requests for the same or increasing sizes.
+ int finger_;
+
+ // Starting from *prev, find and return the smallest size >= index (words),
+ // or kEnd. Update *prev to be the largest size < index, or kHead.
+ int FindSize(int index, int* prev) {
+ int cur = free_[*prev].next_size_;
+ while (cur < index) {
+ *prev = cur;
+ cur = free_[cur].next_size_;
+ }
+ return cur;
+ }
+
+ // Remove an existing element from the size list.
+ void RemoveSize(int index) {
+ int prev = kHead;
+ int cur = FindSize(index, &prev);
+ ASSERT(cur == index);
+ free_[prev].next_size_ = free_[cur].next_size_;
+ finger_ = prev;
+ }
+
+ // Insert a new element into the size list.
+ void InsertSize(int index) {
+ int prev = kHead;
+ int cur = FindSize(index, &prev);
+ ASSERT(cur != index);
+ free_[prev].next_size_ = index;
+ free_[index].next_size_ = cur;
+ }
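
// Worked example (hypothetical 32-bit build, kPointerSize == 4): freeing a
// 24-byte block yields index 24 / kPointerSize == 6 words. If the size list
// currently holds only size 8, InsertSize(6) walks from kHead, stops at
// cur == 8, and links 6 in front of it:
//
//   free_[kHead].next_size_ == 6
//   free_[6].next_size_ == 8
//
// A later FindSize(7, &prev) then returns 8 with *prev == 6, so a 7-word
// request is served by splitting an 8-word block.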
+
+ // The size list is not updated during a sequence of calls to Free, but is
+ // rebuilt before the next allocation.
+ void RebuildSizeList();
+ bool needs_rebuild_;
+
+#ifdef DEBUG
+ // Does this free list contain a free block located at the address of 'node'?
+ bool Contains(FreeListNode* node);
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(OldSpaceFreeList);
+};
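
// A hedged usage sketch; 'heap' and 'block_start' are assumed to be in
// scope, and the space identity is only an example:
//
//   OldSpaceFreeList free_list(heap, OLD_POINTER_SPACE);
//   free_list.Free(block_start, 64);  // donate a 64-byte block
//   int wasted;
//   MaybeObject* maybe = free_list.Allocate(48, &wasted);
//   // On success 48 bytes are reused; the 16-byte remainder is re-freed
//   // or reported in 'wasted', depending on the minimum block size.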
+
+
+// The free list for the map space.
+class FixedSizeFreeList BASE_EMBEDDED {
+ public:
+ FixedSizeFreeList(Heap* heap, AllocationSpace owner, int object_size);
+
+ // Clear the free list.
+ void Reset();
+
+ // Return the number of bytes available on the free list.
+ intptr_t available() { return available_; }
+
+ // Place a node on the free list. The block starting at 'start' (assumed to
+ // have size object_size_) is placed on the free list. Bookkeeping
+ // information will be written to the block, i.e., its contents will be
+ // destroyed. The start address should be word aligned.
+ void Free(Address start);
+
+ // Allocate a fixed-size block from the free list. The block is uninitialized.
+ // A failure is returned if no block is available.
+ MUST_USE_RESULT MaybeObject* Allocate();
+
+ void MarkNodes();
+
+ private:
+ Heap* heap_;
+
+ // Available bytes on the free list.
+ intptr_t available_;
+
+ // The head of the free list.
+ Address head_;
+
+ // The tail of the free list.
+ Address tail_;
+
+ // The identity of the owning space, for building allocation Failure
+ // objects.
+ AllocationSpace owner_;
+
+ // The size of the objects in this space.
+ int object_size_;
+
+ DISALLOW_COPY_AND_ASSIGN(FixedSizeFreeList);
+};
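
// Hedged sketch: every block has size object_size_, so Free() takes no size
// argument and Allocate() cannot fragment ('heap' and 'dead_map_address'
// are assumed):
//
//   FixedSizeFreeList map_free_list(heap, MAP_SPACE, Map::kSize);
//   map_free_list.Free(dead_map_address);
//   MaybeObject* maybe = map_free_list.Allocate();  // reuses that block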
+
+
+// -----------------------------------------------------------------------------
// Old object space (excluding map objects)
class OldSpace : public PagedSpace {
@@ -2300,28 +1849,71 @@ class OldSpace : public PagedSpace {
intptr_t max_capacity,
AllocationSpace id,
Executability executable)
- : PagedSpace(heap, max_capacity, id, executable) {
+ : PagedSpace(heap, max_capacity, id, executable),
+ free_list_(heap, id) {
page_extra_ = 0;
}
+ // The bytes available on the free list (i.e., not above the linear allocation
+ // pointer).
+ intptr_t AvailableFree() { return free_list_.available(); }
+
// The limit of allocation for a page in this space.
virtual Address PageAllocationLimit(Page* page) {
return page->ObjectAreaEnd();
}
+ // Give a block of memory to the space's free list. It might be added to
+ // the free list or accounted as waste.
+ // If add_to_freelist is false then just accounting stats are updated and
+ // no attempt to add area to free list is made.
+ void Free(Address start, int size_in_bytes, bool add_to_freelist) {
+ accounting_stats_.DeallocateBytes(size_in_bytes);
+
+ if (add_to_freelist) {
+ int wasted_bytes = free_list_.Free(start, size_in_bytes);
+ accounting_stats_.WasteBytes(wasted_bytes);
+ }
+ }
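
// Accounting effect, as a sketch: DeallocateBytes() moves the block from
// size_ back to available_, and any unusable sliver the free list reports is
// then moved from available_ to waste_ by WasteBytes(). For example, assuming
// a block below the free list's minimum size is wasted entirely:
//
//   old_space->Free(start, kPointerSize, true);
//   // All kPointerSize bytes sit in waste_ until the next GC of this space.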
+
+ virtual void DeallocateBlock(Address start,
+ int size_in_bytes,
+ bool add_to_freelist);
+
+ // Prepare for full garbage collection. Resets the relocation pointer and
+ // clears the free list.
+ virtual void PrepareForMarkCompact(bool will_compact);
+
+ // Updates the allocation pointer to the relocation top after a mark-compact
+ // collection.
+ virtual void MCCommitRelocationInfo();
+
+ virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
+
+ void MarkFreeListNodes() { free_list_.MarkNodes(); }
+
+#ifdef DEBUG
+ // Reports statistics for the space
+ void ReportStatistics();
+#endif
+
+ protected:
+ // Virtual function in the superclass. Slow path of AllocateRaw.
+ MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
+
+ // Virtual function in the superclass. Allocate linearly at the start of
+ // the page after current_page (there is assumed to be one).
+ HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
+
+ private:
+ // The space's free list.
+ OldSpaceFreeList free_list_;
+
public:
TRACK_MEMORY("OldSpace")
};
-// For contiguous spaces, top should be in the space (or at the end) and limit
-// should be the end of the space.
-#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
- ASSERT((space).page_low() <= (info).top \
- && (info).top <= (space).page_high() \
- && (info).limit <= (space).page_high())
-
-
// -----------------------------------------------------------------------------
// Old space for objects of a fixed size
@@ -2334,7 +1926,8 @@ class FixedSpace : public PagedSpace {
const char* name)
: PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
object_size_in_bytes_(object_size_in_bytes),
- name_(name) {
+ name_(name),
+ free_list_(heap, id, object_size_in_bytes) {
page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
}
@@ -2345,12 +1938,44 @@ class FixedSpace : public PagedSpace {
int object_size_in_bytes() { return object_size_in_bytes_; }
+ // Give a fixed sized block of memory to the space's free list.
+ // If add_to_freelist is false then just accounting stats are updated and
+ // no attempt to add area to free list is made.
+ void Free(Address start, bool add_to_freelist) {
+ if (add_to_freelist) {
+ free_list_.Free(start);
+ }
+ accounting_stats_.DeallocateBytes(object_size_in_bytes_);
+ }
+
// Prepares for a mark-compact GC.
- virtual void PrepareForMarkCompact();
+ virtual void PrepareForMarkCompact(bool will_compact);
+
+ // Updates the allocation pointer to the relocation top after a mark-compact
+ // collection.
+ virtual void MCCommitRelocationInfo();
+
+ virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
+
+ virtual void DeallocateBlock(Address start,
+ int size_in_bytes,
+ bool add_to_freelist);
void MarkFreeListNodes() { free_list_.MarkNodes(); }
+#ifdef DEBUG
+ // Reports statistics for the space.
+ void ReportStatistics();
+#endif
+
protected:
+ // Virtual function in the superclass. Slow path of AllocateRaw.
+ MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
+
+ // Virtual function in the superclass. Allocate linearly at the start of
+ // the page after current_page (there is assumed to be one).
+ HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
+
void ResetFreeList() {
free_list_.Reset();
}
@@ -2361,6 +1986,9 @@ class FixedSpace : public PagedSpace {
// The name of this space.
const char* name_;
+
+ // The space's free list.
+ FixedSizeFreeList free_list_;
};
@@ -2376,18 +2004,83 @@ class MapSpace : public FixedSpace {
AllocationSpace id)
: FixedSpace(heap, max_capacity, id, Map::kSize, "map"),
max_map_space_pages_(max_map_space_pages) {
+ ASSERT(max_map_space_pages < kMaxMapPageIndex);
}
+ // Prepares for a mark-compact GC.
+ virtual void PrepareForMarkCompact(bool will_compact);
+
// Given an index, returns the page address.
- // TODO(1600): this limit is artificial just to keep code compilable
- static const int kMaxMapPageIndex = 1 << 16;
-
- virtual int RoundSizeDownToObjectAlignment(int size) {
- if (IsPowerOf2(Map::kSize)) {
- return RoundDown(size, Map::kSize);
- } else {
- return (size / Map::kSize) * Map::kSize;
+ Address PageAddress(int page_index) { return page_addresses_[page_index]; }
+
+ static const int kMaxMapPageIndex = 1 << MapWord::kMapPageIndexBits;
+
+ // Are map pointers encodable into map word?
+ bool MapPointersEncodable() {
+ if (!FLAG_use_big_map_space) {
+ ASSERT(CountPagesToTop() <= kMaxMapPageIndex);
+ return true;
}
+ return CountPagesToTop() <= max_map_space_pages_;
+ }
+
+ // Should be called after forced sweep to find out if map space needs
+ // compaction.
+ bool NeedsCompaction(int live_maps) {
+ return !MapPointersEncodable() && live_maps <= CompactionThreshold();
+ }
+
+ Address TopAfterCompaction(int live_maps) {
+ ASSERT(NeedsCompaction(live_maps));
+
+ int pages_left = live_maps / kMapsPerPage;
+ PageIterator it(this, PageIterator::ALL_PAGES);
+ while (pages_left-- > 0) {
+ ASSERT(it.has_next());
+ it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
+ }
+ ASSERT(it.has_next());
+ Page* top_page = it.next();
+ top_page->SetRegionMarks(Page::kAllRegionsCleanMarks);
+ ASSERT(top_page->is_valid());
+
+ int offset = live_maps % kMapsPerPage * Map::kSize;
+ Address top = top_page->ObjectAreaStart() + offset;
+ ASSERT(top < top_page->ObjectAreaEnd());
+ ASSERT(Contains(top));
+
+ return top;
+ }
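
// Worked example with hypothetical numbers: if kMapsPerPage were 64 and
// live_maps == 130, the loop keeps 130 / 64 == 2 full pages and the new top
// lands on the third page, at offset (130 % 64) * Map::kSize ==
// 2 * Map::kSize past its ObjectAreaStart().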
+
+ void FinishCompaction(Address new_top, int live_maps) {
+ Page* top_page = Page::FromAddress(new_top);
+ ASSERT(top_page->is_valid());
+
+ SetAllocationInfo(&allocation_info_, top_page);
+ allocation_info_.top = new_top;
+
+ int new_size = live_maps * Map::kSize;
+ accounting_stats_.DeallocateBytes(accounting_stats_.Size());
+ accounting_stats_.AllocateBytes(new_size);
+
+ // Flush allocation watermarks.
+ for (Page* p = first_page_; p != top_page; p = p->next_page()) {
+ p->SetAllocationWatermark(p->AllocationTop());
+ }
+ top_page->SetAllocationWatermark(new_top);
+
+#ifdef DEBUG
+ if (FLAG_enable_slow_asserts) {
+ intptr_t actual_size = 0;
+ for (Page* p = first_page_; p != top_page; p = p->next_page())
+ actual_size += kMapsPerPage * Map::kSize;
+ actual_size += (new_top - top_page->ObjectAreaStart());
+ ASSERT(accounting_stats_.Size() == actual_size);
+ }
+#endif
+
+ Shrink();
+ ResetFreeList();
}
protected:
@@ -2405,6 +2098,9 @@ class MapSpace : public FixedSpace {
const int max_map_space_pages_;
+ // An array of page start address in a map space.
+ Address page_addresses_[kMaxMapPageIndex];
+
public:
TRACK_MEMORY("MapSpace")
};
@@ -2420,14 +2116,6 @@ class CellSpace : public FixedSpace {
: FixedSpace(heap, max_capacity, id, JSGlobalPropertyCell::kSize, "cell")
{}
- virtual int RoundSizeDownToObjectAlignment(int size) {
- if (IsPowerOf2(JSGlobalPropertyCell::kSize)) {
- return RoundDown(size, JSGlobalPropertyCell::kSize);
- } else {
- return (size / JSGlobalPropertyCell::kSize) * JSGlobalPropertyCell::kSize;
- }
- }
-
protected:
#ifdef DEBUG
virtual void VerifyObject(HeapObject* obj);
@@ -2445,6 +2133,64 @@ class CellSpace : public FixedSpace {
// A large object always starts at Page::kObjectStartOffset within a page.
// Large objects do not move during garbage collections.
+// A LargeObjectChunk holds exactly one large object page with exactly one
+// large object.
+class LargeObjectChunk {
+ public:
+ // Allocates a new LargeObjectChunk that contains a large object page
+ // (Page::kPageSize aligned) with at least size_in_bytes usable for a large
+ // object after the object area start of that page.
+ static LargeObjectChunk* New(int size_in_bytes, Executability executable);
+
+ // Free the memory associated with the chunk.
+ void Free(Executability executable);
+
+ // Interpret a raw address as a large object chunk.
+ static LargeObjectChunk* FromAddress(Address address) {
+ return reinterpret_cast<LargeObjectChunk*>(address);
+ }
+
+ // Returns the address of this chunk.
+ Address address() { return reinterpret_cast<Address>(this); }
+
+ Page* GetPage() {
+ return Page::FromAddress(RoundUp(address(), Page::kPageSize));
+ }
+
+ // Accessors for the fields of the chunk.
+ LargeObjectChunk* next() { return next_; }
+ void set_next(LargeObjectChunk* chunk) { next_ = chunk; }
+ size_t size() { return size_ & ~Page::kPageFlagMask; }
+
+ // Compute the start address in the chunk.
+ Address GetStartAddress() { return GetPage()->ObjectAreaStart(); }
+
+ // Returns the object in this chunk.
+ HeapObject* GetObject() { return HeapObject::FromAddress(GetStartAddress()); }
+
+ // Given a requested size, returns the physical size of a chunk to be
+ // allocated.
+ static int ChunkSizeFor(int size_in_bytes);
+
+ // Given a chunk size, returns the object size it can accommodate. Used by
+ // LargeObjectSpace::Available.
+ static intptr_t ObjectSizeFor(intptr_t chunk_size) {
+ if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
+ return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
+ }
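
// For instance, ObjectSizeFor(2 * Page::kPageSize) ==
// Page::kPageSize - Page::kObjectStartOffset: a two-page chunk holds about
// one page of object payload, while a chunk of at most
// Page::kPageSize + Page::kObjectStartOffset bytes holds none at all.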
+
+ private:
+ // A pointer to the next large object chunk in the space or NULL.
+ LargeObjectChunk* next_;
+
+ // The total size of this chunk.
+ size_t size_;
+
+ public:
+ TRACK_MEMORY("LargeObjectChunk")
+};
+
+
class LargeObjectSpace : public Space {
public:
LargeObjectSpace(Heap* heap, AllocationSpace id);
@@ -2456,15 +2202,12 @@ class LargeObjectSpace : public Space {
// Releases internal resources, frees objects in this space.
void TearDown();
- static intptr_t ObjectSizeFor(intptr_t chunk_size) {
- if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
- return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
- }
-
- // Shared implementation of AllocateRaw, AllocateRawCode and
- // AllocateRawFixedArray.
- MUST_USE_RESULT MaybeObject* AllocateRaw(int object_size,
- Executability executable);
+ // Allocates a (non-FixedArray, non-Code) large object.
+ MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes);
+ // Allocates a large Code object.
+ MUST_USE_RESULT MaybeObject* AllocateRawCode(int size_in_bytes);
+ // Allocates a large FixedArray.
+ MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int size_in_bytes);
// Available bytes for objects in this space.
inline intptr_t Available();
@@ -2488,7 +2231,10 @@ class LargeObjectSpace : public Space {
// Finds a large object page containing the given pc, returns NULL
// if such a page doesn't exist.
- LargePage* FindPageContainingPc(Address pc);
+ LargeObjectChunk* FindChunkContainingPc(Address pc);
+
+ // Iterates objects covered by dirty regions.
+ void IterateDirtyRegions(ObjectSlotCallback func);
// Frees unmarked objects.
void FreeUnmarkedObjects();
@@ -2497,15 +2243,13 @@ class LargeObjectSpace : public Space {
bool Contains(HeapObject* obj);
// Checks whether the space is empty.
- bool IsEmpty() { return first_page_ == NULL; }
+ bool IsEmpty() { return first_chunk_ == NULL; }
// See the comments for ReserveSpace in the Space class. This has to be
// called after ReserveSpace has been called on the paged spaces, since they
// may use some memory, leaving less for large objects.
virtual bool ReserveSpace(int bytes);
- LargePage* first_page() { return first_page_; }
-
#ifdef DEBUG
virtual void Verify();
virtual void Print();
@@ -2518,11 +2262,17 @@ class LargeObjectSpace : public Space {
private:
// The head of the linked list of large object chunks.
- LargePage* first_page_;
+ LargeObjectChunk* first_chunk_;
intptr_t size_; // allocated bytes
int page_count_; // number of chunks
intptr_t objects_size_; // size of objects
+ // Shared implementation of AllocateRaw, AllocateRawCode and
+ // AllocateRawFixedArray.
+ MUST_USE_RESULT MaybeObject* AllocateRawInternal(int requested_size,
+ int object_size,
+ Executability executable);
+
friend class LargeObjectIterator;
public:
@@ -2535,78 +2285,17 @@ class LargeObjectIterator: public ObjectIterator {
explicit LargeObjectIterator(LargeObjectSpace* space);
LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
- HeapObject* Next();
+ HeapObject* next();
// implementation of ObjectIterator.
- virtual HeapObject* next_object() { return Next(); }
+ virtual HeapObject* next_object() { return next(); }
private:
- LargePage* current_;
+ LargeObjectChunk* current_;
HeapObjectCallback size_func_;
};
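
// Hedged iteration sketch, assuming the usual Heap::lo_space() accessor and
// consistent with the lower-case next() rename above:
//
//   LargeObjectIterator it(heap->lo_space());
//   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
//     // each obj is the single large object of its chunk
//   }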
-// Iterates over the chunks (pages and large object pages) that can contain
-// pointers to new space.
-class PointerChunkIterator BASE_EMBEDDED {
- public:
- inline explicit PointerChunkIterator(Heap* heap);
-
- // Return NULL when the iterator is done.
- MemoryChunk* next() {
- switch (state_) {
- case kOldPointerState: {
- if (old_pointer_iterator_.has_next()) {
- return old_pointer_iterator_.next();
- }
- state_ = kMapState;
- // Fall through.
- }
- case kMapState: {
- if (map_iterator_.has_next()) {
- return map_iterator_.next();
- }
- state_ = kLargeObjectState;
- // Fall through.
- }
- case kLargeObjectState: {
- HeapObject* heap_object;
- do {
- heap_object = lo_iterator_.Next();
- if (heap_object == NULL) {
- state_ = kFinishedState;
- return NULL;
- }
- // Fixed arrays are the only pointer-containing objects in large
- // object space.
- } while (!heap_object->IsFixedArray());
- MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address());
- return answer;
- }
- case kFinishedState:
- return NULL;
- default:
- break;
- }
- UNREACHABLE();
- return NULL;
- }
-
-
- private:
- enum State {
- kOldPointerState,
- kMapState,
- kLargeObjectState,
- kFinishedState
- };
- State state_;
- PageIterator old_pointer_iterator_;
- PageIterator map_iterator_;
- LargeObjectIterator lo_iterator_;
-};
-
-
#ifdef DEBUG
struct CommentStatistic {
const char* comment;