path: root/libgo/runtime/malloc.h
Diffstat (limited to 'libgo/runtime/malloc.h')
-rw-r--r--  libgo/runtime/malloc.h | 224
1 file changed, 182 insertions(+), 42 deletions(-)
diff --git a/libgo/runtime/malloc.h b/libgo/runtime/malloc.h
index e1a5be9991..86b9fccf90 100644
--- a/libgo/runtime/malloc.h
+++ b/libgo/runtime/malloc.h
@@ -20,7 +20,7 @@
// MHeap: the malloc heap, managed at page (4096-byte) granularity.
// MSpan: a run of pages managed by the MHeap.
// MCentral: a shared free list for a given size class.
-// MCache: a per-thread (in Go, per-M) cache for small objects.
+// MCache: a per-thread (in Go, per-P) cache for small objects.
// MStats: allocation statistics.
//
// Allocating a small object proceeds up a hierarchy of caches:
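The numbered steps of this hierarchy are elided by the diff context. As a rough guide, here is a minimal sketch of the small-object fast path, using the per-P MCache and the revised runtime_MCache_Refill declared later in this patch; the sketch_alloc_small name is invented and error handling is omitted:

static void*
sketch_alloc_small(MCache *c, int32 sizeclass)
{
	MSpan *s;
	MLink *v;

	s = c->alloc[sizeclass];	// 1. per-P MCache: no locking needed
	if(s->freelist == nil)
		s = runtime_MCache_Refill(c, sizeclass);	// 2. MCentral, 3. MHeap on miss
	v = s->freelist;		// pop one object off the span's free list
	s->freelist = v->next;
	s->ref++;			// ref tracks capacity minus freelist length
	return v;
}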
@@ -66,14 +66,14 @@
//
// The small objects on the MCache and MCentral free lists
// may or may not be zeroed. They are zeroed if and only if
-// the second word of the object is zero. The spans in the
-// page heap are always zeroed. When a span full of objects
-// is returned to the page heap, the objects that need to be
-// are zeroed first. There are two main benefits to delaying the
+// the second word of the object is zero. A span in the
+// page heap is zeroed unless s->needzero is set. When a span
+// is allocated to break into small objects, it is zeroed if needed
+// and s->needzero is set. There are two main benefits to delaying the
// zeroing this way:
//
// 1. stack frames allocated from the small object lists
-// can avoid zeroing altogether.
+// or the page heap can avoid zeroing altogether.
// 2. the cost of zeroing when reusing a small object is
// charged to the mutator, not the garbage collector.
//
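A hedged sketch of the second-word convention just described (sketch_object_needs_zeroing is an invented name; illustrative only):

static bool
sketch_object_needs_zeroing(void *v, uintptr nbytes)
{
	// On a free list the first word of an object holds the MLink next
	// pointer, so only the second word can mark "already zeroed".
	return nbytes > sizeof(uintptr) && ((uintptr*)v)[1] != 0;
}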
@@ -90,7 +90,7 @@ typedef struct GCStats GCStats;
enum
{
- PageShift = 12,
+ PageShift = 13,
PageSize = 1<<PageShift,
PageMask = PageSize - 1,
};
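For illustration (sketch_page_of is an invented name; PageID is the page-number typedef this header uses for MSpan.start), the page holding a pointer is found by shifting:

static PageID
sketch_page_of(void *v)
{
	// PageSize is now 1<<13 = 8192 bytes.
	return (PageID)((uintptr)v >> PageShift);
}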
@@ -103,11 +103,15 @@ enum
// size classes. NumSizeClasses is that number. It's needed here
// because there are static arrays of this length; when msize runs its
// size choosing algorithm it double-checks that NumSizeClasses agrees.
- NumSizeClasses = 61,
+ NumSizeClasses = 67,
// Tunable constants.
MaxSmallSize = 32<<10,
+ // Tiny allocator parameters, see "Tiny allocator" comment in malloc.goc.
+ TinySize = 16,
+ TinySizeClass = 2,
+
FixAllocChunk = 16<<10, // Chunk size for FixAlloc
MaxMHeapList = 1<<(20 - PageShift), // Maximum page length for fixed-size list in MHeap.
HeapAllocChunk = 1<<20, // Chunk size for heap growth
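A minimal sketch of the tiny-allocator bump path these constants parameterize, assuming the tiny/tinysize MCache fields added further down in this patch; the name is invented and alignment handling is omitted. See the "Tiny allocator" comment in malloc.goc for the real algorithm:

static void*
sketch_tinyalloc(MCache *c, uintptr size)
{
	byte *v;

	if(size <= c->tinysize) {
		v = c->tiny;		// carve from the cached TinySize block
		c->tiny += size;
		c->tinysize -= size;
		return v;
	}
	return nil;			// fall back to the ordinary small-object path
}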
@@ -154,6 +158,9 @@ struct MLink
// SysAlloc obtains a large chunk of zeroed memory from the
// operating system, typically on the order of a hundred kilobytes
// or a megabyte.
+// NOTE: SysAlloc returns OS-aligned memory, but the heap allocator
+// may use larger alignment, so the caller must be careful to realign the
+// memory obtained by SysAlloc.
//
// SysUnused notifies the operating system that the contents
// of the memory region are no longer needed and can be reused
@@ -168,16 +175,29 @@ struct MLink
// SysReserve reserves address space without allocating memory.
// If the pointer passed to it is non-nil, the caller wants the
// reservation there, but SysReserve can still choose another
-// location if that one is unavailable.
+// location if that one is unavailable. On some systems and in some
+// cases SysReserve will simply check that the address space is
+// available and not actually reserve it. If SysReserve returns
+// non-nil, it sets *reserved to true if the address space is
+// reserved, false if it has merely been checked.
+// NOTE: SysReserve returns OS-aligned memory, but the heap allocator
+// may use larger alignment, so the caller must be careful to realign the
+// memory obtained by SysReserve.
//
// SysMap maps previously reserved address space for use.
+// The reserved argument is true if the address space was really
+// reserved, not merely checked.
+//
+// SysFault marks an (already SysAlloc'd) region to fault
+// if accessed. Used only for debugging the runtime.
void* runtime_SysAlloc(uintptr nbytes, uint64 *stat);
void runtime_SysFree(void *v, uintptr nbytes, uint64 *stat);
void runtime_SysUnused(void *v, uintptr nbytes);
void runtime_SysUsed(void *v, uintptr nbytes);
-void runtime_SysMap(void *v, uintptr nbytes, uint64 *stat);
-void* runtime_SysReserve(void *v, uintptr nbytes);
+void runtime_SysMap(void *v, uintptr nbytes, bool reserved, uint64 *stat);
+void* runtime_SysReserve(void *v, uintptr nbytes, bool *reserved);
+void runtime_SysFault(void *v, uintptr nbytes);
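A hedged sketch of how these entry points compose (invented helper name; signatures exactly as declared above): reserve address space first, then map it on demand, forwarding the reserved flag:

static void*
sketch_reserve_then_map(uintptr nbytes, uint64 *stat)
{
	bool reserved;
	void *v;

	v = runtime_SysReserve(nil, nbytes, &reserved);
	if(v == nil)
		return nil;
	// reserved==false means the range was merely probed as available;
	// SysMap is told so it can map it without assuming a reservation.
	runtime_SysMap(v, nbytes, reserved, stat);
	return v;
}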
// FixAlloc is a simple free-list allocator for fixed size objects.
// Malloc uses a FixAlloc wrapped around SysAlloc to manage its
@@ -256,7 +276,8 @@ struct MStats
};
extern MStats mstats
- __asm__ (GOSYM_PREFIX "runtime.VmemStats");
+ __asm__ (GOSYM_PREFIX "runtime.memStats");
+void runtime_updatememstats(GCStats *stats);
// Size classes. Computed and initialized by InitSizes.
//
@@ -269,6 +290,7 @@ extern MStats mstats
// making new objects in class i
int32 runtime_SizeToClass(int32);
+uintptr runtime_roundupsize(uintptr);
extern int32 runtime_class_to_size[NumSizeClasses];
extern int32 runtime_class_to_allocnpages[NumSizeClasses];
extern int8 runtime_size_to_class8[1024/8 + 1];
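An illustrative sketch of the table lookup this comment describes (invented name; the exact boundary handling lives in msize): small sizes index size_to_class8 in 8-byte steps, larger ones index size_to_class128 in 128-byte steps:

static int32
sketch_size_to_class(int32 size)
{
	if(size <= 1024)
		return runtime_size_to_class8[(size+7)>>3];
	return runtime_size_to_class128[(size-1024+127)>>7];
}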
@@ -276,8 +298,6 @@ extern int8 runtime_size_to_class128[(MaxSmallSize-1024)/128 + 1];
extern void runtime_InitSizes(void);
-// Per-thread (in Go, per-M) cache for small objects.
-// No locking needed because it is per-thread (per-M).
typedef struct MCacheList MCacheList;
struct MCacheList
{
@@ -285,14 +305,21 @@ struct MCacheList
uint32 nlist;
};
+// Per-thread (in Go, per-P) cache for small objects.
+// No locking needed because it is per-thread (per-P).
struct MCache
{
// The following members are accessed on every malloc,
// so they are grouped here for better caching.
int32 next_sample; // trigger heap sample after allocating this many bytes
intptr local_cachealloc; // bytes allocated (or freed) from cache since last lock of heap
+ // Allocator cache for tiny objects w/o pointers.
+ // See "Tiny allocator" comment in malloc.goc.
+ byte* tiny;
+ uintptr tinysize;
// The rest is not accessed on every malloc.
- MCacheList list[NumSizeClasses];
+ MSpan* alloc[NumSizeClasses]; // spans to allocate from
+ MCacheList free[NumSizeClasses];// lists of explicitly freed objects
// Local allocator stats, flushed during GC.
uintptr local_nlookup; // number of pointer lookups
uintptr local_largefree; // bytes freed for large objects (>MaxSmallSize)
@@ -300,8 +327,8 @@ struct MCache
uintptr local_nsmallfree[NumSizeClasses]; // number of frees for small objects (<=MaxSmallSize)
};
-void runtime_MCache_Refill(MCache *c, int32 sizeclass);
-void runtime_MCache_Free(MCache *c, void *p, int32 sizeclass, uintptr size);
+MSpan* runtime_MCache_Refill(MCache *c, int32 sizeclass);
+void runtime_MCache_Free(MCache *c, MLink *p, int32 sizeclass, uintptr size);
void runtime_MCache_ReleaseAll(MCache *c);
// MTypes describes the types of blocks allocated within a span.
@@ -341,6 +368,43 @@ struct MTypes
uintptr data;
};
+enum
+{
+ KindSpecialFinalizer = 1,
+ KindSpecialProfile = 2,
+ // Note: The finalizer special must be first because if we're freeing
+ // an object, a finalizer special will cause the freeing operation
+ // to abort, and we want to keep the other special records around
+ // if that happens.
+};
+
+typedef struct Special Special;
+struct Special
+{
+ Special* next; // linked list in span
+ uint16 offset; // span offset of object
+ byte kind; // kind of Special
+};
+
+// The described object has a finalizer set for it.
+typedef struct SpecialFinalizer SpecialFinalizer;
+struct SpecialFinalizer
+{
+ Special;
+ FuncVal* fn;
+ const FuncType* ft;
+ const PtrType* ot;
+};
+
+// The described object is being heap profiled.
+typedef struct Bucket Bucket; // from mprof.goc
+typedef struct SpecialProfile SpecialProfile;
+struct SpecialProfile
+{
+ Special;
+ Bucket* b;
+};
+
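Since the per-span specials list is kept sorted by object offset (see the MSpan.specials field added below), a lookup can stop as soon as it walks past the target offset. A hedged sketch with an invented name:

static Special*
sketch_find_special(Special *list, uintptr offset)
{
	Special *s;

	for(s = list; s != nil && s->offset <= offset; s = s->next)
		if(s->offset == offset)
			return s;
	return nil;
}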
// An MSpan is a run of pages.
enum
{
@@ -356,17 +420,30 @@ struct MSpan
PageID start; // starting page number
uintptr npages; // number of pages in span
MLink *freelist; // list of free objects
- uint32 ref; // number of allocated objects in this span
- int32 sizeclass; // size class
+ // sweep generation:
+ // if sweepgen == h->sweepgen - 2, the span needs sweeping
+ // if sweepgen == h->sweepgen - 1, the span is currently being swept
+ // if sweepgen == h->sweepgen, the span is swept and ready to use
+ // h->sweepgen is incremented by 2 after every GC
+ uint32 sweepgen;
+ uint16 ref; // capacity - number of objects in freelist
+ uint8 sizeclass; // size class
+ bool incache; // being used by an MCache
+ uint8 state; // MSpanInUse etc
+ uint8 needzero; // needs to be zeroed before allocation
uintptr elemsize; // computed from sizeclass or from npages
- uint32 state; // MSpanInUse etc
int64 unusedsince; // First time spotted by GC in MSpanFree state
uintptr npreleased; // number of pages released to the OS
byte *limit; // end of data in span
MTypes types; // types of allocated objects in this span
+ Lock specialLock; // guards specials list
+ Special *specials; // linked list of special records sorted by offset.
+ MLink *freebuf; // objects freed explicitly, not incorporated into freelist yet
};
void runtime_MSpan_Init(MSpan *span, PageID start, uintptr npages);
+void runtime_MSpan_EnsureSwept(MSpan *span);
+bool runtime_MSpan_Sweep(MSpan *span);
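A sketch of how a caller might honor the sweepgen protocol documented in the struct above, using the runtime_MSpan_EnsureSwept entry point just declared (invented wrapper name, illustrative only):

static bool
sketch_span_swept(MHeap *h, MSpan *s)
{
	if(s->sweepgen == h->sweepgen)
		return true;			// swept and ready to use
	// Needs sweeping (sweepgen == h->sweepgen-2) or is being swept
	// (sweepgen == h->sweepgen-1): sweep it, or wait for the sweeper.
	runtime_MSpan_EnsureSwept(s);
	return s->sweepgen == h->sweepgen;
}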
// Every MSpan is in one doubly-linked list,
// either one of the MHeap's free lists or one of the
@@ -374,6 +451,7 @@ void runtime_MSpan_Init(MSpan *span, PageID start, uintptr npages);
void runtime_MSpanList_Init(MSpan *list);
bool runtime_MSpanList_IsEmpty(MSpan *list);
void runtime_MSpanList_Insert(MSpan *list, MSpan *span);
+void runtime_MSpanList_InsertBack(MSpan *list, MSpan *span);
void runtime_MSpanList_Remove(MSpan *span); // from whatever list it is in
@@ -382,15 +460,16 @@ struct MCentral
{
Lock;
int32 sizeclass;
- MSpan nonempty;
- MSpan empty;
- int32 nfree;
+ MSpan nonempty; // list of spans with a free object
+ MSpan empty; // list of spans with no free objects (or cached in an MCache)
+ int32 nfree; // # of objects available in nonempty spans
};
void runtime_MCentral_Init(MCentral *c, int32 sizeclass);
-int32 runtime_MCentral_AllocList(MCentral *c, MLink **first);
-void runtime_MCentral_FreeList(MCentral *c, MLink *first);
-void runtime_MCentral_FreeSpan(MCentral *c, MSpan *s, int32 n, MLink *start, MLink *end);
+MSpan* runtime_MCentral_CacheSpan(MCentral *c);
+void runtime_MCentral_UncacheSpan(MCentral *c, MSpan *s);
+bool runtime_MCentral_FreeSpan(MCentral *c, MSpan *s, int32 n, MLink *start, MLink *end);
+void runtime_MCentral_FreeList(MCentral *c, MLink *start); // TODO: need this?
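The new API moves MCentral to span granularity: an MCache takes whole spans rather than object lists. A hedged sketch (invented name) of the cache/uncache pairing a refill might use:

static MSpan*
sketch_swap_span(MCentral *c, MSpan *s)
{
	if(s != nil)
		runtime_MCentral_UncacheSpan(c, s);	// hand back the exhausted span
	return runtime_MCentral_CacheSpan(c);		// take one with free objects
}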
// Main malloc heap.
// The heap itself is the "free[]" and "large" arrays,
@@ -399,10 +478,15 @@ struct MHeap
{
Lock;
MSpan free[MaxMHeapList]; // free lists of given length
- MSpan large; // free lists length >= MaxMHeapList
- MSpan **allspans;
+ MSpan freelarge; // free lists length >= MaxMHeapList
+ MSpan busy[MaxMHeapList]; // busy lists of large objects of given length
+ MSpan busylarge; // busy lists of large objects length >= MaxMHeapList
+ MSpan **allspans; // all spans out there
+ MSpan **sweepspans; // copy of allspans referenced by sweeper
uint32 nspan;
uint32 nspancap;
+ uint32 sweepgen; // sweep generation, see comment in MSpan
+ uint32 sweepdone; // all spans are swept
// span lookup
MSpan** spans;
@@ -414,6 +498,7 @@ struct MHeap
byte *arena_start;
byte *arena_used;
byte *arena_end;
+ bool arena_reserved;
// central free lists for small size classes.
// the padding makes sure that the MCentrals are
@@ -426,6 +511,9 @@ struct MHeap
FixAlloc spanalloc; // allocator for Span*
FixAlloc cachealloc; // allocator for MCache*
+ FixAlloc specialfinalizeralloc; // allocator for SpecialFinalizer*
+ FixAlloc specialprofilealloc; // allocator for SpecialProfile*
+	Lock	speciallock;	// lock for special record allocators.
// Malloc stats.
uint64 largefree; // bytes freed for large objects (>MaxSmallSize)
@@ -435,7 +523,7 @@ struct MHeap
extern MHeap runtime_mheap;
void runtime_MHeap_Init(MHeap *h);
-MSpan* runtime_MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct, int32 zeroed);
+MSpan* runtime_MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool needzero);
void runtime_MHeap_Free(MHeap *h, MSpan *s, int32 acct);
MSpan* runtime_MHeap_Lookup(MHeap *h, void *v);
MSpan* runtime_MHeap_LookupMaybe(MHeap *h, void *v);
@@ -444,26 +532,28 @@ void* runtime_MHeap_SysAlloc(MHeap *h, uintptr n);
void runtime_MHeap_MapBits(MHeap *h);
void runtime_MHeap_MapSpans(MHeap *h);
void runtime_MHeap_Scavenger(void*);
+void runtime_MHeap_SplitSpan(MHeap *h, MSpan *s);
void* runtime_mallocgc(uintptr size, uintptr typ, uint32 flag);
void* runtime_persistentalloc(uintptr size, uintptr align, uint64 *stat);
int32 runtime_mlookup(void *v, byte **base, uintptr *size, MSpan **s);
void runtime_gc(int32 force);
-void runtime_markallocated(void *v, uintptr n, bool noptr);
+uintptr runtime_sweepone(void);
+void runtime_markscan(void *v);
+void runtime_marknogc(void *v);
void runtime_checkallocated(void *v, uintptr n);
-void runtime_markfreed(void *v, uintptr n);
+void runtime_markfreed(void *v);
void runtime_checkfreed(void *v, uintptr n);
extern int32 runtime_checking;
void runtime_markspan(void *v, uintptr size, uintptr n, bool leftover);
void runtime_unmarkspan(void *v, uintptr size);
-bool runtime_blockspecial(void*);
-void runtime_setblockspecial(void*, bool);
void runtime_purgecachedstats(MCache*);
void* runtime_cnew(const Type*);
void* runtime_cnewarray(const Type*, intgo);
+void runtime_tracealloc(void*, uintptr, uintptr);
+void runtime_tracefree(void*, uintptr);
+void runtime_tracegc(void);
-void runtime_settype_flush(M*);
-void runtime_settype_sysfree(MSpan*);
uintptr runtime_gettype(void*);
enum
@@ -485,17 +575,27 @@ struct Obj
};
void runtime_MProf_Malloc(void*, uintptr);
-void runtime_MProf_Free(void*, uintptr);
+void runtime_MProf_Free(Bucket*, uintptr, bool);
void runtime_MProf_GC(void);
-void runtime_MProf_Mark(void (*addroot)(Obj));
+void runtime_iterate_memprof(void (*callback)(Bucket*, uintptr, Location*, uintptr, uintptr, uintptr));
int32 runtime_gcprocs(void);
void runtime_helpgc(int32 nproc);
void runtime_gchelper(void);
+void runtime_createfing(void);
+G* runtime_wakefing(void);
+extern bool runtime_fingwait;
+extern bool runtime_fingwake;
+
+void runtime_setprofilebucket(void *p, Bucket *b);
struct __go_func_type;
struct __go_ptr_type;
-bool runtime_getfinalizer(void *p, bool del, FuncVal **fn, const struct __go_func_type **ft, const struct __go_ptr_type **ot);
-void runtime_walkfintab(void (*fn)(void*), void (*scan)(Obj));
+bool runtime_addfinalizer(void *p, FuncVal *fn, const struct __go_func_type*, const struct __go_ptr_type*);
+void runtime_removefinalizer(void*);
+void runtime_queuefinalizer(void *p, FuncVal *fn, const struct __go_func_type *ft, const struct __go_ptr_type *ot);
+
+void runtime_freeallspecials(MSpan *span, void *p, uintptr size);
+bool runtime_freespecial(Special *s, void *p, uintptr size, bool freed);
enum
{
@@ -507,12 +607,52 @@ enum
DebugTypeAtBlockEnd = 0,
};
+// Information from the compiler about the layout of stack frames.
+typedef struct BitVector BitVector;
+struct BitVector
+{
+ int32 n; // # of bits
+ uint32 *data;
+};
+typedef struct StackMap StackMap;
+struct StackMap
+{
+ int32 n; // number of bitmaps
+ int32 nbit; // number of bits in each bitmap
+ uint32 data[];
+};
+enum {
+ // Pointer map
+ BitsPerPointer = 2,
+ BitsDead = 0,
+ BitsScalar = 1,
+ BitsPointer = 2,
+ BitsMultiWord = 3,
+ // BitsMultiWord will be set for the first word of a multi-word item.
+ // When it is set, one of the following will be set for the second word.
+ BitsString = 0,
+ BitsSlice = 1,
+ BitsIface = 2,
+ BitsEface = 3,
+};
+// Returns pointer map data for the given stackmap index
+// (the index is encoded in PCDATA_StackMapIndex).
+BitVector runtime_stackmapdata(StackMap *stackmap, int32 n);
+
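A hedged sketch (invented name) of decoding the two-bit-per-word pointer map described by this enum: word i of a frame maps to bits [2i, 2i+1] of the bitmap.

static uint32
sketch_pointer_bits(BitVector bv, int32 i)
{
	// 32/BitsPerPointer = 16 two-bit entries per uint32 of data.
	return (bv.data[i/16] >> ((i%16)*BitsPerPointer)) & 3;
}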
// defined in mgc0.go
void runtime_gc_m_ptr(Eface*);
+void runtime_gc_g_ptr(Eface*);
void runtime_gc_itab_ptr(Eface*);
void runtime_memorydump(void);
+int32 runtime_setgcpercent(int32);
+
+// Value we use to mark dead pointers when GODEBUG=gcdead=1.
+#define PoisonGC ((uintptr)0xf969696969696969ULL)
+#define PoisonStack ((uintptr)0x6868686868686868ULL)
-void runtime_proc_scan(void (*)(Obj));
-void runtime_time_scan(void (*)(Obj));
-void runtime_netpoll_scan(void (*)(Obj));
+struct Workbuf;
+void runtime_MProf_Mark(struct Workbuf**, void (*)(struct Workbuf**, Obj));
+void runtime_proc_scan(struct Workbuf**, void (*)(struct Workbuf**, Obj));
+void runtime_time_scan(struct Workbuf**, void (*)(struct Workbuf**, Obj));
+void runtime_netpoll_scan(struct Workbuf**, void (*)(struct Workbuf**, Obj));