author | ian <ian@138bc75d-0d04-0410-961f-82ee72b054a4> | 2016-10-13 15:24:50 +0000
committer | ian <ian@138bc75d-0d04-0410-961f-82ee72b054a4> | 2016-10-13 15:24:50 +0000
commit | 634150fbd353b4b99a31c94963f1decb51fb8e51 (patch)
tree | d5aaac15d5bd0d4b125140016f3ea84a714e3713 /libgo/runtime/mheap.c
parent | ff56abd1656bc6c0f4606f203fa06fab74efa1de (diff)
download | gcc-634150fbd353b4b99a31c94963f1decb51fb8e51.tar.gz
runtime: copy mstats code from Go 1.7 runtime
This replaces mem.go and the C runtime_ReadMemStats function with the Go
1.7 mstats.go.
The GCStats code is commented out for now. The corresponding gccgo code
is in runtime/mgc0.c.
The variables memstats and worldsema are shared between the Go code and
the C code, but are not exported. To make this work, add temporary
accessor functions acquireWorldsema, releaseWorldsema, and getMstats
(the last known as mstats in the C code).
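As a rough illustration, the C side can reach those unexported Go
functions through gccgo's usual __asm__ symbol aliasing. The
declarations below are a sketch of that arrangement under assumed
prototypes, not the verbatim gccgo header:

```c
/* Sketch only: assumed C-side declarations for the Go accessors.
   GOSYM_PREFIX is gccgo's macro prefix for Go symbol names.  Binding
   runtime.getMstats to the C name mstats() lets C code write
   mstats()->field, as in the diff below. */
extern void acquireWorldsema(void)
  __asm__ (GOSYM_PREFIX "runtime.acquireWorldsema");
extern void releaseWorldsema(void)
  __asm__ (GOSYM_PREFIX "runtime.releaseWorldsema");
extern MStats *mstats(void)
  __asm__ (GOSYM_PREFIX "runtime.getMstats");
```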
Check the preemptoff field of m when allocating and when considering
whether to start a GC. This works with the new stopTheWorld and
startTheWorld functions in Go, which are essentially the Go 1.7
versions.
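For illustration only, a hedged sketch of that check (the helper name
canStartGC is hypothetical, and the field layout is assumed from the
Go 1.7 runtime rather than copied from the gccgo source):

```c
/* Hypothetical sketch, not the actual gccgo code.  In the Go 1.7
   runtime, m.preemptoff is a string naming whoever disabled
   preemption (stopTheWorld sets it); non-empty means the current M
   must not be preempted, so a GC must not be started from here. */
static bool
canStartGC(void)
{
    M *mp = runtime_m();

    /* A Go string in the C runtime carries an explicit length;
       len > 0 means preemption is currently off. */
    if(mp->preemptoff.len > 0)
        return false;
    return true;
}
```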
Change the compiler to stack allocate closures when compiling the
runtime package. Within the runtime package, closures do not escape.
This is similar to what the gc compiler does, except that the gc
compiler, when compiling the runtime package, gives an error if escape
analysis shows that a closure does escape. I added this here because
the Go version of ReadMemStats calls systemstack with a closure, and
having that closure allocate memory was causing some tests that
measure memory allocations to fail.
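To see why stack allocation matters here, consider a self-contained C
analogy (purely illustrative, not compiler output): a closure is just
a function pointer paired with an environment, and when that pair
provably never outlives its caller it can live in the caller's frame,
so invoking it performs no heap allocation for an allocation-counting
test to observe.

```c
#include <stdio.h>

/* A closure lowered by hand: function pointer plus environment. */
struct closure {
    void (*fn)(void *env);
    void *env;
};

static void
add_one(void *env)
{
    ++*(int *)env;
}

int
main(void)
{
    int x = 41;
    /* The closure never escapes main, so it can be built in main's
       stack frame: no heap allocation is observable. */
    struct closure c = { add_one, &x };
    c.fn(c.env);            /* "call the closure" */
    printf("x = %d\n", x);  /* prints x = 42 */
    return 0;
}
```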
Reviewed-on: https://go-review.googlesource.com/30972
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@241124 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'libgo/runtime/mheap.c')
-rw-r--r-- | libgo/runtime/mheap.c | 53
1 file changed, 30 insertions, 23 deletions
```diff
diff --git a/libgo/runtime/mheap.c b/libgo/runtime/mheap.c
index 04a5b98772c..c167bdc8195 100644
--- a/libgo/runtime/mheap.c
+++ b/libgo/runtime/mheap.c
@@ -36,7 +36,7 @@ RecordSpan(void *vh, byte *p)
 	cap = 64*1024/sizeof(all[0]);
 	if(cap < h->nspancap*3/2)
 		cap = h->nspancap*3/2;
-	all = (MSpan**)runtime_SysAlloc(cap*sizeof(all[0]), &mstats.other_sys);
+	all = (MSpan**)runtime_SysAlloc(cap*sizeof(all[0]), &mstats()->other_sys);
 	if(all == nil)
 		runtime_throw("runtime: cannot allocate memory");
 	if(h->allspans) {
@@ -44,7 +44,7 @@ RecordSpan(void *vh, byte *p)
 		// Don't free the old array if it's referenced by sweep.
 		// See the comment in mgc0.c.
 		if(h->allspans != runtime_mheap.sweepspans)
-			runtime_SysFree(h->allspans, h->nspancap*sizeof(all[0]), &mstats.other_sys);
+			runtime_SysFree(h->allspans, h->nspancap*sizeof(all[0]), &mstats()->other_sys);
 	}
 	h->allspans = all;
 	h->nspancap = cap;
@@ -56,12 +56,14 @@ RecordSpan(void *vh, byte *p)
 void
 runtime_MHeap_Init(MHeap *h)
 {
+	MStats *pmstats;
 	uint32 i;
 
-	runtime_FixAlloc_Init(&h->spanalloc, sizeof(MSpan), RecordSpan, h, &mstats.mspan_sys);
-	runtime_FixAlloc_Init(&h->cachealloc, sizeof(MCache), nil, nil, &mstats.mcache_sys);
-	runtime_FixAlloc_Init(&h->specialfinalizeralloc, sizeof(SpecialFinalizer), nil, nil, &mstats.other_sys);
-	runtime_FixAlloc_Init(&h->specialprofilealloc, sizeof(SpecialProfile), nil, nil, &mstats.other_sys);
+	pmstats = mstats();
+	runtime_FixAlloc_Init(&h->spanalloc, sizeof(MSpan), RecordSpan, h, &pmstats->mspan_sys);
+	runtime_FixAlloc_Init(&h->cachealloc, sizeof(MCache), nil, nil, &pmstats->mcache_sys);
+	runtime_FixAlloc_Init(&h->specialfinalizeralloc, sizeof(SpecialFinalizer), nil, nil, &pmstats->other_sys);
+	runtime_FixAlloc_Init(&h->specialprofilealloc, sizeof(SpecialProfile), nil, nil, &pmstats->other_sys);
 	// h->mapcache needs no init
 	for(i=0; i<nelem(h->free); i++) {
 		runtime_MSpanList_Init(&h->free[i]);
@@ -88,7 +90,7 @@ runtime_MHeap_MapSpans(MHeap *h)
 	n = ROUND(n, pagesize);
 	if(h->spans_mapped >= n)
 		return;
-	runtime_SysMap((byte*)h->spans + h->spans_mapped, n - h->spans_mapped, h->arena_reserved, &mstats.other_sys);
+	runtime_SysMap((byte*)h->spans + h->spans_mapped, n - h->spans_mapped, h->arena_reserved, &mstats()->other_sys);
 	h->spans_mapped = n;
 }
@@ -173,17 +175,19 @@ MHeap_Reclaim(MHeap *h, uintptr npage)
 MSpan*
 runtime_MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool needzero)
 {
+	MStats *pmstats;
 	MSpan *s;
 
 	runtime_lock(h);
-	mstats.heap_alloc += (intptr)runtime_m()->mcache->local_cachealloc;
+	pmstats = mstats();
+	pmstats->heap_alloc += (intptr)runtime_m()->mcache->local_cachealloc;
 	runtime_m()->mcache->local_cachealloc = 0;
 	s = MHeap_AllocLocked(h, npage, sizeclass);
 	if(s != nil) {
-		mstats.heap_inuse += npage<<PageShift;
+		pmstats->heap_inuse += npage<<PageShift;
 		if(large) {
-			mstats.heap_objects++;
-			mstats.heap_alloc += npage<<PageShift;
+			pmstats->heap_objects++;
+			pmstats->heap_alloc += npage<<PageShift;
 			// Swept spans are at the end of lists.
 			if(s->npages < nelem(h->free))
 				runtime_MSpanList_InsertBack(&h->busy[s->npages], s);
@@ -237,8 +241,8 @@ HaveSpan:
 	runtime_MSpanList_Remove(s);
 	runtime_atomicstore(&s->sweepgen, h->sweepgen);
 	s->state = MSpanInUse;
-	mstats.heap_idle -= s->npages<<PageShift;
-	mstats.heap_released -= s->npreleased<<PageShift;
+	mstats()->heap_idle -= s->npages<<PageShift;
+	mstats()->heap_released -= s->npreleased<<PageShift;
 	if(s->npreleased > 0)
 		runtime_SysUsed((void*)(s->start<<PageShift), s->npages<<PageShift);
 	s->npreleased = 0;
@@ -326,7 +330,7 @@ MHeap_Grow(MHeap *h, uintptr npage)
 			v = runtime_MHeap_SysAlloc(h, ask);
 		}
 		if(v == nil) {
-			runtime_printf("runtime: out of memory: cannot allocate %D-byte block (%D in use)\n", (uint64)ask, mstats.heap_sys);
+			runtime_printf("runtime: out of memory: cannot allocate %D-byte block (%D in use)\n", (uint64)ask, mstats()->heap_sys);
 			return false;
 		}
 	}
@@ -386,13 +390,16 @@ runtime_MHeap_LookupMaybe(MHeap *h, void *v)
 void
 runtime_MHeap_Free(MHeap *h, MSpan *s, int32 acct)
 {
+	MStats *pmstats;
+
 	runtime_lock(h);
-	mstats.heap_alloc += (intptr)runtime_m()->mcache->local_cachealloc;
+	pmstats = mstats();
+	pmstats->heap_alloc += (intptr)runtime_m()->mcache->local_cachealloc;
 	runtime_m()->mcache->local_cachealloc = 0;
-	mstats.heap_inuse -= s->npages<<PageShift;
+	pmstats->heap_inuse -= s->npages<<PageShift;
 	if(acct) {
-		mstats.heap_alloc -= s->npages<<PageShift;
-		mstats.heap_objects--;
+		pmstats->heap_alloc -= s->npages<<PageShift;
+		pmstats->heap_objects--;
 	}
 	MHeap_FreeLocked(h, s);
 	runtime_unlock(h);
@@ -411,7 +418,7 @@ MHeap_FreeLocked(MHeap *h, MSpan *s)
 			s, s->start<<PageShift, s->state, s->ref, s->sweepgen, h->sweepgen);
 		runtime_throw("MHeap_FreeLocked - invalid free");
 	}
-	mstats.heap_idle += s->npages<<PageShift;
+	mstats()->heap_idle += s->npages<<PageShift;
 	s->state = MSpanFree;
 	runtime_MSpanList_Remove(s);
 	// Stamp newly unused spans. The scavenger will use that
@@ -472,7 +479,7 @@ scavengelist(MSpan *list, uint64 now, uint64 limit)
 	for(s=list->next; s != list; s=s->next) {
 		if((now - s->unusedsince) > limit && s->npreleased != s->npages) {
 			released = (s->npages - s->npreleased) << PageShift;
-			mstats.heap_released += released;
+			mstats()->heap_released += released;
 			sumreleased += released;
 			s->npreleased = s->npages;
@@ -508,8 +515,8 @@ scavenge(int32 k, uint64 now, uint64 limit)
 		if(sumreleased > 0)
 			runtime_printf("scvg%d: %D MB released\n", k, (uint64)sumreleased>>20);
 		runtime_printf("scvg%d: inuse: %D, idle: %D, sys: %D, released: %D, consumed: %D (MB)\n",
-			k, mstats.heap_inuse>>20, mstats.heap_idle>>20, mstats.heap_sys>>20,
-			mstats.heap_released>>20, (mstats.heap_sys - mstats.heap_released)>>20);
+			k, mstats()->heap_inuse>>20, mstats()->heap_idle>>20, mstats()->heap_sys>>20,
+			mstats()->heap_released>>20, (mstats()->heap_sys - mstats()->heap_released)>>20);
 	}
 }
@@ -550,7 +557,7 @@ runtime_MHeap_Scavenger(void* dummy)
 		runtime_lock(h);
 		unixnow = runtime_unixnanotime();
-		if(unixnow - mstats.last_gc > forcegc) {
+		if(unixnow - mstats()->last_gc > forcegc) {
 			runtime_unlock(h);
 			// The scavenger can not block other goroutines,
 			// otherwise deadlock detector can fire spuriously.
```