author     ian <ian@138bc75d-0d04-0410-961f-82ee72b054a4>   2012-11-21 07:03:38 +0000
committer  ian <ian@138bc75d-0d04-0410-961f-82ee72b054a4>   2012-11-21 07:03:38 +0000
commit     79a796b7d3db5d100eedfc774954a6b44944363a (patch)
tree       72455aea0286937aa08cc141e5efc800e4626577 /libgo/runtime
parent     7224cf54b3af2b931fb83af65f9cfab5c1df814a (diff)
libgo: Update to current version of master library.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@193688 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'libgo/runtime')
-rw-r--r--  libgo/runtime/go-cgo.c             | 20
-rw-r--r--  libgo/runtime/go-make-slice.c      | 23
-rw-r--r--  libgo/runtime/go-traceback.c       |  2
-rw-r--r--  libgo/runtime/go-unsafe-new.c      | 22
-rw-r--r--  libgo/runtime/go-unsafe-newarray.c | 33
-rw-r--r--  libgo/runtime/malloc.goc           | 37
-rw-r--r--  libgo/runtime/malloc.h             | 21
-rw-r--r--  libgo/runtime/mgc0.c               | 80
-rw-r--r--  libgo/runtime/mheap.c              | 21
-rw-r--r--  libgo/runtime/mprof.goc            |  3
-rw-r--r--  libgo/runtime/proc.c               | 24
-rw-r--r--  libgo/runtime/race.h               |  1
-rw-r--r--  libgo/runtime/runtime.c            |  4
-rw-r--r--  libgo/runtime/runtime.h            | 21
14 files changed, 224 insertions, 88 deletions
diff --git a/libgo/runtime/go-cgo.c b/libgo/runtime/go-cgo.c
index d0c89f29459..47ee014181a 100644
--- a/libgo/runtime/go-cgo.c
+++ b/libgo/runtime/go-cgo.c
@@ -9,16 +9,6 @@
 #include "interface.h"
 #include "go-panic.h"
 
-/* Go memory allocated by code not written in Go.  We keep a linked
-   list of these allocations so that the garbage collector can see
-   them.  */
-
-struct cgoalloc
-{
-  struct cgoalloc *next;
-  void *alloc;
-};
-
 /* Prepare to call from code written in Go to code written in C or
    C++.  This takes the current goroutine out of the Go scheduler, as
    though it were making a system call.  Otherwise the program can
@@ -67,7 +57,7 @@ syscall_cgocalldone ()
       /* We are going back to Go, and we are not in a recursive call.
 	 Let the garbage collector clean up any unreferenced
 	 memory.  */
-      g->cgoalloc = NULL;
+      g->cgomal = NULL;
     }
 
   /* If we are invoked because the C function called _cgo_panic, then
@@ -100,15 +90,15 @@ alloc_saved (size_t n)
 {
   void *ret;
   G *g;
-  struct cgoalloc *c;
+  CgoMal *c;
 
   ret = __go_alloc (n);
 
   g = runtime_g ();
-  c = (struct cgoalloc *) __go_alloc (sizeof (struct cgoalloc));
-  c->next = g->cgoalloc;
+  c = (CgoMal *) __go_alloc (sizeof (CgoMal));
+  c->next = g->cgomal;
   c->alloc = ret;
-  g->cgoalloc = c;
+  g->cgomal = c;
 
   return ret;
 }
diff --git a/libgo/runtime/go-make-slice.c b/libgo/runtime/go-make-slice.c
index 242c9bb7268..591ab37e0c6 100644
--- a/libgo/runtime/go-make-slice.c
+++ b/libgo/runtime/go-make-slice.c
@@ -15,6 +15,11 @@
 #include "arch.h"
 #include "malloc.h"
 
+/* Dummy word to use as base pointer for make([]T, 0).
+   Since you cannot take the address of such a slice,
+   you can't tell that they all have the same base pointer.  */
+uintptr runtime_zerobase;
+
 struct __go_open_array
 __go_make_slice2 (const struct __go_type_descriptor *td, uintptr_t len,
 		  uintptr_t cap)
@@ -24,7 +29,6 @@ __go_make_slice2 (const struct __go_type_descriptor *td, uintptr_t len,
   intgo icap;
   uintptr_t size;
   struct __go_open_array ret;
-  unsigned int flag;
 
   __go_assert (td->__code == GO_SLICE);
   std = (const struct __go_slice_type *) td;
@@ -44,10 +48,19 @@ __go_make_slice2 (const struct __go_type_descriptor *td, uintptr_t len,
   ret.__capacity = icap;
 
   size = cap * std->__element_type->__size;
-  flag = ((std->__element_type->__code & GO_NO_POINTERS) != 0
-	  ? FlagNoPointers
-	  : 0);
-  ret.__values = runtime_mallocgc (size, flag, 1, 1);
+
+  if (size == 0)
+    ret.__values = &runtime_zerobase;
+  else if ((std->__element_type->__code & GO_NO_POINTERS) != 0)
+    ret.__values = runtime_mallocgc (size, FlagNoPointers, 1, 1);
+  else
+    {
+      ret.__values = runtime_mallocgc (size, 0, 1, 1);
+
+      if (UseSpanType)
+	runtime_settype (ret.__values,
+			 (uintptr) std->__element_type | TypeInfo_Array);
+    }
 
   return ret;
 }
diff --git a/libgo/runtime/go-traceback.c b/libgo/runtime/go-traceback.c
index 2927351c8a9..11cc052829f 100644
--- a/libgo/runtime/go-traceback.c
+++ b/libgo/runtime/go-traceback.c
@@ -32,7 +32,7 @@ runtime_printtrace (uintptr *pcbuf, int32 c)
       intgo line;
 
       if (__go_file_line (pcbuf[i], &fn, &file, &line)
-	  && runtime_showframe (fn.str))
+	  && runtime_showframe (fn))
 	{
 	  runtime_printf ("%S\n", fn);
 	  runtime_printf ("\t%S:%D\n", file, (int64) line);
diff --git a/libgo/runtime/go-unsafe-new.c b/libgo/runtime/go-unsafe-new.c
index a75c3884eed..8ed52343bfb 100644
--- a/libgo/runtime/go-unsafe-new.c
+++ b/libgo/runtime/go-unsafe-new.c
@@ -5,28 +5,30 @@ license that can be found in the LICENSE file.
  */
 
 #include "runtime.h"
-#include "go-alloc.h"
+#include "arch.h"
+#include "malloc.h"
 #include "go-type.h"
 #include "interface.h"
 
 /* Implement unsafe_New, called from the reflect package.  */
 
-void *unsafe_New (struct __go_empty_interface type)
+void *unsafe_New (const struct __go_type_descriptor *)
   asm ("reflect.unsafe_New");
 
 /* The dynamic type of the argument will be a pointer to a type
   descriptor.  */
 
 void *
-unsafe_New (struct __go_empty_interface type)
+unsafe_New (const struct __go_type_descriptor *descriptor)
 {
-  const struct __go_type_descriptor *descriptor;
+  uint32 flag;
+  void *ret;
 
-  if (((uintptr_t) type.__type_descriptor & reflectFlags) != 0)
-    runtime_panicstring ("invalid interface value");
+  flag = (descriptor->__code & GO_NO_POINTERS) != 0 ? FlagNoPointers : 0;
+  ret = runtime_mallocgc (descriptor->__size, flag, 1, 1);
 
-  /* FIXME: We should check __type_descriptor to verify that this is
-     really a type descriptor.  */
-  descriptor = (const struct __go_type_descriptor *) type.__object;
-  return __go_alloc (descriptor->__size);
+  if (UseSpanType && flag == 0)
+    runtime_settype (ret, (uintptr) descriptor | TypeInfo_SingleObject);
+
+  return ret;
 }
diff --git a/libgo/runtime/go-unsafe-newarray.c b/libgo/runtime/go-unsafe-newarray.c
index 67399eac88c..47dcd847e64 100644
--- a/libgo/runtime/go-unsafe-newarray.c
+++ b/libgo/runtime/go-unsafe-newarray.c
@@ -5,28 +5,37 @@ license that can be found in the LICENSE file.
  */
 
 #include "runtime.h"
-#include "go-alloc.h"
+#include "arch.h"
+#include "malloc.h"
 #include "go-type.h"
 #include "interface.h"
 
 /* Implement unsafe_NewArray, called from the reflect package.  */
 
-void *unsafe_NewArray (struct __go_empty_interface type, int n)
+void *unsafe_NewArray (const struct __go_type_descriptor *, intgo)
   asm ("reflect.unsafe_NewArray");
 
 /* The dynamic type of the argument will be a pointer to a type
   descriptor.  */
 
 void *
-unsafe_NewArray (struct __go_empty_interface type, int n)
+unsafe_NewArray (const struct __go_type_descriptor *descriptor, intgo n)
 {
-  const struct __go_type_descriptor *descriptor;
-
-  if (((uintptr_t) type.__type_descriptor & reflectFlags) != 0)
-    runtime_panicstring ("invalid interface value");
-
-  /* FIXME: We should check __type_descriptor to verify that this is
-     really a type descriptor.  */
-  descriptor = (const struct __go_type_descriptor *) type.__object;
-  return __go_alloc (descriptor->__size * n);
+  uint64 size;
+  void *ret;
+
+  size = n * descriptor->__size;
+  if (size == 0)
+    ret = &runtime_zerobase;
+  else if ((descriptor->__code & GO_NO_POINTERS) != 0)
+    ret = runtime_mallocgc (size, FlagNoPointers, 1, 1);
+  else
+    {
+      ret = runtime_mallocgc (size, 0, 1, 1);
+
+      if (UseSpanType)
+	runtime_settype (ret, (uintptr) descriptor | TypeInfo_Array);
+    }
+
+  return ret;
 }
diff --git a/libgo/runtime/malloc.goc b/libgo/runtime/malloc.goc
index 1a0afede2c1..f1f3fcd7522 100644
--- a/libgo/runtime/malloc.goc
+++ b/libgo/runtime/malloc.goc
@@ -20,7 +20,7 @@ package runtime
 
 MHeap runtime_mheap;
 
-extern MStats mstats;	// defined in extern.go
+extern MStats mstats;	// defined in zruntime_def_$GOOS_$GOARCH.go
 
 extern volatile intgo runtime_MemProfileRate
   __asm__ ("runtime.MemProfileRate");
@@ -341,32 +341,30 @@ runtime_mallocinit(void)
 	// enough to hold 4 bits per allocated word.
 	if(sizeof(void*) == 8 && (limit == 0 || limit > (1<<30))) {
 		// On a 64-bit machine, allocate from a single contiguous reservation.
-		// 16 GB should be big enough for now.
+		// 128 GB (MaxMem) should be big enough for now.
 		//
 		// The code will work with the reservation at any address, but ask
-		// SysReserve to use 0x000000f800000000 if possible.
-		// Allocating a 16 GB region takes away 36 bits, and the amd64
+		// SysReserve to use 0x000000c000000000 if possible.
+		// Allocating a 128 GB region takes away 37 bits, and the amd64
 		// doesn't let us choose the top 17 bits, so that leaves the 11 bits
-		// in the middle of 0x00f8 for us to choose.  Choosing 0x00f8 means
-		// that the valid memory addresses will begin 0x00f8, 0x00f9, 0x00fa, 0x00fb.
-		// None of the bytes f8 f9 fa fb can appear in valid UTF-8, and
-		// they are otherwise as far from ff (likely a common byte) as possible.
-		// Choosing 0x00 for the leading 6 bits was more arbitrary, but it
-		// is not a common ASCII code point either.  Using 0x11f8 instead
+		// in the middle of 0x00c0 for us to choose.  Choosing 0x00c0 means
+		// that the valid memory addresses will begin 0x00c0, 0x00c1, ..., 0x00df.
+		// In little-endian, that's c0 00, c1 00, ..., df 00.  None of those are valid
+		// UTF-8 sequences, and they are otherwise as far away from
+		// ff (likely a common byte) as possible.  An earlier attempt to use 0x11f8
 		// caused out of memory errors on OS X during thread allocations.
 		// These choices are both for debuggability and to reduce the
 		// odds of the conservative garbage collector not collecting memory
 		// because some non-pointer block of memory had a bit pattern
 		// that matched a memory address.
 		//
-		// Actually we reserve 17 GB (because the bitmap ends up being 1 GB)
-		// but it hardly matters: fc is not valid UTF-8 either, and we have to
-		// allocate 15 GB before we get that far.
+		// Actually we reserve 136 GB (because the bitmap ends up being 8 GB)
+		// but it hardly matters: e0 00 is not valid UTF-8 either.
 		//
 		// If this fails we fall back to the 32 bit memory mechanism
-		arena_size = (uintptr)(16LL<<30);
+		arena_size = MaxMem;
 		bitmap_size = arena_size / (sizeof(void*)*8/4);
-		p = runtime_SysReserve((void*)(0x00f8ULL<<32), bitmap_size + arena_size);
+		p = runtime_SysReserve((void*)(0x00c0ULL<<32), bitmap_size + arena_size);
 	}
 	if (p == nil) {
 		// On a 32-bit machine, we can't typically get away
@@ -455,6 +453,8 @@ runtime_MHeap_SysAlloc(MHeap *h, uintptr n)
 		runtime_SysMap(p, n);
 		h->arena_used += n;
 		runtime_MHeap_MapBits(h);
+		if(raceenabled)
+			runtime_racemapshadow(p, n);
 		return p;
 	}
 
@@ -481,6 +481,8 @@ runtime_MHeap_SysAlloc(MHeap *h, uintptr n)
 		if(h->arena_used > h->arena_end)
 			h->arena_end = h->arena_used;
 		runtime_MHeap_MapBits(h);
+		if(raceenabled)
+			runtime_racemapshadow(p, n);
 	}
 
 	return p;
@@ -709,12 +711,13 @@ runtime_mal(uintptr n)
 }
 
 void *
-runtime_new(Type *typ)
+runtime_new(const Type *typ)
 {
 	void *ret;
 	uint32 flag;
 
-	runtime_m()->racepc = runtime_getcallerpc(&typ);
+	if(raceenabled)
+		runtime_m()->racepc = runtime_getcallerpc(&typ);
 	flag = typ->__code&GO_NO_POINTERS ? FlagNoPointers : 0;
 	ret = runtime_mallocgc(typ->__size, flag, 1, 1);
 
diff --git a/libgo/runtime/malloc.h b/libgo/runtime/malloc.h
index 61f0f703a24..b3baaec0fcd 100644
--- a/libgo/runtime/malloc.h
+++ b/libgo/runtime/malloc.h
@@ -114,12 +114,12 @@ enum
 	HeapAllocChunk = 1<<20,		// Chunk size for heap growth
 
 	// Number of bits in page to span calculations (4k pages).
-	// On 64-bit, we limit the arena to 16G, so 22 bits suffices.
-	// On 32-bit, we don't bother limiting anything: 20 bits for 4G.
+	// On 64-bit, we limit the arena to 128GB, or 37 bits.
+	// On 32-bit, we don't bother limiting anything, so we use the full 32-bit address.
#if __SIZEOF_POINTER__ == 8
-	MHeapMap_Bits = 22,
+	MHeapMap_Bits = 37 - PageShift,
#else
-	MHeapMap_Bits = 20,
+	MHeapMap_Bits = 32 - PageShift,
#endif
 
 	// Max number of threads to run garbage collection.
@@ -133,7 +133,7 @@ enum
 // This must be a #define instead of an enum because it
 // is so large.
#if __SIZEOF_POINTER__ == 8
-#define	MaxMem	(16ULL<<30)	/* 16 GB */
+#define	MaxMem	(1ULL<<(MHeapMap_Bits+PageShift))	/* 128 GB */
#else
#define	MaxMem	((uintptr)-1)
#endif
@@ -198,7 +198,7 @@ void	runtime_FixAlloc_Free(FixAlloc *f, void *p);
 
 
 // Statistics.
-// Shared with Go: if you edit this structure, also edit extern.go.
+// Shared with Go: if you edit this structure, also edit type MemStats in mem.go.
 struct MStats
 {
 	// General statistics.
@@ -358,7 +358,7 @@ struct MSpan
 	uintptr	npages;		// number of pages in span
 	MLink	*freelist;	// list of free objects
 	uint32	ref;		// number of allocated objects in this span
-	uint32	sizeclass;	// size class
+	int32	sizeclass;	// size class
 	uintptr	elemsize;	// computed from sizeclass or from npages
 	uint32	state;		// MSpanInUse etc
 	int64	unusedsince;	// First time spotted by GC in MSpanFree state
@@ -452,6 +452,8 @@ void	runtime_unmarkspan(void *v, uintptr size);
 bool	runtime_blockspecial(void*);
 void	runtime_setblockspecial(void*, bool);
 void	runtime_purgecachedstats(MCache*);
+void*	runtime_new(const Type *);
+#define runtime_cnew(T) runtime_new(T)
 
 void	runtime_settype(void*, uintptr);
 void	runtime_settype_flush(M*, bool);
@@ -487,3 +489,8 @@ enum
 	// Enables type information at the end of blocks allocated from heap
 	DebugTypeAtBlockEnd = 0,
 };
+
+// defined in mgc0.go
+void	runtime_gc_m_ptr(Eface*);
+
+void	runtime_memorydump(void);
diff --git a/libgo/runtime/mgc0.c b/libgo/runtime/mgc0.c
index 72feb1f4481..5ea456f3cf1 100644
--- a/libgo/runtime/mgc0.c
+++ b/libgo/runtime/mgc0.c
@@ -874,6 +874,81 @@ sweepspan(ParFor *desc, uint32 idx)
 	}
 }
 
+static void
+dumpspan(uint32 idx)
+{
+	int32 sizeclass, n, npages, i, column;
+	uintptr size;
+	byte *p;
+	byte *arena_start;
+	MSpan *s;
+	bool allocated, special;
+
+	s = runtime_mheap.allspans[idx];
+	if(s->state != MSpanInUse)
+		return;
+	arena_start = runtime_mheap.arena_start;
+	p = (byte*)(s->start << PageShift);
+	sizeclass = s->sizeclass;
+	size = s->elemsize;
+	if(sizeclass == 0) {
+		n = 1;
+	} else {
+		npages = runtime_class_to_allocnpages[sizeclass];
+		n = (npages << PageShift) / size;
+	}
+
+	runtime_printf("%p .. %p:\n", p, p+n*size);
+	column = 0;
+	for(; n>0; n--, p+=size) {
+		uintptr off, *bitp, shift, bits;
+
+		off = (uintptr*)p - (uintptr*)arena_start;
+		bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1;
+		shift = off % wordsPerBitmapWord;
+		bits = *bitp>>shift;
+
+		allocated = ((bits & bitAllocated) != 0);
+		special = ((bits & bitSpecial) != 0);
+
+		for(i=0; (uint32)i<size; i+=sizeof(void*)) {
+			if(column == 0) {
+				runtime_printf("\t");
+			}
+			if(i == 0) {
+				runtime_printf(allocated ? "(" : "[");
+				runtime_printf(special ? "@" : "");
+				runtime_printf("%p: ", p+i);
+			} else {
+				runtime_printf(" ");
+			}
+
+			runtime_printf("%p", *(void**)(p+i));
+
+			if(i+sizeof(void*) >= size) {
+				runtime_printf(allocated ? ") " : "] ");
+			}
+
+			column++;
+			if(column == 8) {
+				runtime_printf("\n");
+				column = 0;
+			}
+		}
+	}
+	runtime_printf("\n");
+}
+
+// A debugging function to dump the contents of memory
+void
+runtime_memorydump(void)
+{
+	uint32 spanidx;
+
+	for(spanidx=0; spanidx<runtime_mheap.nspan; spanidx++) {
+		dumpspan(spanidx);
+	}
+}
 void
 runtime_gchelper(void)
 {
@@ -1141,9 +1216,6 @@ runfinq(void* dummy __attribute__ ((unused)))
 	FinBlock *fb, *next;
 	uint32 i;
 
-	if(raceenabled)
-		runtime_racefingo();
-
 	for(;;) {
 		// There's no need for a lock in this section
 		// because it only conflicts with the garbage
@@ -1158,6 +1230,8 @@ runfinq(void* dummy __attribute__ ((unused)))
 			runtime_park(nil, nil, "finalizer wait");
 			continue;
 		}
+		if(raceenabled)
+			runtime_racefingo();
 		for(; fb; fb=next) {
 			next = fb->next;
 			for(i=0; i<(uint32)fb->cnt; i++) {
diff --git a/libgo/runtime/mheap.c b/libgo/runtime/mheap.c
index 61ef4dd7992..7be135bb67c 100644
--- a/libgo/runtime/mheap.c
+++ b/libgo/runtime/mheap.c
@@ -343,6 +343,15 @@ MHeap_FreeLocked(MHeap *h, MSpan *s)
 		runtime_MSpanList_Insert(&h->large, s);
 }
 
+static void
+forcegchelper(void *vnote)
+{
+	Note *note = (Note*)vnote;
+
+	runtime_gc(1);
+	runtime_notewakeup(note);
+}
+
 // Release (part of) unused memory to OS.
 // Goroutine created at startup.
 // Loop forever.
@@ -356,7 +365,7 @@ runtime_MHeap_Scavenger(void* dummy)
 	uintptr released, sumreleased;
 	const byte *env;
 	bool trace;
-	Note note;
+	Note note, *notep;
 
 	USED(dummy);
 
@@ -387,7 +396,15 @@ runtime_MHeap_Scavenger(void* dummy)
 		now = runtime_nanotime();
 		if(now - mstats.last_gc > forcegc) {
 			runtime_unlock(h);
-			runtime_gc(1);
+			// The scavenger can not block other goroutines,
+			// otherwise deadlock detector can fire spuriously.
+			// GC blocks other goroutines via the runtime_worldsema.
+			runtime_noteclear(&note);
+			notep = &note;
+			__go_go(forcegchelper, (void*)&notep);
+			runtime_entersyscall();
+			runtime_notesleep(&note);
+			runtime_exitsyscall();
 			runtime_lock(h);
 			now = runtime_nanotime();
 			if (trace)
diff --git a/libgo/runtime/mprof.goc b/libgo/runtime/mprof.goc
index edec3dc08dc..a92ef40b03b 100644
--- a/libgo/runtime/mprof.goc
+++ b/libgo/runtime/mprof.goc
@@ -315,8 +315,7 @@ runtime_blockevent(int64 cycles, int32 skip)
 	runtime_unlock(&proflock);
 }
 
-// Go interface to profile data.  (Declared in extern.go)
-// Assumes Go sizeof(int) == sizeof(int32)
+// Go interface to profile data.  (Declared in debug.go)
 
 // Must match MemProfileRecord in debug.go.
 typedef struct Record Record;
diff --git a/libgo/runtime/proc.c b/libgo/runtime/proc.c
index b6254a7e53f..d90bb2c9104 100644
--- a/libgo/runtime/proc.c
+++ b/libgo/runtime/proc.c
@@ -18,6 +18,7 @@
 #include "defs.h"
 #include "malloc.h"
 #include "race.h"
+#include "go-type.h"
 #include "go-defer.h"
 
#ifdef USING_SPLIT_STACK
@@ -237,7 +238,7 @@ struct Sched {
 	Lock;
 
 	G *gfree;	// available g's (status == Gdead)
-	int32 goidgen;
+	int64 goidgen;
 
 	G *ghead;	// g's waiting to run
 	G *gtail;
@@ -601,7 +602,7 @@ runtime_goroutineheader(G *gp)
 		status = "???";
 		break;
 	}
-	runtime_printf("goroutine %d [%s]:\n", gp->goid, status);
+	runtime_printf("goroutine %D [%s]:\n", gp->goid, status);
 }
 
 void
@@ -745,7 +746,7 @@ gput(G *gp)
 	// If g is the idle goroutine for an m, hand it off.
 	if(gp->idlem != nil) {
 		if(gp->idlem->idleg != nil) {
-			runtime_printf("m%d idle out of sync: g%d g%d\n",
+			runtime_printf("m%d idle out of sync: g%D g%D\n",
 				gp->idlem->id, gp->idlem->idleg->goid,
 				gp->goid);
 			runtime_throw("runtime: double idle");
@@ -847,7 +848,7 @@ readylocked(G *gp)
 
 	// Mark runnable.
 	if(gp->status == Grunnable || gp->status == Grunning) {
-		runtime_printf("goroutine %d has status %d\n", gp->goid, gp->status);
+		runtime_printf("goroutine %D has status %d\n", gp->goid, gp->status);
 		runtime_throw("bad g->status in ready");
 	}
 	gp->status = Grunnable;
@@ -1204,7 +1205,16 @@ runtime_newm(void)
 	pthread_t tid;
 	size_t stacksize;
 
-	mp = runtime_malloc(sizeof(M));
+#if 0
+	static const Type *mtype;  // The Go type M
+	if(mtype == nil) {
+		Eface e;
+		runtime_gc_m_ptr(&e);
+		mtype = ((const PtrType*)e.__type_descriptor)->__element_type;
+	}
+#endif
+
+	mp = runtime_mal(sizeof *mp);
 	mcommoninit(mp);
 	mp->g0 = runtime_malg(-1, nil, nil);
 
@@ -1513,9 +1523,9 @@ __go_go(void (*fn)(void*), void* arg)
 	byte *sp;
 	size_t spsize;
 	G *newg;
-	int32 goid;
+	int64 goid;
 
-	goid = runtime_xadd((uint32*)&runtime_sched.goidgen, 1);
+	goid = runtime_xadd64((uint64*)&runtime_sched.goidgen, 1);
 	if(raceenabled)
 		runtime_racegostart(goid, runtime_getcallerpc(&fn));
 
diff --git a/libgo/runtime/race.h b/libgo/runtime/race.h
index 4bb7163f42c..2d8d095eaaf 100644
--- a/libgo/runtime/race.h
+++ b/libgo/runtime/race.h
@@ -15,6 +15,7 @@ void	runtime_raceinit(void);
 // Finalize race detection subsystem, does not return.
 void	runtime_racefini(void);
 
+void	runtime_racemapshadow(void *addr, uintptr size);
 void	runtime_racemalloc(void *p, uintptr sz, void *pc);
 void	runtime_racefree(void *p);
 void	runtime_racegostart(int32 goid, void *pc);
diff --git a/libgo/runtime/runtime.c b/libgo/runtime/runtime.c
index 3d4865a001a..211edc0f4c2 100644
--- a/libgo/runtime/runtime.c
+++ b/libgo/runtime/runtime.c
@@ -159,13 +159,13 @@ runtime_cputicks(void)
 }
 
 bool
-runtime_showframe(const unsigned char *s)
+runtime_showframe(String s)
 {
 	static int32 traceback = -1;
 
 	if(traceback < 0)
 		traceback = runtime_gotraceback();
-	return traceback > 1 || (s != nil && __builtin_strchr((const char*)s, '.') != nil && __builtin_memcmp(s, "runtime.", 7) != 0);
+	return traceback > 1 || (__builtin_memchr(s.str, '.', s.len) != nil && __builtin_memcmp(s.str, "runtime.", 7) != 0);
 }
 
 static Lock ticksLock;
diff --git a/libgo/runtime/runtime.h b/libgo/runtime/runtime.h
index fb89a829ad6..977ae4906f4 100644
--- a/libgo/runtime/runtime.h
+++ b/libgo/runtime/runtime.h
@@ -63,6 +63,7 @@ typedef	struct	GCStats		GCStats;
 typedef	struct	LFNode		LFNode;
 typedef	struct	ParFor		ParFor;
 typedef	struct	ParForThread	ParForThread;
+typedef	struct	CgoMal		CgoMal;
 
 typedef	struct	__go_open_array		Slice;
 typedef	struct	String			String;
@@ -72,13 +73,14 @@ typedef	struct	__go_type_descriptor	Type;
 typedef	struct	__go_defer_stack	Defer;
 typedef	struct	__go_panic_stack	Panic;
 
+typedef struct	__go_ptr_type		PtrType;
 typedef struct	__go_func_type		FuncType;
 typedef struct	__go_map_type		MapType;
 
 typedef struct	Traceback	Traceback;
 
 /*
- * per-cpu declaration.
+ * Per-CPU declaration.
 */
 extern M*	runtime_m(void);
 extern G*	runtime_g(void);
@@ -159,7 +161,7 @@ struct	G
 	void*	param;		// passed parameter on wakeup
 	bool	fromgogo;	// reached from gogo
 	int16	status;
-	int32	goid;
+	int64	goid;
 	uint32	selgen;		// valid sudog pointer
 	const char*	waitreason;	// if status==Gwaiting
 	G*	schedlink;
@@ -178,7 +180,7 @@ struct	G
 	uintptr	gopc;	// pc of go statement that created this goroutine
 
 	int32	ncgo;
-	struct cgoalloc *cgoalloc;
+	CgoMal*	cgomal;
 
 	Traceback* traceback;
 
@@ -201,7 +203,7 @@ struct	M
 	int32	profilehz;
 	int32	helpgc;
 	uint32	fastrand;
-	uint64	ncgocall;
+	uint64	ncgocall;	// number of cgo calls in total
 	Note	havenextg;
 	G*	nextg;
 	M*	alllink;	// on allm
@@ -316,6 +318,14 @@ struct ParFor
 	uint64 nsleep;
 };
 
+// Track memory allocated by code not written in Go during a cgo call,
+// so that the garbage collector can see them.
+struct CgoMal
+{
+	CgoMal	*next;
+	byte	*alloc;
+};
+
 /*
  * defined macros
  *    you need super-gopher-guru privilege
@@ -329,6 +339,7 @@ struct ParFor
 /*
  * external data
 */
+extern	uintptr runtime_zerobase;
 G*	runtime_allg;
 G*	runtime_lastg;
 M*	runtime_allm;
@@ -569,7 +580,7 @@ void	runtime_osyield(void);
 void	runtime_LockOSThread(void) __asm__("runtime.LockOSThread");
 void	runtime_UnlockOSThread(void) __asm__("runtime.UnlockOSThread");
 
-bool	runtime_showframe(const unsigned char*);
+bool	runtime_showframe(String);
 
 uintptr	runtime_memlimit(void);
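
For readers following the go-cgo.c and runtime.h hunks above: the patch renames the per-goroutine list of C-side allocations from struct cgoalloc to CgoMal and hangs it off g->cgomal so the collector keeps that memory alive while a cgo call is in flight. The following is a minimal, self-contained sketch of that list-push pattern only; the file-scope pointer and plain malloc here are simplified stand-ins for the real G structure and __go_alloc, not the actual libgo code.

/* Mirrors CgoMal from runtime.h above: one node per C-side allocation
   made while a goroutine is inside a cgo call.  */
#include <stdlib.h>

typedef struct CgoMal CgoMal;
struct CgoMal
{
  CgoMal *next;
  void *alloc;
};

/* Stand-in for the current goroutine's g->cgomal field.  */
static CgoMal *current_cgomal;

/* Sketch of alloc_saved(): allocate memory for C code and push a tracking
   node so a collector scanning this list treats the block as reachable.  */
static void *
alloc_saved_sketch (size_t n)
{
  void *ret = malloc (n);
  CgoMal *c = malloc (sizeof (CgoMal));

  c->next = current_cgomal;
  c->alloc = ret;
  current_cgomal = c;
  return ret;
}

/* Sketch of the cleanup in syscall_cgocalldone(): the real code only drops
   the list head and lets the garbage collector reclaim what is no longer
   referenced; with plain malloc, as here, the nodes would simply leak.  */
static void
cgocall_done_sketch (void)
{
  current_cgomal = NULL;
}

int
main (void)
{
  char *buf = alloc_saved_sketch (64);  /* memory handed to C code */
  buf[0] = 'x';
  cgocall_done_sketch ();               /* "return" to Go */
  return 0;
}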