Diffstat (limited to 'libgo/runtime')
-rw-r--r--  libgo/runtime/go-reflect-call.c |  25
-rw-r--r--  libgo/runtime/go-reflect.c      |   2
-rw-r--r--  libgo/runtime/go-type.h         |  30
-rw-r--r--  libgo/runtime/malloc.goc        |   5
-rw-r--r--  libgo/runtime/malloc.h          |   8
-rw-r--r--  libgo/runtime/mgc0.c            |  21
-rw-r--r--  libgo/runtime/mheap.c           |   2
7 files changed, 48 insertions, 45 deletions
diff --git a/libgo/runtime/go-reflect-call.c b/libgo/runtime/go-reflect-call.c
index 610fabf545f..6ae749f9a56 100644
--- a/libgo/runtime/go-reflect-call.c
+++ b/libgo/runtime/go-reflect-call.c
@@ -119,6 +119,22 @@ go_interface_to_ffi (void)
   return ret;
 }
 
+/* Return an ffi_type for a Go complex type.  */
+
+static ffi_type *
+go_complex_to_ffi (ffi_type *float_type)
+{
+  ffi_type *ret;
+
+  ret = (ffi_type *) __go_alloc (sizeof (ffi_type));
+  ret->type = FFI_TYPE_STRUCT;
+  ret->elements = (ffi_type **) __go_alloc (3 * sizeof (ffi_type *));
+  ret->elements[0] = float_type;
+  ret->elements[1] = float_type;
+  ret->elements[2] = NULL;
+  return ret;
+}
+
 /* Return an ffi_type for a type described by a
    __go_type_descriptor.  */
 
@@ -141,12 +157,13 @@ go_type_to_ffi (const struct __go_type_descriptor *descriptor)
       if (sizeof (double) == 8)
         return &ffi_type_double;
       abort ();
-    case GO_FLOAT:
-      return &ffi_type_float;
     case GO_COMPLEX64:
+      if (sizeof (float) == 4)
+        return go_complex_to_ffi (&ffi_type_float);
+      abort ();
     case GO_COMPLEX128:
-    case GO_COMPLEX:
-      /* FIXME.  */
+      if (sizeof (double) == 8)
+        return go_complex_to_ffi (&ffi_type_double);
       abort ();
     case GO_INT16:
       return &ffi_type_sint16;
diff --git a/libgo/runtime/go-reflect.c b/libgo/runtime/go-reflect.c
index e608df70a7b..9485c0979b6 100644
--- a/libgo/runtime/go-reflect.c
+++ b/libgo/runtime/go-reflect.c
@@ -63,11 +63,9 @@ get_descriptor (int code)
       return &ptr_bool_descriptor;
     case GO_FLOAT32:
     case GO_FLOAT64:
-    case GO_FLOAT:
       return &ptr_float_descriptor;
     case GO_COMPLEX64:
     case GO_COMPLEX128:
-    case GO_COMPLEX:
       return &ptr_complex_descriptor;
     case GO_INT16:
     case GO_INT32:
diff --git a/libgo/runtime/go-type.h b/libgo/runtime/go-type.h
index d8785b68f78..b1f32850a00 100644
--- a/libgo/runtime/go-type.h
+++ b/libgo/runtime/go-type.h
@@ -38,22 +38,20 @@
 #define GO_UINT32 10
 #define GO_UINT64 11
 #define GO_UINTPTR 12
-#define GO_FLOAT 13
-#define GO_FLOAT32 14
-#define GO_FLOAT64 15
-#define GO_COMPLEX 16
-#define GO_COMPLEX64 17
-#define GO_COMPLEX128 18
-#define GO_ARRAY 19
-#define GO_CHAN 20
-#define GO_FUNC 21
-#define GO_INTERFACE 22
-#define GO_MAP 23
-#define GO_PTR 24
-#define GO_SLICE 25
-#define GO_STRING 26
-#define GO_STRUCT 27
-#define GO_UNSAFE_POINTER 28
+#define GO_FLOAT32 13
+#define GO_FLOAT64 14
+#define GO_COMPLEX64 15
+#define GO_COMPLEX128 16
+#define GO_ARRAY 17
+#define GO_CHAN 18
+#define GO_FUNC 19
+#define GO_INTERFACE 20
+#define GO_MAP 21
+#define GO_PTR 22
+#define GO_SLICE 23
+#define GO_STRING 24
+#define GO_STRUCT 25
+#define GO_UNSAFE_POINTER 26
 
 /* For each Go type the compiler constructs one of these structures.
    This is used for type reflection, interfaces, maps, and reference
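The new go_complex_to_ffi helper tells libffi to treat a Go complex value as a structure of two identical floating-point elements. The sketch below is a standalone illustration of that layout, not code from this commit: it builds the same two-float ffi_type by hand and calls a C99 _Complex float function through it. The callee sum_parts is made up for the example, and it assumes the platform passes a _Complex float the same way as a struct of two floats.

#include <complex.h>
#include <ffi.h>
#include <stdio.h>

/* Hypothetical callee: takes a complex float, returns the sum of its
   real and imaginary parts.  */
static float
sum_parts (float _Complex c)
{
  return crealf (c) + cimagf (c);
}

int
main (void)
{
  /* Describe complex64 as a struct of two floats, the same layout
     go_complex_to_ffi builds.  Size and alignment start at 0 and are
     filled in by ffi_prep_cif for FFI_TYPE_STRUCT.  */
  ffi_type *elements[3] = { &ffi_type_float, &ffi_type_float, NULL };
  ffi_type complex64_type = { 0, 0, FFI_TYPE_STRUCT, elements };

  ffi_type *arg_types[1] = { &complex64_type };
  ffi_cif cif;
  if (ffi_prep_cif (&cif, FFI_DEFAULT_ABI, 1, &ffi_type_float, arg_types)
      != FFI_OK)
    return 1;

  /* A _Complex float is laid out as two consecutive floats, so its
     address can be passed where libffi expects the struct.  */
  float _Complex arg = 3.0f + 4.0f * I;
  float result = 0.0f;
  void *arg_values[1] = { &arg };
  ffi_call (&cif, FFI_FN (sum_parts), &result, arg_values);

  printf ("%f\n", result);   /* expect 7.000000 */
  return 0;
}

Something like gcc example.c -lffi -lm should build it, depending on how libffi is installed on the system.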
diff --git a/libgo/runtime/malloc.goc b/libgo/runtime/malloc.goc
index 7d6af6f3a0b..be37777d7bd 100644
--- a/libgo/runtime/malloc.goc
+++ b/libgo/runtime/malloc.goc
@@ -267,9 +267,14 @@ runtime_allocmcache(void)
     return c;
 }
 
+extern int32 runtime_sizeof_C_MStats
+  __asm__ ("libgo_runtime.runtime.Sizeof_C_MStats");
+
 void
 runtime_mallocinit(void)
 {
+    runtime_sizeof_C_MStats = sizeof(MStats);
+
     runtime_initfintab();
     runtime_Mprof_Init();
 
diff --git a/libgo/runtime/malloc.h b/libgo/runtime/malloc.h
index 585996e6dca..369f9b8e771 100644
--- a/libgo/runtime/malloc.h
+++ b/libgo/runtime/malloc.h
@@ -176,6 +176,7 @@ struct MStats
     uint64  sys;      // bytes obtained from system (should be sum of xxx_sys below)
     uint64  nlookup;  // number of pointer lookups
     uint64  nmalloc;  // number of mallocs
+    uint64  nfree;    // number of frees
 
     // Statistics about malloc heap.
     // protected by mheap.Lock
@@ -199,7 +200,8 @@
     // Statistics about garbage collector.
     // Protected by stopping the world during GC.
     uint64  next_gc;  // next GC (in heap_alloc time)
-    uint64  pause_ns;
+    uint64  pause_total_ns;
+    uint64  pause_ns[256];
     uint32  numgc;
     bool    enablegc;
     bool    debuggc;
@@ -327,10 +329,6 @@ struct MHeap
     byte *min;
     byte *max;
 
-    // range of addresses we might see in a Native Client closure
-    byte *closure_min;
-    byte *closure_max;
-
     // central free lists for small size classes.
     // the union makes sure that the MCentrals are
     // spaced 64 bytes apart, so that each MCentral.Lock
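Exporting sizeof(MStats) through a global whose assembler name is the Go symbol libgo_runtime.runtime.Sizeof_C_MStats lets the Go half of the runtime verify that its mirror of the statistics struct still matches the C layout after additions such as nfree, pause_total_ns and the pause_ns[256] ring. The program below is a rough, self-contained analogue of that check, not libgo code: both struct definitions are trimmed stand-ins, and the Go-side mirror is faked as a second C struct purely for illustration.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Trimmed stand-in for the C side's MStats.  */
struct MStats_C {
    uint64_t alloc, total_alloc, sys;
    uint64_t nlookup, nmalloc, nfree;
    uint64_t next_gc, pause_total_ns;
    uint64_t pause_ns[256];
    uint32_t numgc;
};

/* Stand-in for the mirror that really lives in the Go runtime package.  */
struct MStats_mirror {
    uint64_t alloc, total_alloc, sys;
    uint64_t nlookup, nmalloc, nfree;
    uint64_t next_gc, pause_total_ns;
    uint64_t pause_ns[256];
    uint32_t numgc;
};

/* Analogue of runtime_sizeof_C_MStats, filled in at init time.  */
static int32_t sizeof_C_MStats;

static void
mallocinit (void)
{
    sizeof_C_MStats = (int32_t) sizeof (struct MStats_C);
}

int
main (void)
{
    mallocinit ();
    /* The other side can now detect layout drift at startup.  */
    if (sizeof_C_MStats != (int32_t) sizeof (struct MStats_mirror))
      {
        fprintf (stderr, "MStats size mismatch\n");
        exit (1);
      }
    printf ("MStats is %d bytes\n", sizeof_C_MStats);
    return 0;
}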
diff --git a/libgo/runtime/mgc0.c b/libgo/runtime/mgc0.c
index 1a1a5ace834..f2703ab0263 100644
--- a/libgo/runtime/mgc0.c
+++ b/libgo/runtime/mgc0.c
@@ -74,22 +74,6 @@ scanblock(byte *b, int64 n)
         obj = vp[i];
         if(obj == nil)
             continue;
-        if(runtime_mheap.closure_min != nil && runtime_mheap.closure_min <= (byte*)obj && (byte*)obj < runtime_mheap.closure_max) {
-            if((((uintptr)obj) & 63) != 0)
-                continue;
-
-            // Looks like a Native Client closure.
-            // Actual pointer is pointed at by address in first instruction.
-            // Embedded pointer starts at byte 2.
-            // If it is f4f4f4f4 then that space hasn't been
-            // used for a closure yet (f4 is the HLT instruction).
-            // See nacl/386/closure.c for more.
-            void **pp;
-            pp = *(void***)((byte*)obj+2);
-            if(pp == (void**)0xf4f4f4f4)  // HLT... - not a closure after all
-                continue;
-            obj = *pp;
-        }
         if(runtime_mheap.min <= (byte*)obj && (byte*)obj < runtime_mheap.max) {
             if(runtime_mlookup(obj, (byte**)&obj, &size, nil, &refp)) {
                 ref = *refp;
@@ -213,6 +197,7 @@ sweepspan(MSpan *s)
         case RefNone:
             // Free large object.
             mstats.alloc -= s->npages<<PageShift;
+            mstats.nfree++;
             runtime_memclr(p, s->npages<<PageShift);
             if(ref & RefProfiled)
                 runtime_MProf_Free(p, s->npages<<PageShift);
@@ -254,6 +239,7 @@ sweepspan(MSpan *s)
             if(size > (int32)sizeof(uintptr))
                 ((uintptr*)p)[1] = 1;  // mark as "needs to be zeroed"
             mstats.alloc -= size;
+            mstats.nfree++;
             mstats.by_size[s->sizeclass].nfree++;
             runtime_MCache_Free(c, p, s->sizeclass, size);
             break;
@@ -342,7 +328,8 @@ runtime_gc(int32 force __attribute__ ((unused)))
 
     t1 = runtime_nanotime();
     mstats.numgc++;
-    mstats.pause_ns += t1 - t0;
+    mstats.pause_ns[mstats.numgc%nelem(mstats.pause_ns)] = t1 - t0;
+    mstats.pause_total_ns += t1 - t0;
     if(mstats.debuggc)
         runtime_printf("pause %llu\n", (unsigned long long)t1-t0);
     pthread_mutex_unlock(&gcsema);
diff --git a/libgo/runtime/mheap.c b/libgo/runtime/mheap.c
index a375cad7345..52c6d8c1baa 100644
--- a/libgo/runtime/mheap.c
+++ b/libgo/runtime/mheap.c
@@ -166,7 +166,7 @@ MHeap_Grow(MHeap *h, uintptr npage)
     // Ask for a big chunk, to reduce the number of mappings
     // the operating system needs to track; also amortizes
     // the overhead of an operating system mapping.
-    // For Native Client, allocate a multiple of 64kB (16 pages).
+    // Allocate a multiple of 64kB (16 pages).
     npage = (npage+15)&~15;
     ask = npage<<PageShift;
     if(ask < HeapAllocChunk)
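The reworked pause accounting keeps a running total in pause_total_ns and the most recent pauses in the fixed pause_ns[256] ring, indexed by the collection count modulo the ring size, exactly as the runtime_gc hunk above records them. The small standalone program below mimics that bookkeeping with a made-up driver loop; the struct and helper here are illustrative, not runtime code.

#include <stdint.h>
#include <stdio.h>

#define nelem(x) (sizeof (x) / sizeof ((x)[0]))

struct gcstats {
    uint64_t pause_total_ns;
    uint64_t pause_ns[256];   /* ring of the most recent pauses */
    uint32_t numgc;
};

/* Record one collection's pause, the way runtime_gc updates mstats.  */
static void
record_pause (struct gcstats *st, uint64_t t0, uint64_t t1)
{
    st->numgc++;
    st->pause_ns[st->numgc % nelem (st->pause_ns)] = t1 - t0;
    st->pause_total_ns += t1 - t0;
}

int
main (void)
{
    struct gcstats st = { 0 };
    uint64_t now = 0;

    /* Simulate 300 collections so the ring wraps around.  */
    for (int i = 0; i < 300; i++)
      {
        uint64_t start = now;
        now += 1000 + i;           /* pretend each pause takes 1000+i ns */
        record_pause (&st, start, now);
      }

    printf ("collections:  %u\n", st.numgc);
    printf ("total pause:  %llu ns\n",
            (unsigned long long) st.pause_total_ns);
    printf ("latest pause: %llu ns\n",
            (unsigned long long) st.pause_ns[st.numgc % nelem (st.pause_ns)]);
    return 0;
}

Keeping only the last 256 pauses bounds the memory cost of the history, while pause_total_ns preserves the aggregate that the old scalar pause_ns field used to hold.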