author    ian <ian@138bc75d-0d04-0410-961f-82ee72b054a4>  2017-01-03 22:58:48 +0000
committer ian <ian@138bc75d-0d04-0410-961f-82ee72b054a4>  2017-01-03 22:58:48 +0000
commit    243acd49980309a543857c88732b44e5413bd23b (patch)
tree      3478fe1adc6005eb49dda8f1c31857b5324fb886 /libgo/go
parent    13504651c6c0b4cb5777ddcf072f6f7744aa105d (diff)
download  gcc-243acd49980309a543857c88732b44e5413bd23b.tar.gz
runtime: remove __go_alloc and __go_free
Move allocg and handling of allgs slice from C to Go.

Reviewed-on: https://go-review.googlesource.com/34797

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@244036 138bc75d-0d04-0410-961f-82ee72b054a4
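The pattern at the core of the change is simple: the allgs slice and its lock stop being C globals and become ordinary Go package-level variables, with allgadd appending under the lock on the Go side. A minimal, self-contained sketch of that pattern, using illustrative names (gDesc, allgsMu) rather than the runtime's own types:

package main

import "sync"

// gDesc stands in for the runtime's g; sync.Mutex stands in for runtime.mutex.
type gDesc struct{ goid int64 }

var (
	allgsMu sync.Mutex // guards allgs
	allgs   []*gDesc   // all goroutines ever created
)

// allgadd registers a new goroutine descriptor under the lock, as the
// ported runtime function does (minus the GC rescan-list bookkeeping).
func allgadd(gp *gDesc) {
	allgsMu.Lock()
	allgs = append(allgs, gp)
	allgsMu.Unlock()
}

func main() {
	allgadd(&gDesc{goid: 1})
	println(len(allgs)) // 1
}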
Diffstat (limited to 'libgo/go')
-rw-r--r--  libgo/go/runtime/mprof.go             4
-rw-r--r--  libgo/go/runtime/proc.go            205
-rw-r--r--  libgo/go/runtime/runtime2.go          8
-rw-r--r--  libgo/go/runtime/stubs.go            59
-rw-r--r--  libgo/go/runtime/traceback_gccgo.go  55
5 files changed, 315 insertions(+), 16 deletions(-)
diff --git a/libgo/go/runtime/mprof.go b/libgo/go/runtime/mprof.go
index a2701e32f76..cc0a673c8b4 100644
--- a/libgo/go/runtime/mprof.go
+++ b/libgo/go/runtime/mprof.go
@@ -556,7 +556,7 @@ func GoroutineProfile(p []StackRecord) (n int, ok bool) {
 	stopTheWorld("profile")

 	n = 1
-	for _, gp1 := range allgs() {
+	for _, gp1 := range allgs {
 		if isOK(gp1) {
 			n++
 		}
@@ -571,7 +571,7 @@ func GoroutineProfile(p []StackRecord) (n int, ok bool) {
 		r = r[1:]

 		// Save other goroutines.
-		for _, gp1 := range allgs() {
+		for _, gp1 := range allgs {
 			if isOK(gp1) {
 				if len(r) == 0 {
 					// Should be impossible, but better to return a
diff --git a/libgo/go/runtime/proc.go b/libgo/go/runtime/proc.go
index fa90a282866..78cc6ee7d8a 100644
--- a/libgo/go/runtime/proc.go
+++ b/libgo/go/runtime/proc.go
@@ -11,15 +11,18 @@ import (
 // Functions temporarily called by C code.
 //go:linkname newextram runtime.newextram
+//go:linkname checkdead runtime.checkdead
+//go:linkname schedtrace runtime.schedtrace
+//go:linkname allgadd runtime.allgadd

 // Functions temporarily in C that have not yet been ported.
 func allocm(*p, bool, *unsafe.Pointer, *uintptr) *m
 func malg(bool, bool, *unsafe.Pointer, *uintptr) *g
-func allgadd(*g)

 // C functions for ucontext management.
 func setGContext()
 func makeGContext(*g, unsafe.Pointer, uintptr)
+func getTraceback(me, gp *g)

 // main_init_done is a signal used by cgocallbackg that initialization
 // has been completed. It is made before _cgo_notify_runtime_init_done,
 // so all cgo calls can rely on it existing. When main_init is complete,
@@ -27,6 +30,39 @@ func makeGContext(*g, unsafe.Pointer, uintptr)
 // it is closed, meaning cgocallbackg can reliably receive from it.
 var main_init_done chan bool

+var (
+	allgs    []*g
+	allglock mutex
+)
+
+func allgadd(gp *g) {
+	if readgstatus(gp) == _Gidle {
+		throw("allgadd: bad status Gidle")
+	}
+
+	lock(&allglock)
+	allgs = append(allgs, gp)
+	allglen = uintptr(len(allgs))
+
+	// Grow GC rescan list if necessary.
+	if len(allgs) > cap(work.rescan.list) {
+		lock(&work.rescan.lock)
+		l := work.rescan.list
+		// Let append do the heavy lifting, but keep the
+		// length the same.
+		work.rescan.list = append(l[:cap(l)], 0)[:len(l)]
+		unlock(&work.rescan.lock)
+	}
+	unlock(&allglock)
+}
+
+// All reads and writes of g's status go through readgstatus, casgstatus
+// castogscanstatus, casfrom_Gscanstatus.
+//go:nosplit
+func readgstatus(gp *g) uint32 {
+	return atomic.Load(&gp.atomicstatus)
+}
+
 // If asked to move to or from a Gscanstatus this will throw. Use the castogscanstatus
 // and casfrom_Gscanstatus instead.
 // casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that
@@ -328,3 +364,170 @@ func lockextra(nilokay bool) *m {
 func unlockextra(mp *m) {
 	atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp)))
 }
+
+// Check for deadlock situation.
+// The check is based on number of running M's, if 0 -> deadlock.
+func checkdead() {
+	// For -buildmode=c-shared or -buildmode=c-archive it's OK if
+	// there are no running goroutines. The calling program is
+	// assumed to be running.
+	if islibrary || isarchive {
+		return
+	}
+
+	// If we are dying because of a signal caught on an already idle thread,
+	// freezetheworld will cause all running threads to block.
+	// And runtime will essentially enter into deadlock state,
+	// except that there is a thread that will call exit soon.
+	if panicking > 0 {
+		return
+	}
+
+	// -1 for sysmon
+	run := sched.mcount - sched.nmidle - sched.nmidlelocked - 1
+	if run > 0 {
+		return
+	}
+	if run < 0 {
+		print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", sched.mcount, "\n")
+		throw("checkdead: inconsistent counts")
+	}
+
+	grunning := 0
+	lock(&allglock)
+	for i := 0; i < len(allgs); i++ {
+		gp := allgs[i]
+		if isSystemGoroutine(gp) {
+			continue
+		}
+		s := readgstatus(gp)
+		switch s &^ _Gscan {
+		case _Gwaiting:
+			grunning++
+		case _Grunnable,
+			_Grunning,
+			_Gsyscall:
+			unlock(&allglock)
+			print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
+			throw("checkdead: runnable g")
+		}
+	}
+	unlock(&allglock)
+	if grunning == 0 { // possible if main goroutine calls runtime·Goexit()
+		throw("no goroutines (main called runtime.Goexit) - deadlock!")
+	}
+
+	// Maybe jump time forward for playground.
+	gp := timejump()
+	if gp != nil {
+		// Temporarily commented out for gccgo.
+		// For gccgo this code will never run anyhow.
+		// casgstatus(gp, _Gwaiting, _Grunnable)
+		// globrunqput(gp)
+		// _p_ := pidleget()
+		// if _p_ == nil {
+		// 	throw("checkdead: no p for timer")
+		// }
+		// mp := mget()
+		// if mp == nil {
+		// 	// There should always be a free M since
+		// 	// nothing is running.
+		// 	throw("checkdead: no m for timer")
+		// }
+		// nmp.nextp.set(_p_)
+		// notewakeup(&mp.park)
+		// return
+	}
+
+	getg().m.throwing = -1 // do not dump full stacks
+	throw("all goroutines are asleep - deadlock!")
+}
+
+var starttime int64
+
+func schedtrace(detailed bool) {
+	now := nanotime()
+	if starttime == 0 {
+		starttime = now
+	}
+
+	gomaxprocs := int32(GOMAXPROCS(0))
+
+	lock(&sched.lock)
+	print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle, " threads=", sched.mcount, " spinningthreads=", sched.nmspinning, " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
+	if detailed {
+		print(" gcwaiting=", sched.gcwaiting, " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait, "\n")
+	}
+	// We must be careful while reading data from P's, M's and G's.
+	// Even if we hold schedlock, most data can be changed concurrently.
+	// E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil.
+	for i := int32(0); i < gomaxprocs; i++ {
+		_p_ := allp[i]
+		if _p_ == nil {
+			continue
+		}
+		mp := _p_.m.ptr()
+		h := atomic.Load(&_p_.runqhead)
+		t := atomic.Load(&_p_.runqtail)
+		if detailed {
+			id := int32(-1)
+			if mp != nil {
+				id = mp.id
+			}
+			print("  P", i, ": status=", _p_.status, " schedtick=", _p_.schedtick, " syscalltick=", _p_.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", _p_.gfreecnt, "\n")
+		} else {
+			// In non-detailed mode format lengths of per-P run queues as:
+			// [len1 len2 len3 len4]
+			print(" ")
+			if i == 0 {
+				print("[")
+			}
+			print(t - h)
+			if i == gomaxprocs-1 {
+				print("]\n")
+			}
+		}
+	}
+
+	if !detailed {
+		unlock(&sched.lock)
+		return
+	}
+
+	for mp := allm(); mp != nil; mp = mp.alllink {
+		_p_ := mp.p.ptr()
+		gp := mp.curg
+		lockedg := mp.lockedg
+		id1 := int32(-1)
+		if _p_ != nil {
+			id1 = _p_.id
+		}
+		id2 := int64(-1)
+		if gp != nil {
+			id2 = gp.goid
+		}
+		id3 := int64(-1)
+		if lockedg != nil {
+			id3 = lockedg.goid
+		}
+		print("  M", mp.id, ": p=", id1, " curg=", id2, " mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, ""+" locks=", mp.locks, " dying=", mp.dying, " helpgc=", mp.helpgc, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=", id3, "\n")
+	}
+
+	lock(&allglock)
+	for gi := 0; gi < len(allgs); gi++ {
+		gp := allgs[gi]
+		mp := gp.m
+		lockedm := gp.lockedm
+		id1 := int32(-1)
+		if mp != nil {
+			id1 = mp.id
+		}
+		id2 := int32(-1)
+		if lockedm != nil {
+			id2 = lockedm.id
+		}
+		print("  G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason, ") m=", id1, " lockedm=", id2, "\n")
+	}
+	unlock(&allglock)
+	unlock(&sched.lock)
+}
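checkdead (and tracebackothers below) classifies a goroutine by clearing the _Gscan bit from its status word with Go's bit-clear operator before comparing, since the GC ORs that bit onto the base status while it scans a stack. A standalone sketch of the masking, with illustrative constants standing in for the ones runtime2.go defines:

package main

import "fmt"

const (
	_Grunnable = 1      // illustrative base statuses
	_Grunning  = 2
	_Gscan     = 0x1000 // set while the g's stack is being scanned
)

func main() {
	s := uint32(_Gscan | _Grunning) // a running g mid-scan
	// s &^ _Gscan clears the scan bit and recovers the base status,
	// so the switch in checkdead sees _Grunning either way.
	fmt.Println(s&^_Gscan == _Grunning) // true
}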
diff --git a/libgo/go/runtime/runtime2.go b/libgo/go/runtime/runtime2.go
index 6686e1f29b3..571972c1a83 100644
--- a/libgo/go/runtime/runtime2.go
+++ b/libgo/go/runtime/runtime2.go
@@ -755,9 +755,13 @@ const _TracebackMaxFrames = 100
 var (
 	// emptystring string
-	// allglen uintptr
+
+	allglen uintptr
+
 	// allm *m
-	// allp [_MaxGomaxprocs + 1]*p
+
+	allp [_MaxGomaxprocs + 1]*p
+
 	// gomaxprocs int32

 	panicking uint32
diff --git a/libgo/go/runtime/stubs.go b/libgo/go/runtime/stubs.go
index c299ae0e8eb..3d184083d55 100644
--- a/libgo/go/runtime/stubs.go
+++ b/libgo/go/runtime/stubs.go
@@ -476,12 +476,6 @@ func UnlockOSThread()
 func lockOSThread()
 func unlockOSThread()
 func allm() *m
-func allgs() []*g
-
-//go:nosplit
-func readgstatus(gp *g) uint32 {
-	return atomic.Load(&gp.atomicstatus)
-}

 // Temporary for gccgo until we port malloc.go
 func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer
@@ -489,9 +483,6 @@ func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer
 // Temporary for gccgo until we port mheap.go
 func setprofilebucket(p unsafe.Pointer, b *bucket)

-// Currently in proc.c.
-func tracebackothers(*g)
-
 // Temporary for gccgo until we port mgc.go.
 func setgcpercent(int32) int32
@@ -530,9 +521,7 @@ func getZerobase() *uintptr {
 // Temporary for gccgo until we port proc.go.
 func sigprof()
 func mcount() int32
-func gcount() int32
 func goexit1()
-func schedtrace(bool)
 func freezetheworld()

 // Get signal trampoline, written in C.
@@ -562,6 +551,30 @@ func getCgoHasExtraM() *bool {
 	return &cgoHasExtraM
 }

+// Temporary for gccgo until we port proc.go.
+//go:linkname getAllP runtime.getAllP
+func getAllP() **p {
+	return &allp[0]
+}
+
+// Temporary for gccgo until we port proc.go.
+//go:linkname allocg runtime.allocg
+func allocg() *g {
+	return new(g)
+}
+
+// Temporary for gccgo until we port the garbage collector.
+//go:linkname getallglen runtime.getallglen
+func getallglen() uintptr {
+	return allglen
+}
+
+// Temporary for gccgo until we port the garbage collector.
+//go:linkname getallg runtime.getallg
+func getallg(i int) *g {
+	return allgs[i]
+}
+
 // Throw and rethrow an exception.
 func throwException()
 func rethrowException()
@@ -579,3 +592,27 @@ func getPanicking() uint32 {

 // Temporary for gccgo until we port mcache.go.
 func allocmcache() *mcache
+
+// Temporary for gccgo until we port mgc.go.
+// This is just so that allgadd will compile.
+var work struct {
+	rescan struct {
+		lock mutex
+		list []guintptr
+	}
+}
+
+// gcount is temporary for gccgo until more of proc.go is ported.
+// This is a copy of the C function we used to use.
+func gcount() int32 {
+	n := int32(0)
+	lock(&allglock)
+	for _, gp := range allgs {
+		s := readgstatus(gp)
+		if s == _Grunnable || s == _Grunning || s == _Gsyscall || s == _Gwaiting {
+			n++
+		}
+	}
+	unlock(&allglock)
+	return n
+}
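The getAllP/allocg/getallglen/getallg accessors work because //go:linkname gives a Go function an explicit linker symbol that the remaining C code can keep calling during the transition. The directive also works in the other direction, pulling a symbol into ordinary code; a minimal sketch that builds with classic gc toolchains (the choice of runtime.nanotime is illustrative, and newer Go releases restrict which symbols may be linknamed):

package main

import (
	_ "unsafe" // go:linkname requires importing unsafe
)

// Bind the local name nanotime to the runtime's monotonic clock.
//go:linkname nanotime runtime.nanotime
func nanotime() int64

func main() {
	println(nanotime()) // nanoseconds on the monotonic clock
}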
diff --git a/libgo/go/runtime/traceback_gccgo.go b/libgo/go/runtime/traceback_gccgo.go
index f61f9a0f2dd..611aba91a4d 100644
--- a/libgo/go/runtime/traceback_gccgo.go
+++ b/libgo/go/runtime/traceback_gccgo.go
@@ -171,3 +171,58 @@ func isSystemGoroutine(gp *g) bool {
 	// FIXME.
 	return false
 }
+
+func tracebackothers(me *g) {
+	var tb tracebackg
+	tb.gp = me
+
+	level, _, _ := gotraceback()
+
+	// Show the current goroutine first, if we haven't already.
+	g := getg()
+	gp := g.m.curg
+	if gp != nil && gp != me {
+		print("\n")
+		goroutineheader(gp)
+		gp.traceback = &tb
+		getTraceback(me, gp)
+		printtrace(tb.locbuf[:tb.c], nil)
+		printcreatedby(gp)
+	}
+
+	lock(&allglock)
+	for _, gp := range allgs {
+		if gp == me || gp == g.m.curg || readgstatus(gp) == _Gdead || isSystemGoroutine(gp) && level < 2 {
+			continue
+		}
+		print("\n")
+		goroutineheader(gp)
+
+		// gccgo's only mechanism for doing a stack trace is
+		// _Unwind_Backtrace. And that only works for the
+		// current thread, not for other random goroutines.
+		// So we need to switch context to the goroutine, get
+		// the backtrace, and then switch back.
+		//
+		// This means that if g is running or in a syscall, we
+		// can't reliably print a stack trace. FIXME.
+
+		// Note: gp.m == g.m occurs when tracebackothers is
+		// called from a signal handler initiated during a
+		// systemstack call. The original G is still in the
+		// running state, and we want to print its stack.
+		if gp.m != g.m && readgstatus(gp)&^_Gscan == _Grunning {
+			print("\tgoroutine running on other thread; stack unavailable\n")
+			printcreatedby(gp)
+		} else if readgstatus(gp)&^_Gscan == _Gsyscall {
+			print("\tgoroutine in C code; stack unavailable\n")
+			printcreatedby(gp)
+		} else {
+			gp.traceback = &tb
+			getTraceback(me, gp)
+			printtrace(tb.locbuf[:tb.c], nil)
+			printcreatedby(gp)
+		}
+	}
+	unlock(&allglock)
+}
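Outside the runtime, the portable way to get roughly what tracebackothers prints is runtime.Stack with all=true, which stops the world and formats every goroutine's stack; a small usage sketch:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	go func() { select {} }() // park a second goroutine so there is something to dump

	buf := make([]byte, 1<<16)
	n := runtime.Stack(buf, true) // all=true: include every goroutine, not just this one
	fmt.Printf("%s", buf[:n])
}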