Diffstat (limited to 'libgo/go/runtime/chan.go')
-rw-r--r-- | libgo/go/runtime/chan.go | 22
1 file changed, 22 insertions, 0 deletions
diff --git a/libgo/go/runtime/chan.go b/libgo/go/runtime/chan.go
index de1d80acd30..b909d478d26 100644
--- a/libgo/go/runtime/chan.go
+++ b/libgo/go/runtime/chan.go
@@ -267,6 +267,11 @@ func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool {
 	gp.waiting = mysg
 	gp.param = nil
 	c.sendq.enqueue(mysg)
+	// Signal to anyone trying to shrink our stack that we're about
+	// to park on a channel. The window between when this G's status
+	// changes and when we set gp.activeStackChans is not safe for
+	// stack shrinking.
+	atomic.Store8(&gp.parkingOnChan, 1)
 	gopark(chanparkcommit, unsafe.Pointer(&c.lock), waitReasonChanSend, traceEvGoBlockSend, 2)
 	// Ensure the value being sent is kept alive until the
 	// receiver copies it out. The sudog has a pointer to the
@@ -586,6 +591,11 @@ func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected, received bool)
 	mysg.c = c
 	gp.param = nil
 	c.recvq.enqueue(mysg)
+	// Signal to anyone trying to shrink our stack that we're about
+	// to park on a channel. The window between when this G's status
+	// changes and when we set gp.activeStackChans is not safe for
+	// stack shrinking.
+	atomic.Store8(&gp.parkingOnChan, 1)
 	gopark(chanparkcommit, unsafe.Pointer(&c.lock), waitReasonChanReceive, traceEvGoBlockRecv, 2)
 
 	// someone woke us up
@@ -663,7 +673,19 @@ func recv(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) {
 func chanparkcommit(gp *g, chanLock unsafe.Pointer) bool {
 	// There are unlocked sudogs that point into gp's stack. Stack
 	// copying must lock the channels of those sudogs.
+	// Set activeStackChans here instead of before we try parking
+	// because we could self-deadlock in stack growth on the
+	// channel lock.
 	gp.activeStackChans = true
+	// Mark that it's safe for stack shrinking to occur now,
+	// because any thread acquiring this G's stack for shrinking
+	// is guaranteed to observe activeStackChans after this store.
+	atomic.Store8(&gp.parkingOnChan, 0)
+	// Make sure we unlock after setting activeStackChans and
+	// unsetting parkingOnChan. The moment we unlock chanLock
+	// we risk gp getting readied by a channel operation and
+	// so gp could continue running before everything before
+	// the unlock is visible (even to gp itself).
 	unlock((*mutex)(chanLock))
 	return true
 }
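
The sketch below illustrates the ordering the patch relies on; it is not the runtime code. The fakeG type, the isShrinkSafe helper, and the uint32 flags are invented for the example (the real patch stores to a uint8 field of the g struct with the runtime-internal atomic.Store8), but the store order mirrors chansend/chanrecv and chanparkcommit above: the parking flag is raised before the unsafe window begins and is only cleared after activeStackChans has been published.

// stackshrink_sketch.go: illustrative only, assuming invented names
// fakeG and isShrinkSafe; not the Go runtime implementation.
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// fakeG stands in for the runtime's g struct; only the two fields the
// patch touches are modeled here.
type fakeG struct {
	parkingOnChan    uint32 // raised before parking on a channel
	activeStackChans uint32 // set once sudogs on the stack are published
}

// isShrinkSafe mimics the check a stack shrinker would make: shrinking
// is refused while the G is inside the unsafe window between enqueueing
// its sudog and setting activeStackChans.
func isShrinkSafe(gp *fakeG) bool {
	return atomic.LoadUint32(&gp.parkingOnChan) == 0
}

func main() {
	gp := &fakeG{}
	var wg sync.WaitGroup

	// "Channel op" side: announce the unsafe window, do what would
	// normally happen between enqueue and chanparkcommit, then publish
	// activeStackChans before clearing parkingOnChan.
	wg.Add(1)
	go func() {
		defer wg.Done()
		atomic.StoreUint32(&gp.parkingOnChan, 1)
		// ... sudog enqueued; the stack must not be shrunk here ...
		atomic.StoreUint32(&gp.activeStackChans, 1)
		atomic.StoreUint32(&gp.parkingOnChan, 0)
	}()

	// "Shrinker" side: it either sees parkingOnChan == 1 and backs off,
	// or it sees the flag already cleared, in which case the earlier
	// store to activeStackChans is also visible and it knows whether
	// channels with sudogs on this stack must be locked first.
	wg.Add(1)
	go func() {
		defer wg.Done()
		if !isShrinkSafe(gp) {
			fmt.Println("shrink skipped: G is parking on a channel")
			return
		}
		if atomic.LoadUint32(&gp.activeStackChans) == 1 {
			fmt.Println("shrink allowed: lock the channels holding this G's sudogs")
		} else {
			fmt.Println("shrink allowed: no channel sudogs on this stack")
		}
	}()

	wg.Wait()
}

Clearing the flag only after the activeStackChans store is what makes the race benign: any shrinker that observes parkingOnChan == 0 after the transition is guaranteed to also observe activeStackChans, which is the same reason chanparkcommit must finish both stores before releasing chanLock.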