author     ian <ian@138bc75d-0d04-0410-961f-82ee72b054a4>    2013-11-06 19:49:01 +0000
committer  ian <ian@138bc75d-0d04-0410-961f-82ee72b054a4>    2013-11-06 19:49:01 +0000
commit     0ce10ea1348e9afd5d0eec6bca986bfe58bac5ac (patch)
tree       39530b071991b2326f881b2a30a2d82d6c133fd6 /libgo/runtime/netpoll.goc
parent     57a8bf1b0c6057ccbacb0cf79eb84d1985c2c1fe (diff)
download   gcc-0ce10ea1348e9afd5d0eec6bca986bfe58bac5ac.tar.gz
libgo: Update to October 24 version of master library.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@204466 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'libgo/runtime/netpoll.goc')
-rw-r--r--  libgo/runtime/netpoll.goc  |  97
1 file changed, 69 insertions(+), 28 deletions(-)
diff --git a/libgo/runtime/netpoll.goc b/libgo/runtime/netpoll.goc
index a0bd735f85c..02705734dd8 100644
--- a/libgo/runtime/netpoll.goc
+++ b/libgo/runtime/netpoll.goc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin linux
+// +build darwin dragonfly freebsd linux netbsd openbsd windows
package net
@@ -19,7 +19,7 @@ package net
// Integrated network poller (platform-independent part).
// A particular implementation (epoll/kqueue) must define the following functions:
// void runtime_netpollinit(void); // to initialize the poller
-// int32 runtime_netpollopen(int32 fd, PollDesc *pd); // to arm edge-triggered notifications
+// int32 runtime_netpollopen(uintptr fd, PollDesc *pd); // to arm edge-triggered notifications
// and associate fd with pd.
// An implementation must call the following function to denote that the pd is ready.
// void runtime_netpollready(G **gpp, PollDesc *pd, int32 mode);
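The comment block above is the contract each platform-specific poller has to meet; this hunk widens the descriptor from int32 to uintptr so the same interface can carry Windows socket handles as well as Unix file descriptors. As a rough sketch only (the function bodies below are placeholders, not the libgo epoll/kqueue code), an implementation's entry points now have this shape:

// Sketch of a stub backend, not part of this commit: it only shows the
// shape of the two functions named in the comment above, with the fd now
// passed as uintptr.  PollDesc, int32 and uintptr come from the runtime headers.
void
runtime_netpollinit(void)
{
	// one-time setup of the OS polling object (epoll, kqueue, ...)
}

int32
runtime_netpollopen(uintptr fd, PollDesc *pd)
{
	// arm edge-triggered readiness notifications for fd and remember pd;
	// return 0 on success or an errno-style value on failure
	(void)fd;
	(void)pd;
	return 0;
}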
@@ -30,7 +30,7 @@ struct PollDesc
{
PollDesc* link; // in pollcache, protected by pollcache.Lock
Lock; // protects the following fields
- int32 fd;
+ uintptr fd;
bool closing;
uintptr seq; // protects from stale timers and ready notifications
G* rg; // G waiting for read or READY (binary semaphore)
@@ -52,8 +52,8 @@ static struct
// seq is incremented when deadlines are changed or descriptor is reused.
} pollcache;
-static void netpollblock(PollDesc*, int32);
-static G* netpollunblock(PollDesc*, int32);
+static bool netpollblock(PollDesc*, int32);
+static G* netpollunblock(PollDesc*, int32, bool);
static void deadline(int64, Eface);
static void readDeadline(int64, Eface);
static void writeDeadline(int64, Eface);
@@ -68,7 +68,7 @@ func runtime_pollServerInit() {
runtime_netpollinit();
}
-func runtime_pollOpen(fd int) (pd *PollDesc, errno int) {
+func runtime_pollOpen(fd uintptr) (pd *PollDesc, errno int) {
pd = allocPollDesc();
runtime_lock(pd);
if(pd->wg != nil && pd->wg != READY)
@@ -117,18 +117,35 @@ ret:
func runtime_pollWait(pd *PollDesc, mode int) (err int) {
runtime_lock(pd);
err = checkerr(pd, mode);
- if(err)
- goto ret;
- netpollblock(pd, mode);
- err = checkerr(pd, mode);
-ret:
+ if(err == 0) {
+ while(!netpollblock(pd, mode)) {
+ err = checkerr(pd, mode);
+ if(err != 0)
+ break;
+ // Can happen if timeout has fired and unblocked us,
+ // but before we had a chance to run, timeout has been reset.
+ // Pretend it has not happened and retry.
+ }
+ }
+ runtime_unlock(pd);
+}
+
+func runtime_pollWaitCanceled(pd *PollDesc, mode int) {
+ runtime_lock(pd);
+ // wait for ioready, ignore closing or timeouts.
+ while(!netpollblock(pd, mode))
+ ;
runtime_unlock(pd);
}
func runtime_pollSetDeadline(pd *PollDesc, d int64, mode int) {
+ G *rg, *wg;
+
runtime_lock(pd);
- if(pd->closing)
- goto ret;
+ if(pd->closing) {
+ runtime_unlock(pd);
+ return;
+ }
pd->seq++; // invalidate current timers
// Reset current timers.
if(pd->rt.fv) {
@@ -140,9 +157,8 @@ func runtime_pollSetDeadline(pd *PollDesc, d int64, mode int) {
pd->wt.fv = nil;
}
// Setup new timers.
- if(d != 0 && d <= runtime_nanotime()) {
+ if(d != 0 && d <= runtime_nanotime())
d = -1;
- }
if(mode == 'r' || mode == 'r'+'w')
pd->rd = d;
if(mode == 'w' || mode == 'r'+'w')
@@ -172,8 +188,18 @@ func runtime_pollSetDeadline(pd *PollDesc, d int64, mode int) {
runtime_addtimer(&pd->wt);
}
}
-ret:
+ // If we set the new deadline in the past, unblock currently pending IO if any.
+ rg = nil;
+ wg = nil;
+ if(pd->rd < 0)
+ rg = netpollunblock(pd, 'r', false);
+ if(pd->wd < 0)
+ wg = netpollunblock(pd, 'w', false);
runtime_unlock(pd);
+ if(rg)
+ runtime_ready(rg);
+ if(wg)
+ runtime_ready(wg);
}
func runtime_pollUnblock(pd *PollDesc) {
@@ -184,8 +210,8 @@ func runtime_pollUnblock(pd *PollDesc) {
runtime_throw("runtime_pollUnblock: already closing");
pd->closing = true;
pd->seq++;
- rg = netpollunblock(pd, 'r');
- wg = netpollunblock(pd, 'w');
+ rg = netpollunblock(pd, 'r', false);
+ wg = netpollunblock(pd, 'w', false);
if(pd->rt.fv) {
runtime_deltimer(&pd->rt);
pd->rt.fv = nil;
@@ -201,6 +227,12 @@ func runtime_pollUnblock(pd *PollDesc) {
runtime_ready(wg);
}
+uintptr
+runtime_netpollfd(PollDesc *pd)
+{
+ return pd->fd;
+}
+
// make pd ready, newly runnable goroutines (if any) are enqueued into the gpp list
void
runtime_netpollready(G **gpp, PollDesc *pd, int32 mode)
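runtime_netpollfd, added just above, is a small accessor so platform code outside this file can recover the descriptor stored in a PollDesc. A hypothetical caller might look like this (the helper name and the cancellation step are illustrative only, not taken from this commit):

// Hypothetical platform-side helper, for illustration only:
// runtime_netpollfd is the accessor added above, everything else is made up.
static void
cancel_pending_io(PollDesc *pd)
{
	uintptr fd;

	fd = runtime_netpollfd(pd);
	// hand fd to whatever OS-specific cancellation call the platform needs
	(void)fd;
}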
@@ -210,9 +242,9 @@ runtime_netpollready(G **gpp, PollDesc *pd, int32 mode)
rg = wg = nil;
runtime_lock(pd);
if(mode == 'r' || mode == 'r'+'w')
- rg = netpollunblock(pd, 'r');
+ rg = netpollunblock(pd, 'r', true);
if(mode == 'w' || mode == 'r'+'w')
- wg = netpollunblock(pd, 'w');
+ wg = netpollunblock(pd, 'w', true);
runtime_unlock(pd);
if(rg) {
rg->schedlink = *gpp;
@@ -234,7 +266,8 @@ checkerr(PollDesc *pd, int32 mode)
return 0;
}
-static void
+// returns true if IO is ready, or false if timed out or closed
+static bool
netpollblock(PollDesc *pd, int32 mode)
{
G **gpp;
@@ -244,17 +277,20 @@ netpollblock(PollDesc *pd, int32 mode)
gpp = &pd->wg;
if(*gpp == READY) {
*gpp = nil;
- return;
+ return true;
}
if(*gpp != nil)
- runtime_throw("epoll: double wait");
+ runtime_throw("netpollblock: double wait");
*gpp = runtime_g();
runtime_park(runtime_unlock, &pd->Lock, "IO wait");
runtime_lock(pd);
+ if(runtime_g()->param)
+ return true;
+ return false;
}
static G*
-netpollunblock(PollDesc *pd, int32 mode)
+netpollunblock(PollDesc *pd, int32 mode, bool ioready)
{
G **gpp, *old;
@@ -264,10 +300,15 @@ netpollunblock(PollDesc *pd, int32 mode)
if(*gpp == READY)
return nil;
if(*gpp == nil) {
- *gpp = READY;
+ // Only set READY for ioready. runtime_pollWait
+ // will check for timeout/cancel before waiting.
+ if(ioready)
+ *gpp = READY;
return nil;
}
old = *gpp;
+ // pass the unblock reason to the blocked g
+ old->param = (void*)(uintptr)ioready;
*gpp = nil;
return old;
}
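Taken together, the netpollblock and netpollunblock changes form a small handshake: the waking side records why the goroutine is being woken in its g->param, and the parked side inspects that value once runtime_park returns. A condensed restatement of the protocol shown in the two hunks above, for reading convenience only:

// waker side (netpollunblock): hand the reason to the parked G, if any
//	old = *gpp;
//	old->param = (void*)(uintptr)ioready;  // true: IO ready; false: timeout/close
//	*gpp = nil;
//	return old;                            // caller runtime_ready()s it
//
// sleeper side (netpollblock, after runtime_park returns):
//	if(runtime_g()->param)
//		return true;    // woken by real IO readiness
//	return false;           // timed out, canceled or closing; caller rechecks checkerr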
@@ -296,14 +337,14 @@ deadlineimpl(int64 now, Eface arg, bool read, bool write)
runtime_throw("deadlineimpl: inconsistent read deadline");
pd->rd = -1;
pd->rt.fv = nil;
- rg = netpollunblock(pd, 'r');
+ rg = netpollunblock(pd, 'r', false);
}
if(write) {
if(pd->wd <= 0 || (pd->wt.fv == nil && !read))
runtime_throw("deadlineimpl: inconsistent write deadline");
pd->wd = -1;
pd->wt.fv = nil;
- wg = netpollunblock(pd, 'w');
+ wg = netpollunblock(pd, 'w', false);
}
runtime_unlock(pd);
if(rg)
@@ -343,7 +384,7 @@ allocPollDesc(void)
n = 1;
// Must be in non-GC memory because it can be referenced
// only from epoll/kqueue internals.
- pd = runtime_SysAlloc(n*sizeof(*pd));
+ pd = runtime_persistentalloc(n*sizeof(*pd), 0, &mstats.other_sys);
for(i = 0; i < n; i++) {
pd[i].link = pollcache.first;
pollcache.first = &pd[i];