Diffstat (limited to 'pr/src/md/unix')
-rw-r--r--  pr/src/md/unix/Makefile  229
-rw-r--r--  pr/src/md/unix/aix.c  165
-rw-r--r--  pr/src/md/unix/aixwrap.c  46
-rw-r--r--  pr/src/md/unix/bsdi.c  93
-rw-r--r--  pr/src/md/unix/freebsd.c  232
-rw-r--r--  pr/src/md/unix/hpux.c  334
-rw-r--r--  pr/src/md/unix/irix.c  1308
-rw-r--r--  pr/src/md/unix/linux.c  230
-rw-r--r--  pr/src/md/unix/ncr.c  367
-rw-r--r--  pr/src/md/unix/nec.c  81
-rw-r--r--  pr/src/md/unix/objs.mk  176
-rw-r--r--  pr/src/md/unix/os_AIX.s  102
-rw-r--r--  pr/src/md/unix/os_BSD_386_2.s  54
-rw-r--r--  pr/src/md/unix/os_ReliantUNIX.s  108
-rw-r--r--  pr/src/md/unix/os_SunOS.s  51
-rw-r--r--  pr/src/md/unix/os_SunOS_ultrasparc.s  157
-rw-r--r--  pr/src/md/unix/os_SunOS_x86.s  60
-rw-r--r--  pr/src/md/unix/osf1.c  88
-rw-r--r--  pr/src/md/unix/pthreads_user.c  465
-rw-r--r--  pr/src/md/unix/reliantunix.c  114
-rw-r--r--  pr/src/md/unix/scoos.c  154
-rw-r--r--  pr/src/md/unix/solaris.c  808
-rw-r--r--  pr/src/md/unix/sony.c  90
-rw-r--r--  pr/src/md/unix/sunos4.c  77
-rw-r--r--  pr/src/md/unix/unix.c  3210
-rw-r--r--  pr/src/md/unix/unix_errors.c  1484
-rw-r--r--  pr/src/md/unix/unixware.c  548
-rw-r--r--  pr/src/md/unix/uxproces.c  715
-rw-r--r--  pr/src/md/unix/uxwrap.c  518
29 files changed, 12064 insertions, 0 deletions
diff --git a/pr/src/md/unix/Makefile b/pr/src/md/unix/Makefile
new file mode 100644
index 00000000..2c3a3f77
--- /dev/null
+++ b/pr/src/md/unix/Makefile
@@ -0,0 +1,229 @@
+#
+# The contents of this file are subject to the Netscape Public License
+# Version 1.0 (the "NPL"); you may not use this file except in
+# compliance with the NPL. You may obtain a copy of the NPL at
+# http://www.mozilla.org/NPL/
+#
+# Software distributed under the NPL is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the NPL
+# for the specific language governing rights and limitations under the
+# NPL.
+#
+# The Initial Developer of this code under the NPL is Netscape
+# Communications Corporation. Portions created by Netscape are
+# Copyright (C) 1998 Netscape Communications Corporation. All Rights
+# Reserved.
+#
+
+MOD_DEPTH = ../../../..
+
+include $(MOD_DEPTH)/config/config.mk
+
+# Disable optimization of the nspr on SunOS4.1.3
+ifeq ($(OS_ARCH),SunOS)
+ifeq ($(OS_RELEASE),4.1.3_U1)
+OPTIMIZER =
+endif
+endif
+
+CSRCS = \
+ unix.c \
+ unix_errors.c \
+ uxproces.c \
+ uxwrap.c \
+ $(NULL)
+
+PTH_USER_CSRCS = \
+ pthreads_user.c \
+ $(NULL)
+
+IRIX_CSRCS = \
+ irix.c \
+ $(NULL)
+
+SUNOS4_CSRCS = \
+ sunos4.c \
+ $(NULL)
+
+SOLARIS_CSRCS = \
+ solaris.c \
+ $(NULL)
+
+AIX_CSRCS = \
+ aix.c \
+ $(NULL)
+
+FREEBSD_CSRCS = \
+ freebsd.c \
+ $(NULL)
+
+BSDI_CSRCS = \
+ bsdi.c \
+ $(NULL)
+
+HPUX_CSRCS = \
+ hpux.c \
+ $(NULL)
+
+OSF1_CSRCS = \
+ osf1.c \
+ $(NULL)
+
+LINUX_CSRCS = \
+ linux.c \
+ $(NULL)
+
+UNIXWARE_CSRCS = \
+ unixware.c \
+ $(NULL)
+
+RELIANTUNIX_CSRCS = \
+ reliantunix.c \
+ $(NULL)
+
+NEC_CSRCS = \
+ nec.c \
+ $(NULL)
+
+SONY_CSRCS = \
+ sony.c \
+ $(NULL)
+
+NCR_CSRCS = \
+ ncr.c \
+ $(NULL)
+
+SCOOS_CSRCS = \
+ scoos.c \
+ $(NULL)
+
+ifeq ($(PTHREADS_USER),1)
+CSRCS += $(PTH_USER_CSRCS)
+endif
+
+ifeq ($(OS_ARCH),IRIX)
+CSRCS += $(IRIX_CSRCS)
+endif
+
+ifeq ($(OS_ARCH),SunOS)
+ifeq ($(OS_RELEASE),4.1.3_U1)
+CSRCS += $(SUNOS4_CSRCS)
+else
+CSRCS += $(SOLARIS_CSRCS)
+endif
+endif
+
+ifeq ($(OS_ARCH),AIX)
+CSRCS += $(AIX_CSRCS)
+endif
+ifeq ($(OS_ARCH),FreeBSD)
+CSRCS += $(FREEBSD_CSRCS)
+endif
+ifeq ($(OS_ARCH),BSD_386)
+CSRCS += $(BSDI_CSRCS)
+endif
+ifeq ($(OS_ARCH),HP-UX)
+CSRCS += $(HPUX_CSRCS)
+endif
+ifeq ($(OS_ARCH),OSF1)
+CSRCS += $(OSF1_CSRCS)
+endif
+ifeq ($(OS_ARCH),Linux)
+CSRCS += $(LINUX_CSRCS)
+endif
+ifeq ($(OS_ARCH),UNIXWARE)
+CSRCS += $(UNIXWARE_CSRCS)
+endif
+ifeq ($(OS_ARCH),ReliantUNIX)
+CSRCS += $(RELIANTUNIX_CSRCS)
+endif
+ifeq ($(OS_ARCH),NEC)
+CSRCS += $(NEC_CSRCS)
+endif
+ifeq ($(OS_ARCH),NEWS-OS)
+CSRCS += $(SONY_CSRCS)
+endif
+ifeq ($(OS_ARCH),NCR)
+CSRCS += $(NCR_CSRCS)
+endif
+ifeq ($(OS_ARCH),SCO_SV)
+CSRCS += $(SCOOS_CSRCS)
+endif
+
+#
+# Some Unix platforms have an assembly language file.
+# E.g., AIX 3.2, Solaris (both sparc and x86).
+#
+ifeq ($(OS_ARCH), AIX)
+ ifeq ($(OS_RELEASE), 3.2)
+ ASFILES = os_$(OS_ARCH).s
+ endif
+endif
+
+ifeq ($(OS_ARCH),SunOS)
+ ifeq ($(OS_TEST),i86pc)
+ ASFILES = os_$(OS_ARCH)_x86.s
+ else
+ ifneq ($(OS_RELEASE),4.1.3_U1)
+ ASFILES = os_$(OS_ARCH).s
+ endif
+ endif
+endif
+
+ifeq ($(OS_ARCH), ReliantUNIX)
+ ASFILES = os_$(OS_ARCH).s
+endif
+
+ifeq ($(OS_ARCH)$(OS_RELEASE),BSD_3862.1)
+ ASFILES = os_BSD_386_2.s
+endif
+
+TARGETS = $(OBJS)
+
+ifeq ($(OS_ARCH),AIX)
+ifneq ($(OS_RELEASE),4.2)
+ifneq ($(USE_PTHREADS), 1)
+#TARGETS += $(OBJDIR)/aixwrap.o
+endif
+endif
+endif
+
+ifeq ($(OS_ARCH),SunOS)
+ ifneq ($(OS_RELEASE),4.1.3_U1)
+ ifeq ($(OS_TEST),sun4u)
+ LIBRARY_NAME = $(ULTRASPARC_LIBRARY)
+ LIBRARY_VERSION = $(MOD_VERSION)
+ ULTRASPARC_ASFILES = os_$(OS_ARCH)_ultrasparc.s
+ ULTRASPARC_ASOBJS = $(addprefix $(OBJDIR)/,$(ULTRASPARC_ASFILES:.s=.o))
+ TARGETS += $(ULTRASPARC_ASOBJS) $(SHARED_LIBRARY)
+ endif
+ endif
+endif
+
+INCLUDES = -I$(DIST)/include/private -I$(DIST)/include
+
+include $(MOD_DEPTH)/config/rules.mk
+
+export:: $(TARGETS)
+#ifeq ($(OS_ARCH),AIX)
+#ifneq ($(OS_RELEASE),4.2)
+#ifneq ($(USE_PTHREADS), 1)
+# $(INSTALL) -m 444 $(OBJDIR)/aixwrap.o $(DIST)/lib
+#endif
+#endif
+#endif
+ifeq ($(OS_ARCH),SunOS)
+ifneq ($(OS_RELEASE),4.1.3_U1)
+ifeq ($(OS_TEST),sun4u)
+$(SHARED_LIBRARY): $(ULTRASPARC_ASOBJS)
+ $(LD) -G -z text -o $@ $(ULTRASPARC_ASOBJS)
+ $(INSTALL) -m 444 $(SHARED_LIBRARY) $(DIST)/lib
+
+$(ULTRASPARC_ASOBJS): $(ULTRASPARC_ASFILES)
+ as -o $@ -K PIC -P -D_ASM -D__STDC__=0 -xarch=v8plus $(ULTRASPARC_ASFILES)
+endif
+endif
+endif
+
+
+install:: export
diff --git a/pr/src/md/unix/aix.c b/pr/src/md/unix/aix.c
new file mode 100644
index 00000000..8ae7e303
--- /dev/null
+++ b/pr/src/md/unix/aix.c
@@ -0,0 +1,165 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/*
+ * The contents of this file are subject to the Netscape Public License
+ * Version 1.0 (the "NPL"); you may not use this file except in
+ * compliance with the NPL. You may obtain a copy of the NPL at
+ * http://www.mozilla.org/NPL/
+ *
+ * Software distributed under the NPL is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the NPL
+ * for the specific language governing rights and limitations under the
+ * NPL.
+ *
+ * The Initial Developer of this code under the NPL is Netscape
+ * Communications Corporation. Portions created by Netscape are
+ * Copyright (C) 1998 Netscape Communications Corporation. All Rights
+ * Reserved.
+ */
+
+#include "primpl.h"
+
+/*
+ * NSPR 2.0 overrides the system select() and poll() functions.
+ * On AIX 4.2, we use dlopen("/unix", 0) and dlsym() to get to the
+ * original system select() and poll() functions.
+ */
+
+#ifndef AIX4_1
+
+#include <sys/select.h>
+#include <sys/poll.h>
+#include <dlfcn.h>
+
+static void *aix_handle = NULL;
+static int (*aix_select_fcn)() = NULL;
+static int (*aix_poll_fcn)() = NULL;
+
+int _MD_SELECT(int width, fd_set *r, fd_set *w, fd_set *e, struct timeval *t)
+{
+ int rv;
+
+ if (!aix_select_fcn) {
+ if (!aix_handle) {
+ aix_handle = dlopen("/unix",0);
+ if (!aix_handle) {
+ PR_SetError(PR_UNKNOWN_ERROR, 0);
+ return -1;
+ }
+ }
+ aix_select_fcn = (int(*)())dlsym(aix_handle,"select");
+ if (!aix_select_fcn) {
+ PR_SetError(PR_UNKNOWN_ERROR, 0);
+ return -1;
+ }
+ }
+ rv = (*aix_select_fcn)(width, r, w, e, t);
+ return rv;
+}
+
+int _MD_POLL(void *listptr, unsigned long nfds, long timeout)
+{
+ int rv;
+
+ if (!aix_poll_fcn) {
+ if (!aix_handle) {
+ aix_handle = dlopen("/unix",0);
+ if (!aix_handle) {
+ PR_SetError(PR_UNKNOWN_ERROR, 0);
+ return -1;
+ }
+ }
+ aix_poll_fcn = (int(*)())dlsym(aix_handle,"poll");
+ if (!aix_poll_fcn) {
+ PR_SetError(PR_UNKNOWN_ERROR, 0);
+ return -1;
+ }
+ }
+ rv = (*aix_poll_fcn)(listptr, nfds, timeout);
+ return rv;
+}
+
+#else
+
+/*
+ * In AIX versions prior to 4.2, we use the two-step rename/link trick.
+ * The binary must contain at least one "poll" symbol for linker's rename
+ * to work. So we must have this dummy function that references poll().
+ */
+#include <sys/poll.h>
+void _pr_aix_dummy()
+{
+ poll(0,0,0);
+}
+
+#endif /* !AIX4_1 */
+
+#if !defined(PTHREADS_USER)
+
+void _MD_EarlyInit(void)
+{
+}
+
+PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
+{
+#ifndef _PR_PTHREADS
+ if (isCurrent) {
+ (void) setjmp(CONTEXT(t));
+ }
+ *np = sizeof(CONTEXT(t)) / sizeof(PRWord);
+ return (PRWord *) CONTEXT(t);
+#else
+ *np = 0;
+ return NULL;
+#endif
+}
+
+#ifndef _PR_PTHREADS
+PR_IMPLEMENT(void)
+_MD_SET_PRIORITY(_MDThread *thread, PRUintn newPri)
+{
+ return;
+}
+
+PR_IMPLEMENT(PRStatus)
+_MD_InitializeThread(PRThread *thread)
+{
+ return PR_SUCCESS;
+}
+
+PR_IMPLEMENT(PRStatus)
+_MD_WAIT(PRThread *thread, PRIntervalTime ticks)
+{
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ _PR_MD_SWITCH_CONTEXT(thread);
+ return PR_SUCCESS;
+}
+
+PR_IMPLEMENT(PRStatus)
+_MD_WAKEUP_WAITER(PRThread *thread)
+{
+ if (thread) {
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ }
+ return PR_SUCCESS;
+}
+
+/* These functions should not be called for AIX */
+PR_IMPLEMENT(void)
+_MD_YIELD(void)
+{
+ PR_NOT_REACHED("_MD_YIELD should not be called for AIX.");
+}
+
+PR_IMPLEMENT(PRStatus)
+_MD_CREATE_THREAD(
+ PRThread *thread,
+ void (*start) (void *),
+ PRThreadPriority priority,
+ PRThreadScope scope,
+ PRThreadState state,
+ PRUint32 stackSize)
+{
+ PR_NOT_REACHED("_MD_CREATE_THREAD should not be called for AIX.");
+}
+#endif /* _PR_PTHREADS */
+#endif /* PTHREADS_USER */
diff --git a/pr/src/md/unix/aixwrap.c b/pr/src/md/unix/aixwrap.c
new file mode 100644
index 00000000..e5c0a630
--- /dev/null
+++ b/pr/src/md/unix/aixwrap.c
@@ -0,0 +1,46 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/*
+ * The contents of this file are subject to the Netscape Public License
+ * Version 1.0 (the "NPL"); you may not use this file except in
+ * compliance with the NPL. You may obtain a copy of the NPL at
+ * http://www.mozilla.org/NPL/
+ *
+ * Software distributed under the NPL is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the NPL
+ * for the specific language governing rights and limitations under the
+ * NPL.
+ *
+ * The Initial Developer of this code under the NPL is Netscape
+ * Communications Corporation. Portions created by Netscape are
+ * Copyright (C) 1998 Netscape Communications Corporation. All Rights
+ * Reserved.
+ */
+
+/*
+ * File: aixwrap.c
+ * Description:
+ * This file contains a single function, _MD_SELECT(), which simply
+ * invokes the select() function. This file is used in an ugly
+ * hack to override the system select() function on AIX releases
+ * prior to 4.2. (On AIX 4.2, we use a different mechanism to
+ * override select().)
+ */
+
+#ifndef AIX4_1
+#error aixwrap.c should only be used on AIX 4.1
+#else
+
+#include <sys/select.h>
+#include <sys/poll.h>
+
+int _MD_SELECT(int width, fd_set *r, fd_set *w, fd_set *e, struct timeval *t)
+{
+ return select(width, r, w, e, t);
+}
+
+int _MD_POLL(void *listptr, unsigned long nfds, long timeout)
+{
+ return poll(listptr, nfds, timeout);
+}
+
+#endif /* AIX4_1 */
diff --git a/pr/src/md/unix/bsdi.c b/pr/src/md/unix/bsdi.c
new file mode 100644
index 00000000..dae92a47
--- /dev/null
+++ b/pr/src/md/unix/bsdi.c
@@ -0,0 +1,93 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/*
+ * The contents of this file are subject to the Netscape Public License
+ * Version 1.0 (the "NPL"); you may not use this file except in
+ * compliance with the NPL. You may obtain a copy of the NPL at
+ * http://www.mozilla.org/NPL/
+ *
+ * Software distributed under the NPL is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the NPL
+ * for the specific language governing rights and limitations under the
+ * NPL.
+ *
+ * The Initial Developer of this code under the NPL is Netscape
+ * Communications Corporation. Portions created by Netscape are
+ * Copyright (C) 1998 Netscape Communications Corporation. All Rights
+ * Reserved.
+ */
+
+#include "primpl.h"
+
+#include <signal.h>
+
+void _MD_EarlyInit(void)
+{
+ /*
+ * Ignore FPE because coercion of a NaN to an int causes SIGFPE
+ * to be raised.
+ */
+ struct sigaction act;
+
+ act.sa_handler = SIG_IGN;
+ sigemptyset(&act.sa_mask);
+ act.sa_flags = SA_RESTART;
+ sigaction(SIGFPE, &act, 0);
+}
+
+PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
+{
+ if (isCurrent) {
+ (void) setjmp(CONTEXT(t));
+ }
+ *np = sizeof(CONTEXT(t)) / sizeof(PRWord);
+ return (PRWord *) CONTEXT(t);
+}
+
+void
+_MD_SET_PRIORITY(_MDThread *thread, PRUintn newPri)
+{
+ return;
+}
+
+PRStatus
+_MD_InitializeThread(PRThread *thread)
+{
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAIT(PRThread *thread, PRIntervalTime ticks)
+{
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ _PR_MD_SWITCH_CONTEXT(thread);
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAKEUP_WAITER(PRThread *thread)
+{
+ if (thread) {
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ }
+ return PR_SUCCESS;
+}
+
+/* These functions should not be called for BSDI */
+void
+_MD_YIELD(void)
+{
+ PR_NOT_REACHED("_MD_YIELD should not be called for BSDI.");
+}
+
+PRStatus
+_MD_CREATE_THREAD(
+ PRThread *thread,
+ void (*start) (void *),
+ PRThreadPriority priority,
+ PRThreadScope scope,
+ PRThreadState state,
+ PRUint32 stackSize)
+{
+ PR_NOT_REACHED("_MD_CREATE_THREAD should not be called for BSDI.");
+ return PR_FAILURE;
+}
diff --git a/pr/src/md/unix/freebsd.c b/pr/src/md/unix/freebsd.c
new file mode 100644
index 00000000..2cedf308
--- /dev/null
+++ b/pr/src/md/unix/freebsd.c
@@ -0,0 +1,232 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/*
+ * The contents of this file are subject to the Netscape Public License
+ * Version 1.0 (the "NPL"); you may not use this file except in
+ * compliance with the NPL. You may obtain a copy of the NPL at
+ * http://www.mozilla.org/NPL/
+ *
+ * Software distributed under the NPL is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the NPL
+ * for the specific language governing rights and limitations under the
+ * NPL.
+ *
+ * The Initial Developer of this code under the NPL is Netscape
+ * Communications Corporation. Portions created by Netscape are
+ * Copyright (C) 1998 Netscape Communications Corporation. All Rights
+ * Reserved.
+ */
+
+#include "primpl.h"
+
+#include <signal.h>
+
+void _MD_EarlyInit(void)
+{
+ /*
+ * Ignore FPE because coercion of a NaN to an int causes SIGFPE
+ * to be raised.
+ */
+ struct sigaction act;
+
+ act.sa_handler = SIG_IGN;
+ sigemptyset(&act.sa_mask);
+ act.sa_flags = SA_RESTART;
+ sigaction(SIGFPE, &act, 0);
+}
+
+PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
+{
+#ifndef _PR_PTHREADS
+ if (isCurrent) {
+ (void) sigsetjmp(CONTEXT(t), 1);
+ }
+ *np = sizeof(CONTEXT(t)) / sizeof(PRWord);
+ return (PRWord *) CONTEXT(t);
+#else
+ *np = 0;
+ return NULL;
+#endif
+}
+
+#ifndef _PR_PTHREADS
+void
+_MD_SET_PRIORITY(_MDThread *thread, PRUintn newPri)
+{
+ return;
+}
+
+PRStatus
+_MD_InitializeThread(PRThread *thread)
+{
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAIT(PRThread *thread, PRIntervalTime ticks)
+{
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ _PR_MD_SWITCH_CONTEXT(thread);
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAKEUP_WAITER(PRThread *thread)
+{
+ if (thread) {
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ }
+ return PR_SUCCESS;
+}
+
+/* These functions should not be called for FreeBSD */
+void
+_MD_YIELD(void)
+{
+ PR_NOT_REACHED("_MD_YIELD should not be called for FreeBSD.");
+}
+
+PRStatus
+_MD_CREATE_THREAD(
+ PRThread *thread,
+ void (*start) (void *),
+ PRThreadPriority priority,
+ PRThreadScope scope,
+ PRThreadState state,
+ PRUint32 stackSize)
+{
+ PR_NOT_REACHED("_MD_CREATE_THREAD should not be called for OSF1.");
+ return PR_FAILURE;
+}
+#endif /* ! _PR_PTHREADS */
+
+#if defined(_PR_NEED_FAKE_POLL)
+
+#include <fcntl.h>
+
+int poll(struct pollfd *filedes, unsigned long nfds, int timeout)
+{
+ int i;
+ int rv;
+ int maxfd;
+ fd_set rd, wr, ex;
+ struct timeval tv, *tvp;
+
+ if (timeout < 0 && timeout != -1) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (timeout == -1) {
+ tvp = NULL;
+ } else {
+ tv.tv_sec = timeout / 1000;
+ tv.tv_usec = (timeout % 1000) * 1000;
+ tvp = &tv;
+ }
+
+ maxfd = -1;
+ FD_ZERO(&rd);
+ FD_ZERO(&wr);
+ FD_ZERO(&ex);
+
+ for (i = 0; i < nfds; i++) {
+ int osfd = filedes[i].fd;
+ int events = filedes[i].events;
+ PRBool fdHasEvent = PR_FALSE;
+
+ if (osfd < 0) {
+ continue; /* Skip this osfd. */
+ }
+
+ /*
+ * Map the poll events to the select() fd_sets.
+ * POLLIN, POLLRDNORM ===> read set
+ * POLLOUT, POLLWRNORM ===> write set
+ * POLLPRI, POLLRDBAND ===> exception set
+ * POLLNORM, POLLWRBAND (and POLLMSG on some platforms)
+ * are ignored.
+ *
+ * The output events POLLERR and POLLHUP are never turned on.
+ * POLLNVAL may be turned on.
+ */
+
+ if (events & (POLLIN | POLLRDNORM)) {
+ FD_SET(osfd, &rd);
+ fdHasEvent = PR_TRUE;
+ }
+ if (events & (POLLOUT | POLLWRNORM)) {
+ FD_SET(osfd, &wr);
+ fdHasEvent = PR_TRUE;
+ }
+ if (events & (POLLPRI | POLLRDBAND)) {
+ FD_SET(osfd, &ex);
+ fdHasEvent = PR_TRUE;
+ }
+ if (fdHasEvent && osfd > maxfd) {
+ maxfd = osfd;
+ }
+ }
+
+ rv = select(maxfd + 1, &rd, &wr, &ex, tvp);
+
+ /* Compute poll results */
+ if (rv > 0) {
+ rv = 0;
+ for (i = 0; i < nfds; i++) {
+ PRBool fdHasEvent = PR_FALSE;
+
+ filedes[i].revents = 0;
+ if (filedes[i].fd < 0) {
+ continue;
+ }
+ if (FD_ISSET(filedes[i].fd, &rd)) {
+ if (filedes[i].events & POLLIN) {
+ filedes[i].revents |= POLLIN;
+ }
+ if (filedes[i].events & POLLRDNORM) {
+ filedes[i].revents |= POLLRDNORM;
+ }
+ fdHasEvent = PR_TRUE;
+ }
+ if (FD_ISSET(filedes[i].fd, &wr)) {
+ if (filedes[i].events & POLLOUT) {
+ filedes[i].revents |= POLLOUT;
+ }
+ if (filedes[i].events & POLLWRNORM) {
+ filedes[i].revents |= POLLWRNORM;
+ }
+ fdHasEvent = PR_TRUE;
+ }
+ if (FD_ISSET(filedes[i].fd, &ex)) {
+ if (filedes[i].events & POLLPRI) {
+ filedes[i].revents |= POLLPRI;
+ }
+ if (filedes[i].events & POLLRDBAND) {
+ filedes[i].revents |= POLLRDBAND;
+ }
+ fdHasEvent = PR_TRUE;
+ }
+ if (fdHasEvent) {
+ rv++;
+ }
+ }
+ PR_ASSERT(rv > 0);
+ } else if (rv == -1 && errno == EBADF) {
+ rv = 0;
+ for (i = 0; i < nfds; i++) {
+ filedes[i].revents = 0;
+ if (filedes[i].fd < 0) {
+ continue;
+ }
+ if (fcntl(filedes[i].fd, F_GETFL, 0) == -1) {
+ filedes[i].revents = POLLNVAL;
+ rv++;
+ }
+ }
+ PR_ASSERT(rv > 0);
+ }
+ PR_ASSERT(-1 != timeout || rv != 0);
+
+ return rv;
+}
+#endif /* _PR_NEED_FAKE_POLL */
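
A minimal usage sketch of the poll() emulation above, assuming struct pollfd
and POLLIN are visible from <poll.h> (on systems old enough to actually need
_PR_NEED_FAKE_POLL they come from NSPR's private headers instead); the pipe
and the 1000 ms timeout are purely illustrative:

    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        int fds[2];
        struct pollfd pfd;

        if (pipe(fds) != 0)
            return 1;
        (void) write(fds[1], "x", 1);       /* make the read end ready */

        pfd.fd = fds[0];
        pfd.events = POLLIN;
        pfd.revents = 0;

        /* with _PR_NEED_FAKE_POLL defined this resolves to the select()-based
         * emulation above and should report POLLIN well before the timeout */
        if (poll(&pfd, 1, 1000) == 1 && (pfd.revents & POLLIN))
            printf("read end is ready\n");
        return 0;
    }
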
diff --git a/pr/src/md/unix/hpux.c b/pr/src/md/unix/hpux.c
new file mode 100644
index 00000000..701a854b
--- /dev/null
+++ b/pr/src/md/unix/hpux.c
@@ -0,0 +1,334 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/*
+ * The contents of this file are subject to the Netscape Public License
+ * Version 1.0 (the "NPL"); you may not use this file except in
+ * compliance with the NPL. You may obtain a copy of the NPL at
+ * http://www.mozilla.org/NPL/
+ *
+ * Software distributed under the NPL is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the NPL
+ * for the specific language governing rights and limitations under the
+ * NPL.
+ *
+ * The Initial Developer of this code under the NPL is Netscape
+ * Communications Corporation. Portions created by Netscape are
+ * Copyright (C) 1998 Netscape Communications Corporation. All Rights
+ * Reserved.
+ */
+
+#include "primpl.h"
+#if defined(HPUX10_30) || defined(HPUX11)
+/* for fesettrapenable */
+#include <fenv.h>
+#else
+/* for fpsetmask */
+#include <math.h>
+#endif
+#include <setjmp.h>
+#include <signal.h>
+#include <values.h>
+
+/*
+** On HP-UX we need to define a SIGFPE handler because coercion of a
+** NaN to an int causes SIGFPE to be raised. Thanks to Marianne
+** Mueller and Doug Priest at SunSoft for this fix.
+**
+** Under DCE threads, sigaction() installs a per-thread signal handler,
+** so we use the sigvector() interface to install a process-wide
+** handler.
+*/
+
+#ifdef _PR_DCETHREADS
+static void
+CatchFPE(int sig, int code, struct sigcontext *scp)
+{
+ unsigned i, e;
+ int r, t;
+ int *source, *destination;
+
+ /* check excepting instructions */
+ for ( i = 0; i < 7; i++ ) {
+ e = *(i+&(scp->sc_sl.sl_ss.ss_frexcp1));
+ if ( e & 0xfc000000 != 0 ) {
+ if ((e & 0xf4017720) == 0x24010200) {
+ r = ((e >> 20) & 0x3e);
+ t = (e & 0x1f) << 1;
+ if (e & 0x08000000) {
+ r |= (e >> 7) & 1;
+ t |= (e >> 6) & 1;
+ }
+ source = (int *)(&scp->sc_sl.sl_ss.ss_frstat + r);
+ destination = (int *)(&scp->sc_sl.sl_ss.ss_frstat + t);
+ *destination = *source < 0 ? -MAXINT-1 : MAXINT;
+ }
+ }
+ *(i+&(scp->sc_sl.sl_ss.ss_frexcp1)) = 0;
+ }
+
+ /* clear T-bit */
+ scp->sc_sl.sl_ss.ss_frstat &= ~0x40;
+}
+#else /* _PR_DCETHREADS */
+static void
+CatchFPE(int sig, siginfo_t *info, void *context)
+{
+ ucontext_t *ucp = (ucontext_t *) context;
+ unsigned i, e;
+ int r, t;
+ int *source, *destination;
+
+ /* check excepting instructions */
+ for ( i = 0; i < 7; i++ ) {
+ e = *(i+&(ucp->uc_mcontext.ss_frexcp1));
+ if ( e & 0xfc000000 != 0 ) {
+ if ((e & 0xf4017720) == 0x24010200) {
+ r = ((e >> 20) & 0x3e);
+ t = (e & 0x1f) << 1;
+ if (e & 0x08000000) {
+ r |= (e >> 7) & 1;
+ t |= (e >> 6) & 1;
+ }
+ source = (int *)(&ucp->uc_mcontext.ss_frstat + r);
+ destination = (int *)(&ucp->uc_mcontext.ss_frstat + t);
+ *destination = *source < 0 ? -MAXINT-1 : MAXINT;
+ }
+ }
+ *(i+&(ucp->uc_mcontext.ss_frexcp1)) = 0;
+ }
+
+ /* clear T-bit */
+ ucp->uc_mcontext.ss_frstat &= ~0x40;
+}
+#endif /* _PR_DCETHREADS */
+
+void _MD_hpux_install_sigfpe_handler(void)
+{
+#ifdef _PR_DCETHREADS
+ struct sigvec v;
+
+ v.sv_handler = CatchFPE;
+ v.sv_mask = 0;
+ v.sv_flags = 0;
+ sigvector(SIGFPE, &v, NULL);
+#else
+ struct sigaction act;
+
+ sigaction(SIGFPE, NULL, &act);
+ act.sa_flags |= SA_SIGINFO;
+ act.sa_sigaction = CatchFPE;
+ sigaction(SIGFPE, &act, NULL);
+#endif /* _PR_DCETHREADS */
+
+#if defined(HPUX10_30) || defined(HPUX11)
+ fesettrapenable(FE_INVALID);
+#else
+ fpsetmask(FP_X_INV);
+#endif
+}
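
For context, a small sketch of the coercion this handler guards against; it is
illustrative only. With the invalid-operation trap enabled as above, both the
0/0 division and the cast raise SIGFPE on PA-RISC and CatchFPE substitutes
MAXINT or -MAXINT-1; on most other platforms the program just prints an
unspecified value:

    #include <stdio.h>

    int main(void)
    {
        volatile double zero = 0.0;
        double not_a_number = zero / zero;   /* quiet NaN */
        int coerced = (int) not_a_number;    /* coercion that traps on HP-UX */

        printf("NaN coerced to int: %d\n", coerced);
        return 0;
    }
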
+
+#if !defined(PTHREADS_USER)
+
+void _MD_EarlyInit(void)
+{
+ _MD_hpux_install_sigfpe_handler();
+
+#ifndef _PR_PTHREADS
+ /*
+ * The following piece of code is taken from ns/nspr/src/md_HP-UX.c.
+ * In the comment for revision 1.6, dated 1995/09/11 23:33:34,
+ * robm says:
+ * This version has some problems which need to be addressed.
+ * First, intercept all system calls and prevent them from
+ * executing the library code which performs stack switches
+ * before normal system call invocation. In order for library
+ * calls which make system calls to work (like stdio), however,
+ * we must also allocate our own stack and switch the primordial
+ * stack to use it. This isn't so bad, except that I fudged the
+ * backtrace length when copying the old stack to the new one.
+ *
+ * This is the original comment of robm in the code:
+ * XXXrobm Horrific. To avoid a problem with HP's system call
+ * code, we allocate a new stack for the primordial thread and
+ * use it. However, we don't know how far back the original stack
+ * goes. We should create a routine that performs a backtrace and
+ * finds out just how much we need to copy. As a temporary measure,
+ * I just copy an arbitrary guess.
+ *
+ * In an email to servereng dated 2 Jan 1997, Mike Patnode (mikep)
+ * suggests that this only needs to be done for HP-UX 9.
+ */
+#ifdef HPUX9
+#define PIDOOMA_STACK_SIZE 524288
+#define BACKTRACE_SIZE 8192
+ {
+ jmp_buf jb;
+ char *newstack;
+ char *oldstack;
+
+ if(!setjmp(jb)) {
+ newstack = (char *) PR_MALLOC(PIDOOMA_STACK_SIZE);
+ oldstack = (char *) (*(((int *) jb) + 1) - BACKTRACE_SIZE);
+ memcpy(newstack, oldstack, BACKTRACE_SIZE);
+ *(((int *) jb) + 1) = (int) (newstack + BACKTRACE_SIZE);
+ longjmp(jb, 1);
+ }
+ }
+#endif /* HPUX9 */
+#endif /* !_PR_PTHREADS */
+}
+
+PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
+{
+#ifndef _PR_PTHREADS
+ if (isCurrent) {
+ (void) setjmp(CONTEXT(t));
+ }
+ *np = sizeof(CONTEXT(t)) / sizeof(PRWord);
+ return (PRWord *) CONTEXT(t);
+#else
+ *np = 0;
+ return NULL;
+#endif
+}
+
+#ifndef _PR_PTHREADS
+void
+_MD_SET_PRIORITY(_MDThread *thread, PRUintn newPri)
+{
+ return;
+}
+
+PRStatus
+_MD_InitializeThread(PRThread *thread)
+{
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAIT(PRThread *thread, PRIntervalTime ticks)
+{
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ _PR_MD_SWITCH_CONTEXT(thread);
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAKEUP_WAITER(PRThread *thread)
+{
+ if (thread) {
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ }
+ return PR_SUCCESS;
+}
+
+/* These functions should not be called for HP-UX */
+void
+_MD_YIELD(void)
+{
+ PR_NOT_REACHED("_MD_YIELD should not be called for HP-UX.");
+}
+
+PRStatus
+_MD_CREATE_THREAD(
+ PRThread *thread,
+ void (*start) (void *),
+ PRThreadPriority priority,
+ PRThreadScope scope,
+ PRThreadState state,
+ PRUint32 stackSize)
+{
+ PR_NOT_REACHED("_MD_CREATE_THREAD should not be called for HP-UX.");
+}
+#endif /* _PR_PTHREADS */
+
+void
+_MD_suspend_thread(PRThread *thread)
+{
+#ifdef _PR_PTHREADS
+#endif
+}
+
+void
+_MD_resume_thread(PRThread *thread)
+{
+#ifdef _PR_PTHREADS
+#endif
+}
+#endif /* PTHREADS_USER */
+
+/*
+ * See if we have the privilege to set the scheduling policy and
+ * priority of threads. Returns 0 if privilege is available.
+ * Returns EPERM otherwise.
+ */
+
+#if defined(_PR_PTHREADS) && !defined(_PR_DCETHREADS)
+PRIntn pt_hpux_privcheck()
+{
+ PRIntn policy;
+ struct sched_param schedule;
+ PRIntn rv;
+ pthread_t me = pthread_self();
+
+ rv = pthread_getschedparam(me, &policy, &schedule);
+ PR_ASSERT(0 == rv);
+ rv = pthread_setschedparam(me, policy, &schedule);
+ PR_ASSERT(0 == rv || EPERM == rv);
+ return rv;
+}
+#endif
+
+/*
+ * The HP version of strchr is buggy. It looks past the end of the
+ * string and causes a segmentation fault when our (NSPR) version
+ * of malloc is used.
+ *
+ * A better solution might be to put a cushion in our malloc just in
+ * case HP's version of strchr somehow gets used instead of this one.
+ */
+char *
+strchr(const char *s, int c)
+{
+ char ch;
+
+ if (!s) {
+ return NULL;
+ }
+
+ ch = (char) c;
+
+ while ((*s) && ((*s) != ch)) {
+ s++;
+ }
+
+ if ((*s) == ch) {
+ return (char *) s;
+ }
+
+ return NULL;
+}
+
+/*
+ * Implementation of memcmp in HP-UX (verified on releases A.09.03,
+ * A.09.07, and B.10.10) dumps core if called with:
+ * 1. First operand with address = 1(mod 4).
+ * 2. Size = 1(mod 4)
+ * 3. Last byte of the second operand is the last byte of the page and
+ * next page is not accessible(not mapped or protected)
+ * Thus, using the following naive version (tons of optimizations are
+ * possible;^)
+ */
+
+int memcmp(const void *s1, const void *s2, size_t n)
+{
+ register unsigned char *p1 = (unsigned char *) s1,
+ *p2 = (unsigned char *) s2;
+
+ while (n-- > 0) {
+ register int r = ((int) ((unsigned int) *p1))
+ - ((int) ((unsigned int) *p2));
+ if (r) return r;
+ p1++; p2++;
+ }
+ return 0;
+}
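
A hedged sketch that sets up the three conditions listed above (operand at an
odd alignment, size of 1 mod 4, second operand ending at the last byte before
an inaccessible page). The MAP_ANON spelling and the assumption that a stack
array is word-aligned are illustrative; the point is that the byte-wise
replacement above never reads past the mapped page:

    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long pagesize = sysconf(_SC_PAGESIZE);
        size_t n = 5;                      /* size == 1 (mod 4) */
        char buf[16];
        char *s1, *s2, *base;

        /* two anonymous pages; the second is made inaccessible */
        base = mmap(NULL, 2 * pagesize, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANON, -1, 0);
        if (base == MAP_FAILED)
            return 1;
        mprotect(base + pagesize, pagesize, PROT_NONE);

        memset(buf, 0, sizeof(buf));
        s1 = buf + 1;                      /* typically address == 1 (mod 4) */
        s2 = base + pagesize - n;          /* last byte ends the mapped page */
        memset(s2, 0, n);

        return memcmp(s1, s2, n);          /* naive loop above stays in bounds */
    }
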
diff --git a/pr/src/md/unix/irix.c b/pr/src/md/unix/irix.c
new file mode 100644
index 00000000..58ebe922
--- /dev/null
+++ b/pr/src/md/unix/irix.c
@@ -0,0 +1,1308 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/*
+ * The contents of this file are subject to the Netscape Public License
+ * Version 1.0 (the "NPL"); you may not use this file except in
+ * compliance with the NPL. You may obtain a copy of the NPL at
+ * http://www.mozilla.org/NPL/
+ *
+ * Software distributed under the NPL is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the NPL
+ * for the specific language governing rights and limitations under the
+ * NPL.
+ *
+ * The Initial Developer of this code under the NPL is Netscape
+ * Communications Corporation. Portions created by Netscape are
+ * Copyright (C) 1998 Netscape Communications Corporation. All Rights
+ * Reserved.
+ */
+
+#include "primpl.h"
+
+#include <signal.h>
+
+#include <sys/types.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/syssgi.h>
+#include <sys/time.h>
+#include <sys/immu.h>
+#include <sys/utsname.h>
+#include <sys/sysmp.h>
+#include <sys/pda.h>
+#include <sys/prctl.h>
+#include <sys/wait.h>
+#include <sys/resource.h>
+#include <sys/procfs.h>
+#include <task.h>
+
+static void _MD_IntervalInit(void);
+
+#if defined(_PR_PTHREADS)
+#else /* defined(_PR_PTHREADS) */
+
+char *_nspr_sproc_private; /* ptr. to private region in every sproc */
+
+extern PRUintn _pr_numCPU;
+
+typedef struct nspr_arena {
+ PRCList links;
+ usptr_t *usarena;
+} nspr_arena;
+
+#define ARENA_PTR(qp) \
+ ((nspr_arena *) ((char*) (qp) - offsetof(nspr_arena , links)))
+
+static usptr_t *alloc_new_arena(void);
+
+PRCList arena_list = PR_INIT_STATIC_CLIST(&arena_list);
+ulock_t arena_list_lock;
+nspr_arena first_arena;
+int _nspr_irix_arena_cnt = 1;
+
+long _nspr_irix_lock_cnt = 0;
+long _nspr_irix_sem_cnt = 0;
+long _nspr_irix_pollsem_cnt = 0;
+
+usptr_t *_pr_usArena;
+ulock_t _pr_heapLock;
+
+usema_t *_pr_irix_exit_sem;
+PRInt32 _pr_irix_exit_now = 0;
+
+
+#define _NSPR_DEF_INITUSERS 100 /* default value of CONF_INITUSERS */
+#define _NSPR_DEF_INITSIZE (4 * 1024 * 1024) /* 4 MB */
+
+int _irix_initusers = _NSPR_DEF_INITUSERS;
+int _irix_initsize = _NSPR_DEF_INITSIZE;
+
+PRIntn _pr_io_in_progress, _pr_clock_in_progress;
+
+PRInt32 _pr_md_irix_sprocs_created, _pr_md_irix_sprocs_failed;
+PRInt32 _pr_md_irix_sprocs = 1;
+PRCList _pr_md_irix_sproc_list =
+PR_INIT_STATIC_CLIST(&_pr_md_irix_sproc_list);
+
+sigset_t ints_off;
+extern sigset_t timer_set;
+
+#if !defined(PR_SETABORTSIG)
+#define PR_SETABORTSIG 18
+#endif
+/*
+ * terminate the entire application if any sproc exits abnormally
+ */
+PRBool _nspr_terminate_on_error = PR_TRUE;
+
+/*
+ * exported interface to set the shared arena parameters
+ */
+void _PR_Irix_Set_Arena_Params(PRInt32 initusers, PRInt32 initsize)
+{
+ _irix_initusers = initusers;
+ _irix_initsize = initsize;
+}
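
A usage sketch; the 500-user and 8 MB figures are made-up illustrations, and
the call must happen before NSPR creates its first lock. The same values can
also come from the _NSPR_IRIX_INITUSERS / _NSPR_IRIX_INITSIZE environment
variables read in _MD_InitLocks below:

    #include "nspr.h"

    extern void _PR_Irix_Set_Arena_Params(PRInt32 initusers, PRInt32 initsize);

    int main(int argc, char **argv)
    {
        /* size the shared arena for a server expecting many sprocs */
        _PR_Irix_Set_Arena_Params(500, 8 * 1024 * 1024);
        PR_Init(PR_USER_THREAD, PR_PRIORITY_NORMAL, 0);
        /* ... rest of the application ... */
        return 0;
    }
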
+
+static usptr_t *alloc_new_arena()
+{
+ return(usinit("/dev/zero"));
+}
+
+static PRStatus new_poll_sem(struct _MDThread *mdthr, int val)
+{
+PRIntn _is;
+PRStatus rv = PR_SUCCESS;
+usema_t *sem = NULL;
+PRCList *qp;
+nspr_arena *arena;
+usptr_t *irix_arena;
+PRThread *me = _PR_MD_CURRENT_THREAD();
+
+ if (me && !_PR_IS_NATIVE_THREAD(me))
+ _PR_INTSOFF(_is);
+ _PR_LOCK(arena_list_lock);
+ for (qp = arena_list.next; qp != &arena_list; qp = qp->next) {
+ arena = ARENA_PTR(qp);
+ sem = usnewpollsema(arena->usarena, val);
+ if (sem != NULL) {
+ mdthr->cvar_pollsem = sem;
+ mdthr->pollsem_arena = arena->usarena;
+ break;
+ }
+ }
+ if (sem == NULL) {
+ /*
+ * If no space left in the arena allocate a new one.
+ */
+ if (errno == ENOMEM) {
+ arena = PR_NEWZAP(nspr_arena);
+ if (arena != NULL) {
+ irix_arena = alloc_new_arena();
+ if (irix_arena) {
+ PR_APPEND_LINK(&arena->links, &arena_list);
+ _nspr_irix_arena_cnt++;
+ arena->usarena = irix_arena;
+ sem = usnewpollsema(arena->usarena, val);
+ if (sem != NULL) {
+ mdthr->cvar_pollsem = sem;
+ mdthr->pollsem_arena = arena->usarena;
+ } else
+ rv = PR_FAILURE;
+ } else {
+ PR_DELETE(arena);
+ rv = PR_FAILURE;
+ }
+
+ } else
+ rv = PR_FAILURE;
+ } else
+ rv = PR_FAILURE;
+ }
+ _PR_UNLOCK(arena_list_lock);
+ if (me && !_PR_IS_NATIVE_THREAD(me))
+ _PR_FAST_INTSON(_is);
+ if (rv == PR_SUCCESS)
+ _MD_ATOMIC_INCREMENT(&_nspr_irix_pollsem_cnt);
+ return rv;
+}
+
+static void free_poll_sem(struct _MDThread *mdthr)
+{
+PRIntn _is;
+PRThread *me = _PR_MD_CURRENT_THREAD();
+
+ if (me && !_PR_IS_NATIVE_THREAD(me))
+ _PR_INTSOFF(_is);
+ usfreepollsema(mdthr->cvar_pollsem, mdthr->pollsem_arena);
+ if (me && !_PR_IS_NATIVE_THREAD(me))
+ _PR_FAST_INTSON(_is);
+ _MD_ATOMIC_DECREMENT(&_nspr_irix_pollsem_cnt);
+}
+
+static PRStatus new_lock(struct _MDLock *lockp)
+{
+PRIntn _is;
+PRStatus rv = PR_SUCCESS;
+ulock_t lock = NULL;
+PRCList *qp;
+nspr_arena *arena;
+usptr_t *irix_arena;
+PRThread *me = _PR_MD_CURRENT_THREAD();
+
+ if (me && !_PR_IS_NATIVE_THREAD(me))
+ _PR_INTSOFF(_is);
+ _PR_LOCK(arena_list_lock);
+ for (qp = arena_list.next; qp != &arena_list; qp = qp->next) {
+ arena = ARENA_PTR(qp);
+ lock = usnewlock(arena->usarena);
+ if (lock != NULL) {
+ lockp->lock = lock;
+ lockp->arena = arena->usarena;
+ break;
+ }
+ }
+ if (lock == NULL) {
+ /*
+ * If no space left in the arena allocate a new one.
+ */
+ if (errno == ENOMEM) {
+ arena = PR_NEWZAP(nspr_arena);
+ if (arena != NULL) {
+ irix_arena = alloc_new_arena();
+ if (irix_arena) {
+ PR_APPEND_LINK(&arena->links, &arena_list);
+ _nspr_irix_arena_cnt++;
+ arena->usarena = irix_arena;
+ lock = usnewlock(irix_arena);
+ if (lock != NULL) {
+ lockp->lock = lock;
+ lockp->arena = arena->usarena;
+ } else
+ rv = PR_FAILURE;
+ } else {
+ PR_DELETE(arena);
+ rv = PR_FAILURE;
+ }
+
+ } else
+ rv = PR_FAILURE;
+ } else
+ rv = PR_FAILURE;
+ }
+ _PR_UNLOCK(arena_list_lock);
+ if (me && !_PR_IS_NATIVE_THREAD(me))
+ _PR_FAST_INTSON(_is);
+ if (rv == PR_SUCCESS)
+ _MD_ATOMIC_INCREMENT(&_nspr_irix_lock_cnt);
+ return rv;
+}
+
+static void free_lock(struct _MDLock *lockp)
+{
+PRIntn _is;
+PRThread *me = _PR_MD_CURRENT_THREAD();
+
+ if (me && !_PR_IS_NATIVE_THREAD(me))
+ _PR_INTSOFF(_is);
+ usfreelock(lockp->lock, lockp->arena);
+ if (me && !_PR_IS_NATIVE_THREAD(me))
+ _PR_FAST_INTSON(_is);
+ _MD_ATOMIC_DECREMENT(&_nspr_irix_lock_cnt);
+}
+
+void _MD_FREE_LOCK(struct _MDLock *lockp)
+{
+ PRIntn _is;
+ PRThread *me = _PR_MD_CURRENT_THREAD();
+
+ if (me && !_PR_IS_NATIVE_THREAD(me))
+ _PR_INTSOFF(_is);
+ free_lock(lockp);
+ if (me && !_PR_IS_NATIVE_THREAD(me))
+ _PR_FAST_INTSON(_is);
+}
+
+/*
+ * _MD_get_attached_thread
+ * Return the thread pointer of the current thread if it is attached.
+ *
+ * This function is needed for Irix because the thread-local-storage is
+ * implemented by mmapin'g a page with the MAP_LOCAL flag. This causes the
+ * sproc-private page to inherit contents of the page of the caller of sproc().
+ */
+PRThread *_MD_get_attached_thread(void)
+{
+ if (_MD_GET_SPROC_PID() == getpid())
+ return _PR_MD_CURRENT_THREAD();
+ else
+ return 0;
+}
+
+PRStatus _MD_NEW_LOCK(struct _MDLock *lockp)
+{
+ PRStatus rv;
+ PRIntn is;
+ PRThread *me = _PR_MD_CURRENT_THREAD();
+
+ if (me && !_PR_IS_NATIVE_THREAD(me))
+ _PR_INTSOFF(is);
+ rv = new_lock(lockp);
+ if (me && !_PR_IS_NATIVE_THREAD(me))
+ _PR_FAST_INTSON(is);
+ return rv;
+}
+
+static void
+sigchld_handler(int sig)
+{
+ pid_t pid;
+ int status;
+
+ /*
+ * If an sproc exited abnormally send a SIGKILL signal to all the
+ * sprocs in the process to terminate the application
+ */
+ while ((pid = waitpid(0, &status, WNOHANG)) > 0) {
+ if (WIFSIGNALED(status) && ((WTERMSIG(status) == SIGSEGV) ||
+ (WTERMSIG(status) == SIGBUS) ||
+ (WTERMSIG(status) == SIGABRT) ||
+ (WTERMSIG(status) == SIGILL))) {
+
+ prctl(PR_SETEXITSIG, SIGKILL);
+ exit(status);
+ }
+ }
+}
+
+static void save_context_and_block(int sig)
+{
+PRThread *me = _PR_MD_CURRENT_THREAD();
+_PRCPU *cpu = _PR_MD_CURRENT_CPU();
+
+ /*
+ * save context
+ */
+ (void) setjmp(me->md.jb);
+ /*
+ * unblock the suspending thread
+ */
+ if (me->cpu) {
+ /*
+ * I am a cpu thread, not a user-created GLOBAL thread
+ */
+ unblockproc(cpu->md.suspending_id);
+ } else {
+ unblockproc(me->md.suspending_id);
+ }
+ /*
+ * now, block current thread
+ */
+ blockproc(getpid());
+}
+
+/*
+** The irix kernel has a bug in it which causes async connect's which are
+** interrupted by a signal to fail terribly (EADDRINUSE is returned).
+** We work around the bug by blocking signals during the async connect
+** attempt.
+*/
+PRInt32 _MD_irix_connect(
+ PRInt32 osfd, const PRNetAddr *addr, PRInt32 addrlen, PRIntervalTime timeout)
+{
+ PRInt32 rv;
+ sigset_t oldset;
+
+ sigprocmask(SIG_BLOCK, &ints_off, &oldset);
+ rv = connect(osfd, addr, addrlen);
+ sigprocmask(SIG_SETMASK, &oldset, 0);
+
+ return(rv);
+}
+
+#include "prprf.h"
+
+/********************************************************************/
+/********************************************************************/
+/*************** Various thread like things for IRIX ****************/
+/********************************************************************/
+/********************************************************************/
+
+void *_MD_GetSP(PRThread *t)
+{
+ PRThread *me = _PR_MD_CURRENT_THREAD();
+ void *sp;
+
+ if (me == t)
+ (void) setjmp(t->md.jb);
+
+ sp = (void *)(t->md.jb[JB_SP]);
+ PR_ASSERT((sp >= (void *) t->stack->stackBottom) &&
+ (sp <= (void *) (t->stack->stackBottom + t->stack->stackSize)));
+ return(sp);
+}
+
+void _MD_InitLocks()
+{
+ char buf[200];
+ char *init_users, *init_size;
+
+ PR_snprintf(buf, sizeof(buf), "/dev/zero");
+
+ if (init_users = getenv("_NSPR_IRIX_INITUSERS"))
+ _irix_initusers = atoi(init_users);
+
+ if (init_size = getenv("_NSPR_IRIX_INITSIZE"))
+ _irix_initsize = atoi(init_size);
+
+ usconfig(CONF_INITUSERS, _irix_initusers);
+ usconfig(CONF_INITSIZE, _irix_initsize);
+ usconfig(CONF_AUTOGROW, 1);
+ usconfig(CONF_AUTORESV, 1);
+ if (usconfig(CONF_ARENATYPE, US_SHAREDONLY) < 0) {
+ perror("PR_Init: unable to config mutex arena");
+ exit(-1);
+ }
+
+ _pr_usArena = usinit(buf);
+ if (!_pr_usArena) {
+ fprintf(stderr,
+ "PR_Init: Error - unable to create lock/monitor arena\n");
+ exit(-1);
+ }
+ _pr_heapLock = usnewlock(_pr_usArena);
+
+ arena_list_lock = usnewlock(_pr_usArena);
+ _nspr_irix_lock_cnt = 3;
+
+ _pr_irix_exit_sem = usnewsema(_pr_usArena, 0);
+ _nspr_irix_sem_cnt = 1;
+
+ first_arena.usarena = _pr_usArena;
+ PR_INIT_CLIST(&first_arena.links);
+ PR_APPEND_LINK(&first_arena.links, &arena_list);
+}
+
+/* _PR_IRIX_CHILD_PROCESS is a private API for Server group */
+void _PR_IRIX_CHILD_PROCESS()
+{
+ PR_ASSERT(_PR_MD_CURRENT_CPU() == _pr_primordialCPU);
+ PR_ASSERT(_pr_numCPU == 1);
+ PR_ASSERT((_pr_userActive + _pr_systemActive) == 2);
+ /*
+ * save the new pid
+ */
+ _pr_primordialCPU->md.id = getpid();
+}
+
+static PRStatus pr_cvar_wait_sem(PRThread *thread, PRIntervalTime timeout)
+{
+ struct timeval tv, *tvp;
+ fd_set rd;
+ int rv;
+
+ if(timeout == PR_INTERVAL_NO_TIMEOUT) tvp = NULL;
+ else {
+ tv.tv_sec = PR_IntervalToSeconds(timeout);
+ tv.tv_usec = PR_IntervalToMicroseconds(
+ timeout - PR_SecondsToInterval(tv.tv_sec));
+ tvp = &tv;
+ }
+ FD_ZERO(&rd);
+ FD_SET(thread->md.cvar_pollsemfd, &rd);
+ /*
+ * call uspsema only if a previous select call on this semaphore
+ * did not timeout
+ */
+ if (!thread->md.cvar_pollsem_select) {
+ rv = _PR_WAIT_SEM(thread->md.cvar_pollsem);
+ PR_ASSERT(rv >= 0);
+ } else
+ rv = 0;
+again:
+ if(!rv) {
+ rv = _MD_SELECT(thread->md.cvar_pollsemfd + 1, &rd,
+ NULL,NULL,tvp);
+ if ((rv == -1) && (errno == EINTR)) {
+ rv = 0;
+ goto again;
+ }
+ PR_ASSERT(rv >= 0);
+ }
+
+ if (rv > 0) {
+ /*
+ * acquired the semaphore, call uspsema next time
+ */
+ thread->md.cvar_pollsem_select = 0;
+ return PR_SUCCESS;
+ } else {
+ /*
+ * select timed out; must call select, not uspsema, when trying
+ * to acquire the semaphore the next time
+ */
+ thread->md.cvar_pollsem_select = 1;
+ return PR_FAILURE;
+ }
+}
+
+PRStatus _MD_wait(PRThread *thread, PRIntervalTime ticks)
+{
+ if ( thread->flags & _PR_GLOBAL_SCOPE ) {
+ _MD_CHECK_FOR_EXIT();
+ if (pr_cvar_wait_sem(thread, ticks) == PR_FAILURE) {
+ _MD_CHECK_FOR_EXIT();
+ /*
+ * wait timed out
+ */
+ _PR_THREAD_LOCK(thread);
+ if (thread->wait.cvar) {
+ /*
+ * The thread will remove itself from the waitQ
+ * of the cvar in _PR_WaitCondVar
+ */
+ thread->wait.cvar = NULL;
+ thread->state = _PR_RUNNING;
+ _PR_THREAD_UNLOCK(thread);
+ } else {
+ _PR_THREAD_UNLOCK(thread);
+ /*
+ * This thread was woken up by a notifying thread
+ * at the same time as a timeout; so, consume the
+ * extra post operation on the semaphore
+ */
+ _MD_CHECK_FOR_EXIT();
+ pr_cvar_wait_sem(thread, PR_INTERVAL_NO_TIMEOUT);
+ }
+ _MD_CHECK_FOR_EXIT();
+ }
+ } else {
+ _PR_MD_SWITCH_CONTEXT(thread);
+ }
+ return PR_SUCCESS;
+}
+
+PRStatus _MD_WakeupWaiter(PRThread *thread)
+{
+ PRThread *me = _PR_MD_CURRENT_THREAD();
+ PRInt32 pid, rv;
+ PRIntn is;
+
+ PR_ASSERT(_pr_md_idle_cpus >= 0);
+ if (thread == NULL) {
+ if (_pr_md_idle_cpus)
+ _MD_Wakeup_CPUs();
+ } else if (!_PR_IS_NATIVE_THREAD(thread)) {
+ if (_pr_md_idle_cpus)
+ _MD_Wakeup_CPUs();
+ } else {
+ PR_ASSERT(_PR_IS_NATIVE_THREAD(thread));
+ if (!_PR_IS_NATIVE_THREAD(me))
+ _PR_INTSOFF(is);
+ _MD_CVAR_POST_SEM(thread);
+ if (!_PR_IS_NATIVE_THREAD(me))
+ _PR_FAST_INTSON(is);
+ }
+ return PR_SUCCESS;
+}
+
+PRStatus _MD_CreateThread(PRThread *thread,
+void (*start)(void *),
+PRThreadPriority priority,
+PRThreadScope scope,
+PRThreadState state,
+PRUint32 stackSize)
+{
+ typedef void (*SprocEntry) (void *, size_t);
+ SprocEntry spentry = (SprocEntry)start;
+ PRIntn is;
+ PRThread *me = _PR_MD_CURRENT_THREAD();
+ PRInt32 pid;
+ PRStatus rv;
+
+ if (!_PR_IS_NATIVE_THREAD(me))
+ _PR_INTSOFF(is);
+ thread->md.cvar_pollsem_select = 0;
+ thread->flags |= _PR_GLOBAL_SCOPE;
+ pid = sprocsp(
+ spentry, /* startup func */
+ PR_SALL, /* attribute flags */
+ (void *)thread, /* thread param */
+ NULL, /* stack address */
+ stackSize); /* stack size */
+ if (pid > 0) {
+ /*
+ * Wait for the sproc to signal me after it has initialized
+ * itself
+ */
+ if (!_PR_IS_NATIVE_THREAD(me))
+ blockproc(me->cpu->md.id);
+ else
+ blockproc(me->md.id);
+ if (thread->md.cvar_pollsemfd < 0) {
+ /*
+ * the sproc failed to create a polled semaphore and exited
+ */
+ _MD_ATOMIC_INCREMENT(&_pr_md_irix_sprocs_failed);
+ rv = PR_FAILURE;
+ } else {
+ _MD_ATOMIC_INCREMENT(&_pr_md_irix_sprocs_created);
+ _MD_ATOMIC_INCREMENT(&_pr_md_irix_sprocs);
+ rv = PR_SUCCESS;
+ }
+ if (!_PR_IS_NATIVE_THREAD(me))
+ _PR_FAST_INTSON(is);
+ return rv;
+ } else {
+ _MD_ATOMIC_INCREMENT(&_pr_md_irix_sprocs_failed);
+ if (!_PR_IS_NATIVE_THREAD(me))
+ _PR_FAST_INTSON(is);
+ return PR_FAILURE;
+ }
+}
+
+void _MD_CleanThread(PRThread *thread)
+{
+ if (thread->flags & _PR_GLOBAL_SCOPE) {
+ close(thread->md.cvar_pollsemfd);
+ thread->md.cvar_pollsemfd = -1;
+ free_poll_sem(&thread->md);
+ thread->md.cvar_pollsem = NULL;
+ }
+}
+
+void _MD_SetPriority(_MDThread *thread, PRThreadPriority newPri)
+{
+ return;
+}
+
+extern void _MD_unix_terminate_waitpid_daemon(void);
+
+void
+_MD_CleanupBeforeExit(void)
+{
+ extern PRInt32 _pr_cpus_exit;
+
+ _MD_unix_terminate_waitpid_daemon();
+
+ _pr_irix_exit_now = 1;
+ if (_pr_numCPU > 1) {
+ /*
+ * Set a global flag, and wakeup all cpus which will notice the flag
+ * and exit.
+ */
+ _pr_cpus_exit = getpid();
+ _MD_Wakeup_CPUs();
+ while(_pr_numCPU > 1) {
+ _PR_WAIT_SEM(_pr_irix_exit_sem);
+ _pr_numCPU--;
+ }
+ }
+ /*
+ * cause global threads on the recycle list to exit
+ */
+ _PR_DEADQ_LOCK;
+ if (_PR_NUM_DEADNATIVE != 0) {
+ PRThread *thread;
+ PRCList *ptr;
+
+ ptr = _PR_DEADNATIVEQ.next;
+ while( ptr != &_PR_DEADNATIVEQ ) {
+ thread = _PR_THREAD_PTR(ptr);
+ _MD_CVAR_POST_SEM(thread);
+ ptr = ptr->next;
+ }
+ }
+ _PR_DEADQ_UNLOCK;
+ while(_PR_NUM_DEADNATIVE > 1) {
+ _PR_WAIT_SEM(_pr_irix_exit_sem);
+ _PR_DEC_DEADNATIVE;
+ }
+}
+
+#ifndef IRIX5_3
+extern void __sgi_prda_procmask(int);
+#endif
+
+PRStatus
+_MD_InitThread(PRThread *thread, PRBool wakeup_parent)
+{
+ struct sigaction sigact;
+ PRStatus rv = PR_SUCCESS;
+
+ if (thread->flags & _PR_GLOBAL_SCOPE) {
+ /*
+ * create a polled semaphore
+ *
+ * NOTE: On Irix there is a bug which requires the sproc that
+ * created a polled semaphore to not exit for that semaphore
+ * to be useable by other sprocs.
+ */
+ thread->md.id = getpid();
+ thread->md.cvar_pollsemfd = -1;
+ if (new_poll_sem(&thread->md,0) == PR_FAILURE) {
+ if (wakeup_parent == PR_TRUE)
+ unblockproc(getppid());
+ rv = PR_FAILURE;
+ }
+ thread->md.cvar_pollsemfd =
+ _PR_OPEN_POLL_SEM(thread->md.cvar_pollsem);
+ if ((thread->md.cvar_pollsemfd < 0)) {
+ free_poll_sem(&thread->md);
+ if (wakeup_parent == PR_TRUE)
+ unblockproc(getppid());
+ rv = PR_FAILURE;
+ }
+ setblockproccnt(thread->md.id, 0);
+ _MD_SET_SPROC_PID(getpid());
+#ifndef IRIX5_3
+ /*
+ * enable user-level processing of sigprocmask(); this is an
+ * undocumented feature available in Irix 6.2, 6.3, 6.4 and 6.5
+ */
+ __sgi_prda_procmask(USER_LEVEL);
+#endif
+ /*
+ * set up SIGUSR1 handler; this is used to save state
+ * during PR_SuspendAll
+ */
+ sigact.sa_handler = save_context_and_block;
+ sigact.sa_flags = SA_RESTART;
+ /*
+ * Must mask clock interrupts
+ */
+ sigact.sa_mask = timer_set;
+ sigaction(SIGUSR1, &sigact, 0);
+
+
+ if (_nspr_terminate_on_error) {
+ /*
+ * PR_SETABORTSIG is a new command implemented in a patch to
+ * Irix 6.2, 6.3 and 6.4. This causes a signal to be sent to all
+ * sprocs in the process when one of them terminates abnormally
+ *
+ */
+
+ if (prctl(PR_SETABORTSIG, SIGKILL) < 0) {
+ /*
+ * if (errno == EINVAL)
+ *
+ * PR_SETABORTSIG not supported under this OS.
+ * You may want to get a recent kernel rollup patch that
+ * supports this feature.
+ */
+ /*
+ * SIGCLD handler for detecting abnormally-terminating
+ * sprocs
+ */
+ sigact.sa_handler = sigchld_handler;
+ sigact.sa_flags = SA_RESTART;
+ sigact.sa_mask = ints_off;
+ sigaction(SIGCLD, &sigact, NULL);
+ }
+ }
+ /*
+ * unblock the parent sproc
+ */
+ if (wakeup_parent == PR_TRUE)
+ unblockproc(getppid());
+ }
+ return rv;
+}
+
+PR_EXTERN(void ) _MD_exit(PRIntn status)
+{
+ /*
+ * Cause SIGKILL to be sent to other sprocs, if any, in the application
+ */
+ prctl(PR_SETEXITSIG, SIGKILL);
+ exit(status);
+}
+
+void
+_MD_InitRunningCPU(_PRCPU *cpu)
+{
+ extern int _pr_md_pipefd[2];
+
+ _MD_unix_init_running_cpu(cpu);
+ cpu->md.id = getpid();
+ _MD_SET_SPROC_PID(getpid());
+ if (_pr_md_pipefd[0] >= 0) {
+ _PR_IOQ_MAX_OSFD(cpu) = _pr_md_pipefd[0];
+ FD_SET(_pr_md_pipefd[0], &_PR_FD_READ_SET(cpu));
+ }
+}
+
+void
+_MD_ExitThread(PRThread *thread)
+{
+ if (thread->flags & _PR_GLOBAL_SCOPE) {
+ _MD_ATOMIC_DECREMENT(&_pr_md_irix_sprocs);
+ _MD_CLEAN_THREAD(thread);
+ _MD_SET_CURRENT_THREAD(NULL);
+ }
+}
+
+void
+_MD_SuspendCPU(_PRCPU *cpu)
+{
+ PRInt32 rv;
+
+ cpu->md.suspending_id = getpid();
+ rv = kill(cpu->md.id, SIGUSR1);
+ PR_ASSERT(rv == 0);
+ /*
+ * now, block the current thread/cpu until woken up by the suspended
+ * thread from its SIGUSR1 signal handler
+ */
+ blockproc(getpid());
+
+}
+
+void
+_MD_ResumeCPU(_PRCPU *cpu)
+{
+ unblockproc(cpu->md.id);
+}
+
+#if 0
+/*
+ * save the register context of a suspended sproc
+ */
+void get_context(PRThread *thr)
+{
+ int len, fd;
+ char pidstr[24];
+ char path[24];
+
+ /*
+ * open the file corresponding to this process in procfs
+ */
+ sprintf(path,"/proc/%s","00000");
+ len = strlen(path);
+ sprintf(pidstr,"%d",thr->md.id);
+ len -= strlen(pidstr);
+ sprintf(path + len,"%s",pidstr);
+ fd = open(path,O_RDONLY);
+ if (fd >= 0) {
+ (void) ioctl(fd, PIOCGREG, thr->md.gregs);
+ close(fd);
+ }
+ return;
+}
+#endif /* 0 */
+
+void
+_MD_SuspendThread(PRThread *thread)
+{
+ PRInt32 rv;
+
+ PR_ASSERT((thread->flags & _PR_GLOBAL_SCOPE) &&
+ (thread->flags & _PR_GCABLE_THREAD));
+
+ thread->md.suspending_id = getpid();
+ rv = kill(thread->md.id, SIGUSR1);
+ PR_ASSERT(rv == 0);
+ /*
+ * now, block the current thread/cpu until woken up by the suspended
+ * thread from its SIGUSR1 signal handler
+ */
+ blockproc(getpid());
+}
+
+void
+_MD_ResumeThread(PRThread *thread)
+{
+ PRInt32 rv;
+
+ PR_ASSERT((thread->flags & _PR_GLOBAL_SCOPE) &&
+ (thread->flags & _PR_GCABLE_THREAD));
+ rv = unblockproc(thread->md.id);
+}
+
+/*
+ * return the set of processors available for scheduling procs in the
+ * "mask" argument
+ */
+PRInt32 _MD_GetThreadAffinityMask(PRThread *unused, PRUint32 *mask)
+{
+ PRInt32 nprocs, rv;
+ struct pda_stat *pstat;
+#define MAX_PROCESSORS 32
+
+ nprocs = sysmp(MP_NPROCS);
+ if (nprocs < 0)
+ return(-1);
+ pstat = PR_MALLOC(sizeof(struct pda_stat) * nprocs);
+ if (pstat == NULL)
+ return(-1);
+ rv = sysmp(MP_STAT, pstat);
+ if (rv < 0) {
+ PR_DELETE(pstat);
+ return(-1);
+ }
+ /*
+ * look at the first 32 cpus
+ */
+ nprocs = (nprocs > MAX_PROCESSORS) ? MAX_PROCESSORS : nprocs;
+ *mask = 0;
+ while (nprocs) {
+ if ((pstat->p_flags & PDAF_ENABLED) &&
+ !(pstat->p_flags & PDAF_ISOLATED)) {
+ *mask |= (1 << pstat->p_cpuid);
+ }
+ nprocs--;
+ pstat++;
+ }
+ return 0;
+}
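
A small sketch of how the returned bitmask could be consumed; the helper name
is illustrative only:

    static int count_usable_cpus(void)
    {
        PRUint32 mask = 0;
        int cpu, usable = 0;

        if (_MD_GetThreadAffinityMask(NULL, &mask) != 0)
            return -1;
        for (cpu = 0; cpu < 32; cpu++) {
            if (mask & (1U << cpu))
                usable++;        /* processor 'cpu' is enabled and not isolated */
        }
        return usable;           /* only the first 32 processors are reported */
    }
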
+
+static char *_thr_state[] = {
+ "UNBORN",
+ "RUNNABLE",
+ "RUNNING",
+ "LOCK_WAIT",
+ "COND_WAIT",
+ "JOIN_WAIT",
+ "IO_WAIT",
+ "SUSPENDED",
+ "DEAD"
+};
+
+_PR_List_Threads()
+{
+ PRThread *thr;
+ void *handle;
+ struct _PRCPU *cpu;
+ PRCList *qp;
+ int len, status, rv, fd;
+ char pidstr[24];
+ char path[24];
+ prpsinfo_t pinfo;
+
+
+ printf("\n%s %-s\n"," ","LOCAL Threads");
+ printf("%s %-s\n"," ","----- -------");
+ printf("%s %-14s %-10s %-12s %-3s %-10s %-10s %-12s\n\n"," ",
+ "Thread", "State", "Wait-Handle",
+ "Cpu","Stk-Base","Stk-Sz","SP");
+ for (qp = _PR_ACTIVE_LOCAL_THREADQ().next;
+ qp != &_PR_ACTIVE_LOCAL_THREADQ(); qp = qp->next) {
+ thr = _PR_ACTIVE_THREAD_PTR(qp);
+ printf("%s 0x%-12x %-10s "," ",thr,_thr_state[thr->state]);
+ if (thr->state == _PR_LOCK_WAIT)
+ handle = thr->wait.lock;
+ else if (thr->state == _PR_COND_WAIT)
+ handle = thr->wait.cvar;
+ else
+ handle = NULL;
+ if (handle)
+ printf("0x%-10x ",handle);
+ else
+ printf("%-12s "," ");
+ printf("%-3d ",thr->cpu->id);
+ printf("0x%-8x ",thr->stack->stackBottom);
+ printf("0x%-8x ",thr->stack->stackSize);
+ printf("0x%-10x\n",thr->md.jb[JB_SP]);
+ }
+
+ printf("\n%s %-s\n"," ","GLOBAL Threads");
+ printf("%s %-s\n"," ","------ -------");
+ printf("%s %-14s %-6s %-12s %-12s %-12s %-12s\n\n"," ","Thread",
+ "Pid","State","Wait-Handle",
+ "Stk-Base","Stk-Sz");
+
+ for (qp = _PR_ACTIVE_GLOBAL_THREADQ().next;
+ qp != &_PR_ACTIVE_GLOBAL_THREADQ(); qp = qp->next) {
+ thr = _PR_ACTIVE_THREAD_PTR(qp);
+ if (thr->cpu != NULL)
+ continue; /* it is a cpu thread */
+ printf("%s 0x%-12x %-6d "," ",thr,thr->md.id);
+ /*
+ * check if the sproc is still running
+ * first call prctl(PR_GETSHMASK,pid) to check if
+ * the process is part of the share group (the pid
+ * could have been recycled by the OS)
+ */
+ if (prctl(PR_GETSHMASK,thr->md.id) < 0) {
+ printf("%-12s\n","TERMINATED");
+ continue;
+ }
+ /*
+ * Now, check if the sproc terminated and is in zombie
+ * state
+ */
+ sprintf(path,"/proc/pinfo/%s","00000");
+ len = strlen(path);
+ sprintf(pidstr,"%d",thr->md.id);
+ len -= strlen(pidstr);
+ sprintf(path + len,"%s",pidstr);
+ fd = open(path,O_RDONLY);
+ if (fd >= 0) {
+ if (ioctl(fd, PIOCPSINFO, &pinfo) < 0)
+ printf("%-12s ","TERMINATED");
+ else if (pinfo.pr_zomb)
+ printf("%-12s ","TERMINATED");
+ else
+ printf("%-12s ",_thr_state[thr->state]);
+ close(fd);
+ } else {
+ printf("%-12s ","TERMINATED");
+ }
+
+ if (thr->state == _PR_LOCK_WAIT)
+ handle = thr->wait.lock;
+ else if (thr->state == _PR_COND_WAIT)
+ handle = thr->wait.cvar;
+ else
+ handle = NULL;
+ if (handle)
+ printf("%-12x ",handle);
+ else
+ printf("%-12s "," ");
+ printf("0x%-10x ",thr->stack->stackBottom);
+ printf("0x%-10x\n",thr->stack->stackSize);
+ }
+
+ printf("\n%s %-s\n"," ","CPUs");
+ printf("%s %-s\n"," ","----");
+ printf("%s %-14s %-6s %-12s \n\n"," ","Id","Pid","State");
+
+
+ for (qp = _PR_CPUQ().next; qp != &_PR_CPUQ(); qp = qp->next) {
+ cpu = _PR_CPU_PTR(qp);
+ printf("%s %-14d %-6d "," ",cpu->id,cpu->md.id);
+ /*
+ * check if the sproc is still running
+ * first call prctl(PR_GETSHMASK,pid) to check if
+ * the process is part of the share group (the pid
+ * could have been recycled by the OS)
+ */
+ if (prctl(PR_GETSHMASK,cpu->md.id) < 0) {
+ printf("%-12s\n","TERMINATED");
+ continue;
+ }
+ /*
+ * Now, check if the sproc terminated and is in zombie
+ * state
+ */
+ sprintf(path,"/proc/pinfo/%s","00000");
+ len = strlen(path);
+ sprintf(pidstr,"%d",cpu->md.id);
+ len -= strlen(pidstr);
+ sprintf(path + len,"%s",pidstr);
+ fd = open(path,O_RDONLY);
+ if (fd >= 0) {
+ if (ioctl(fd, PIOCPSINFO, &pinfo) < 0)
+ printf("%-12s\n","TERMINATED");
+ else if (pinfo.pr_zomb)
+ printf("%-12s\n","TERMINATED");
+ else
+ printf("%-12s\n","RUNNING");
+ close(fd);
+ } else {
+ printf("%-12s\n","TERMINATED");
+ }
+
+ }
+ fflush(stdout);
+}
+#endif /* defined(_PR_PTHREADS) */
+
+PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
+{
+#if !defined(_PR_PTHREADS)
+ if (isCurrent) {
+ (void) setjmp(t->md.jb);
+ }
+ *np = sizeof(t->md.jb) / sizeof(PRWord);
+ return (PRWord *) (t->md.jb);
+#else
+ *np = 0;
+ return NULL;
+#endif
+}
+
+void _MD_EarlyInit(void)
+{
+#if !defined(_PR_PTHREADS)
+ char *eval;
+ int fd;
+
+ sigemptyset(&ints_off);
+ sigaddset(&ints_off, SIGALRM);
+ sigaddset(&ints_off, SIGIO);
+ sigaddset(&ints_off, SIGCLD);
+
+ if (eval = getenv("_NSPR_TERMINATE_ON_ERROR"))
+ _nspr_terminate_on_error = (0 == atoi(eval)) ? PR_FALSE : PR_TRUE;
+
+ fd = open("/dev/zero",O_RDWR , 0);
+ if (fd < 0) {
+ perror("open /dev/zero failed");
+ exit(1);
+ }
+ /*
+ * Set up the sproc private data area.
+ * This region exists at the same address, _nspr_sproc_private, for
+ * every sproc, but each sproc gets a private copy of the region.
+ */
+ _nspr_sproc_private = mmap(0, _pr_pageSize, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE| MAP_LOCAL, fd, 0);
+ if (_nspr_sproc_private == (void*)-1) {
+ perror("mmap /dev/zero failed");
+ exit(1);
+ }
+ close(fd);
+#endif
+ _MD_IntervalInit();
+} /* _MD_EarlyInit */
+
+void _MD_IrixInit()
+{
+#if !defined(_PR_PTHREADS)
+ struct sigaction sigact;
+ rlim_t stack_max_limit;
+ PRThread *me = _PR_MD_CURRENT_THREAD();
+
+#ifndef IRIX5_3
+ /*
+ * enable user-level processing of sigprocmask(); this is an undocumented
+ * feature available in Irix 6.2, 6.3, 6.4 and 6.5
+ */
+ __sgi_prda_procmask(USER_LEVEL);
+#endif
+
+ /*
+ * set up SIGUSR1 handler; this is used to save state
+ * during PR_SuspendAll
+ */
+ sigact.sa_handler = save_context_and_block;
+ sigact.sa_flags = SA_RESTART;
+ sigact.sa_mask = ints_off;
+ sigaction(SIGUSR1, &sigact, 0);
+
+ /*
+ * Change the name of the core file from core to core.pid,
+ * This is inherited by the sprocs created by this process
+ */
+#ifndef IRIX5_3
+ prctl(PR_COREPID, 0, 1);
+#endif
+ /*
+ * Irix-specific terminate on error processing
+ */
+ if (_nspr_terminate_on_error) {
+ /*
+ * PR_SETABORTSIG is a new command implemented in a patch to
+ * Irix 6.2, 6.3 and 6.4. This causes a signal to be sent to all
+ * sprocs in the process when one of them terminates abnormally
+ *
+ */
+ if (prctl(PR_SETABORTSIG, SIGKILL) < 0) {
+ /*
+ * if (errno == EINVAL)
+ *
+ * PR_SETABORTSIG not supported under this OS.
+ * You may want to get a recent kernel rollup patch that
+ * supports this feature.
+ *
+ */
+ /*
+ * PR_SETEXITSIG - send the SIGCLD signal to the parent
+ * sproc when any sproc terminates
+ *
+ * This is used to cause the entire application to
+ * terminate when any sproc terminates abnormally by
+ * receipt of a SIGSEGV, SIGBUS or SIGABRT signal.
+ * If this is not done, the application may seem
+ * "hung" to the user because the other sprocs may be
+ * waiting for resources held by the
+ * abnormally-terminating sproc.
+ */
+ prctl(PR_SETEXITSIG, 0);
+
+ sigact.sa_handler = sigchld_handler;
+ sigact.sa_flags = SA_RESTART;
+ sigact.sa_mask = ints_off;
+ sigaction(SIGCLD, &sigact, NULL);
+ }
+ }
+
+ /*
+ * setup stack fields for the primordial thread
+ */
+ me->stack->stackSize = prctl(PR_GETSTACKSIZE);
+ me->stack->stackBottom = me->stack->stackTop - me->stack->stackSize;
+#endif /* _PR_PTHREADS */
+
+ _PR_UnixInit();
+}
+
+/**************************************************************************/
+/************** code and such for NSPR 2.0's interval times ***************/
+/**************************************************************************/
+
+#define PR_CLOCK_GRANULARITY 10000UL
+
+#ifndef SGI_CYCLECNTR_SIZE
+#define SGI_CYCLECNTR_SIZE 165 /* Size user needs to use to read CC */
+#endif
+
+static PRUintn mmem_fd = -1;
+static PRIntn clock_width = 0;
+static void *iotimer_addr = NULL;
+static PRUint32 pr_clock_mask = 0;
+static PRUint32 pr_clock_shift = 0;
+static PRIntervalTime pr_ticks = 0;
+static PRUint32 pr_clock_granularity = 1;
+static PRUint32 pr_previous = 0, pr_residual = 0;
+
+extern PRIntervalTime _PR_UNIX_GetInterval(void);
+extern PRIntervalTime _PR_UNIX_TicksPerSecond(void);
+
+void _MD_IntervalInit(void)
+{
+ /*
+ * As much as I would like, the service available through this
+ * interface on R3000's (aka, IP12) just isn't going to make it.
+ * The register is only 24 bits wide, and rolls over at a ferocious
+ * rate.
+ */
+ PRUint32 one_tick = 0;
+ struct utsname utsinfo;
+ uname(&utsinfo);
+ if ((strncmp("IP12", utsinfo.machine, 4) != 0)
+ && ((mmem_fd = open("/dev/mmem", O_RDONLY)) != -1))
+ {
+ int poffmask = getpagesize() - 1;
+ __psunsigned_t phys_addr, raddr, cycleval;
+
+ phys_addr = syssgi(SGI_QUERY_CYCLECNTR, &cycleval);
+ raddr = phys_addr & ~poffmask;
+ iotimer_addr = mmap(
+ 0, poffmask, PROT_READ, MAP_PRIVATE, mmem_fd, (__psint_t)raddr);
+
+ clock_width = syssgi(SGI_CYCLECNTR_SIZE);
+ if (clock_width < 0)
+ {
+ /*
+ * We must be executing on a 6.0 or earlier system, since the
+ * SGI_CYCLECNTR_SIZE call is not supported.
+ *
+ * The only pre-6.1 platforms with 64-bit counters are
+ * IP19 and IP21 (Challenge, PowerChallenge, Onyx).
+ */
+ if (!strncmp(utsinfo.machine, "IP19", 4) ||
+ !strncmp(utsinfo.machine, "IP21", 4))
+ clock_width = 64;
+ else
+ clock_width = 32;
+ }
+
+ /*
+ * 'cycleval' is picoseconds / increment of the counter.
+ * I'm pushing for a tick to be 100 microseconds, 10^(-4).
+ * That leaves 10^(-8) left over, or 10^8 / cycleval.
+ * Did I do that right?
+ */
+
+ one_tick = 100000000UL / cycleval ; /* 100 microseconds */
+
+ while (0 != one_tick)
+ {
+ pr_clock_shift += 1;
+ one_tick = one_tick >> 1;
+ pr_clock_granularity = pr_clock_granularity << 1;
+ }
+ pr_clock_mask = pr_clock_granularity - 1; /* to make a mask out of it */
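+        /*
+         * Illustrative example (hypothetical numbers, not a measured
+         * value): for a counter that increments every 40000 picoseconds
+         * (a 25 MHz cycle counter), one_tick = 100000000 / 40000 = 2500
+         * increments per 100 microseconds.  The loop above then rounds
+         * that up to the next power of two, giving pr_clock_shift = 12,
+         * pr_clock_granularity = 4096 and pr_clock_mask = 0xfff, so one
+         * NSPR tick corresponds to 4096 counter increments (roughly 164
+         * microseconds for this hypothetical counter).
+         */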
+
+ iotimer_addr = (void*)
+ ((__psunsigned_t)iotimer_addr + (phys_addr & poffmask));
+ }
+
+} /* _MD_IntervalInit */
+
+PRIntervalTime _MD_IntervalPerSec()
+{
+ return pr_clock_granularity;
+}
+
+PRIntervalTime _MD_GetInterval()
+{
+ if (mmem_fd != -1)
+ {
+ if (64 == clock_width)
+ {
+ PRUint64 temp = *(PRUint64*)iotimer_addr;
+ pr_ticks = (PRIntervalTime)(temp >> pr_clock_shift);
+ }
+ else
+ {
+ PRIntervalTime ticks = pr_ticks;
+ PRUint32 now = *(PRUint32*)iotimer_addr, temp;
+ PRUint32 residual = pr_residual, previous = pr_previous;
+
+ temp = now - previous + residual;
+ residual = temp & pr_clock_mask;
+ ticks += temp >> pr_clock_shift;
+
+ pr_previous = now;
+ pr_residual = residual;
+ pr_ticks = ticks;
+ }
+ }
+ else
+ {
+ /*
+ * No fast access. Use the time of day clock. This isn't the
+ * right answer since this clock can get set back, tick at odd
+         * rates, and it's expensive to acquire.
+ */
+ pr_ticks = _PR_UNIX_GetInterval();
+ PR_ASSERT(PR_CLOCK_GRANULARITY > _PR_UNIX_TicksPerSecond());
+ pr_ticks *= (PR_CLOCK_GRANULARITY / _PR_UNIX_TicksPerSecond());
+ }
+ return pr_ticks;
+} /* _MD_GetInterval */
+
diff --git a/pr/src/md/unix/linux.c b/pr/src/md/unix/linux.c
new file mode 100644
index 00000000..a653f47b
--- /dev/null
+++ b/pr/src/md/unix/linux.c
@@ -0,0 +1,230 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/*
+ * The contents of this file are subject to the Netscape Public License
+ * Version 1.0 (the "NPL"); you may not use this file except in
+ * compliance with the NPL. You may obtain a copy of the NPL at
+ * http://www.mozilla.org/NPL/
+ *
+ * Software distributed under the NPL is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the NPL
+ * for the specific language governing rights and limitations under the
+ * NPL.
+ *
+ * The Initial Developer of this code under the NPL is Netscape
+ * Communications Corporation. Portions created by Netscape are
+ * Copyright (C) 1998 Netscape Communications Corporation. All Rights
+ * Reserved.
+ */
+
+#include "primpl.h"
+
+void _MD_EarlyInit(void)
+{
+}
+
+PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
+{
+#ifndef _PR_PTHREADS
+ if (isCurrent) {
+ (void) setjmp(CONTEXT(t));
+ }
+ *np = sizeof(CONTEXT(t)) / sizeof(PRWord);
+ return (PRWord *) CONTEXT(t);
+#else
+ *np = 0;
+ return NULL;
+#endif
+}
+
+#ifdef _PR_PTHREADS
+
+extern void _MD_unix_terminate_waitpid_daemon(void);
+
+void _MD_CleanupBeforeExit(void)
+{
+ _MD_unix_terminate_waitpid_daemon();
+}
+
+#else /* ! _PR_PTHREADS */
+
+void
+_MD_SET_PRIORITY(_MDThread *thread, PRUintn newPri)
+{
+ return;
+}
+
+PRStatus
+_MD_InitializeThread(PRThread *thread)
+{
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAIT(PRThread *thread, PRIntervalTime ticks)
+{
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ _PR_MD_SWITCH_CONTEXT(thread);
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAKEUP_WAITER(PRThread *thread)
+{
+ if (thread) {
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ }
+ return PR_SUCCESS;
+}
+
+/* These functions should not be called for Linux */
+void
+_MD_YIELD(void)
+{
+    PR_NOT_REACHED("_MD_YIELD should not be called for Linux.");
+}
+
+PRStatus
+_MD_CREATE_THREAD(
+ PRThread *thread,
+ void (*start) (void *),
+ PRThreadPriority priority,
+ PRThreadScope scope,
+ PRThreadState state,
+ PRUint32 stackSize)
+{
+    PR_NOT_REACHED("_MD_CREATE_THREAD should not be called for Linux.");
+ return PR_FAILURE;
+}
+#endif /* ! _PR_PTHREADS */
+
+#if defined(_PR_NEED_FAKE_POLL)
+
+#include <fcntl.h>
+
+int poll(struct pollfd *filedes, unsigned long nfds, int timeout)
+{
+ int i;
+ int rv;
+ int maxfd;
+ fd_set rd, wr, ex;
+ struct timeval tv, *tvp;
+
+ if (timeout < 0 && timeout != -1) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (timeout == -1) {
+ tvp = NULL;
+ } else {
+ tv.tv_sec = timeout / 1000;
+ tv.tv_usec = (timeout % 1000) * 1000;
+ tvp = &tv;
+ }
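+    /*
+     * For illustration: a timeout of 2500 (milliseconds) becomes
+     * tv_sec = 2, tv_usec = 500000; a timeout of 0 makes select()
+     * poll without blocking; -1 (tvp == NULL) blocks indefinitely.
+     */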
+
+ maxfd = -1;
+ FD_ZERO(&rd);
+ FD_ZERO(&wr);
+ FD_ZERO(&ex);
+
+ for (i = 0; i < nfds; i++) {
+ int osfd = filedes[i].fd;
+ int events = filedes[i].events;
+ PRBool fdHasEvent = PR_FALSE;
+
+ if (osfd < 0) {
+ continue; /* Skip this osfd. */
+ }
+
+        /*
+         * Map the poll events onto the three select() fd sets:
+         * POLLIN, POLLRDNORM  ===> readable  (rd)
+         * POLLOUT, POLLWRNORM ===> writable  (wr)
+         * POLLPRI, POLLRDBAND ===> exception (ex)
+         * POLLWRBAND (and POLLMSG on some platforms) are ignored.
+         *
+         * The output events POLLERR and POLLHUP are never turned on.
+         * POLLNVAL may be turned on.
+         */
+
+ if (events & (POLLIN | POLLRDNORM)) {
+ FD_SET(osfd, &rd);
+ fdHasEvent = PR_TRUE;
+ }
+ if (events & (POLLOUT | POLLWRNORM)) {
+ FD_SET(osfd, &wr);
+ fdHasEvent = PR_TRUE;
+ }
+ if (events & (POLLPRI | POLLRDBAND)) {
+ FD_SET(osfd, &ex);
+ fdHasEvent = PR_TRUE;
+ }
+ if (fdHasEvent && osfd > maxfd) {
+ maxfd = osfd;
+ }
+ }
+
+ rv = select(maxfd + 1, &rd, &wr, &ex, tvp);
+
+ /* Compute poll results */
+ if (rv > 0) {
+ rv = 0;
+ for (i = 0; i < nfds; i++) {
+ PRBool fdHasEvent = PR_FALSE;
+
+ filedes[i].revents = 0;
+ if (filedes[i].fd < 0) {
+ continue;
+ }
+ if (FD_ISSET(filedes[i].fd, &rd)) {
+ if (filedes[i].events & POLLIN) {
+ filedes[i].revents |= POLLIN;
+ }
+ if (filedes[i].events & POLLRDNORM) {
+ filedes[i].revents |= POLLRDNORM;
+ }
+ fdHasEvent = PR_TRUE;
+ }
+ if (FD_ISSET(filedes[i].fd, &wr)) {
+ if (filedes[i].events & POLLOUT) {
+ filedes[i].revents |= POLLOUT;
+ }
+ if (filedes[i].events & POLLWRNORM) {
+ filedes[i].revents |= POLLWRNORM;
+ }
+ fdHasEvent = PR_TRUE;
+ }
+ if (FD_ISSET(filedes[i].fd, &ex)) {
+ if (filedes[i].events & POLLPRI) {
+ filedes[i].revents |= POLLPRI;
+ }
+ if (filedes[i].events & POLLRDBAND) {
+ filedes[i].revents |= POLLRDBAND;
+ }
+ fdHasEvent = PR_TRUE;
+ }
+ if (fdHasEvent) {
+ rv++;
+ }
+ }
+ PR_ASSERT(rv > 0);
+ } else if (rv == -1 && errno == EBADF) {
+ rv = 0;
+ for (i = 0; i < nfds; i++) {
+ filedes[i].revents = 0;
+ if (filedes[i].fd < 0) {
+ continue;
+ }
+ if (fcntl(filedes[i].fd, F_GETFL, 0) == -1) {
+ filedes[i].revents = POLLNVAL;
+ rv++;
+ }
+ }
+ PR_ASSERT(rv > 0);
+ }
+ PR_ASSERT(-1 != timeout || rv != 0);
+
+ return rv;
+}
+#endif /* _PR_NEED_FAKE_POLL */
diff --git a/pr/src/md/unix/ncr.c b/pr/src/md/unix/ncr.c
new file mode 100644
index 00000000..1560cc2c
--- /dev/null
+++ b/pr/src/md/unix/ncr.c
@@ -0,0 +1,367 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/*
+ * The contents of this file are subject to the Netscape Public License
+ * Version 1.0 (the "NPL"); you may not use this file except in
+ * compliance with the NPL. You may obtain a copy of the NPL at
+ * http://www.mozilla.org/NPL/
+ *
+ * Software distributed under the NPL is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the NPL
+ * for the specific language governing rights and limitations under the
+ * NPL.
+ *
+ * The Initial Developer of this code under the NPL is Netscape
+ * Communications Corporation. Portions created by Netscape are
+ * Copyright (C) 1998 Netscape Communications Corporation. All Rights
+ * Reserved.
+ */
+
+/*
+ * NCR 3.0 - cloned from UnixWare by ruslan
+ */
+#include "primpl.h"
+
+#include <setjmp.h>
+
+void _MD_EarlyInit(void)
+{
+}
+
+PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
+{
+ if (isCurrent) {
+ (void) setjmp(CONTEXT(t));
+ }
+ *np = sizeof(CONTEXT(t)) / sizeof(PRWord);
+ return (PRWord *) CONTEXT(t);
+}
+
+#ifdef ALARMS_BREAK_TCP /* I don't think they do */
+
+PRInt32 _MD_connect(PRInt32 osfd, const PRNetAddr *addr, PRInt32 addrlen,
+ PRIntervalTime timeout)
+{
+ PRInt32 rv;
+
+ _MD_BLOCK_CLOCK_INTERRUPTS();
+ rv = _connect(osfd,addr,addrlen);
+    _MD_UNBLOCK_CLOCK_INTERRUPTS();
+    return(rv);
+}
+
+PRInt32 _MD_accept(PRInt32 osfd, PRNetAddr *addr, PRInt32 addrlen,
+ PRIntervalTime timeout)
+{
+ PRInt32 rv;
+
+ _MD_BLOCK_CLOCK_INTERRUPTS();
+ rv = _accept(osfd,addr,addrlen);
+ _MD_UNBLOCK_CLOCK_INTERRUPTS();
+ return(rv);
+}
+#endif
+
+/*
+ * These are also implemented in pratom.c using NSPR locks. Any reason
+ * this might be better or worse? If you like this better, define
+ * _PR_HAVE_ATOMIC_OPS in include/md/unixware.h
+ */
+#ifdef _PR_HAVE_ATOMIC_OPS
+/* Atomic operations */
+#include <stdio.h>
+static FILE *_uw_semf;
+
+void
+_MD_INIT_ATOMIC(void)
+{
+ /* Sigh. Sure wish SYSV semaphores weren't such a pain to use */
+ if ((_uw_semf = tmpfile()) == NULL)
+ PR_ASSERT(0);
+
+ return;
+}
+
+void
+_MD_ATOMIC_INCREMENT(PRInt32 *val)
+{
+ flockfile(_uw_semf);
+ (*val)++;
+ unflockfile(_uw_semf);
+}
+
+void
+_MD_ATOMIC_DECREMENT(PRInt32 *val)
+{
+ flockfile(_uw_semf);
+ (*val)--;
+ unflockfile(_uw_semf);
+}
+
+void
+_MD_ATOMIC_SET(PRInt32 *val, PRInt32 newval)
+{
+ flockfile(_uw_semf);
+ *val = newval;
+ unflockfile(_uw_semf);
+}
+#endif
+
+void
+_MD_SET_PRIORITY(_MDThread *thread, PRUintn newPri)
+{
+ return;
+}
+
+PRStatus
+_MD_InitializeThread(PRThread *thread)
+{
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAIT(PRThread *thread, PRIntervalTime ticks)
+{
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ _PR_MD_SWITCH_CONTEXT(thread);
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAKEUP_WAITER(PRThread *thread)
+{
+ if (thread) {
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ }
+ return PR_SUCCESS;
+}
+
+/* These functions should not be called for NCR */
+void
+_MD_YIELD(void)
+{
+    PR_NOT_REACHED("_MD_YIELD should not be called for NCR.");
+}
+
+PRStatus
+_MD_CREATE_THREAD(
+ PRThread *thread,
+ void (*start) (void *),
+ PRUintn priority,
+ PRThreadScope scope,
+ PRThreadState state,
+ PRUint32 stackSize)
+{
+    PR_NOT_REACHED("_MD_CREATE_THREAD should not be called for NCR.");
+ return PR_FAILURE;
+}
+
+/*
+ * This is a temporary replacement for localtime_r. Normally PR_ExplodeTime
+ * should be used instead, as I understand it.
+ */
+
+/*
+** $$$$$ THEN WHY ARE WE DOING THIS? - AOF $$$$$
+*/
+
+#define NEED_LOCALTIME_R
+#define NEED_GMTIME_R
+#define NEED_ASCTIME_R
+#define NEED_STRTOK_R
+#define NEED_CTIME_R
+
+#if defined (NEED_LOCALTIME_R) || defined (NEED_CTIME_R) || defined (NEED_ASCTIME_R) || defined (NEED_GMTIME_R) || defined (NEED_STRTOK_R)
+#include "prlock.h"
+#endif
+
+#if defined (NEED_LOCALTIME_R)
+
+static PRLock *localtime_r_monitor = NULL;
+
+struct tm *localtime_r (const time_t *clock, struct tm *result)
+{
+ struct tm *tmPtr;
+ int needLock = PR_Initialized(); /* We need to use a lock to protect
+ * against NSPR threads only when the
+ * NSPR thread system is activated. */
+
+ if (needLock) {
+        if (localtime_r_monitor == NULL) {
+            localtime_r_monitor = PR_NewLock();
+        }
+ PR_Lock(localtime_r_monitor);
+ }
+
+ /*
+ * On Windows, localtime() returns a NULL pointer if 'clock'
+ * represents a time before midnight January 1, 1970. In
+ * that case, we also return a NULL pointer and the struct tm
+ * object pointed to by 'result' is not modified.
+ */
+
+ tmPtr = localtime(clock);
+ if (tmPtr) {
+ *result = *tmPtr;
+ } else {
+ result = NULL;
+ }
+
+ if (needLock) PR_Unlock(localtime_r_monitor);
+
+ return result;
+}
+
+#endif
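+
+/*
+ * Usage sketch (illustrative only; error handling omitted):
+ *
+ *     time_t now = time(NULL);
+ *     struct tm exploded;
+ *     if (localtime_r(&now, &exploded) != NULL) {
+ *         ... use exploded.tm_hour, exploded.tm_min, ...
+ *     }
+ *
+ * The lock above is only taken once NSPR is initialized; as with a native
+ * localtime_r, the caller always supplies its own struct tm.
+ */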
+
+#if defined (NEED_GMTIME_R)
+
+static PRLock *gmtime_r_monitor = NULL;
+
+struct tm *gmtime_r (const time_t *clock, struct tm *result)
+{
+ struct tm *tmPtr;
+ int needLock = PR_Initialized(); /* We need to use a lock to protect
+ * against NSPR threads only when the
+ * NSPR thread system is activated. */
+
+ if (needLock) {
+ if (gmtime_r_monitor == NULL) {
+ gmtime_r_monitor = PR_NewLock();
+ }
+ PR_Lock(gmtime_r_monitor);
+ }
+
+ tmPtr = gmtime(clock);
+ if (tmPtr) {
+ *result = *tmPtr;
+ } else {
+ result = NULL;
+ }
+
+ if (needLock) PR_Unlock(gmtime_r_monitor);
+
+ return result;
+}
+
+#endif
+
+#if defined (NEED_CTIME_R)
+
+static PRLock *ctime_r_monitor = NULL;
+
+char *ctime_r (const time_t *clock, char *buf, int buflen)
+{
+ char *cbuf;
+ int needLock = PR_Initialized(); /* We need to use a lock to protect
+ * against NSPR threads only when the
+ * NSPR thread system is activated. */
+
+    if (needLock) {
+        if (ctime_r_monitor == NULL) {
+ ctime_r_monitor = PR_NewLock();
+ }
+ PR_Lock(ctime_r_monitor);
+ }
+
+ cbuf = ctime (clock);
+ if (cbuf) {
+ strncpy (buf, cbuf, buflen - 1);
+ buf[buflen - 1] = 0;
+ }
+
+ if (needLock) PR_Unlock(ctime_r_monitor);
+
+ return cbuf;
+}
+
+#endif
+
+#if defined (NEED_ASCTIME_R)
+
+static PRLock *asctime_r_monitor = NULL;
+
+
+char *asctime_r (const struct tm *tm, char *buf, int buflen)
+{
+ char *cbuf;
+ int needLock = PR_Initialized(); /* We need to use a lock to protect
+ * against NSPR threads only when the
+ * NSPR thread system is activated. */
+
+ if (needLock) {
+ if (asctime_r_monitor == NULL) {
+ asctime_r_monitor = PR_NewLock();
+ }
+ PR_Lock(asctime_r_monitor);
+ }
+
+ cbuf = asctime (tm);
+ if (cbuf) {
+ strncpy (buf, cbuf, buflen - 1);
+ buf[buflen - 1] = 0;
+ }
+
+ if (needLock) PR_Unlock(asctime_r_monitor);
+
+ return cbuf;
+
+}
+#endif
+
+#if defined (NEED_STRTOK_R)
+
+char *
+strtok_r(char *s, const char *delim, char **last)
+{
+ register char *spanp;
+ register int c, sc;
+ char *tok;
+
+
+ if (s == NULL && (s = *last) == NULL)
+ return (NULL);
+
+ /*
+ * Skip (span) leading delimiters (s += strspn(s, delim), sort of).
+ */
+cont:
+
+ c = *s++;
+ for (spanp = (char *)delim; (sc = *spanp++) != 0;) {
+ if (c == sc)
+ goto cont;
+ }
+
+ if (c == 0) { /* no non-delimiter characters */
+ *last = NULL;
+ return (NULL);
+ }
+ tok = s - 1;
+
+ /*
+ * Scan token (scan for delimiters: s += strcspn(s, delim), sort of).
+ * Note that delim must have one NUL; we stop if we see that, too.
+ */
+ for (;;) {
+ c = *s++;
+ spanp = (char *)delim;
+ do {
+ if ((sc = *spanp++) == c) {
+                if (c == 0)
+                    s = NULL;
+                else
+                    s[-1] = 0;
+ *last = s;
+ return (tok);
+ }
+ } while (sc != 0);
+ }
+ /* NOTREACHED */
+}
+
+#endif
diff --git a/pr/src/md/unix/nec.c b/pr/src/md/unix/nec.c
new file mode 100644
index 00000000..3b466cfa
--- /dev/null
+++ b/pr/src/md/unix/nec.c
@@ -0,0 +1,81 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/*
+ * The contents of this file are subject to the Netscape Public License
+ * Version 1.0 (the "NPL"); you may not use this file except in
+ * compliance with the NPL. You may obtain a copy of the NPL at
+ * http://www.mozilla.org/NPL/
+ *
+ * Software distributed under the NPL is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the NPL
+ * for the specific language governing rights and limitations under the
+ * NPL.
+ *
+ * The Initial Developer of this code under the NPL is Netscape
+ * Communications Corporation. Portions created by Netscape are
+ * Copyright (C) 1998 Netscape Communications Corporation. All Rights
+ * Reserved.
+ */
+
+#include "primpl.h"
+
+void _MD_EarlyInit(void)
+{
+}
+
+PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
+{
+ if (isCurrent) {
+ (void) setjmp(CONTEXT(t));
+ }
+ *np = sizeof(CONTEXT(t)) / sizeof(PRWord);
+ return (PRWord *) CONTEXT(t);
+}
+
+void
+_MD_SET_PRIORITY(_MDThread *thread, PRUintn newPri)
+{
+ return;
+}
+
+PRStatus
+_MD_InitializeThread(PRThread *thread)
+{
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAIT(PRThread *thread, PRIntervalTime ticks)
+{
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ _PR_MD_SWITCH_CONTEXT(thread);
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAKEUP_WAITER(PRThread *thread)
+{
+ if (thread) {
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ }
+ return PR_SUCCESS;
+}
+
+/* These functions should not be called for NEC */
+void
+_MD_YIELD(void)
+{
+ PR_NOT_REACHED("_MD_YIELD should not be called for NEC.");
+}
+
+PRStatus
+_MD_CREATE_THREAD(
+ PRThread *thread,
+ void (*start) (void *),
+ PRThreadPriority priority,
+ PRThreadScope scope,
+ PRThreadState state,
+ PRUint32 stackSize)
+{
+ PR_NOT_REACHED("_MD_CREATE_THREAD should not be called for NEC.");
+ return PR_FAILURE;
+}
diff --git a/pr/src/md/unix/objs.mk b/pr/src/md/unix/objs.mk
new file mode 100644
index 00000000..7882ce27
--- /dev/null
+++ b/pr/src/md/unix/objs.mk
@@ -0,0 +1,176 @@
+#
+# The contents of this file are subject to the Netscape Public License
+# Version 1.0 (the "NPL"); you may not use this file except in
+# compliance with the NPL. You may obtain a copy of the NPL at
+# http://www.mozilla.org/NPL/
+#
+# Software distributed under the NPL is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the NPL
+# for the specific language governing rights and limitations under the
+# NPL.
+#
+# The Initial Developer of this code under the NPL is Netscape
+# Communications Corporation. Portions created by Netscape are
+# Copyright (C) 1998 Netscape Communications Corporation. All Rights
+# Reserved.
+#
+
+# This makefile appends to the variable OBJS the platform-dependent
+# object modules that will be part of the nspr20 library.
+
+CSRCS = \
+ unix.c \
+ unix_errors.c \
+ uxproces.c \
+ uxwrap.c \
+ $(NULL)
+
+PTH_USER_CSRCS = \
+ pthreads_user.c \
+ $(NULL)
+
+IRIX_CSRCS = \
+ irix.c \
+ $(NULL)
+
+SUNOS4_CSRCS = \
+ sunos4.c \
+ $(NULL)
+
+SOLARIS_CSRCS = \
+ solaris.c \
+ $(NULL)
+
+AIX_CSRCS = \
+ aix.c \
+ $(NULL)
+
+FREEBSD_CSRCS = \
+ freebsd.c \
+ $(NULL)
+
+BSDI_CSRCS = \
+ bsdi.c \
+ $(NULL)
+
+HPUX_CSRCS = \
+ hpux.c \
+ $(NULL)
+
+OSF1_CSRCS = \
+ osf1.c \
+ $(NULL)
+
+LINUX_CSRCS = \
+ linux.c \
+ $(NULL)
+
+UNIXWARE_CSRCS = \
+ unixware.c \
+ $(NULL)
+
+RELIANTUNIX_CSRCS = \
+ reliantunix.c \
+ $(NULL)
+
+NEC_CSRCS = \
+ nec.c \
+ $(NULL)
+
+SONY_CSRCS = \
+ sony.c \
+ $(NULL)
+
+NCR_CSRCS = \
+ ncr.c \
+ $(NULL)
+
+SCOOS_CSRCS = \
+ scoos.c \
+ $(NULL)
+
+
+ifeq ($(PTHREADS_USER),1)
+CSRCS += $(PTH_USER_CSRCS)
+endif
+
+ifeq ($(OS_ARCH),IRIX)
+CSRCS += $(IRIX_CSRCS)
+endif
+
+ifeq ($(OS_ARCH),SunOS)
+ifeq ($(OS_RELEASE),4.1.3_U1)
+CSRCS += $(SUNOS4_CSRCS)
+else
+CSRCS += $(SOLARIS_CSRCS)
+endif
+endif
+
+ifeq ($(OS_ARCH),AIX)
+CSRCS += $(AIX_CSRCS)
+endif
+ifeq ($(OS_ARCH),FreeBSD)
+CSRCS += $(FREEBSD_CSRCS)
+endif
+ifeq ($(OS_ARCH),BSD_386)
+CSRCS += $(BSDI_CSRCS)
+endif
+ifeq ($(OS_ARCH),HP-UX)
+CSRCS += $(HPUX_CSRCS)
+endif
+ifeq ($(OS_ARCH),OSF1)
+CSRCS += $(OSF1_CSRCS)
+endif
+ifeq ($(OS_ARCH),Linux)
+CSRCS += $(LINUX_CSRCS)
+endif
+ifeq ($(OS_ARCH),UNIXWARE)
+CSRCS += $(UNIXWARE_CSRCS)
+endif
+ifeq ($(OS_ARCH),ReliantUNIX)
+CSRCS += $(RELIANTUNIX_CSRCS)
+endif
+ifeq ($(OS_ARCH),NEC)
+CSRCS += $(NEC_CSRCS)
+endif
+ifeq ($(OS_ARCH),NEWS-OS)
+CSRCS += $(SONY_CSRCS)
+endif
+ifeq ($(OS_ARCH),NCR)
+CSRCS += $(NCR_CSRCS)
+endif
+ifeq ($(OS_ARCH),SCO_SV)
+CSRCS += $(SCOOS_CSRCS)
+endif
+
+#
+# Some Unix platforms have an assembly language file.
+# E.g., AIX 3.2, Solaris (both sparc and x86).
+#
+ifeq ($(OS_ARCH), AIX)
+ ifeq ($(OS_RELEASE), 3.2)
+ ASFILES = os_$(OS_ARCH).s
+ endif
+endif
+
+ifeq ($(OS_ARCH),SunOS)
+ ifeq ($(OS_TEST),i86pc)
+ ASFILES = os_$(OS_ARCH)_x86.s
+ else
+ ifneq ($(OS_RELEASE),4.1.3_U1)
+ ASFILES = os_$(OS_ARCH).s
+ endif
+ endif
+endif
+
+ifeq ($(OS_ARCH), ReliantUNIX)
+ ASFILES = os_$(OS_ARCH).s
+endif
+
+ifeq ($(OS_ARCH)$(OS_RELEASE),BSD_3862.1)
+ ASFILES = os_BSD_386_2.s
+endif
+
+OBJS += $(addprefix md/unix/$(OBJDIR)/,$(CSRCS:.c=.o)) \
+ $(addprefix md/unix/$(OBJDIR)/,$(ASFILES:.s=.o))
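+
+# For illustration: on Linux (where ASFILES is not set), CSRCS ends up
+# containing unix.c, unix_errors.c, uxproces.c, uxwrap.c and linux.c, so
+# the lines above add md/unix/$(OBJDIR)/unix.o ... md/unix/$(OBJDIR)/linux.o
+# to OBJS.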
+
diff --git a/pr/src/md/unix/os_AIX.s b/pr/src/md/unix/os_AIX.s
new file mode 100644
index 00000000..63ca4e78
--- /dev/null
+++ b/pr/src/md/unix/os_AIX.s
@@ -0,0 +1,102 @@
+# -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+#
+# The contents of this file are subject to the Netscape Public License
+# Version 1.0 (the "NPL"); you may not use this file except in
+# compliance with the NPL. You may obtain a copy of the NPL at
+# http://www.mozilla.org/NPL/
+#
+# Software distributed under the NPL is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the NPL
+# for the specific language governing rights and limitations under the
+# NPL.
+#
+# The Initial Developer of this code under the NPL is Netscape
+# Communications Corporation. Portions created by Netscape are
+# Copyright (C) 1998 Netscape Communications Corporation. All Rights
+# Reserved.
+#
+.set r0,0; .set SP,1; .set RTOC,2; .set r3,3; .set r4,4
+.set r5,5; .set r6,6; .set r7,7; .set r8,8; .set r9,9
+.set r10,10; .set r11,11; .set r12,12; .set r13,13; .set r14,14
+.set r15,15; .set r16,16; .set r17,17; .set r18,18; .set r19,19
+.set r20,20; .set r21,21; .set r22,22; .set r23,23; .set r24,24
+.set r25,25; .set r26,26; .set r27,27; .set r28,28; .set r29,29
+.set r30,30; .set r31,31
+
+
+ .rename H.10.NO_SYMBOL{PR},""
+ .rename H.18.longjmp{TC},"longjmp"
+
+ .lglobl H.10.NO_SYMBOL{PR}
+ .globl .longjmp
+ .globl longjmp{DS}
+ .extern .sigcleanup
+ .extern .jmprestfpr
+
+# .text section
+
+ .csect H.10.NO_SYMBOL{PR}
+.longjmp:
+ mr r13,r3
+ mr r14,r4
+ stu SP,-56(SP)
+ bl .sigcleanup
+ l RTOC,0x14(SP)
+ cal SP,0x38(SP)
+ mr r3,r13
+ mr r4,r14
+ l r5,0x8(r3)
+ l SP,0xc(r3)
+ l r7,0xf8(r3)
+ st r7,0x0(SP)
+ l RTOC,0x10(r3)
+ bl .jmprestfpr
+# 1 == cr0 in disassembly
+ cmpi 1,r4,0x0
+ mtlr r5
+ lm r13,0x14(r3)
+ l r5,0x60(r3)
+ mtcrf 0x38,r5
+ mr r3,r4
+ bne __L1
+ lil r3,0x1
+__L1:
+ br
+
+# traceback table
+ .long 0x00000000
+ .byte 0x00 # VERSION=0
+ .byte 0x00 # LANG=TB_C
+ .byte 0x20 # IS_GL=0,IS_EPROL=0,HAS_TBOFF=1
+ # INT_PROC=0,HAS_CTL=0,TOCLESS=0
+ # FP_PRESENT=0,LOG_ABORT=0
+ .byte 0x40 # INT_HNDL=0,NAME_PRESENT=1
+ # USES_ALLOCA=0,CL_DIS_INV=WALK_ONCOND
+ # SAVES_CR=0,SAVES_LR=0
+ .byte 0x80 # STORES_BC=1,FPR_SAVED=0
+ .byte 0x00 # GPR_SAVED=0
+ .byte 0x02 # FIXEDPARMS=2
+ .byte 0x01 # FLOATPARMS=0,PARMSONSTK=1
+ .long 0x00000000 #
+ .long 0x00000014 # TB_OFFSET
+ .short 7 # NAME_LEN
+ .byte "longjmp"
+ .byte 0 # padding
+ .byte 0 # padding
+ .byte 0 # padding
+# End of traceback table
+ .long 0x00000000 # "\0\0\0\0"
+
+# .data section
+
+ .toc # 0x00000038
+T.18.longjmp:
+ .tc H.18.longjmp{TC},longjmp{DS}
+
+ .csect longjmp{DS}
+ .long .longjmp # "\0\0\0\0"
+ .long TOC{TC0} # "\0\0\0008"
+ .long 0x00000000 # "\0\0\0\0"
+# End csect longjmp{DS}
+
+# .bss section
diff --git a/pr/src/md/unix/os_BSD_386_2.s b/pr/src/md/unix/os_BSD_386_2.s
new file mode 100644
index 00000000..971ac1b2
--- /dev/null
+++ b/pr/src/md/unix/os_BSD_386_2.s
@@ -0,0 +1,54 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/*
+ * The contents of this file are subject to the Netscape Public License
+ * Version 1.0 (the "NPL"); you may not use this file except in
+ * compliance with the NPL. You may obtain a copy of the NPL at
+ * http://www.mozilla.org/NPL/
+ *
+ * Software distributed under the NPL is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the NPL
+ * for the specific language governing rights and limitations under the
+ * NPL.
+ *
+ * The Initial Developer of this code under the NPL is Netscape
+ * Communications Corporation. Portions created by Netscape are
+ * Copyright (C) 1998 Netscape Communications Corporation. All Rights
+ * Reserved.
+ */
+/*
+ * os_BSD_386_2.s
+ * We need to define our own setjmp/longjmp on BSDI 2.x because libc's
+ * implementation does some sanity checking that defeats user level threads.
+ * This should no longer be necessary in BSDI 3.0.
+ */
+
+.globl _setjmp
+.align 2
+_setjmp:
+ movl 4(%esp),%eax
+ movl 0(%esp),%edx
+ movl %edx, 0(%eax) /* rta */
+ movl %ebx, 4(%eax)
+ movl %esp, 8(%eax)
+ movl %ebp,12(%eax)
+ movl %esi,16(%eax)
+ movl %edi,20(%eax)
+ movl $0,%eax
+ ret
+
+.globl _longjmp
+.align 2
+_longjmp:
+ movl 4(%esp),%edx
+ movl 8(%esp),%eax
+ movl 0(%edx),%ecx
+ movl 4(%edx),%ebx
+ movl 8(%edx),%esp
+ movl 12(%edx),%ebp
+ movl 16(%edx),%esi
+ movl 20(%edx),%edi
+ cmpl $0,%eax
+ jne 1f
+ movl $1,%eax
+1: movl %ecx,0(%esp)
+ ret
diff --git a/pr/src/md/unix/os_ReliantUNIX.s b/pr/src/md/unix/os_ReliantUNIX.s
new file mode 100644
index 00000000..4ac8b0c1
--- /dev/null
+++ b/pr/src/md/unix/os_ReliantUNIX.s
@@ -0,0 +1,108 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/*
+ * The contents of this file are subject to the Netscape Public License
+ * Version 1.0 (the "NPL"); you may not use this file except in
+ * compliance with the NPL. You may obtain a copy of the NPL at
+ * http://www.mozilla.org/NPL/
+ *
+ * Software distributed under the NPL is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the NPL
+ * for the specific language governing rights and limitations under the
+ * NPL.
+ *
+ * The Initial Developer of this code under the NPL is Netscape
+ * Communications Corporation. Portions created by Netscape are
+ * Copyright (C) 1998 Netscape Communications Corporation. All Rights
+ * Reserved.
+ */
+/* We want position independent code */
+#define PIC
+
+#include <sys/asm.h>
+#include <sys/regdef.h>
+#include <sys/syscall.h>
+
+ .file 1 "os_ReliantUNIX.s"
+ .option pic2
+ .text
+
+ .align 2
+ .globl getcxt
+ .ent getcxt
+getcxt:
+ .frame sp,0,$31 # vars= 0, regs= 0/0, args= 0, extra= 0
+ # saved integer regs
+ sw ra,180(a0) # gpregs[CXT_EPC]
+ sw gp,152(a0) # gpregs[CXT_GP]
+ sw sp,156(a0) # gpregs[CXT_SP]
+ sw s8,160(a0) # gpregs[CXT_S8]
+ sw s0,104(a0) # gpregs[CXT_S0]
+ sw s1,108(a0) # gpregs[CXT_S1]
+ sw s2,112(a0) # gpregs[CXT_S2]
+ sw s3,116(a0) # gpregs[CXT_S3]
+ sw s4,120(a0) # gpregs[CXT_S4]
+ sw s5,124(a0) # gpregs[CXT_S5]
+ sw s6,128(a0) # gpregs[CXT_S6]
+ sw s7,132(a0) # gpregs[CXT_S7]
+ # csr
+ cfc1 v0,$31
+ # saved float regs
+ s.d $f20,264(a0) # fpregs.fp_r.fp_dregs[10]
+ s.d $f22,272(a0) # fpregs.fp_r.fp_dregs[11]
+ s.d $f24,280(a0) # fpregs.fp_r.fp_dregs[12]
+ s.d $f26,288(a0) # fpregs.fp_r.fp_dregs[13]
+ s.d $f28,296(a0) # fpregs.fp_r.fp_dregs[14]
+ s.d $f30,304(a0) # fpregs.fp_r.fp_dregs[15]
+ sw v0,312(a0) # fpregs.fp_csr
+
+ # give no illusions about the contents
+ li v0,0x0c # UC_CPU | UC_MAU
+ sw v0,0(a0) # uc_flags
+
+ move v0,zero
+ j ra
+ .end getcxt
+
+ .align 2
+ .globl setcxt
+ .ent setcxt
+setcxt:
+ .frame sp,0,$31 # vars= 0, regs= 0/0, args= 0, extra= 0
+ lw v0,312(a0) # fpregs.fp_csr
+ li v1,0xfffc0fff # mask out exception cause bits
+ and v0,v0,v1
+ # saved integer regs
+ lw t9,180(a0) # gpregs[CXT_EPC]
+ lw ra,180(a0) # gpregs[CXT_EPC]
+ lw gp,152(a0) # gpregs[CXT_GP]
+ lw sp,156(a0) # gpregs[CXT_SP]
+ ctc1 v0,$31 # fp_csr
+ lw s8,160(a0) # gpregs[CXT_S8]
+ lw s0,104(a0) # gpregs[CXT_S0]
+ lw s1,108(a0) # gpregs[CXT_S1]
+ lw s2,112(a0) # gpregs[CXT_S2]
+ lw s3,116(a0) # gpregs[CXT_S3]
+ lw s4,120(a0) # gpregs[CXT_S4]
+ lw s5,124(a0) # gpregs[CXT_S5]
+ lw s6,128(a0) # gpregs[CXT_S6]
+ lw s7,132(a0) # gpregs[CXT_S7]
+ # saved float regs
+ l.d $f20,264(a0) # fpregs.fp_r.fp_dregs[10]
+ l.d $f22,272(a0) # fpregs.fp_r.fp_dregs[11]
+ l.d $f24,280(a0) # fpregs.fp_r.fp_dregs[12]
+ l.d $f26,288(a0) # fpregs.fp_r.fp_dregs[13]
+ l.d $f28,296(a0) # fpregs.fp_r.fp_dregs[14]
+ l.d $f30,304(a0) # fpregs.fp_r.fp_dregs[15]
+
+ # load these, too
+ # they were not saved, but maybe the user modified them...
+ lw v0,48(a0)
+ lw v1,52(a0)
+ lw a1,60(a0)
+ lw a2,64(a0)
+ lw a3,68(a0)
+ lw a0,56(a0) # there is no way back
+
+ j ra
+
+ .end setcxt
diff --git a/pr/src/md/unix/os_SunOS.s b/pr/src/md/unix/os_SunOS.s
new file mode 100644
index 00000000..2edf276f
--- /dev/null
+++ b/pr/src/md/unix/os_SunOS.s
@@ -0,0 +1,51 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/*
+ * The contents of this file are subject to the Netscape Public License
+ * Version 1.0 (the "NPL"); you may not use this file except in
+ * compliance with the NPL. You may obtain a copy of the NPL at
+ * http://www.mozilla.org/NPL/
+ *
+ * Software distributed under the NPL is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the NPL
+ * for the specific language governing rights and limitations under the
+ * NPL.
+ *
+ * The Initial Developer of this code under the NPL is Netscape
+ * Communications Corporation. Portions created by Netscape are
+ * Copyright (C) 1998 Netscape Communications Corporation. All Rights
+ * Reserved.
+ */
+ .text
+
+/*
+ * sol_getsp()
+ *
+ * Return the current sp (for debugging)
+ */
+ .global sol_getsp
+sol_getsp:
+ retl
+ mov %sp, %o0
+
+
+/*
+ * sol_curthread()
+ *
+ * Return a unique identifier for the currently active thread.
+ */
+ .global sol_curthread
+sol_curthread:
+ retl
+ mov %g7, %o0
+
+
+ .global __MD_FlushRegisterWindows
+ .global _MD_FlushRegisterWindows
+
+__MD_FlushRegisterWindows:
+_MD_FlushRegisterWindows:
+
+ ta 3
+ ret
+ restore
+
diff --git a/pr/src/md/unix/os_SunOS_ultrasparc.s b/pr/src/md/unix/os_SunOS_ultrasparc.s
new file mode 100644
index 00000000..b98b820f
--- /dev/null
+++ b/pr/src/md/unix/os_SunOS_ultrasparc.s
@@ -0,0 +1,157 @@
+! -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+!
+! The contents of this file are subject to the Netscape Public License
+! Version 1.0 (the "NPL"); you may not use this file except in
+! compliance with the NPL. You may obtain a copy of the NPL at
+! http://www.mozilla.org/NPL/
+!
+! Software distributed under the NPL is distributed on an "AS IS" basis,
+! WITHOUT WARRANTY OF ANY KIND, either express or implied. See the NPL
+! for the specific language governing rights and limitations under the
+! NPL.
+!
+! The Initial Developer of this code under the NPL is Netscape
+! Communications Corporation. Portions created by Netscape are
+! Copyright (C) 1998 Netscape Communications Corporation. All Rights
+! Reserved.
+!
+!
+! atomic increment, decrement and swap routines for V8+ sparc (ultrasparc)
+! using CAS (compare-and-swap) atomic instructions
+!
+! this MUST be compiled with an ultrasparc-aware assembler
+!
+! standard asm linkage macros; this module must be compiled
+! with the -P option (use C preprocessor)
+
+#include <sys/asm_linkage.h>
+
+! ======================================================================
+!
+! Perform the sequence a = a + 1 atomically with respect to other
+! fetch-and-adds to location a in a wait-free fashion.
+!
+! usage : val = PR_AtomicIncrement(address)
+! return: current value (you'd think this would be old val)
+!
+! -----------------------
+! Note on REGISTER USAGE:
+! as this is a LEAF procedure, a new stack frame is not created;
+! we use the caller's stack frame so what would normally be %i (input)
+! registers are actually %o (output registers). Also, we must not
+! overwrite the contents of %l (local) registers as they are not
+! assumed to be volatile during calls.
+!
+! So, the registers used are:
+! %o0 [input] - the address of the value to increment
+! %o1 [local] - work register
+! %o2 [local] - work register
+! %o3 [local] - work register
+! -----------------------
+
+ ENTRY(PR_AtomicIncrement) ! standard assembler/ELF prologue
+
+retryAI:
+ ld [%o0], %o2 ! set o2 to the current value
+ add %o2, 0x1, %o3 ! calc the new value
+ mov %o3, %o1 ! save the return value
+ cas [%o0], %o2, %o3 ! atomically set if o0 hasn't changed
+ cmp %o2, %o3 ! see if we set the value
+ bne retryAI ! if not, try again
+ nop ! empty out the branch pipeline
+ retl ! return back to the caller
+ mov %o1, %o0 ! set the return code to the new value
+
+ SET_SIZE(PR_AtomicIncrement) ! standard assembler/ELF epilogue
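+
+! For illustration only: the retry loop above behaves roughly like the
+! following C, assuming a 32-bit compare-and-swap primitive cas32() that
+! returns the value previously stored at the address:
+!
+!     PRInt32 PR_AtomicIncrement(PRInt32 *addr)
+!     {
+!         PRInt32 oldval, newval;
+!         do {
+!             oldval = *addr;                  /* ld  [%o0], %o2    */
+!             newval = oldval + 1;             /* add %o2, 0x1, %o3 */
+!         } while (cas32(addr, oldval, newval) != oldval); /* cas/cmp/bne */
+!         return newval;                       /* the updated value */
+!     }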
+
+!
+! end
+!
+! ======================================================================
+!
+
+! ======================================================================
+!
+! Perform the sequence a = a - 1 atomically with respect to other
+! fetch-and-decs to location a in a wait-free fashion.
+!
+! usage : val = PR_AtomicDecrement(address)
+! return: current value (you'd think this would be old val)
+!
+! -----------------------
+! Note on REGISTER USAGE:
+! as this is a LEAF procedure, a new stack frame is not created;
+! we use the caller's stack frame so what would normally be %i (input)
+! registers are actually %o (output registers). Also, we must not
+! overwrite the contents of %l (local) registers as they are not
+! assumed to be volatile during calls.
+!
+! So, the registers used are:
+!    %o0  [input]  - the address of the value to decrement
+! %o1 [local] - work register
+! %o2 [local] - work register
+! %o3 [local] - work register
+! -----------------------
+
+ ENTRY(PR_AtomicDecrement) ! standard assembler/ELF prologue
+
+retryAD:
+ ld [%o0], %o2 ! set o2 to the current value
+ sub %o2, 0x1, %o3 ! calc the new value
+ mov %o3, %o1 ! save the return value
+ cas [%o0], %o2, %o3 ! atomically set if o0 hasn't changed
+ cmp %o2, %o3 ! see if we set the value
+ bne retryAD ! if not, try again
+ nop ! empty out the branch pipeline
+ retl ! return back to the caller
+ mov %o1, %o0 ! set the return code to the new value
+
+ SET_SIZE(PR_AtomicDecrement) ! standard assembler/ELF epilogue
+
+!
+! end
+!
+! ======================================================================
+!
+
+! ======================================================================
+!
+! Perform the sequence a = b atomically with respect to other
+! fetch-and-stores to location a in a wait-free fashion.
+!
+! usage : old_val = PR_AtomicSet(address, newval)
+!
+! -----------------------
+! Note on REGISTER USAGE:
+! as this is a LEAF procedure, a new stack frame is not created;
+! we use the caller's stack frame so what would normally be %i (input)
+! registers are actually %o (output registers). Also, we must not
+! overwrite the contents of %l (local) registers as they are not
+! assumed to be volatile during calls.
+!
+! So, the registers used are:
+!    %o0  [input]  - the address of the value to set
+! %o1 [input] - the new value to set for [%o0]
+! %o2 [local] - work register
+! %o3 [local] - work register
+! -----------------------
+
+ ENTRY(PR_AtomicSet) ! standard assembler/ELF prologue
+
+retryAS:
+ ld [%o0], %o2 ! set o2 to the current value
+ mov %o1, %o3 ! set up the new value
+ cas [%o0], %o2, %o3 ! atomically set if o0 hasn't changed
+ cmp %o2, %o3 ! see if we set the value
+ bne retryAS ! if not, try again
+ nop ! empty out the branch pipeline
+ retl ! return back to the caller
+ mov %o3, %o0 ! set the return code to the prev value
+
+ SET_SIZE(PR_AtomicSet) ! standard assembler/ELF epilogue
+
+!
+! end
+!
+! ======================================================================
+!
diff --git a/pr/src/md/unix/os_SunOS_x86.s b/pr/src/md/unix/os_SunOS_x86.s
new file mode 100644
index 00000000..867fd2d3
--- /dev/null
+++ b/pr/src/md/unix/os_SunOS_x86.s
@@ -0,0 +1,60 @@
+/ -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+/
+/ The contents of this file are subject to the Netscape Public License
+/ Version 1.0 (the "NPL"); you may not use this file except in
+/ compliance with the NPL. You may obtain a copy of the NPL at
+/ http://www.mozilla.org/NPL/
+/
+/ Software distributed under the NPL is distributed on an "AS IS" basis,
+/ WITHOUT WARRANTY OF ANY KIND, either express or implied. See the NPL
+/ for the specific language governing rights and limitations under the
+/ NPL.
+/
+/ The Initial Developer of this code under the NPL is Netscape
+/ Communications Corporation. Portions created by Netscape are
+/ Copyright (C) 1998 Netscape Communications Corporation. All Rights
+/ Reserved.
+/
+ .text
+
+ .globl getedi
+getedi:
+ movl %edi,%eax
+ ret
+ .type getedi,@function
+ .size getedi,.-getedi
+
+ .globl setedi
+setedi:
+ movl 4(%esp),%edi
+ ret
+ .type setedi,@function
+ .size setedi,.-setedi
+
+ .globl __MD_FlushRegisterWindows
+ .globl _MD_FlushRegisterWindows
+
+__MD_FlushRegisterWindows:
+_MD_FlushRegisterWindows:
+
+ ret
+
+/
+/ sol_getsp()
+/
+/ Return the current sp (for debugging)
+/
+ .globl sol_getsp
+sol_getsp:
+ movl %esp, %eax
+ ret
+
+/
+/ sol_curthread()
+/
+/ Return a unique identifier for the currently active thread.
+/
+ .globl sol_curthread
+sol_curthread:
+ movl %ecx, %eax
+ ret
diff --git a/pr/src/md/unix/osf1.c b/pr/src/md/unix/osf1.c
new file mode 100644
index 00000000..e79e6607
--- /dev/null
+++ b/pr/src/md/unix/osf1.c
@@ -0,0 +1,88 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/*
+ * The contents of this file are subject to the Netscape Public License
+ * Version 1.0 (the "NPL"); you may not use this file except in
+ * compliance with the NPL. You may obtain a copy of the NPL at
+ * http://www.mozilla.org/NPL/
+ *
+ * Software distributed under the NPL is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the NPL
+ * for the specific language governing rights and limitations under the
+ * NPL.
+ *
+ * The Initial Developer of this code under the NPL is Netscape
+ * Communications Corporation. Portions created by Netscape are
+ * Copyright (C) 1998 Netscape Communications Corporation. All Rights
+ * Reserved.
+ */
+
+#include "primpl.h"
+
+void _MD_EarlyInit(void)
+{
+}
+
+PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
+{
+#ifndef _PR_PTHREADS
+ if (isCurrent) {
+ (void) setjmp(CONTEXT(t));
+ }
+ *np = sizeof(CONTEXT(t)) / sizeof(PRWord);
+ return (PRWord *) CONTEXT(t);
+#else
+ *np = 0;
+ return NULL;
+#endif
+}
+
+#ifndef _PR_PTHREADS
+void
+_MD_SET_PRIORITY(_MDThread *thread, PRUintn newPri)
+{
+ return;
+}
+
+PRStatus
+_MD_InitializeThread(PRThread *thread)
+{
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAIT(PRThread *thread, PRIntervalTime ticks)
+{
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ _PR_MD_SWITCH_CONTEXT(thread);
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAKEUP_WAITER(PRThread *thread)
+{
+ if (thread) {
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ }
+ return PR_SUCCESS;
+}
+
+/* These functions should not be called for OSF1 */
+void
+_MD_YIELD(void)
+{
+ PR_NOT_REACHED("_MD_YIELD should not be called for OSF1.");
+}
+
+PRStatus
+_MD_CREATE_THREAD(
+ PRThread *thread,
+ void (*start) (void *),
+ PRThreadPriority priority,
+ PRThreadScope scope,
+ PRThreadState state,
+ PRUint32 stackSize)
+{
+ PR_NOT_REACHED("_MD_CREATE_THREAD should not be called for OSF1.");
+ return PR_FAILURE;
+}
+#endif /* ! _PR_PTHREADS */
diff --git a/pr/src/md/unix/pthreads_user.c b/pr/src/md/unix/pthreads_user.c
new file mode 100644
index 00000000..efc3ff70
--- /dev/null
+++ b/pr/src/md/unix/pthreads_user.c
@@ -0,0 +1,465 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/*
+ * The contents of this file are subject to the Netscape Public License
+ * Version 1.0 (the "NPL"); you may not use this file except in
+ * compliance with the NPL. You may obtain a copy of the NPL at
+ * http://www.mozilla.org/NPL/
+ *
+ * Software distributed under the NPL is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the NPL
+ * for the specific language governing rights and limitations under the
+ * NPL.
+ *
+ * The Initial Developer of this code under the NPL is Netscape
+ * Communications Corporation. Portions created by Netscape are
+ * Copyright (C) 1998 Netscape Communications Corporation. All Rights
+ * Reserved.
+ */
+
+#include "primpl.h"
+#include <sys/types.h>
+#include <unistd.h>
+#include <signal.h>
+#include <pthread.h>
+
+
+sigset_t ints_off;
+pthread_mutex_t _pr_heapLock;
+pthread_key_t current_thread_key;
+pthread_key_t current_cpu_key;
+pthread_key_t last_thread_key;
+pthread_key_t intsoff_key;
+
+
+PRInt32 _pr_md_pthreads_created, _pr_md_pthreads_failed;
+PRInt32 _pr_md_pthreads = 1;
+
+void _MD_EarlyInit(void)
+{
+extern PRInt32 _nspr_noclock;
+
+#ifdef HPUX
+ _MD_hpux_install_sigfpe_handler();
+#endif
+
+ if (pthread_key_create(&current_thread_key, NULL) != 0) {
+ perror("pthread_key_create failed");
+ exit(1);
+ }
+ if (pthread_key_create(&current_cpu_key, NULL) != 0) {
+ perror("pthread_key_create failed");
+ exit(1);
+ }
+ if (pthread_key_create(&last_thread_key, NULL) != 0) {
+ perror("pthread_key_create failed");
+ exit(1);
+ }
+ if (pthread_key_create(&intsoff_key, NULL) != 0) {
+ perror("pthread_key_create failed");
+ exit(1);
+ }
+
+ sigemptyset(&ints_off);
+ sigaddset(&ints_off, SIGALRM);
+ sigaddset(&ints_off, SIGIO);
+ sigaddset(&ints_off, SIGCLD);
+
+ /*
+ * disable clock interrupts
+ */
+ _nspr_noclock = 1;
+
+}
+
+void _MD_InitLocks()
+{
+ if (pthread_mutex_init(&_pr_heapLock, NULL) != 0) {
+ perror("pthread_mutex_init failed");
+ exit(1);
+ }
+}
+
+PR_IMPLEMENT(void) _MD_FREE_LOCK(struct _MDLock *lockp)
+{
+ PRIntn _is;
+ PRThread *me = _PR_MD_CURRENT_THREAD();
+
+ if (me && !_PR_IS_NATIVE_THREAD(me))
+ _PR_INTSOFF(_is);
+ pthread_mutex_destroy(&lockp->mutex);
+ if (me && !_PR_IS_NATIVE_THREAD(me))
+ _PR_FAST_INTSON(_is);
+}
+
+
+
+PR_IMPLEMENT(PRStatus) _MD_NEW_LOCK(struct _MDLock *lockp)
+{
+ PRStatus rv;
+ PRIntn is;
+ PRThread *me = _PR_MD_CURRENT_THREAD();
+
+ if (me && !_PR_IS_NATIVE_THREAD(me))
+ _PR_INTSOFF(is);
+ rv = pthread_mutex_init(&lockp->mutex, NULL);
+ if (me && !_PR_IS_NATIVE_THREAD(me))
+ _PR_FAST_INTSON(is);
+ return (rv == 0) ? PR_SUCCESS : PR_FAILURE;
+}
+
+
+PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
+{
+ if (isCurrent) {
+ (void) setjmp(CONTEXT(t));
+ }
+ *np = sizeof(CONTEXT(t)) / sizeof(PRWord);
+ return (PRWord *) CONTEXT(t);
+}
+
+PR_IMPLEMENT(void)
+_MD_SetPriority(_MDThread *thread, PRThreadPriority newPri)
+{
+ /*
+ * XXX - to be implemented
+ */
+ return;
+}
+
+PR_IMPLEMENT(PRStatus) _MD_InitThread(struct PRThread *thread)
+{
+ struct sigaction sigact;
+
+ if (thread->flags & _PR_GLOBAL_SCOPE) {
+ thread->md.pthread = pthread_self();
+#if 0
+ /*
+ * set up SIGUSR1 handler; this is used to save state
+ * during PR_SuspendAll
+ */
+ sigact.sa_handler = save_context_and_block;
+ sigact.sa_flags = SA_RESTART;
+ /*
+ * Must mask clock interrupts
+ */
+ sigact.sa_mask = timer_set;
+ sigaction(SIGUSR1, &sigact, 0);
+#endif
+ }
+
+ return PR_SUCCESS;
+}
+
+PR_IMPLEMENT(void) _MD_ExitThread(struct PRThread *thread)
+{
+ if (thread->flags & _PR_GLOBAL_SCOPE) {
+ _MD_CLEAN_THREAD(thread);
+ _MD_SET_CURRENT_THREAD(NULL);
+ }
+}
+
+PR_IMPLEMENT(void) _MD_CleanThread(struct PRThread *thread)
+{
+ if (thread->flags & _PR_GLOBAL_SCOPE) {
+ pthread_mutex_destroy(&thread->md.pthread_mutex);
+ pthread_cond_destroy(&thread->md.pthread_cond);
+ }
+}
+
+PR_IMPLEMENT(void) _MD_SuspendThread(struct PRThread *thread)
+{
+ PRInt32 rv;
+
+ PR_ASSERT((thread->flags & _PR_GLOBAL_SCOPE) &&
+ (thread->flags & _PR_GCABLE_THREAD));
+#if 0
+ thread->md.suspending_id = getpid();
+ rv = kill(thread->md.id, SIGUSR1);
+ PR_ASSERT(rv == 0);
+ /*
+ * now, block the current thread/cpu until woken up by the suspended
+     * thread from its SIGUSR1 signal handler
+ */
+ blockproc(getpid());
+#endif
+}
+
+PR_IMPLEMENT(void) _MD_ResumeThread(struct PRThread *thread)
+{
+ PRInt32 rv;
+
+ PR_ASSERT((thread->flags & _PR_GLOBAL_SCOPE) &&
+ (thread->flags & _PR_GCABLE_THREAD));
+#if 0
+ rv = unblockproc(thread->md.id);
+#endif
+}
+
+PR_IMPLEMENT(void) _MD_SuspendCPU(struct _PRCPU *cpu)
+{
+ PRInt32 rv;
+
+#if 0
+ cpu->md.suspending_id = getpid();
+ rv = kill(cpu->md.id, SIGUSR1);
+ PR_ASSERT(rv == 0);
+ /*
+ * now, block the current thread/cpu until woken up by the suspended
+     * thread from its SIGUSR1 signal handler
+ */
+ blockproc(getpid());
+#endif
+}
+
+PR_IMPLEMENT(void) _MD_ResumeCPU(struct _PRCPU *cpu)
+{
+#if 0
+ unblockproc(cpu->md.id);
+#endif
+}
+
+
+#define PT_NANOPERMICRO 1000UL
+#define PT_BILLION 1000000000UL
+
+PR_IMPLEMENT(PRStatus)
+_pt_wait(PRThread *thread, PRIntervalTime timeout)
+{
+int rv;
+struct timeval now;
+struct timespec tmo;
+PRUint32 ticks = PR_TicksPerSecond();
+
+
+ if (timeout != PR_INTERVAL_NO_TIMEOUT) {
+ tmo.tv_sec = timeout / ticks;
+ tmo.tv_nsec = timeout - (tmo.tv_sec * ticks);
+ tmo.tv_nsec = PR_IntervalToMicroseconds(PT_NANOPERMICRO *
+ tmo.tv_nsec);
+
+ /* pthreads wants this in absolute time, off we go ... */
+#if defined(SOLARIS) && defined(_SVID_GETTOD)
+ (void)gettimeofday(&now);
+#else
+ (void)gettimeofday(&now, NULL);
+#endif
+ /* that one's usecs, this one's nsecs - grrrr! */
+ tmo.tv_sec += now.tv_sec;
+ tmo.tv_nsec += (PT_NANOPERMICRO * now.tv_usec);
+ tmo.tv_sec += tmo.tv_nsec / PT_BILLION;
+ tmo.tv_nsec %= PT_BILLION;
+ }
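+    /*
+     * Worked example (assuming, purely for illustration, that
+     * PR_TicksPerSecond() is 100): timeout = 250 ticks gives tv_sec = 2
+     * with 50 ticks left over; 50 * 1000 ticks converted to microseconds
+     * is 500000000, i.e. the leftover half second expressed in
+     * nanoseconds.  Adding 'now' then turns the relative timeout into the
+     * absolute time that pthread_cond_timedwait() expects.
+     */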
+
+ pthread_mutex_lock(&thread->md.pthread_mutex);
+ thread->md.wait--;
+ if (thread->md.wait < 0) {
+ if (timeout != PR_INTERVAL_NO_TIMEOUT) {
+ rv = pthread_cond_timedwait(&thread->md.pthread_cond,
+ &thread->md.pthread_mutex, &tmo);
+ }
+ else
+ rv = pthread_cond_wait(&thread->md.pthread_cond,
+ &thread->md.pthread_mutex);
+ if (rv != 0) {
+ thread->md.wait++;
+ }
+ } else
+ rv = 0;
+ pthread_mutex_unlock(&thread->md.pthread_mutex);
+
+ return (rv == 0) ? PR_SUCCESS : PR_FAILURE;
+}
+
+PR_IMPLEMENT(PRStatus)
+_MD_wait(PRThread *thread, PRIntervalTime ticks)
+{
+ if ( thread->flags & _PR_GLOBAL_SCOPE ) {
+ _MD_CHECK_FOR_EXIT();
+ if (_pt_wait(thread, ticks) == PR_FAILURE) {
+ _MD_CHECK_FOR_EXIT();
+ /*
+ * wait timed out
+ */
+ _PR_THREAD_LOCK(thread);
+ if (thread->wait.cvar) {
+ /*
+ * The thread will remove itself from the waitQ
+ * of the cvar in _PR_WaitCondVar
+ */
+ thread->wait.cvar = NULL;
+ thread->state = _PR_RUNNING;
+ _PR_THREAD_UNLOCK(thread);
+ } else {
+ _pt_wait(thread, PR_INTERVAL_NO_TIMEOUT);
+ _PR_THREAD_UNLOCK(thread);
+ }
+ }
+ } else {
+ _PR_MD_SWITCH_CONTEXT(thread);
+ }
+ return PR_SUCCESS;
+}
+
+PR_IMPLEMENT(PRStatus)
+_MD_WakeupWaiter(PRThread *thread)
+{
+ PRThread *me = _PR_MD_CURRENT_THREAD();
+ PRInt32 pid, rv;
+ PRIntn is;
+
+ PR_ASSERT(_pr_md_idle_cpus >= 0);
+ if (thread == NULL) {
+ if (_pr_md_idle_cpus)
+ _MD_Wakeup_CPUs();
+ } else if (!_PR_IS_NATIVE_THREAD(thread)) {
+ /*
+ * If the thread is on my cpu's runq there is no need to
+ * wakeup any cpus
+ */
+ if (!_PR_IS_NATIVE_THREAD(me)) {
+ if (me->cpu != thread->cpu) {
+ if (_pr_md_idle_cpus)
+ _MD_Wakeup_CPUs();
+ }
+ } else {
+ if (_pr_md_idle_cpus)
+ _MD_Wakeup_CPUs();
+ }
+ } else {
+ PR_ASSERT(_PR_IS_NATIVE_THREAD(thread));
+ if (!_PR_IS_NATIVE_THREAD(me))
+ _PR_INTSOFF(is);
+
+ pthread_mutex_lock(&thread->md.pthread_mutex);
+ thread->md.wait++;
+ rv = pthread_cond_signal(&thread->md.pthread_cond);
+ PR_ASSERT(rv == 0);
+ pthread_mutex_unlock(&thread->md.pthread_mutex);
+
+ if (!_PR_IS_NATIVE_THREAD(me))
+ _PR_FAST_INTSON(is);
+ }
+ return PR_SUCCESS;
+}
+
+/* This function should not be called when user-level pthreads are used */
+PR_IMPLEMENT(void)
+_MD_YIELD(void)
+{
+    PR_NOT_REACHED("_MD_YIELD should not be called with user-level pthreads.");
+}
+
+PR_IMPLEMENT(PRStatus)
+_MD_CreateThread(
+ PRThread *thread,
+ void (*start) (void *),
+ PRThreadPriority priority,
+ PRThreadScope scope,
+ PRThreadState state,
+ PRUint32 stackSize)
+{
+ PRIntn is;
+ PRThread *me = _PR_MD_CURRENT_THREAD();
+ pthread_attr_t attr;
+
+ if (!_PR_IS_NATIVE_THREAD(me))
+ _PR_INTSOFF(is);
+
+ if (pthread_mutex_init(&thread->md.pthread_mutex, NULL) != 0) {
+ if (!_PR_IS_NATIVE_THREAD(me))
+ _PR_FAST_INTSON(is);
+ return PR_FAILURE;
+ }
+
+ if (pthread_cond_init(&thread->md.pthread_cond, NULL) != 0) {
+ pthread_mutex_destroy(&thread->md.pthread_mutex);
+ if (!_PR_IS_NATIVE_THREAD(me))
+ _PR_FAST_INTSON(is);
+ return PR_FAILURE;
+ }
+ thread->flags |= _PR_GLOBAL_SCOPE;
+
+ pthread_attr_init(&attr); /* initialize attr with default attributes */
+ if (pthread_attr_setstacksize(&attr, (size_t) stackSize) != 0) {
+ pthread_mutex_destroy(&thread->md.pthread_mutex);
+ pthread_cond_destroy(&thread->md.pthread_cond);
+ pthread_attr_destroy(&attr);
+ if (!_PR_IS_NATIVE_THREAD(me))
+ _PR_FAST_INTSON(is);
+ return PR_FAILURE;
+ }
+
+ thread->md.wait = 0;
+    if (pthread_create(&thread->md.pthread, &attr,
+            (void *(*)(void *))start, (void *)thread) == 0) {
+ _MD_ATOMIC_INCREMENT(&_pr_md_pthreads_created);
+ _MD_ATOMIC_INCREMENT(&_pr_md_pthreads);
+ if (!_PR_IS_NATIVE_THREAD(me))
+ _PR_FAST_INTSON(is);
+ return PR_SUCCESS;
+ } else {
+ pthread_mutex_destroy(&thread->md.pthread_mutex);
+ pthread_cond_destroy(&thread->md.pthread_cond);
+ pthread_attr_destroy(&attr);
+ _MD_ATOMIC_INCREMENT(&_pr_md_pthreads_failed);
+ if (!_PR_IS_NATIVE_THREAD(me))
+ _PR_FAST_INTSON(is);
+ return PR_FAILURE;
+ }
+}
+
+PR_IMPLEMENT(void)
+_MD_InitRunningCPU(struct _PRCPU *cpu)
+{
+ extern int _pr_md_pipefd[2];
+
+ _MD_unix_init_running_cpu(cpu);
+ cpu->md.pthread = pthread_self();
+ if (_pr_md_pipefd[0] >= 0) {
+ _PR_IOQ_MAX_OSFD(cpu) = _pr_md_pipefd[0];
+ FD_SET(_pr_md_pipefd[0], &_PR_FD_READ_SET(cpu));
+ }
+}
+
+
+void
+_MD_CleanupBeforeExit(void)
+{
+#if 0
+ extern PRInt32 _pr_cpus_exit;
+
+ _pr_irix_exit_now = 1;
+ if (_pr_numCPU > 1) {
+ /*
+ * Set a global flag, and wakeup all cpus which will notice the flag
+ * and exit.
+ */
+ _pr_cpus_exit = getpid();
+ _MD_Wakeup_CPUs();
+ while(_pr_numCPU > 1) {
+ _PR_WAIT_SEM(_pr_irix_exit_sem);
+ _pr_numCPU--;
+ }
+ }
+ /*
+ * cause global threads on the recycle list to exit
+ */
+ _PR_DEADQ_LOCK;
+ if (_PR_NUM_DEADNATIVE != 0) {
+ PRThread *thread;
+ PRCList *ptr;
+
+ ptr = _PR_DEADNATIVEQ.next;
+ while( ptr != &_PR_DEADNATIVEQ ) {
+ thread = _PR_THREAD_PTR(ptr);
+ _MD_CVAR_POST_SEM(thread);
+ ptr = ptr->next;
+ }
+ }
+ _PR_DEADQ_UNLOCK;
+ while(_PR_NUM_DEADNATIVE > 1) {
+ _PR_WAIT_SEM(_pr_irix_exit_sem);
+ _PR_DEC_DEADNATIVE;
+ }
+#endif
+}
diff --git a/pr/src/md/unix/reliantunix.c b/pr/src/md/unix/reliantunix.c
new file mode 100644
index 00000000..755d7f8c
--- /dev/null
+++ b/pr/src/md/unix/reliantunix.c
@@ -0,0 +1,114 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/*
+ * The contents of this file are subject to the Netscape Public License
+ * Version 1.0 (the "NPL"); you may not use this file except in
+ * compliance with the NPL. You may obtain a copy of the NPL at
+ * http://www.mozilla.org/NPL/
+ *
+ * Software distributed under the NPL is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the NPL
+ * for the specific language governing rights and limitations under the
+ * NPL.
+ *
+ * The Initial Developer of this code under the NPL is Netscape
+ * Communications Corporation. Portions created by Netscape are
+ * Copyright (C) 1998 Netscape Communications Corporation. All Rights
+ * Reserved.
+ */
+
+/*
+ * ReliantUNIX5.4 - copied from unixware.c by chrisk 040497
+ */
+#include "primpl.h"
+
+#include <ucontext.h>
+
+void _MD_EarlyInit(void)
+{
+}
+
+PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
+{
+ if (isCurrent) {
+ (void) _GETCONTEXT(CONTEXT(t));
+ }
+ *np = sizeof(CONTEXT(t)) / sizeof(PRWord);
+ return (PRWord *) CONTEXT(t);
+}
+
+#ifdef ALARMS_BREAK_TCP /* I don't think they do */
+
+PRInt32 _MD_connect(PRInt32 osfd, PRNetAddr *addr, PRInt32 addrlen,
+ PRIntervalTime timeout)
+{
+ PRInt32 rv;
+
+ _MD_BLOCK_CLOCK_INTERRUPTS();
+ rv = _connect(osfd,addr,addrlen);
+    _MD_UNBLOCK_CLOCK_INTERRUPTS();
+    return(rv);
+}
+
+PRInt32 _MD_accept(PRInt32 osfd, PRNetAddr *addr, PRInt32 addrlen,
+ PRIntervalTime timeout)
+{
+ PRInt32 rv;
+
+ _MD_BLOCK_CLOCK_INTERRUPTS();
+ rv = _accept(osfd,addr,addrlen);
+ _MD_UNBLOCK_CLOCK_INTERRUPTS();
+ return(rv);
+}
+#endif
+
+void
+_MD_SET_PRIORITY(_MDThread *thread, PRUintn newPri)
+{
+ return;
+}
+
+PRStatus
+_MD_InitializeThread(PRThread *thread)
+{
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAIT(PRThread *thread, PRIntervalTime ticks)
+{
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ _PR_MD_SWITCH_CONTEXT(thread);
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAKEUP_WAITER(PRThread *thread)
+{
+ if (thread) {
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ }
+ return PR_SUCCESS;
+}
+
+/* These functions should not be called for ReliantUNIX */
+/* Why? Just copied it from UNIXWARE... flying-by-night, chrisk 040497 */
+void
+_MD_YIELD(void)
+{
+ PR_NOT_REACHED("_MD_YIELD should not be called for ReliantUNIX.");
+}
+
+PRStatus
+_MD_CREATE_THREAD(
+ PRThread *thread,
+ void (*start) (void *),
+ PRUintn priority,
+ PRThreadScope scope,
+ PRThreadState state,
+ PRUint32 stackSize)
+{
+ PR_NOT_REACHED("_MD_CREATE_THREAD should not be called for ReliantUNIX.");
+#if defined(SNI) && !defined(__GNUC__)
+ /* make compiler happy */
+ return (PRStatus)NULL;
+#endif
+}
diff --git a/pr/src/md/unix/scoos.c b/pr/src/md/unix/scoos.c
new file mode 100644
index 00000000..14365e8e
--- /dev/null
+++ b/pr/src/md/unix/scoos.c
@@ -0,0 +1,154 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/*
+ * The contents of this file are subject to the Netscape Public License
+ * Version 1.0 (the "NPL"); you may not use this file except in
+ * compliance with the NPL. You may obtain a copy of the NPL at
+ * http://www.mozilla.org/NPL/
+ *
+ * Software distributed under the NPL is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the NPL
+ * for the specific language governing rights and limitations under the
+ * NPL.
+ *
+ * The Initial Developer of this code under the NPL is Netscape
+ * Communications Corporation. Portions created by Netscape are
+ * Copyright (C) 1998 Netscape Communications Corporation. All Rights
+ * Reserved.
+ */
+
+/*
+ * SCO ODT 5.0 - originally created by mikep
+ */
+#include "primpl.h"
+
+#include <setjmp.h>
+
+void _MD_EarlyInit(void)
+{
+}
+
+PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
+{
+ if (isCurrent) {
+ (void) setjmp(CONTEXT(t));
+ }
+ *np = sizeof(CONTEXT(t)) / sizeof(PRWord);
+ return (PRWord *) CONTEXT(t);
+}
+
+#ifdef ALARMS_BREAK_TCP /* I don't think they do */
+
+PRInt32 _MD_connect(PRInt32 osfd, PRNetAddr *addr, PRInt32 addrlen,
+ PRIntervalTime timeout)
+{
+ PRInt32 rv;
+
+ _MD_BLOCK_CLOCK_INTERRUPTS();
+ rv = _connect(osfd,addr,addrlen);
+    _MD_UNBLOCK_CLOCK_INTERRUPTS();
+    return(rv);
+}
+
+PRInt32 _MD_accept(PRInt32 osfd, PRNetAddr *addr, PRInt32 addrlen,
+ PRIntervalTime timeout)
+{
+ PRInt32 rv;
+
+ _MD_BLOCK_CLOCK_INTERRUPTS();
+ rv = _accept(osfd,addr,addrlen);
+ _MD_UNBLOCK_CLOCK_INTERRUPTS();
+ return(rv);
+}
+#endif
+
+/*
+ * These are also implemented in pratom.c using NSPR locks. Any reason
+ * this might be better or worse? If you like this better, define
+ * _PR_HAVE_ATOMIC_OPS in include/md/unixware.h
+ */
+#ifdef _PR_HAVE_ATOMIC_OPS
+/* Atomic operations */
+#include <stdio.h>
+static FILE *_uw_semf;
+
+void
+_MD_INIT_ATOMIC(void)
+{
+ /* Sigh. Sure wish SYSV semaphores weren't such a pain to use */
+ if ((_uw_semf = tmpfile()) == NULL)
+ PR_ASSERT(0);
+
+ return;
+}
+
+void
+_MD_ATOMIC_INCREMENT(PRInt32 *val)
+{
+ flockfile(_uw_semf);
+ (*val)++;
+ unflockfile(_uw_semf);
+}
+
+void
+_MD_ATOMIC_DECREMENT(PRInt32 *val)
+{
+ flockfile(_uw_semf);
+ (*val)--;
+ unflockfile(_uw_semf);
+}
+
+void
+_MD_ATOMIC_SET(PRInt32 *val, PRInt32 newval)
+{
+ flockfile(_uw_semf);
+ *val = newval;
+ unflockfile(_uw_semf);
+}
+#endif
+
+void
+_MD_SET_PRIORITY(_MDThread *thread, PRUintn newPri)
+{
+ return;
+}
+
+PRStatus
+_MD_InitializeThread(PRThread *thread)
+{
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAIT(PRThread *thread, PRIntervalTime ticks)
+{
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ _PR_MD_SWITCH_CONTEXT(thread);
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAKEUP_WAITER(PRThread *thread)
+{
+ if (thread) {
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ }
+ return PR_SUCCESS;
+}
+
+/* These functions should not be called for SCO */
+void
+_MD_YIELD(void)
+{
+ PR_NOT_REACHED("_MD_YIELD should not be called for SCO.");
+}
+
+PRStatus
+_MD_CREATE_THREAD(
+ PRThread *thread,
+ void (*start) (void *),
+ PRThreadPriority priority,
+ PRThreadScope scope,
+ PRThreadState state,
+ PRUint32 stackSize)
+{
+ PR_NOT_REACHED("_MD_CREATE_THREAD should not be called for SCO.");
+    return PR_FAILURE;
+}
diff --git a/pr/src/md/unix/solaris.c b/pr/src/md/unix/solaris.c
new file mode 100644
index 00000000..8658ebbb
--- /dev/null
+++ b/pr/src/md/unix/solaris.c
@@ -0,0 +1,808 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/*
+ * The contents of this file are subject to the Netscape Public License
+ * Version 1.0 (the "NPL"); you may not use this file except in
+ * compliance with the NPL. You may obtain a copy of the NPL at
+ * http://www.mozilla.org/NPL/
+ *
+ * Software distributed under the NPL is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the NPL
+ * for the specific language governing rights and limitations under the
+ * NPL.
+ *
+ * The Initial Developer of this code under the NPL is Netscape
+ * Communications Corporation. Portions created by Netscape are
+ * Copyright (C) 1998 Netscape Communications Corporation. All Rights
+ * Reserved.
+ */
+
+#include "primpl.h"
+
+
+extern PRBool suspendAllOn;
+extern PRThread *suspendAllThread;
+
+extern void _MD_SET_PRIORITY(_MDThread *md, PRThreadPriority newPri);
+
+PRIntervalTime _MD_Solaris_TicksPerSecond(void)
+{
+ /*
+ * Ticks have a 10-microsecond resolution. So there are
+ * 100000 ticks per second.
+ */
+ return 100000UL;
+}
+
+/* Interval timers, implemented using gethrtime() */
+
+PRIntervalTime _MD_Solaris_GetInterval(void)
+{
+ union {
+ hrtime_t hrt; /* hrtime_t is a 64-bit (long long) integer */
+ PRInt64 pr64;
+ } time;
+ PRInt64 resolution;
+ PRIntervalTime ticks;
+
+ time.hrt = gethrtime(); /* in nanoseconds */
+ /*
+ * Convert from nanoseconds to ticks. A tick's resolution is
+ * 10 microseconds, or 10000 nanoseconds.
+ */
+ LL_I2L(resolution, 10000);
+ LL_DIV(time.pr64, time.pr64, resolution);
+ LL_L2UI(ticks, time.pr64);
+ return ticks;
+}
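+
+/*
+ * For illustration of the conversion above: a gethrtime() reading of
+ * 12,345,678 ns divided by the 10,000 ns tick size yields 1,234 ticks,
+ * which at the 100,000 ticks-per-second rate declared in
+ * _MD_Solaris_TicksPerSecond() is roughly 12.3 ms. The LL_* macros are
+ * used so the 64-bit arithmetic also works where PRInt64 is not a
+ * native 64-bit type.
+ */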
+
+#ifdef _PR_PTHREADS
+void _MD_EarlyInit(void)
+{
+}
+
+PRWord *_MD_HomeGCRegisters(PRThread *t, PRIntn isCurrent, PRIntn *np)
+{
+ *np = 0;
+ return NULL;
+}
+#endif /* _PR_PTHREADS */
+
+#if defined(_PR_HAVE_ATOMIC_OPS)
+/* NOTE:
+ * SPARC v9 (Ultras) do have an atomic test-and-set operation. But
+ * SPARC v8 doesn't. We should detect in the init if we are running on
+ * v8 or v9, and then use assembly where we can.
+ *
+ * This code uses the Solaris threads API. It can be used in both the
+ * pthreads and Solaris threads versions of nspr20 because "POSIX threads
+ * and Solaris threads are fully compatible even within the same process",
+ * to quote from pthread_create(3T).
+ */
+
+#include <thread.h>
+#include <synch.h>
+
+static mutex_t _solaris_atomic = DEFAULTMUTEX;
+
+PRInt32
+_MD_AtomicIncrement(PRInt32 *val)
+{
+ PRInt32 rv;
+ if (mutex_lock(&_solaris_atomic) != 0)
+ PR_ASSERT(0);
+
+ rv = ++(*val);
+
+ if (mutex_unlock(&_solaris_atomic) != 0)
+ PR_ASSERT(0);
+
+ return rv;
+}
+
+PRInt32
+_MD_AtomicDecrement(PRInt32 *val)
+{
+ PRInt32 rv;
+ if (mutex_lock(&_solaris_atomic) != 0)
+ PR_ASSERT(0);
+
+ rv = --(*val);
+
+ if (mutex_unlock(&_solaris_atomic) != 0)
+ PR_ASSERT(0);
+
+ return rv;
+}
+
+PRInt32
+_MD_AtomicSet(PRInt32 *val, PRInt32 newval)
+{
+ PRInt32 rv;
+ if (mutex_lock(&_solaris_atomic) != 0)
+ PR_ASSERT(0);
+
+ rv = *val;
+ *val = newval;
+
+ if (mutex_unlock(&_solaris_atomic) != 0)
+ PR_ASSERT(0);
+
+ return rv;
+}
+#endif /* _PR_HAVE_ATOMIC_OPS */
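+
+/*
+ * A minimal usage sketch, assuming the usual dispatch from pratom.c when
+ * _PR_HAVE_ATOMIC_OPS is defined:
+ *
+ *   PRInt32 counter = 0;
+ *   _MD_AtomicIncrement(&counter);   -> returns 1, counter == 1
+ *   _MD_AtomicSet(&counter, 5);      -> returns the old value 1, counter == 5
+ *   _MD_AtomicDecrement(&counter);   -> returns 4, counter == 4
+ *
+ * All three serialize on the single _solaris_atomic mutex, so they are
+ * correct but not lock-free.
+ */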
+
+#if defined(_PR_GLOBAL_THREADS_ONLY)
+#include <signal.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <thread.h>
+
+#include <sys/lwp.h>
+#include <sys/procfs.h>
+#include <sys/syscall.h>
+extern int syscall(); /* not declared in sys/syscall.h */
+
+static sigset_t old_mask; /* store away original gc thread sigmask */
+static PRIntn gcprio; /* store away original gc thread priority */
+
+THREAD_KEY_T threadid_key;
+THREAD_KEY_T cpuid_key;
+THREAD_KEY_T last_thread_key;
+static sigset_t set, oldset;
+
+void _MD_EarlyInit(void)
+{
+ THR_KEYCREATE(&threadid_key, NULL);
+ THR_KEYCREATE(&cpuid_key, NULL);
+ THR_KEYCREATE(&last_thread_key, NULL);
+ sigemptyset(&set);
+ sigaddset(&set, SIGALRM);
+}
+
+PRStatus _MD_CreateThread(PRThread *thread,
+ void (*start)(void *),
+ PRThreadPriority priority,
+ PRThreadScope scope,
+ PRThreadState state,
+ PRUint32 stackSize)
+{
+ PRInt32 flags;
+
+ /* mask out SIGALRM for native thread creation */
+ thr_sigsetmask(SIG_BLOCK, &set, &oldset);
+
+ flags = (state == PR_JOINABLE_THREAD) ?
+ THR_SUSPENDED : THR_SUSPENDED|THR_DETACHED;
+ if (thread->flags & (_PR_GCABLE_THREAD|_PR_BOUND_THREAD))
+ flags |= THR_BOUND;
+
+ if (thr_create(NULL, thread->stack->stackSize,
+ (void *(*)(void *)) start, (void *) thread,
+ flags,
+ &thread->md.handle)) {
+ thr_sigsetmask(SIG_SETMASK, &oldset, NULL);
+ return PR_FAILURE;
+ }
+
+ /* When the thread starts running, the lwpid is set to the right
+ * value. Until then we want to mark this as 'uninit' so that
+ * its register state is initialized properly for GC */
+
+ thread->md.lwpid = -1;
+ thr_sigsetmask(SIG_SETMASK, &oldset, NULL);
+ _MD_NEW_SEM(&thread->md.waiter_sem, 0);
+
+ if (scope == PR_GLOBAL_THREAD) {
+ thread->flags |= _PR_GLOBAL_SCOPE;
+ }
+
+ _MD_SET_PRIORITY(&(thread->md), priority);
+
+ /* Activate the thread */
+ if (thr_continue( thread->md.handle ) ) {
+ return PR_FAILURE;
+ }
+ return PR_SUCCESS;
+}
+
+void _MD_cleanup_thread(PRThread *thread)
+{
+ thread_t hdl;
+
+ hdl = thread->md.handle;
+
+ /*
+ ** First, suspend the thread (unless it's the active one)
+ ** Because we suspend it first, we don't have to use LOCK_SCHEDULER to
+ ** prevent both of us modifying the thread structure at the same time.
+ */
+ if ( thread != _PR_MD_CURRENT_THREAD() ) {
+ thr_suspend(hdl);
+ }
+ PR_LOG(_pr_thread_lm, PR_LOG_MIN,
+ ("(0X%x)[DestroyThread]\n", thread));
+
+ _MD_DESTROY_SEM(&thread->md.waiter_sem);
+}
+
+void _MD_exit_thread(PRThread *thread)
+{
+ _MD_CLEAN_THREAD(thread);
+ _MD_SET_CURRENT_THREAD(NULL);
+}
+
+void _MD_SET_PRIORITY(_MDThread *md_thread,
+ PRThreadPriority newPri)
+{
+ PRIntn nativePri;
+
+ if (newPri < PR_PRIORITY_FIRST) {
+ newPri = PR_PRIORITY_FIRST;
+ } else if (newPri > PR_PRIORITY_LAST) {
+ newPri = PR_PRIORITY_LAST;
+ }
+ /* Solaris priorities are from 0 to 127 */
+ nativePri = newPri * 127 / PR_PRIORITY_LAST;
+ if(thr_setprio((thread_t)md_thread->handle, nativePri)) {
+ PR_LOG(_pr_thread_lm, PR_LOG_MIN,
+ ("_PR_SetThreadPriority: can't set thread priority\n"));
+ }
+}
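+
+/*
+ * For illustration, assuming the usual four NSPR priority levels
+ * (PR_PRIORITY_FIRST == 0 .. PR_PRIORITY_LAST == 3), the linear mapping
+ * above produces:
+ *
+ *   PR_PRIORITY_LOW    (0) -> 0
+ *   PR_PRIORITY_NORMAL (1) -> 42
+ *   PR_PRIORITY_HIGH   (2) -> 84
+ *   PR_PRIORITY_URGENT (3) -> 127
+ *
+ * i.e. the NSPR range is stretched across the 0..127 Solaris thread
+ * priority range.
+ */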
+
+void _MD_WAIT_CV(
+ struct _MDCVar *md_cv, struct _MDLock *md_lock, PRIntervalTime timeout)
+{
+ struct timespec tt;
+ PRUint32 msec;
+ PRThread *me = _PR_MD_CURRENT_THREAD();
+
+ PR_ASSERT((!suspendAllOn) || (suspendAllThread != me));
+
+ if (PR_INTERVAL_NO_TIMEOUT == timeout) {
+ COND_WAIT(&md_cv->cv, &md_lock->lock);
+ } else {
+ msec = PR_IntervalToMilliseconds(timeout);
+
+ GETTIME(&tt);
+ tt.tv_sec += msec / PR_MSEC_PER_SEC;
+ tt.tv_nsec += (msec % PR_MSEC_PER_SEC) * PR_NSEC_PER_MSEC;
+ /* Check for nsec overflow - otherwise we'll get an EINVAL */
+ if (tt.tv_nsec >= PR_NSEC_PER_SEC) {
+ tt.tv_sec++;
+ tt.tv_nsec -= PR_NSEC_PER_SEC;
+ }
+ COND_TIMEDWAIT(&md_cv->cv, &md_lock->lock, &tt);
+ }
+}
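+
+/*
+ * For illustration of the timeout conversion above: with GETTIME() filling
+ * in tv_sec = 100, tv_nsec = 900000000 and a 300 ms timeout, the addition
+ * gives tv_nsec = 1200000000, which the overflow check folds into
+ * tv_sec = 101, tv_nsec = 200000000 before COND_TIMEDWAIT() sees it.
+ */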
+
+void _MD_lock(struct _MDLock *md_lock)
+{
+#ifdef DEBUG
+ /* This code was used for GC testing to make sure that we didn't attempt
+ * to grab any locks while threads are suspended.
+ */
+ PRLock *lock;
+
+ if ((suspendAllOn) && (suspendAllThread == _PR_MD_CURRENT_THREAD())) {
+ lock = ((PRLock *) ((char*) (md_lock) - offsetof(PRLock,ilock)));
+ PR_ASSERT(lock->owner == NULL);
+ return;
+ }
+#endif /* DEBUG */
+
+ mutex_lock(&md_lock->lock);
+}
+
+PRThread *_pr_current_thread_tls()
+{
+ PRThread *ret;
+
+ thr_getspecific(threadid_key, (void **)&ret);
+ return ret;
+}
+
+PRStatus
+_MD_wait(PRThread *thread, PRIntervalTime ticks)
+{
+ _MD_WAIT_SEM(&thread->md.waiter_sem);
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WakeupWaiter(PRThread *thread)
+{
+ if (thread == NULL) {
+ return PR_SUCCESS;
+ }
+ _MD_POST_SEM(&thread->md.waiter_sem);
+ return PR_SUCCESS;
+}
+
+_PRCPU *_pr_current_cpu_tls()
+{
+ _PRCPU *ret;
+
+ thr_getspecific(cpuid_key, (void **)&ret);
+ return ret;
+}
+
+PRThread *_pr_last_thread_tls()
+{
+ PRThread *ret;
+
+ thr_getspecific(last_thread_key, (void **)&ret);
+ return ret;
+}
+
+_MDLock _pr_ioq_lock;
+
+void
+_MD_InitIO(void)
+{
+ _MD_NEW_LOCK(&_pr_ioq_lock);
+}
+
+PRStatus _MD_InitializeThread(PRThread *thread)
+{
+ if (!_PR_IS_NATIVE_THREAD(thread))
+ return PR_SUCCESS;
+ /* sol_curthread is an asm routine which grabs GR7; GR7 stores an internal
+ * thread structure ptr used by solaris. We'll use this ptr later
+ * with suspend/resume to find which threads are running on LWPs.
+ */
+ thread->md.threadID = sol_curthread();
+ /* prime the sp; subtract 4 so we don't hit the assert that
+ * curr sp > base_stack
+ */
+ thread->md.sp = (uint_t) thread->stack->allocBase - sizeof(long);
+ thread->md.lwpid = _lwp_self();
+ thread->md.handle = THR_SELF();
+
+ /* all threads on Solaris are global threads from NSPR's perspective
+ * since all of them are mapped to Solaris threads.
+ */
+ thread->flags |= _PR_GLOBAL_SCOPE;
+
+ /* For primordial/attached thread, we don't create an underlying native thread.
+ * So, _MD_CREATE_THREAD() does not get called. We need to do initialization
+ * like allocating the thread's synchronization variables and setting the
+ * underlying native thread's priority.
+ */
+ if (thread->flags & (_PR_PRIMORDIAL | _PR_ATTACHED)) {
+ _MD_NEW_SEM(&thread->md.waiter_sem, 0);
+ _MD_SET_PRIORITY(&(thread->md), thread->priority);
+ }
+ return PR_SUCCESS;
+}
+
+/* Sleep for n milliseconds, n < 1000 */
+void solaris_msec_sleep(int n)
+{
+ struct timespec ts;
+
+ ts.tv_sec = 0;
+ ts.tv_nsec = 1000000*n;
+ if (syscall(SYS_nanosleep, &ts, 0, 0) < 0) {
+ PR_ASSERT(0);
+ }
+}
+
+#define VALID_SP(sp, bottom, top) \
+ (((uint_t)(sp)) > ((uint_t)(bottom)) && ((uint_t)(sp)) < ((uint_t)(top)))
+
+void solaris_record_regs(PRThread *t, prstatus_t *lwpstatus)
+{
+#ifdef sparc
+ long *regs = (long *)&t->md.context.uc_mcontext.gregs[0];
+
+ PR_ASSERT(t->flags & _PR_GCABLE_THREAD);
+ PR_ASSERT(t->md.threadID == lwpstatus->pr_reg[REG_G7]);
+
+ t->md.sp = lwpstatus->pr_reg[REG_SP];
+ PR_ASSERT(VALID_SP(t->md.sp, t->stack->stackBottom, t->stack->stackTop));
+
+ regs[0] = lwpstatus->pr_reg[R_G1];
+ regs[1] = lwpstatus->pr_reg[R_G2];
+ regs[2] = lwpstatus->pr_reg[R_G3];
+ regs[3] = lwpstatus->pr_reg[R_G4];
+ regs[4] = lwpstatus->pr_reg[R_O0];
+ regs[5] = lwpstatus->pr_reg[R_O1];
+ regs[6] = lwpstatus->pr_reg[R_O2];
+ regs[7] = lwpstatus->pr_reg[R_O3];
+ regs[8] = lwpstatus->pr_reg[R_O4];
+ regs[9] = lwpstatus->pr_reg[R_O5];
+ regs[10] = lwpstatus->pr_reg[R_O6];
+ regs[11] = lwpstatus->pr_reg[R_O7];
+#elif defined(i386)
+ /*
+ * To be implemented and tested
+ */
+ PR_ASSERT(0);
+ PR_ASSERT(t->md.threadID == lwpstatus->pr_reg[GS]);
+ t->md.sp = lwpstatus->pr_reg[UESP];
+#endif
+}
+
+void solaris_preempt_off()
+{
+ sigset_t set;
+
+ (void)sigfillset(&set);
+ syscall(SYS_sigprocmask, SIG_SETMASK, &set, &old_mask);
+}
+
+void solaris_preempt_on()
+{
+ syscall(SYS_sigprocmask, SIG_SETMASK, &old_mask, NULL);
+}
+
+int solaris_open_main_proc_fd()
+{
+ char buf[30];
+ int fd;
+
+ /* Not locked, so must be created while threads coming up */
+ PR_snprintf(buf, sizeof(buf), "/proc/%ld", getpid());
+ if ( (fd = syscall(SYS_open, buf, O_RDONLY)) < 0) {
+ return -1;
+ }
+ return fd;
+}
+
+/* Return a file descriptor for the /proc entry corresponding to the
+ * given lwp.
+ */
+int solaris_open_lwp(lwpid_t id, int lwp_main_proc_fd)
+{
+ int result;
+
+ if ( (result = syscall(SYS_ioctl, lwp_main_proc_fd, PIOCOPENLWP, &id)) <0)
+ return -1; /* exited??? */
+
+ return result;
+}
+void _MD_Begin_SuspendAll()
+{
+ solaris_preempt_off();
+
+ PR_LOG(_pr_gc_lm, PR_LOG_ALWAYS, ("Begin_SuspendAll\n"));
+ /* run at highest prio so I cannot be preempted */
+ thr_getprio(thr_self(), &gcprio);
+ thr_setprio(thr_self(), 0x7fffffff);
+ suspendAllOn = PR_TRUE;
+ suspendAllThread = _PR_MD_CURRENT_THREAD();
+}
+
+void _MD_End_SuspendAll()
+{
+}
+
+void _MD_End_ResumeAll()
+{
+ PR_LOG(_pr_gc_lm, PR_LOG_ALWAYS, ("End_ResumeAll\n"));
+ thr_setprio(thr_self(), gcprio);
+ solaris_preempt_on();
+ suspendAllThread = NULL;
+ suspendAllOn = PR_FALSE;
+}
+
+void _MD_Suspend(PRThread *thr)
+{
+ int lwp_fd, result;
+ prstatus_t lwpstatus;
+ int lwp_main_proc_fd = 0;
+
+ if (!(thr->flags & _PR_GCABLE_THREAD) || !suspendAllOn){
+ /* XXX When suspendAllOn is set, we will be trying to do lwp_suspend,
+ * and during that time we can't make any thread-library or libc calls.
+ * Hence make sure that no suspension is requested for a non-GCable
+ * thread while suspendAllOn is set. */
+ PR_ASSERT(!suspendAllOn);
+ thr_suspend(thr->md.handle);
+ return;
+ }
+
+ /* XXX Primordial thread can't be bound to an lwp, hence there is no
+ * way we can assume that we can get the lwp status for primordial
+ * thread reliably. Hence we skip this for primordial thread, hoping
+ * that the SP is saved during lock and cond. wait.
+ * XXX - Again, this is a concern only for the java interpreter, not for the
+ * server, 'cause the primordial thread in the server does not do java work.
+ */
+ if (thr->flags & _PR_PRIMORDIAL)
+ return;
+
+ /* XXX Important Note: If the start function of a thread is not called,
+ * lwpid is -1. Then, skip this thread. This thread will get caught
+ * in PR_NativeRunThread before calling the start function, because
+ * we hold the pr_activeLock during suspend/resume */
+
+ /* if the thread is not started yet then don't do anything */
+ if (!suspendAllOn || thr->md.lwpid == -1)
+ return;
+
+ if (_lwp_suspend(thr->md.lwpid) < 0) {
+ PR_ASSERT(0);
+ return;
+ }
+
+ if ( (lwp_main_proc_fd = solaris_open_main_proc_fd()) < 0) {
+ PR_ASSERT(0);
+ return; /* XXXMB ARGH, we're hosed! */
+ }
+
+ if ( (lwp_fd = solaris_open_lwp(thr->md.lwpid, lwp_main_proc_fd)) < 0) {
+ PR_ASSERT(0);
+ close(lwp_main_proc_fd);
+ return;
+ }
+ if ( (result = syscall(SYS_ioctl, lwp_fd, PIOCSTATUS, &lwpstatus)) < 0) {
+ /* Hopefully the thread just died... */
+ close(lwp_fd);
+ close(lwp_main_proc_fd);
+ return;
+ }
+ while ( !(lwpstatus.pr_flags & PR_STOPPED) ) {
+ if ( (result = syscall(SYS_ioctl, lwp_fd, PIOCSTATUS, &lwpstatus)) < 0) {
+ PR_ASSERT(0); /* ARGH SOMETHING WRONG! */
+ break;
+ }
+ solaris_msec_sleep(1);
+ }
+ solaris_record_regs(thr, &lwpstatus);
+ close(lwp_fd);
+ close(lwp_main_proc_fd);
+}
+
+#ifdef OLD_CODE
+
+void _MD_SuspendAll()
+{
+ /* On solaris there are threads, and there are LWPs.
+ * Calling _PR_DoSingleThread would freeze all of the threads bound to LWPs
+ * but not necessarily stop all LWPs (for example if someone did
+ * an attachthread of a thread which was not bound to an LWP).
+ * So now go through all the LWPs for this process and freeze them.
+ *
+ * Note that any thread which is capable of having the GC run on it had
+ * better be bound to an LWP with a single bound thread on it. Otherwise,
+ * this might not stop that thread from being run.
+ */
+ PRThread *current = _PR_MD_CURRENT_THREAD();
+ prstatus_t status, lwpstatus;
+ int result, index, lwp_fd;
+ lwpid_t me = _lwp_self();
+ int err;
+ int lwp_main_proc_fd;
+
+ solaris_preempt_off();
+
+ /* run at highest prio so I cannot be preempted */
+ thr_getprio(thr_self(), &gcprio);
+ thr_setprio(thr_self(), 0x7fffffff);
+
+ current->md.sp = (uint_t)&me; /* set my own stack pointer */
+
+ if ( (lwp_main_proc_fd = solaris_open_main_proc_fd()) < 0) {
+ PR_ASSERT(0);
+ solaris_preempt_on();
+ return; /* XXXMB ARGH, we're hosed! */
+ }
+
+ if ( (result = syscall(SYS_ioctl, lwp_main_proc_fd, PIOCSTATUS, &status)) < 0) {
+ err = errno;
+ PR_ASSERT(0);
+ goto failure; /* XXXMB ARGH, we're hosed! */
+ }
+
+ num_lwps = status.pr_nlwp;
+
+ if ( (all_lwps = (lwpid_t *)PR_MALLOC((num_lwps+1) * sizeof(lwpid_t)))==NULL) {
+ PR_ASSERT(0);
+ goto failure; /* XXXMB ARGH, we're hosed! */
+ }
+
+ if ( (result = syscall(SYS_ioctl, lwp_main_proc_fd, PIOCLWPIDS, all_lwps)) < 0) {
+ PR_ASSERT(0);
+ PR_DELETE(all_lwps);
+ goto failure; /* XXXMB ARGH, we're hosed! */
+ }
+
+ for (index=0; index< num_lwps; index++) {
+ if (all_lwps[index] != me) {
+ if (_lwp_suspend(all_lwps[index]) < 0) {
+ /* could happen if lwp exited */
+ all_lwps[index] = me; /* dummy it up */
+ }
+ }
+ }
+
+ /* Turns out that lwp_suspend is not a blocking call.
+ * Go through the list and make sure they are all stopped.
+ */
+ for (index=0; index< num_lwps; index++) {
+ if (all_lwps[index] != me) {
+ if ( (lwp_fd = solaris_open_lwp(all_lwps[index], lwp_main_proc_fd)) < 0) {
+ PR_ASSERT(0);
+ PR_DELETE(all_lwps);
+ all_lwps = NULL;
+ goto failure; /* XXXMB ARGH, we're hosed! */
+ }
+
+ if ( (result = syscall(SYS_ioctl, lwp_fd, PIOCSTATUS, &lwpstatus)) < 0) {
+ /* Hopefully the thread just died... */
+ close(lwp_fd);
+ continue;
+ }
+ while ( !(lwpstatus.pr_flags & PR_STOPPED) ) {
+ if ( (result = syscall(SYS_ioctl, lwp_fd, PIOCSTATUS, &lwpstatus)) < 0) {
+ PR_ASSERT(0); /* ARGH SOMETHING WRONG! */
+ break;
+ }
+ solaris_msec_sleep(1);
+ }
+ solaris_record_regs(&lwpstatus);
+ close(lwp_fd);
+ }
+ }
+
+ close(lwp_main_proc_fd);
+
+ return;
+failure:
+ solaris_preempt_on();
+ thr_setprio(thr_self(), gcprio);
+ close(lwp_main_proc_fd);
+ return;
+}
+
+void _MD_ResumeAll()
+{
+ int i;
+ lwpid_t me = _lwp_self();
+
+ for (i=0; i < num_lwps; i++) {
+ if (all_lwps[i] == me)
+ continue;
+ if ( _lwp_continue(all_lwps[i]) < 0) {
+ PR_ASSERT(0); /* ARGH, we are hosed! */
+ }
+ }
+
+ /* restore priority and sigmask */
+ thr_setprio(thr_self(), gcprio);
+ solaris_preempt_on();
+ PR_DELETE(all_lwps);
+ all_lwps = NULL;
+}
+#endif /* OLD_CODE */
+
+#ifdef USE_SETJMP
+PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
+{
+ if (isCurrent) {
+ (void) setjmp(CONTEXT(t));
+ }
+ *np = sizeof(CONTEXT(t)) / sizeof(PRWord);
+ return (PRWord *) CONTEXT(t);
+}
+#else
+PRWord *_MD_HomeGCRegisters(PRThread *t, PRIntn isCurrent, PRIntn *np)
+{
+ if (isCurrent) {
+ (void) getcontext(CONTEXT(t));
+ }
+ *np = NGREG;
+ return (PRWord*) &t->md.context.uc_mcontext.gregs[0];
+}
+#endif /* USE_SETJMP */
+
+#else /* _PR_GLOBAL_THREADS_ONLY */
+
+#if defined(_PR_LOCAL_THREADS_ONLY)
+
+void _MD_EarlyInit(void)
+{
+}
+
+void _MD_SolarisInit()
+{
+ _PR_UnixInit();
+}
+
+void
+_MD_SET_PRIORITY(_MDThread *thread, PRThreadPriority newPri)
+{
+ return;
+}
+
+PRStatus
+_MD_InitializeThread(PRThread *thread)
+{
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAIT(PRThread *thread, PRIntervalTime ticks)
+{
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ _PR_MD_SWITCH_CONTEXT(thread);
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAKEUP_WAITER(PRThread *thread)
+{
+ PR_ASSERT((thread == NULL) || (!(thread->flags & _PR_GLOBAL_SCOPE)));
+ return PR_SUCCESS;
+}
+
+/* These functions should not be called for Solaris */
+void
+_MD_YIELD(void)
+{
+ PR_NOT_REACHED("_MD_YIELD should not be called for Solaris");
+}
+
+PRStatus
+_MD_CREATE_THREAD(
+ PRThread *thread,
+ void (*start) (void *),
+ PRThreadPriority priority,
+ PRThreadScope scope,
+ PRThreadState state,
+ PRUint32 stackSize)
+{
+ PR_NOT_REACHED("_MD_CREATE_THREAD should not be called for Solaris");
+ return(PR_FAILURE);
+}
+
+#ifdef USE_SETJMP
+PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
+{
+ if (isCurrent) {
+ (void) setjmp(CONTEXT(t));
+ }
+ *np = sizeof(CONTEXT(t)) / sizeof(PRWord);
+ return (PRWord *) CONTEXT(t);
+}
+#else
+PRWord *_MD_HomeGCRegisters(PRThread *t, PRIntn isCurrent, PRIntn *np)
+{
+ if (isCurrent) {
+ (void) getcontext(CONTEXT(t));
+ }
+ *np = NGREG;
+ return (PRWord*) &t->md.context.uc_mcontext.gregs[0];
+}
+#endif /* USE_SETJMP */
+
+#endif /* _PR_LOCAL_THREADS_ONLY */
+
+#endif /* _PR_GLOBAL_THREADS_ONLY */
+
+#ifndef _PR_PTHREADS
+#if defined(i386) && defined(SOLARIS2_4)
+/*
+ * Because clock_gettime() on Solaris/x86 2.4 always generates a
+ * segmentation fault, we use an emulated version _pr_solx86_clock_gettime(),
+ * which is implemented using gettimeofday().
+ */
+
+int
+_pr_solx86_clock_gettime(clockid_t clock_id, struct timespec *tp)
+{
+ struct timeval tv;
+
+ if (clock_id != CLOCK_REALTIME) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ gettimeofday(&tv);
+ tp->tv_sec = tv.tv_sec;
+ tp->tv_nsec = tv.tv_usec * 1000;
+ return 0;
+}
+#endif /* i386 && SOLARIS2_4 */
+#endif /* _PR_PTHREADS */
diff --git a/pr/src/md/unix/sony.c b/pr/src/md/unix/sony.c
new file mode 100644
index 00000000..d3ce90c4
--- /dev/null
+++ b/pr/src/md/unix/sony.c
@@ -0,0 +1,90 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/*
+ * The contents of this file are subject to the Netscape Public License
+ * Version 1.0 (the "NPL"); you may not use this file except in
+ * compliance with the NPL. You may obtain a copy of the NPL at
+ * http://www.mozilla.org/NPL/
+ *
+ * Software distributed under the NPL is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the NPL
+ * for the specific language governing rights and limitations under the
+ * NPL.
+ *
+ * The Initial Developer of this code under the NPL is Netscape
+ * Communications Corporation. Portions created by Netscape are
+ * Copyright (C) 1998 Netscape Communications Corporation. All Rights
+ * Reserved.
+ */
+
+#include "primpl.h"
+
+#include <signal.h>
+
+void _MD_EarlyInit(void)
+{
+}
+
+PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
+{
+#ifndef _PR_PTHREADS
+ if (isCurrent) {
+ (void) setjmp(CONTEXT(t));
+ }
+ *np = sizeof(CONTEXT(t)) / sizeof(PRWord);
+ return (PRWord *) CONTEXT(t);
+#else
+ *np = 0;
+ return NULL;
+#endif
+}
+
+#ifndef _PR_PTHREADS
+void
+_MD_SET_PRIORITY(_MDThread *thread, PRUintn newPri)
+{
+ return;
+}
+
+PRStatus
+_MD_InitializeThread(PRThread *thread)
+{
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAIT(PRThread *thread, PRIntervalTime ticks)
+{
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ _PR_MD_SWITCH_CONTEXT(thread);
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAKEUP_WAITER(PRThread *thread)
+{
+ if (thread) {
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ }
+ return PR_SUCCESS;
+}
+
+/* These functions should not be called for Sony */
+void
+_MD_YIELD(void)
+{
+ PR_NOT_REACHED("_MD_YIELD should not be called for SONY.");
+}
+
+PRStatus
+_MD_CREATE_THREAD(
+ PRThread *thread,
+ void (*start) (void *),
+ PRThreadPriority priority,
+ PRThreadScope scope,
+ PRThreadState state,
+ PRUint32 stackSize)
+{
+ PR_NOT_REACHED("_MD_CREATE_THREAD should not be called for SONY.");
+ return PR_FAILURE;
+}
+#endif /* ! _PR_PTHREADS */
diff --git a/pr/src/md/unix/sunos4.c b/pr/src/md/unix/sunos4.c
new file mode 100644
index 00000000..0b4b9b4f
--- /dev/null
+++ b/pr/src/md/unix/sunos4.c
@@ -0,0 +1,77 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/*
+ * The contents of this file are subject to the Netscape Public License
+ * Version 1.0 (the "NPL"); you may not use this file except in
+ * compliance with the NPL. You may obtain a copy of the NPL at
+ * http://www.mozilla.org/NPL/
+ *
+ * Software distributed under the NPL is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the NPL
+ * for the specific language governing rights and limitations under the
+ * NPL.
+ *
+ * The Initial Developer of this code under the NPL is Netscape
+ * Communications Corporation. Portions created by Netscape are
+ * Copyright (C) 1998 Netscape Communications Corporation. All Rights
+ * Reserved.
+ */
+
+#include <setjmp.h>
+#include "primpl.h"
+
+void _MD_EarlyInit(void)
+{
+}
+
+PRStatus _MD_CREATE_THREAD(PRThread *thread,
+ void (*start)(void *),
+ PRThreadPriority priority,
+ PRThreadScope scope,
+ PRThreadState state,
+ PRUint32 stackSize)
+{
+ PR_NOT_REACHED("_MD_CREATE_THREAD should not be called for SunOS 4.1.3.");
+ return PR_FAILURE;
+}
+
+void _MD_SET_PRIORITY(_MDThread *md_thread, PRUintn newPri)
+{
+ PR_NOT_REACHED("_MD_SET_PRIORITY should not be called for user-level threads.");
+}
+
+PRStatus
+_MD_WAIT(PRThread *thread, PRIntervalTime ticks)
+{
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ _PR_MD_SWITCH_CONTEXT(thread);
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAKEUP_WAITER(PRThread *thread)
+{
+ if (thread) {
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ }
+ return PR_SUCCESS;
+}
+
+PRStatus _MD_InitializeThread(PRThread *thread)
+{
+ return PR_SUCCESS;
+}
+
+void
+_MD_YIELD(void)
+{
+ PR_NOT_REACHED("_MD_YIELD should not be called for SunOS 4.1.3.");
+}
+
+PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
+{
+ if (isCurrent) {
+ (void) setjmp(CONTEXT(t));
+ }
+ *np = sizeof(CONTEXT(t)) / sizeof(PRWord);
+ return (PRWord *) CONTEXT(t);
+}
diff --git a/pr/src/md/unix/unix.c b/pr/src/md/unix/unix.c
new file mode 100644
index 00000000..4148fb4d
--- /dev/null
+++ b/pr/src/md/unix/unix.c
@@ -0,0 +1,3210 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/*
+ * The contents of this file are subject to the Netscape Public License
+ * Version 1.0 (the "NPL"); you may not use this file except in
+ * compliance with the NPL. You may obtain a copy of the NPL at
+ * http://www.mozilla.org/NPL/
+ *
+ * Software distributed under the NPL is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the NPL
+ * for the specific language governing rights and limitations under the
+ * NPL.
+ *
+ * The Initial Developer of this code under the NPL is Netscape
+ * Communications Corporation. Portions created by Netscape are
+ * Copyright (C) 1998 Netscape Communications Corporation. All Rights
+ * Reserved.
+ */
+
+#include "primpl.h"
+
+#include <signal.h>
+#include <unistd.h>
+#include <memory.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/time.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+
+/* To get FIONREAD */
+#if defined(NCR) || defined(UNIXWARE) || defined(NEC) || defined(SNI) \
+ || defined(SONY)
+#include <sys/filio.h>
+#endif
+
+/*
+ * Make sure _PRSockLen_t is 32-bit, because we will cast a PRUint32* or
+ * PRInt32* pointer to a _PRSockLen_t* pointer.
+ */
+#if defined(IRIX) || defined(HPUX) || defined(OSF1) || defined(SOLARIS) \
+ || defined(AIX4_1) || defined(LINUX) || defined(SONY) \
+ || defined(BSDI) || defined(SCO) || defined(NEC) || defined(SNI) \
+ || defined(SUNOS4)
+#define _PRSockLen_t int
+#elif (defined(AIX) && !defined(AIX4_1)) || defined(FREEBSD) \
+ || defined(UNIXWARE)
+#define _PRSockLen_t size_t
+#else
+#error "Cannot determine architecture"
+#endif
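+
+/*
+ * For illustration: the socket calls below (accept(), getsockname(), and
+ * friends) pass a PRUint32 *addrlen straight through as
+ * (_PRSockLen_t *)addrlen, which is why the definition above must resolve
+ * to a 32-bit type on every platform listed.
+ */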
+
+/*
+** Global lock variable used to bracket calls into rusty libraries that
+** aren't thread safe (like libc, libX, etc).
+*/
+static PRLock *_pr_rename_lock = NULL;
+static PRMonitor *_pr_Xfe_mon = NULL;
+
+/*
+ * Variables used by the GC code, initialized in _MD_InitSegs().
+ * _pr_zero_fd should be a static variable. Unfortunately, there is
+ * still some Unix-specific code left in function PR_GrowSegment()
+ * in file memory/prseg.c that references it, so it needs
+ * to be a global variable for now.
+ */
+PRInt32 _pr_zero_fd = -1;
+static PRLock *_pr_md_lock = NULL;
+
+sigset_t timer_set;
+
+#if !defined(_PR_PTHREADS)
+
+static sigset_t empty_set;
+
+#ifdef SOLARIS
+#include <sys/file.h>
+#include <sys/filio.h>
+#endif
+
+#ifndef PIPE_BUF
+#define PIPE_BUF 512
+#endif
+
+#ifndef PROT_NONE
+#define PROT_NONE 0
+#endif
+
+/*
+ * _nspr_noclock - if set clock interrupts are disabled
+ */
+int _nspr_noclock = 0;
+
+#ifdef IRIX
+extern PRInt32 _nspr_terminate_on_error;
+#endif
+
+/*
+ * There is an assertion in this code that NSPR's definition of PRIOVec
+ * is bit compatible with UNIX' definition of a struct iovec. This is
+ * applicable to the 'writev()' operations where the types are casually
+ * cast to avoid warnings.
+ */
+
+int _pr_md_pipefd[2] = { -1, -1 };
+static char _pr_md_pipebuf[PIPE_BUF];
+
+_PRInterruptTable _pr_interruptTable[] = {
+ {
+ "clock", _PR_MISSED_CLOCK, _PR_ClockInterrupt, },
+ {
+ 0 }
+};
+
+PR_IMPLEMENT(void) _MD_unix_init_running_cpu(_PRCPU *cpu)
+{
+ PR_INIT_CLIST(&(cpu->md.md_unix.ioQ));
+ cpu->md.md_unix.ioq_max_osfd = -1;
+ cpu->md.md_unix.ioq_timeout = PR_INTERVAL_NO_TIMEOUT;
+}
+
+PRStatus _MD_open_dir(_MDDir *d, const char *name)
+{
+int err;
+
+ d->d = opendir(name);
+ if (!d->d) {
+ err = _MD_ERRNO();
+ _PR_MD_MAP_OPENDIR_ERROR(err);
+ return PR_FAILURE;
+ }
+ return PR_SUCCESS;
+}
+
+PRInt32 _MD_close_dir(_MDDir *d)
+{
+int rv = 0, err;
+
+ if (d->d) {
+ rv = closedir(d->d);
+ if (rv == -1) {
+ err = _MD_ERRNO();
+ _PR_MD_MAP_CLOSEDIR_ERROR(err);
+ }
+ }
+ return rv;
+}
+
+char * _MD_read_dir(_MDDir *d, PRIntn flags)
+{
+struct dirent *de;
+int err;
+
+ for (;;) {
+ /*
+ * XXX: readdir() is not MT-safe. There is an MT-safe version
+ * readdir_r() on some systems.
+ */
+ de = readdir(d->d);
+ if (!de) {
+ err = _MD_ERRNO();
+ _PR_MD_MAP_READDIR_ERROR(err);
+ return 0;
+ }
+ if ((flags & PR_SKIP_DOT) &&
+ (de->d_name[0] == '.') && (de->d_name[1] == 0))
+ continue;
+ if ((flags & PR_SKIP_DOT_DOT) &&
+ (de->d_name[0] == '.') && (de->d_name[1] == '.') &&
+ (de->d_name[2] == 0))
+ continue;
+ if ((flags & PR_SKIP_HIDDEN) && (de->d_name[0] == '.'))
+ continue;
+ break;
+ }
+ return de->d_name;
+}
+
+PRInt32 _MD_delete(const char *name)
+{
+PRInt32 rv, err;
+#ifdef UNIXWARE
+ sigset_t set, oset;
+#endif
+
+#ifdef UNIXWARE
+ sigfillset(&set);
+ sigprocmask(SIG_SETMASK, &set, &oset);
+#endif
+ rv = unlink(name);
+#ifdef UNIXWARE
+ sigprocmask(SIG_SETMASK, &oset, NULL);
+#endif
+ if (rv == -1) {
+ err = _MD_ERRNO();
+ _PR_MD_MAP_UNLINK_ERROR(err);
+ }
+ return(rv);
+}
+
+PRInt32 _MD_getfileinfo(const char *fn, PRFileInfo *info)
+{
+ struct stat sb;
+ PRInt64 s, s2us;
+ PRInt32 rv, err;
+
+ rv = stat(fn, &sb);
+ if (rv < 0) {
+ err = _MD_ERRNO();
+ _PR_MD_MAP_STAT_ERROR(err);
+ } else if (info) {
+ if (S_IFREG & sb.st_mode)
+ info->type = PR_FILE_FILE ;
+ else if (S_IFDIR & sb.st_mode)
+ info->type = PR_FILE_DIRECTORY;
+ else
+ info->type = PR_FILE_OTHER;
+ info->size = sb.st_size;
+ LL_I2L(s, sb.st_mtime);
+ LL_I2L(s2us, PR_USEC_PER_SEC);
+ LL_MUL(s, s, s2us);
+ info->modifyTime = s;
+ LL_I2L(s, sb.st_ctime);
+ LL_MUL(s, s, s2us);
+ info->creationTime = s;
+ }
+ return rv;
+}
+
+PRInt32 _MD_getfileinfo64(const char *fn, PRFileInfo64 *info)
+{
+ PRFileInfo info32;
+ PRInt32 rv = _MD_getfileinfo(fn, &info32);
+ if (rv >= 0)
+ {
+ info->type = info32.type;
+ LL_I2L(info->size, info32.size);
+ info->modifyTime = info32.modifyTime;
+ info->creationTime = info32.creationTime;
+ }
+ return rv;
+}
+
+PRInt32 _MD_getopenfileinfo(const PRFileDesc *fd, PRFileInfo *info)
+{
+ struct stat sb;
+ PRInt64 s, s2us;
+ PRInt32 rv, err;
+
+ rv = fstat(fd->secret->md.osfd, &sb);
+ if (rv < 0) {
+ err = _MD_ERRNO();
+ _PR_MD_MAP_FSTAT_ERROR(err);
+ } else if (info) {
+ if (S_IFREG & sb.st_mode)
+ info->type = PR_FILE_FILE;
+ else if (S_IFDIR & sb.st_mode)
+ info->type = PR_FILE_DIRECTORY;
+ else
+ info->type = PR_FILE_OTHER;
+ info->size = sb.st_size;
+ LL_I2L(s, sb.st_mtime);
+ LL_I2L(s2us, PR_USEC_PER_SEC);
+ LL_MUL(s, s, s2us);
+ info->modifyTime = s;
+ LL_I2L(s, sb.st_ctime);
+ LL_MUL(s, s, s2us);
+ info->creationTime = s;
+ }
+ return rv;
+}
+
+PRInt32 _MD_getopenfileinfo64(const PRFileDesc *fd, PRFileInfo64 *info)
+{
+ PRFileInfo info32;
+ PRInt32 rv = _MD_getopenfileinfo(fd, &info32);
+ if (rv >= 0)
+ {
+ info->type = info32.type;
+ LL_I2L(info->size, info32.size);
+ info->modifyTime = info32.modifyTime;
+ info->creationTime = info32.creationTime;
+ }
+ return rv;
+}
+
+PRInt32 _MD_rename(const char *from, const char *to)
+{
+ PRInt32 rv = -1, err;
+
+ /*
+ ** This is trying to enforce the semantics of WINDOZE' rename
+ ** operation. That means one is not allowed to rename over top
+ ** of an existing file. Holding a lock across these two function
+ ** and the open function is known to be a bad idea, but ....
+ */
+ if (NULL != _pr_rename_lock)
+ PR_Lock(_pr_rename_lock);
+ if (0 == access(to, F_OK))
+ PR_SetError(PR_FILE_EXISTS_ERROR, 0);
+ else
+ {
+ rv = rename(from, to);
+ if (rv < 0) {
+ err = _MD_ERRNO();
+ _PR_MD_MAP_RENAME_ERROR(err);
+ }
+ }
+ if (NULL != _pr_rename_lock)
+ PR_Unlock(_pr_rename_lock);
+ return rv;
+}
+
+PRInt32 _MD_access(const char *name, PRIntn how)
+{
+PRInt32 rv, err;
+int amode;
+
+ switch (how) {
+ case PR_ACCESS_WRITE_OK:
+ amode = W_OK;
+ break;
+ case PR_ACCESS_READ_OK:
+ amode = R_OK;
+ break;
+ case PR_ACCESS_EXISTS:
+ amode = F_OK;
+ break;
+ default:
+ PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
+ rv = -1;
+ goto done;
+ }
+ rv = access(name, amode);
+
+ if (rv < 0) {
+ err = _MD_ERRNO();
+ _PR_MD_MAP_ACCESS_ERROR(err);
+ }
+
+done:
+ return(rv);
+}
+
+PRInt32 _MD_mkdir(const char *name, PRIntn mode)
+{
+int rv, err;
+
+ /*
+ ** This lock is used to enforce rename semantics as described
+ ** in PR_Rename. Look there for more fun details.
+ */
+ if (NULL !=_pr_rename_lock)
+ PR_Lock(_pr_rename_lock);
+ rv = mkdir(name, mode);
+ if (rv < 0) {
+ err = _MD_ERRNO();
+ _PR_MD_MAP_MKDIR_ERROR(err);
+ }
+ if (NULL !=_pr_rename_lock)
+ PR_Unlock(_pr_rename_lock);
+ return rv;
+}
+
+PRInt32 _MD_rmdir(const char *name)
+{
+int rv, err;
+
+ rv = rmdir(name);
+ if (rv == -1) {
+ err = _MD_ERRNO();
+ _PR_MD_MAP_RMDIR_ERROR(err);
+ }
+ return rv;
+}
+
+PRInt32 _MD_read(PRFileDesc *fd, void *buf, PRInt32 amount)
+{
+PRThread *me = _PR_MD_CURRENT_THREAD();
+PRInt32 rv, err;
+fd_set rd;
+PRInt32 osfd = fd->secret->md.osfd;
+
+ FD_ZERO(&rd);
+ FD_SET(osfd, &rd);
+ while ((rv = read(osfd,buf,amount)) == -1) {
+ err = _MD_ERRNO();
+ if ((err == EAGAIN) || (err == EWOULDBLOCK)) {
+ if (fd->secret->nonblocking) {
+ break;
+ }
+ if (!_PR_IS_NATIVE_THREAD(me)) {
+ _PR_WaitForFD(osfd, PR_POLL_READ, PR_INTERVAL_NO_TIMEOUT);
+ } else {
+ while ((rv = _MD_SELECT(osfd + 1, &rd, NULL, NULL, NULL))
+ == -1 && (err = _MD_ERRNO()) == EINTR) {
+ /* retry _MD_SELECT() if it is interrupted */
+ }
+ if (rv == -1) {
+ break;
+ }
+ }
+ if (_PR_PENDING_INTERRUPT(me))
+ break;
+ } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){
+ continue;
+ } else {
+ break;
+ }
+ }
+ if (rv < 0) {
+ if (_PR_PENDING_INTERRUPT(me)) {
+ me->flags &= ~_PR_INTERRUPT;
+ PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
+ } else {
+ _PR_MD_MAP_READ_ERROR(err);
+ }
+ }
+ return(rv);
+}
+
+PRInt32 _MD_write(PRFileDesc *fd, const void *buf, PRInt32 amount)
+{
+PRThread *me = _PR_MD_CURRENT_THREAD();
+PRInt32 rv, err;
+fd_set wd;
+PRInt32 osfd = fd->secret->md.osfd;
+
+ FD_ZERO(&wd);
+ FD_SET(osfd, &wd);
+ while ((rv = write(osfd,buf,amount)) == -1) {
+ err = _MD_ERRNO();
+ if ((err == EAGAIN) || (err == EWOULDBLOCK)) {
+ if (fd->secret->nonblocking) {
+ break;
+ }
+ if (!_PR_IS_NATIVE_THREAD(me)) {
+ _PR_WaitForFD(osfd, PR_POLL_WRITE, PR_INTERVAL_NO_TIMEOUT);
+ } else {
+ while ((rv = _MD_SELECT(osfd + 1, NULL, &wd, NULL, NULL))
+ == -1 && (err = _MD_ERRNO()) == EINTR) {
+ /* retry _MD_SELECT() if it is interrupted */
+ }
+ if (rv == -1) {
+ break;
+ }
+ }
+ if (_PR_PENDING_INTERRUPT(me))
+ break;
+ } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){
+ continue;
+ } else {
+ break;
+ }
+ }
+ if (rv < 0) {
+ if (_PR_PENDING_INTERRUPT(me)) {
+ me->flags &= ~_PR_INTERRUPT;
+ PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
+ } else {
+ _PR_MD_MAP_WRITE_ERROR(err);
+ }
+ }
+ return(rv);
+}
+
+PRInt32 _MD_lseek(PRFileDesc *fd, PRInt32 offset, PRSeekWhence whence)
+{
+PRInt32 rv, where, err;
+
+ switch (whence) {
+ case PR_SEEK_SET:
+ where = SEEK_SET;
+ break;
+ case PR_SEEK_CUR:
+ where = SEEK_CUR;
+ break;
+ case PR_SEEK_END:
+ where = SEEK_END;
+ break;
+ default:
+ PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
+ rv = -1;
+ goto done;
+ }
+ rv = lseek(fd->secret->md.osfd,offset,where);
+ if (rv == -1) {
+ err = _MD_ERRNO();
+ _PR_MD_MAP_LSEEK_ERROR(err);
+ }
+done:
+ return(rv);
+}
+
+PRInt64 _MD_lseek64(PRFileDesc *fd, PRInt64 offset, PRSeekWhence whence)
+{
+ PRInt32 off, rv;
+ PRInt64 on, result = LL_MININT;
+ LL_L2I(off, offset);
+ LL_I2L(on, off);
+ if (LL_EQ(offset, on))
+ {
+ rv = _MD_lseek(fd, off, whence);
+ if (rv >= 0) LL_I2L(result, rv);
+ }
+ else PR_SetError(PR_FILE_TOO_BIG_ERROR, 0); /* overflow */
+ return result;
+} /* _MD_lseek64 */
+
+PRInt32 _MD_fsync(PRFileDesc *fd)
+{
+PRInt32 rv, err;
+
+ rv = fsync(fd->secret->md.osfd);
+ if (rv == -1) {
+ err = _MD_ERRNO();
+ _PR_MD_MAP_FSYNC_ERROR(err);
+ }
+ return(rv);
+}
+
+PRInt32 _MD_close(PRInt32 osfd)
+{
+PRInt32 rv, err;
+
+ rv = close(osfd);
+ if (rv == -1) {
+ err = _MD_ERRNO();
+ _PR_MD_MAP_CLOSE_ERROR(err);
+ }
+ return(rv);
+}
+
+PRInt32 _MD_socket(PRInt32 domain, PRInt32 type, PRInt32 proto)
+{
+ PRInt32 osfd, err;
+
+ osfd = socket(domain, type, proto);
+
+ if (osfd == -1) {
+ err = _MD_ERRNO();
+ _PR_MD_MAP_SOCKET_ERROR(err);
+ return(osfd);
+ }
+
+ return(osfd);
+}
+
+PRInt32 _MD_socketavailable(PRFileDesc *fd)
+{
+ PRInt32 result;
+
+ if (ioctl(fd->secret->md.osfd, FIONREAD, &result) < 0) {
+ _PR_MD_MAP_SOCKETAVAILABLE_ERROR(_MD_ERRNO());
+ return -1;
+ }
+ return result;
+}
+
+PRInt64 _MD_socketavailable64(PRFileDesc *fd)
+{
+ PRInt64 result;
+ LL_I2L(result, _MD_socketavailable(fd));
+ return result;
+} /* _MD_socketavailable64 */
+
+#define READ_FD 1
+#define WRITE_FD 2
+
+/*
+ * wait for socket i/o, periodically checking for interrupt
+ */
+
+static PRInt32 socket_io_wait(PRInt32 osfd, PRInt32 fd_type,
+ PRIntervalTime timeout)
+{
+ PRInt32 rv = -1;
+ struct timeval tv, *tvp;
+ PRThread *me = _PR_MD_CURRENT_THREAD();
+ PRIntervalTime epoch, now, elapsed, remaining;
+ PRInt32 syserror;
+ fd_set rd_wr;
+
+ switch (timeout) {
+ case PR_INTERVAL_NO_WAIT:
+ PR_SetError(PR_IO_TIMEOUT_ERROR, 0);
+ break;
+ case PR_INTERVAL_NO_TIMEOUT:
+ /*
+ * This is a special case of the 'default' case below.
+ * Please see the comments there.
+ */
+ tv.tv_sec = _PR_INTERRUPT_CHECK_INTERVAL_SECS;
+ tv.tv_usec = 0;
+ tvp = &tv;
+ FD_ZERO(&rd_wr);
+ do {
+ FD_SET(osfd, &rd_wr);
+ if (fd_type == READ_FD)
+ rv = _MD_SELECT(osfd + 1, &rd_wr, NULL, NULL, tvp);
+ else
+ rv = _MD_SELECT(osfd + 1, NULL, &rd_wr, NULL, tvp);
+ if (rv == -1 && (syserror = _MD_ERRNO()) != EINTR) {
+ if (syserror == EBADF) {
+ PR_SetError(PR_BAD_DESCRIPTOR_ERROR, EBADF);
+ } else {
+ PR_SetError(PR_UNKNOWN_ERROR, syserror);
+ }
+ break;
+ }
+ if (_PR_PENDING_INTERRUPT(me)) {
+ me->flags &= ~_PR_INTERRUPT;
+ PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
+ rv = -1;
+ break;
+ }
+ } while (rv == 0 || (rv == -1 && syserror == EINTR));
+ break;
+ default:
+ now = epoch = PR_IntervalNow();
+ remaining = timeout;
+ tvp = &tv;
+ FD_ZERO(&rd_wr);
+ do {
+ /*
+ * We block in _MD_SELECT for at most
+ * _PR_INTERRUPT_CHECK_INTERVAL_SECS seconds,
+ * so that there is an upper limit on the delay
+ * before the interrupt bit is checked.
+ */
+ tv.tv_sec = PR_IntervalToSeconds(remaining);
+ if (tv.tv_sec > _PR_INTERRUPT_CHECK_INTERVAL_SECS) {
+ tv.tv_sec = _PR_INTERRUPT_CHECK_INTERVAL_SECS;
+ tv.tv_usec = 0;
+ } else {
+ tv.tv_usec = PR_IntervalToMicroseconds(
+ remaining -
+ PR_SecondsToInterval(tv.tv_sec));
+ }
+ FD_SET(osfd, &rd_wr);
+ if (fd_type == READ_FD)
+ rv = _MD_SELECT(osfd + 1, &rd_wr, NULL, NULL, tvp);
+ else
+ rv = _MD_SELECT(osfd + 1, NULL, &rd_wr, NULL, tvp);
+ /*
+ * we don't consider EINTR a real error
+ */
+ if (rv == -1 && (syserror = _MD_ERRNO()) != EINTR) {
+ if (syserror == EBADF) {
+ PR_SetError(PR_BAD_DESCRIPTOR_ERROR, EBADF);
+ } else {
+ PR_SetError(PR_UNKNOWN_ERROR, syserror);
+ }
+ break;
+ }
+ if (_PR_PENDING_INTERRUPT(me)) {
+ me->flags &= ~_PR_INTERRUPT;
+ PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
+ rv = -1;
+ break;
+ }
+ /*
+ * We loop again if _MD_SELECT timed out or got interrupted
+ * by a signal, and the timeout deadline has not passed yet.
+ */
+ if (rv == 0 || (rv == -1 && syserror == EINTR)) {
+ /*
+ * If _MD_SELECT timed out, we know how much time
+ * we spent in blocking, so we can avoid a
+ * PR_IntervalNow() call.
+ */
+ if (rv == 0) {
+ now += PR_SecondsToInterval(tv.tv_sec)
+ + PR_MicrosecondsToInterval(tv.tv_usec);
+ } else {
+ now = PR_IntervalNow();
+ }
+ elapsed = (PRIntervalTime) (now - epoch);
+ if (elapsed >= timeout) {
+ PR_SetError(PR_IO_TIMEOUT_ERROR, 0);
+ rv = -1;
+ break;
+ } else {
+ remaining = timeout - elapsed;
+ }
+ }
+ } while (rv == 0 || (rv == -1 && syserror == EINTR));
+ break;
+ }
+ return(rv);
+}
+
+PRInt32 _MD_recv(PRFileDesc *fd, void *buf, PRInt32 amount,
+ PRInt32 flags, PRIntervalTime timeout)
+{
+ PRInt32 osfd = fd->secret->md.osfd;
+ PRInt32 rv, err;
+ PRThread *me = _PR_MD_CURRENT_THREAD();
+
+/*
+ * Many OS's (Solaris, Unixware) have a broken recv which won't read
+ * from socketpairs. As long as we don't use flags on socketpairs, this
+ * is a decent fix. - mikep
+ */
+#if defined(UNIXWARE) || defined(SOLARIS) || defined(NCR)
+ while ((rv = read(osfd,buf,amount)) == -1) {
+/*
+ while ((rv = recv(osfd,buf,amount,flags)) == -1) {
+*/
+#else
+ while ((rv = recv(osfd,buf,amount,flags)) == -1) {
+#endif
+ err = _MD_ERRNO();
+ if ((err == EAGAIN) || (err == EWOULDBLOCK)) {
+ if (fd->secret->nonblocking) {
+ break;
+ }
+ if (!_PR_IS_NATIVE_THREAD(me)) {
+ if (_PR_WaitForFD(osfd, PR_POLL_READ, timeout) == 0) {
+ rv = -1;
+ if (_PR_PENDING_INTERRUPT(me)) {
+ me->flags &= ~_PR_INTERRUPT;
+ PR_SetError( PR_PENDING_INTERRUPT_ERROR, 0);
+ } else
+ PR_SetError(PR_IO_TIMEOUT_ERROR, 0);
+ goto done;
+ } else if (_PR_PENDING_INTERRUPT(me)) {
+ me->flags &= ~_PR_INTERRUPT;
+ PR_SetError( PR_PENDING_INTERRUPT_ERROR, 0);
+ rv = -1;
+ goto done;
+ }
+ } else {
+ if ((rv = socket_io_wait(osfd, READ_FD, timeout)) < 0)
+ goto done;
+ }
+ } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){
+ continue;
+ } else {
+ break;
+ }
+ }
+ if (rv < 0) {
+ _PR_MD_MAP_RECV_ERROR(err);
+ }
+done:
+ return(rv);
+}
+
+PRInt32 _MD_recvfrom(PRFileDesc *fd, void *buf, PRInt32 amount,
+ PRIntn flags, PRNetAddr *addr, PRUint32 *addrlen,
+ PRIntervalTime timeout)
+{
+ PRInt32 osfd = fd->secret->md.osfd;
+ PRInt32 rv, err;
+ PRThread *me = _PR_MD_CURRENT_THREAD();
+
+ while ((*addrlen = PR_NETADDR_SIZE(addr)),
+ ((rv = recvfrom(osfd, buf, amount, flags,
+ (struct sockaddr *) addr, (_PRSockLen_t *)addrlen)) == -1)) {
+ err = _MD_ERRNO();
+ if ((err == EAGAIN) || (err == EWOULDBLOCK)) {
+ if (fd->secret->nonblocking) {
+ break;
+ }
+ if (!_PR_IS_NATIVE_THREAD(me)) {
+ if (_PR_WaitForFD(osfd, PR_POLL_READ, timeout) == 0) {
+ rv = -1;
+ if (_PR_PENDING_INTERRUPT(me)) {
+ me->flags &= ~_PR_INTERRUPT;
+ PR_SetError( PR_PENDING_INTERRUPT_ERROR, 0);
+ } else
+ PR_SetError(PR_IO_TIMEOUT_ERROR, 0);
+ goto done;
+ } else if (_PR_PENDING_INTERRUPT(me)) {
+ me->flags &= ~_PR_INTERRUPT;
+ PR_SetError( PR_PENDING_INTERRUPT_ERROR, 0);
+ rv = -1;
+ goto done;
+ }
+ } else {
+ if ((rv = socket_io_wait(osfd, READ_FD, timeout)) < 0)
+ goto done;
+ }
+ } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){
+ continue;
+ } else {
+ break;
+ }
+ }
+ if (rv < 0) {
+ _PR_MD_MAP_RECVFROM_ERROR(err);
+ }
+done:
+#ifdef AIX
+ if (rv != -1) {
+ /* mask off the first byte of struct sockaddr (the length field) */
+ if (addr) {
+ addr->inet.family &= 0x00ff;
+ }
+ }
+#endif
+ return(rv);
+}
+
+PRInt32 _MD_send(PRFileDesc *fd, const void *buf, PRInt32 amount,
+ PRInt32 flags, PRIntervalTime timeout)
+{
+ PRInt32 osfd = fd->secret->md.osfd;
+ PRInt32 rv, err;
+ PRThread *me = _PR_MD_CURRENT_THREAD();
+
+ while ((rv = send(osfd,buf,amount,flags)) == -1) {
+ err = _MD_ERRNO();
+ if ((err == EAGAIN) || (err == EWOULDBLOCK)) {
+ if (fd->secret->nonblocking) {
+ break;
+ }
+ if (!_PR_IS_NATIVE_THREAD(me)) {
+ if (_PR_WaitForFD(osfd, PR_POLL_WRITE, timeout) == 0) {
+ rv = -1;
+ if (_PR_PENDING_INTERRUPT(me)) {
+ me->flags &= ~_PR_INTERRUPT;
+ PR_SetError( PR_PENDING_INTERRUPT_ERROR, 0);
+ } else
+ PR_SetError(PR_IO_TIMEOUT_ERROR, 0);
+ goto done;
+ } else if (_PR_PENDING_INTERRUPT(me)) {
+ me->flags &= ~_PR_INTERRUPT;
+ PR_SetError( PR_PENDING_INTERRUPT_ERROR, 0);
+ rv = -1;
+ goto done;
+ }
+ } else {
+ if ((rv = socket_io_wait(osfd, WRITE_FD, timeout))< 0)
+ goto done;
+ }
+ } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){
+ continue;
+ } else {
+ break;
+ }
+ }
+ /*
+ * optimization; if bytes sent is less than "amount" call
+ * select before returning. This is because it is likely that
+ * the next send() call will return EWOULDBLOCK.
+ */
+ if ((!fd->secret->nonblocking) && (rv > 0) && (rv < amount)
+ && (timeout != PR_INTERVAL_NO_WAIT)) {
+ if (_PR_IS_NATIVE_THREAD(me)) {
+ if (socket_io_wait(osfd, WRITE_FD, timeout)< 0)
+ goto done;
+ } else {
+ if (_PR_WaitForFD(osfd, PR_POLL_WRITE, timeout) == 0) {
+ rv = -1;
+ if (_PR_PENDING_INTERRUPT(me)) {
+ me->flags &= ~_PR_INTERRUPT;
+ PR_SetError( PR_PENDING_INTERRUPT_ERROR, 0);
+ } else
+ PR_SetError(PR_IO_TIMEOUT_ERROR, 0);
+ goto done;
+ }
+ }
+ }
+ if (rv < 0) {
+ _PR_MD_MAP_SEND_ERROR(err);
+ }
+done:
+ return(rv);
+}
+
+PRInt32 _MD_sendto(
+ PRFileDesc *fd, const void *buf, PRInt32 amount, PRIntn flags,
+ const PRNetAddr *addr, PRUint32 addrlen, PRIntervalTime timeout)
+{
+ PRInt32 osfd = fd->secret->md.osfd;
+ PRInt32 rv, err;
+ PRThread *me = _PR_MD_CURRENT_THREAD();
+
+ while ((rv = sendto(osfd, buf, amount, flags,
+ (struct sockaddr *) addr, addrlen)) == -1) {
+ err = _MD_ERRNO();
+ if ((err == EAGAIN) || (err == EWOULDBLOCK)) {
+ if (fd->secret->nonblocking) {
+ break;
+ }
+ if (!_PR_IS_NATIVE_THREAD(me)) {
+ if (_PR_WaitForFD(osfd, PR_POLL_WRITE, timeout) == 0) {
+ rv = -1;
+ if (_PR_PENDING_INTERRUPT(me)) {
+ me->flags &= ~_PR_INTERRUPT;
+ PR_SetError( PR_PENDING_INTERRUPT_ERROR, 0);
+ } else
+ PR_SetError(PR_IO_TIMEOUT_ERROR, 0);
+ goto done;
+ } else if (_PR_PENDING_INTERRUPT(me)) {
+ me->flags &= ~_PR_INTERRUPT;
+ PR_SetError( PR_PENDING_INTERRUPT_ERROR, 0);
+ rv = -1;
+ goto done;
+ }
+ } else {
+ if ((rv = socket_io_wait(osfd, WRITE_FD, timeout))< 0)
+ goto done;
+ }
+ } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){
+ continue;
+ } else {
+ break;
+ }
+ }
+ if (rv < 0) {
+ _PR_MD_MAP_SENDTO_ERROR(err);
+ }
+done:
+ return(rv);
+}
+
+PRInt32 _MD_writev(PRFileDesc *fd, PRIOVec *iov,
+ PRInt32 iov_size, PRIntervalTime timeout)
+{
+ PRInt32 rv, err;
+ PRThread *me = _PR_MD_CURRENT_THREAD();
+ PRInt32 index, amount = 0;
+ PRInt32 osfd = fd->secret->md.osfd;
+
+ /*
+ * Calculate the total number of bytes to be sent; needed for
+ * optimization later.
+ * We could avoid this if this number was passed in; but it is
+ * probably not a big deal because iov_size is usually small (less than
+ * 3)
+ */
+ if (!fd->secret->nonblocking) {
+ for (index=0; index<iov_size; index++) {
+ amount += iov[index].iov_len;
+ }
+ }
+
+ while ((rv = writev(osfd, (const struct iovec*)iov, iov_size)) == -1) {
+ err = _MD_ERRNO();
+ if ((err == EAGAIN) || (err == EWOULDBLOCK)) {
+ if (fd->secret->nonblocking) {
+ break;
+ }
+ if (!_PR_IS_NATIVE_THREAD(me)) {
+ if (_PR_WaitForFD(osfd, PR_POLL_WRITE, timeout) == 0) {
+ rv = -1;
+ if (_PR_PENDING_INTERRUPT(me)) {
+ me->flags &= ~_PR_INTERRUPT;
+ PR_SetError( PR_PENDING_INTERRUPT_ERROR, 0);
+ } else
+ PR_SetError(PR_IO_TIMEOUT_ERROR, 0);
+ goto done;
+ } else if (_PR_PENDING_INTERRUPT(me)) {
+ me->flags &= ~_PR_INTERRUPT;
+ PR_SetError( PR_PENDING_INTERRUPT_ERROR, 0);
+ rv = -1;
+ goto done;
+ }
+ } else {
+ if ((rv = socket_io_wait(osfd, WRITE_FD, timeout))<0)
+ goto done;
+ }
+ } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){
+ continue;
+ } else {
+ break;
+ }
+ }
+ /*
+ * optimization; if bytes sent is less than "amount" call
+ * select before returning. This is because it is likely that
+ * the next writev() call will return EWOULDBLOCK.
+ */
+ if ((!fd->secret->nonblocking) && (rv > 0) && (rv < amount)
+ && (timeout != PR_INTERVAL_NO_WAIT)) {
+ if (_PR_IS_NATIVE_THREAD(me)) {
+ if (socket_io_wait(osfd, WRITE_FD, timeout) < 0)
+ goto done;
+ } else {
+ if (_PR_WaitForFD(osfd, PR_POLL_WRITE, timeout) == 0) {
+ rv = -1;
+ if (_PR_PENDING_INTERRUPT(me)) {
+ me->flags &= ~_PR_INTERRUPT;
+ PR_SetError( PR_PENDING_INTERRUPT_ERROR, 0);
+ } else
+ PR_SetError(PR_IO_TIMEOUT_ERROR, 0);
+ goto done;
+ }
+ }
+ }
+ if (rv < 0) {
+ _PR_MD_MAP_WRITEV_ERROR(err);
+ }
+done:
+ return(rv);
+}
+
+PRInt32 _MD_accept(PRFileDesc *fd, PRNetAddr *addr,
+ PRUint32 *addrlen, PRIntervalTime timeout)
+{
+ PRInt32 osfd = fd->secret->md.osfd;
+ PRInt32 rv, err;
+ PRThread *me = _PR_MD_CURRENT_THREAD();
+
+ while ((rv = accept(osfd, (struct sockaddr *) addr,
+ (_PRSockLen_t *)addrlen)) == -1) {
+ err = _MD_ERRNO();
+ if ((err == EAGAIN) || (err == EWOULDBLOCK)) {
+ if (fd->secret->nonblocking) {
+ break;
+ }
+ if (!_PR_IS_NATIVE_THREAD(me)) {
+ if (_PR_WaitForFD(osfd, PR_POLL_READ, timeout) == 0) {
+ rv = -1;
+ if (_PR_PENDING_INTERRUPT(me)) {
+ me->flags &= ~_PR_INTERRUPT;
+ PR_SetError( PR_PENDING_INTERRUPT_ERROR, 0);
+ } else
+ PR_SetError(PR_IO_TIMEOUT_ERROR, 0);
+ goto done;
+ } else if (_PR_PENDING_INTERRUPT(me)) {
+ me->flags &= ~_PR_INTERRUPT;
+ PR_SetError( PR_PENDING_INTERRUPT_ERROR, 0);
+ rv = -1;
+ goto done;
+ }
+ } else {
+ if ((rv = socket_io_wait(osfd, READ_FD, timeout)) < 0)
+ goto done;
+ }
+ } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){
+ continue;
+ } else {
+ break;
+ }
+ }
+ if (rv < 0) {
+ _PR_MD_MAP_ACCEPT_ERROR(err);
+ }
+done:
+#ifdef AIX
+ if (rv != -1) {
+ /* mask off the first byte of struct sockaddr (the length field) */
+ if (addr) {
+ addr->inet.family &= 0x00ff;
+ }
+ }
+#endif
+ return(rv);
+}
+
+extern int _connect (int s, const struct sockaddr *name, int namelen);
+PRInt32 _MD_connect(
+ PRFileDesc *fd, const PRNetAddr *addr, PRUint32 addrlen, PRIntervalTime timeout)
+{
+ PRInt32 rv, err;
+ PRThread *me = _PR_MD_CURRENT_THREAD();
+ PRInt32 osfd = fd->secret->md.osfd;
+#ifdef IRIX
+extern PRInt32 _MD_irix_connect(
+ PRInt32 osfd, const PRNetAddr *addr, PRInt32 addrlen, PRIntervalTime timeout);
+#endif
+
+ /*
+ * We initiate the connection setup by making a nonblocking connect()
+ * call. If the connect() call fails, there are two cases we handle
+ * specially:
+ * 1. The connect() call was interrupted by a signal. In this case
+ * we simply retry connect().
+ * 2. The NSPR socket is nonblocking and connect() fails with
+ * EINPROGRESS. We first wait until the socket becomes writable.
+ * Then we try to find out whether the connection setup succeeded
+ * or failed.
+ */
+
+retry:
+#ifdef IRIX
+ if ((rv = _MD_irix_connect(osfd, addr, addrlen, timeout)) == -1) {
+#else
+ if ((rv = connect(osfd, (struct sockaddr *)addr, addrlen)) == -1) {
+#endif
+ err = _MD_ERRNO();
+
+ if (err == EINTR) {
+ if (_PR_PENDING_INTERRUPT(me)) {
+ me->flags &= ~_PR_INTERRUPT;
+ PR_SetError( PR_PENDING_INTERRUPT_ERROR, 0);
+ return -1;
+ }
+ goto retry;
+ }
+
+ if (!fd->secret->nonblocking && (err == EINPROGRESS)) {
+ if (!_PR_IS_NATIVE_THREAD(me)) {
+ /*
+ * _PR_WaitForFD() may return 0 (timeout or interrupt) or 1.
+ */
+
+ rv = _PR_WaitForFD(osfd, PR_POLL_WRITE, timeout);
+ if (rv == 0) {
+ if (_PR_PENDING_INTERRUPT(me)) {
+ me->flags &= ~_PR_INTERRUPT;
+ PR_SetError( PR_PENDING_INTERRUPT_ERROR, 0);
+ } else {
+ PR_SetError(PR_IO_TIMEOUT_ERROR, 0);
+ }
+ return -1;
+ }
+ } else {
+ /*
+ * socket_io_wait() may return -1 or 1.
+ */
+
+ rv = socket_io_wait(osfd, WRITE_FD, timeout);
+ if (rv == -1) {
+ return -1;
+ }
+ }
+
+ PR_ASSERT(rv == 1);
+ if (_PR_PENDING_INTERRUPT(me)) {
+ me->flags &= ~_PR_INTERRUPT;
+ PR_SetError( PR_PENDING_INTERRUPT_ERROR, 0);
+ return -1;
+ }
+ err = _MD_unix_get_nonblocking_connect_error(osfd);
+ if (err != 0) {
+ _PR_MD_MAP_CONNECT_ERROR(err);
+ return -1;
+ }
+ return 0;
+ }
+
+ _PR_MD_MAP_CONNECT_ERROR(err);
+ }
+
+ return rv;
+} /* _MD_connect */
+
+PRInt32 _MD_bind(PRFileDesc *fd, const PRNetAddr *addr, PRUint32 addrlen)
+{
+ PRInt32 rv, err;
+
+ rv = bind(fd->secret->md.osfd, (struct sockaddr *) addr, (int )addrlen);
+ if (rv < 0) {
+ err = _MD_ERRNO();
+ _PR_MD_MAP_BIND_ERROR(err);
+ }
+ return(rv);
+}
+
+PRInt32 _MD_listen(PRFileDesc *fd, PRIntn backlog)
+{
+ PRInt32 rv, err;
+
+ rv = listen(fd->secret->md.osfd, backlog);
+ if (rv < 0) {
+ err = _MD_ERRNO();
+ _PR_MD_MAP_LISTEN_ERROR(err);
+ }
+ return(rv);
+}
+
+PRInt32 _MD_shutdown(PRFileDesc *fd, PRIntn how)
+{
+ PRInt32 rv, err;
+
+ rv = shutdown(fd->secret->md.osfd, how);
+ if (rv < 0) {
+ err = _MD_ERRNO();
+ _PR_MD_MAP_SHUTDOWN_ERROR(err);
+ }
+ return(rv);
+}
+
+PRInt32 _MD_socketpair(int af, int type, int flags,
+ PRInt32 *osfd)
+{
+ PRInt32 rv, err;
+
+ rv = socketpair(af, type, flags, osfd);
+ if (rv < 0) {
+ err = _MD_ERRNO();
+ _PR_MD_MAP_SOCKETPAIR_ERROR(err);
+ }
+ return rv;
+}
+
+PRStatus _MD_getsockname(PRFileDesc *fd, PRNetAddr *addr,
+ PRUint32 *addrlen)
+{
+ PRInt32 rv, err;
+
+ rv = getsockname(fd->secret->md.osfd,
+ (struct sockaddr *) addr, (_PRSockLen_t *)addrlen);
+#ifdef AIX
+ if (rv == 0) {
+ /* mask off the first byte of struct sockaddr (the length field) */
+ if (addr) {
+ addr->inet.family &= 0x00ff;
+ }
+ }
+#endif
+ if (rv < 0) {
+ err = _MD_ERRNO();
+ _PR_MD_MAP_GETSOCKNAME_ERROR(err);
+ }
+ return rv==0?PR_SUCCESS:PR_FAILURE;
+}
+
+PRStatus _MD_getpeername(PRFileDesc *fd, PRNetAddr *addr,
+ PRUint32 *addrlen)
+{
+ PRInt32 rv, err;
+
+ rv = getpeername(fd->secret->md.osfd,
+ (struct sockaddr *) addr, (_PRSockLen_t *)addrlen);
+#ifdef AIX
+ if (rv == 0) {
+ /* mask off the first byte of struct sockaddr (the length field) */
+ if (addr) {
+ addr->inet.family &= 0x00ff;
+ }
+ }
+#endif
+ if (rv < 0) {
+ err = _MD_ERRNO();
+ _PR_MD_MAP_GETPEERNAME_ERROR(err);
+ }
+ return rv==0?PR_SUCCESS:PR_FAILURE;
+}
+
+PRStatus _MD_getsockopt(PRFileDesc *fd, PRInt32 level,
+ PRInt32 optname, char* optval, PRInt32* optlen)
+{
+ PRInt32 rv, err;
+
+ rv = getsockopt(fd->secret->md.osfd, level, optname, optval, (_PRSockLen_t *)optlen);
+ if (rv < 0) {
+ err = _MD_ERRNO();
+ _PR_MD_MAP_GETSOCKOPT_ERROR(err);
+ }
+ return rv==0?PR_SUCCESS:PR_FAILURE;
+}
+
+PRStatus _MD_setsockopt(PRFileDesc *fd, PRInt32 level,
+ PRInt32 optname, const char* optval, PRInt32 optlen)
+{
+ PRInt32 rv, err;
+
+ rv = setsockopt(fd->secret->md.osfd, level, optname, optval, optlen);
+ if (rv < 0) {
+ err = _MD_ERRNO();
+ _PR_MD_MAP_SETSOCKOPT_ERROR(err);
+ }
+ return rv==0?PR_SUCCESS:PR_FAILURE;
+}
+
+PR_IMPLEMENT(PRInt32) _MD_pr_poll(PRPollDesc *pds, PRIntn npds,
+ PRIntervalTime timeout)
+{
+ PRPollDesc *pd, *epd;
+ PRPollQueue pq;
+ PRInt32 n, err, pdcnt;
+ PRIntn is;
+ _PRUnixPollDesc *unixpds, *unixpd;
+ _PRCPU *io_cpu;
+ PRThread *me = _PR_MD_CURRENT_THREAD();
+
+ if (_PR_IS_NATIVE_THREAD(me)) {
+ fd_set rd, wt, ex;
+ struct timeval tv, *tvp = NULL;
+ int maxfd = -1;
+ /*
+ * For restarting _MD_SELECT() if it is interrupted by a signal.
+ * We use these variables to figure out how much time has elapsed
+ * and how much of the timeout still remains.
+ */
+ PRIntervalTime start, elapsed, remaining;
+
+ FD_ZERO(&rd);
+ FD_ZERO(&wt);
+ FD_ZERO(&ex);
+
+ for (pd = pds, epd = pd + npds; pd < epd; pd++) {
+ PRInt32 osfd;
+ PRInt16 in_flags = pd->in_flags;
+ PRFileDesc *bottom = pd->fd;
+
+ if ((NULL == bottom) || (in_flags == 0)) {
+ continue;
+ }
+ while (bottom->lower != NULL) {
+ bottom = bottom->lower;
+ }
+ osfd = bottom->secret->md.osfd;
+
+ if (osfd > maxfd) {
+ maxfd = osfd;
+ }
+ if (in_flags & PR_POLL_READ) {
+ FD_SET(osfd, &rd);
+ }
+ if (in_flags & PR_POLL_WRITE) {
+ FD_SET(osfd, &wt);
+ }
+ if (in_flags & PR_POLL_EXCEPT) {
+ FD_SET(osfd, &ex);
+ }
+ }
+ if (timeout != PR_INTERVAL_NO_TIMEOUT) {
+ tv.tv_sec = PR_IntervalToSeconds(timeout);
+ tv.tv_usec = PR_IntervalToMicroseconds(timeout) % PR_USEC_PER_SEC;
+ tvp = &tv;
+ start = PR_IntervalNow();
+ }
+
+retry:
+ n = _MD_SELECT(maxfd + 1, &rd, &wt, &ex, tvp);
+ if (n == -1 && errno == EINTR) {
+ if (timeout == PR_INTERVAL_NO_TIMEOUT) {
+ goto retry;
+ } else {
+ elapsed = (PRIntervalTime) (PR_IntervalNow() - start);
+ if (elapsed > timeout) {
+ n = 0; /* timed out */
+ } else {
+ remaining = timeout - elapsed;
+ tv.tv_sec = PR_IntervalToSeconds(remaining);
+ tv.tv_usec = PR_IntervalToMicroseconds(
+ remaining - PR_SecondsToInterval(tv.tv_sec));
+ goto retry;
+ }
+ }
+ }
+
+ if (n > 0) {
+ n = 0;
+ for (pd = pds, epd = pd + npds; pd < epd; pd++) {
+ PRInt32 osfd;
+ PRInt16 in_flags = pd->in_flags;
+ PRInt16 out_flags = 0;
+ PRFileDesc *bottom = pd->fd;
+
+ if ((NULL == bottom) || (in_flags == 0)) {
+ pd->out_flags = 0;
+ continue;
+ }
+ while (bottom->lower != NULL) {
+ bottom = bottom->lower;
+ }
+ osfd = bottom->secret->md.osfd;
+
+ if ((in_flags & PR_POLL_READ) && FD_ISSET(osfd, &rd)) {
+ out_flags |= PR_POLL_READ;
+ }
+ if ((in_flags & PR_POLL_WRITE) && FD_ISSET(osfd, &wt)) {
+ out_flags |= PR_POLL_WRITE;
+ }
+ if ((in_flags & PR_POLL_EXCEPT) && FD_ISSET(osfd, &ex)) {
+ out_flags |= PR_POLL_EXCEPT;
+ }
+ pd->out_flags = out_flags;
+ if (out_flags) {
+ n++;
+ }
+ }
+ PR_ASSERT(n > 0);
+ } else if (n < 0) {
+ err = _MD_ERRNO();
+ if (err == EBADF) {
+ /* Find the bad fds */
+ n = 0;
+ for (pd = pds, epd = pd + npds; pd < epd; pd++) {
+ PRFileDesc *bottom = pd->fd;
+ pd->out_flags = 0;
+ if ((NULL == bottom) || (pd->in_flags == 0)) {
+ continue;
+ }
+ while (bottom->lower != NULL) {
+ bottom = bottom->lower;
+ }
+ if (fcntl(bottom->secret->md.osfd, F_GETFL, 0) == -1) {
+ pd->out_flags = PR_POLL_NVAL;
+ n++;
+ }
+ }
+ PR_ASSERT(n > 0);
+ } else {
+ PR_ASSERT(err != EINTR); /* should have been handled above */
+ _PR_MD_MAP_SELECT_ERROR(err);
+ }
+ }
+
+ return n;
+ }
+
+ /*
+ * XXX
+ * PRPollDesc has a PRFileDesc field, fd, while the IOQ
+ * is a list of PRPollQueue structures, each of which contains
+ * a _PRUnixPollDesc. A _PRUnixPollDesc struct contains
+ * the OS file descriptor, osfd, and not a PRFileDesc.
+     * So, we have to allocate memory for _PRUnixPollDesc structures,
+ * copy the flags information from the pds list and have pq
+ * point to this list of _PRUnixPollDesc structures.
+ *
+ * It would be better if the memory allocation can be avoided.
+ */
+
+ unixpds = (_PRUnixPollDesc*) PR_MALLOC(npds * sizeof(_PRUnixPollDesc));
+ if (!unixpds) {
+ PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
+ return -1;
+ }
+ unixpd = unixpds;
+
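+    /*
+     * Disable soft interrupts and take the I/O queue and thread locks
+     * before queueing this poll request.
+     */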
+ _PR_INTSOFF(is);
+ _PR_MD_IOQ_LOCK();
+ _PR_THREAD_LOCK(me);
+
+ if (_PR_PENDING_INTERRUPT(me)) {
+ me->flags &= ~_PR_INTERRUPT;
+ PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
+ _PR_THREAD_UNLOCK(me);
+ _PR_MD_IOQ_UNLOCK();
+ _PR_FAST_INTSON(is);
+ PR_DELETE(unixpds);
+ return -1;
+ }
+
+ pdcnt = 0;
+ for (pd = pds, epd = pd + npds; pd < epd; pd++) {
+ PRInt32 osfd;
+ PRInt16 in_flags = pd->in_flags;
+ PRFileDesc *bottom = pd->fd;
+
+ if ((NULL == bottom) || (in_flags == 0)) {
+ continue;
+ }
+ while (bottom->lower != NULL) {
+ bottom = bottom->lower;
+ }
+ osfd = bottom->secret->md.osfd;
+
+ PR_ASSERT(osfd >= 0 || in_flags == 0);
+
+ unixpd->osfd = osfd;
+ unixpd->in_flags = pd->in_flags;
+ unixpd++;
+ pdcnt++;
+
+ if (in_flags & PR_POLL_READ) {
+ FD_SET(osfd, &_PR_FD_READ_SET(me->cpu));
+ _PR_FD_READ_CNT(me->cpu)[osfd]++;
+ }
+ if (in_flags & PR_POLL_WRITE) {
+ FD_SET(osfd, &_PR_FD_WRITE_SET(me->cpu));
+ (_PR_FD_WRITE_CNT(me->cpu))[osfd]++;
+ }
+ if (in_flags & PR_POLL_EXCEPT) {
+ FD_SET(osfd, &_PR_FD_EXCEPTION_SET(me->cpu));
+ (_PR_FD_EXCEPTION_CNT(me->cpu))[osfd]++;
+ }
+ if (osfd > _PR_IOQ_MAX_OSFD(me->cpu))
+ _PR_IOQ_MAX_OSFD(me->cpu) = osfd;
+ }
+ if (timeout < _PR_IOQ_TIMEOUT(me->cpu))
+ _PR_IOQ_TIMEOUT(me->cpu) = timeout;
+
+
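+    /*
+     * Fill in the poll queue entry, add it to this CPU's I/O queue, and
+     * put this thread on the sleep queue until the I/O is ready or the
+     * timeout expires.
+     */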
+ pq.pds = unixpds;
+ pq.npds = pdcnt;
+
+ pq.thr = me;
+ io_cpu = me->cpu;
+ pq.on_ioq = PR_TRUE;
+ pq.timeout = timeout;
+ _PR_ADD_TO_IOQ(pq, me->cpu);
+ _PR_SLEEPQ_LOCK(me->cpu);
+ _PR_ADD_SLEEPQ(me, timeout);
+ me->state = _PR_IO_WAIT;
+ me->io_pending = PR_TRUE;
+ me->io_suspended = PR_FALSE;
+ _PR_SLEEPQ_UNLOCK(me->cpu);
+ _PR_THREAD_UNLOCK(me);
+ _PR_MD_IOQ_UNLOCK();
+
+ _PR_MD_WAIT(me, timeout);
+
+ me->io_pending = PR_FALSE;
+ me->io_suspended = PR_FALSE;
+
+ /*
+ * This thread should run on the same cpu on which it was blocked; when
+ * the IO request times out the fd sets and fd counts for the
+ * cpu are updated below.
+ */
+ PR_ASSERT(me->cpu == io_cpu);
+ /*
+ * Copy the out_flags from the _PRUnixPollDesc structures to the
+ * user's PRPollDesc structures and free the allocated memory
+ */
+ unixpd = unixpds;
+ for (pd = pds, epd = pd + npds; pd < epd; pd++) {
+ if ((NULL == pd->fd) || (pd->in_flags == 0)) {
+ pd->out_flags = 0;
+ continue;
+ }
+ pd->out_flags = unixpd->out_flags;
+ unixpd++;
+ }
+ PR_DELETE(unixpds);
+
+ /*
+ ** If we timed out the pollq might still be on the ioq. Remove it
+ ** before continuing.
+ */
+ if (pq.on_ioq) {
+ _PR_MD_IOQ_LOCK();
+ /*
+ * Need to check pq.on_ioq again
+ */
+ if (pq.on_ioq == PR_TRUE) {
+ PR_REMOVE_LINK(&pq.links);
+ for (pd = pds, epd = pd + npds; pd < epd; pd++) {
+ PRInt32 osfd;
+ PRInt16 in_flags = pd->in_flags;
+ PRFileDesc *bottom = pd->fd;
+
+ if ((NULL == bottom) || (in_flags == 0)) {
+ continue;
+ }
+ while (bottom->lower != NULL) {
+ bottom = bottom->lower;
+ }
+ osfd = bottom->secret->md.osfd;
+ PR_ASSERT(osfd >= 0 || in_flags == 0);
+ if (in_flags & PR_POLL_READ) {
+ if (--(_PR_FD_READ_CNT(me->cpu))[osfd] == 0)
+ FD_CLR(osfd, &_PR_FD_READ_SET(me->cpu));
+ }
+ if (in_flags & PR_POLL_WRITE) {
+ if (--(_PR_FD_WRITE_CNT(me->cpu))[osfd] == 0)
+ FD_CLR(osfd, &_PR_FD_WRITE_SET(me->cpu));
+ }
+ if (in_flags & PR_POLL_EXCEPT) {
+ if (--(_PR_FD_EXCEPTION_CNT(me->cpu))[osfd] == 0)
+ FD_CLR(osfd, &_PR_FD_EXCEPTION_SET(me->cpu));
+ }
+ }
+ }
+ _PR_MD_IOQ_UNLOCK();
+ }
+ _PR_INTSON(is);
+ if (_PR_PENDING_INTERRUPT(me)) {
+ me->flags &= ~_PR_INTERRUPT;
+ PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
+ return -1;
+ } else {
+ n = 0;
+ if (pq.on_ioq == PR_FALSE) {
+ /* Count the number of ready descriptors */
+ while (--npds >= 0) {
+ if (pds->out_flags) {
+ n++;
+ }
+ pds++;
+ }
+ }
+ return n;
+ }
+}
+
+
+
+
+/************************************************************************/
+
+/*
+** Scan through io queue and find any bad fd's that triggered the error
+** from _MD_SELECT
+*/
+static void FindBadFDs(void)
+{
+ PRCList *q;
+ PRThread *me = _MD_CURRENT_THREAD();
+
+ PR_ASSERT(!_PR_IS_NATIVE_THREAD(me));
+ q = (_PR_IOQ(me->cpu)).next;
+ _PR_IOQ_MAX_OSFD(me->cpu) = -1;
+ _PR_IOQ_TIMEOUT(me->cpu) = PR_INTERVAL_NO_TIMEOUT;
+ while (q != &_PR_IOQ(me->cpu)) {
+ PRPollQueue *pq = _PR_POLLQUEUE_PTR(q);
+ PRBool notify = PR_FALSE;
+ _PRUnixPollDesc *pds = pq->pds;
+ _PRUnixPollDesc *epds = pds + pq->npds;
+ PRInt32 pq_max_osfd = -1;
+
+ q = q->next;
+ for (; pds < epds; pds++) {
+ PRInt32 osfd = pds->osfd;
+ pds->out_flags = 0;
+ PR_ASSERT(osfd >= 0 || pds->in_flags == 0);
+ if (pds->in_flags == 0) {
+ continue; /* skip this fd */
+ }
+ if (fcntl(osfd, F_GETFL, 0) == -1) {
+ /* Found a bad descriptor, remove it from the fd_sets. */
+ PR_LOG(_pr_io_lm, PR_LOG_MAX,
+ ("file descriptor %d is bad", osfd));
+ pds->out_flags = PR_POLL_NVAL;
+ notify = PR_TRUE;
+ }
+ if (osfd > pq_max_osfd) {
+ pq_max_osfd = osfd;
+ }
+ }
+
+ if (notify) {
+ PRIntn pri;
+ PR_REMOVE_LINK(&pq->links);
+ pq->on_ioq = PR_FALSE;
+
+ /*
+             * Decrement the count of descriptors for each descriptor/event
+ * because this I/O request is being removed from the
+ * ioq
+ */
+ pds = pq->pds;
+ for (; pds < epds; pds++) {
+ PRInt32 osfd = pds->osfd;
+ PRInt16 in_flags = pds->in_flags;
+ PR_ASSERT(osfd >= 0 || in_flags == 0);
+ if (in_flags & PR_POLL_READ) {
+ if (--(_PR_FD_READ_CNT(me->cpu))[osfd] == 0)
+ FD_CLR(osfd, &_PR_FD_READ_SET(me->cpu));
+ }
+ if (in_flags & PR_POLL_WRITE) {
+ if (--(_PR_FD_WRITE_CNT(me->cpu))[osfd] == 0)
+ FD_CLR(osfd, &_PR_FD_WRITE_SET(me->cpu));
+ }
+ if (in_flags & PR_POLL_EXCEPT) {
+ if (--(_PR_FD_EXCEPTION_CNT(me->cpu))[osfd] == 0)
+ FD_CLR(osfd, &_PR_FD_EXCEPTION_SET(me->cpu));
+ }
+ }
+
+ _PR_THREAD_LOCK(pq->thr);
+ if (pq->thr->flags & (_PR_ON_PAUSEQ|_PR_ON_SLEEPQ)) {
+ _PRCPU *cpu = pq->thr->cpu;
+ _PR_SLEEPQ_LOCK(pq->thr->cpu);
+ _PR_DEL_SLEEPQ(pq->thr, PR_TRUE);
+ _PR_SLEEPQ_UNLOCK(pq->thr->cpu);
+
+ pri = pq->thr->priority;
+ pq->thr->state = _PR_RUNNABLE;
+
+ _PR_RUNQ_LOCK(cpu);
+ _PR_ADD_RUNQ(pq->thr, cpu, pri);
+ _PR_RUNQ_UNLOCK(cpu);
+ }
+ _PR_THREAD_UNLOCK(pq->thr);
+ } else {
+ if (pq->timeout < _PR_IOQ_TIMEOUT(me->cpu))
+ _PR_IOQ_TIMEOUT(me->cpu) = pq->timeout;
+ if (_PR_IOQ_MAX_OSFD(me->cpu) < pq_max_osfd)
+ _PR_IOQ_MAX_OSFD(me->cpu) = pq_max_osfd;
+ }
+ }
+ if (_PR_IS_NATIVE_THREAD_SUPPORTED()) {
+ if (_PR_IOQ_MAX_OSFD(me->cpu) < _pr_md_pipefd[0])
+ _PR_IOQ_MAX_OSFD(me->cpu) = _pr_md_pipefd[0];
+ }
+}
+
+/************************************************************************/
+
+/*
+** Called by the scheduler when there is nothing to do. This means that
+** all threads are blocked on some monitor somewhere.
+**
+** Note: this code doesn't release the scheduler lock.
+*/
+/*
+** Pause the current CPU. longjmp to the cpu's pause stack
+**
+** This must be called with the scheduler locked
+*/
+void _MD_PauseCPU(PRIntervalTime ticks)
+{
+ PRThread *me = _MD_CURRENT_THREAD();
+#ifdef _PR_USE_POLL
+ int timeout;
+ struct pollfd *pollfds; /* an array of pollfd structures */
+ struct pollfd *pollfdPtr; /* a pointer that steps through the array */
+ unsigned long npollfds; /* number of pollfd structures in array */
+ int nfd; /* to hold the return value of poll() */
+#else
+ struct timeval timeout, *tvp;
+ fd_set r, w, e;
+ fd_set *rp, *wp, *ep;
+ PRInt32 max_osfd, nfd;
+#endif /* _PR_USE_POLL */
+ PRInt32 rv;
+ PRCList *q;
+ PRUint32 min_timeout;
+ sigset_t oldset;
+#ifdef IRIX
+extern sigset_t ints_off;
+#endif
+
+ PR_ASSERT(_PR_MD_GET_INTSOFF() != 0);
+
+ _PR_MD_IOQ_LOCK();
+
+#ifdef _PR_USE_POLL
+ /* Build up the pollfd structure array to wait on */
+
+ /* Find out how many pollfd structures are needed */
+ npollfds = 0;
+ for (q = _PR_IOQ(me->cpu).next; q != &_PR_IOQ(me->cpu); q = q->next) {
+ PRPollQueue *pq = _PR_POLLQUEUE_PTR(q);
+
+ npollfds += pq->npds;
+ }
+ /*
+ * We use a pipe to wake up a native thread. An fd is needed
+ * for the pipe and we poll it for reading.
+ */
+ if (_PR_IS_NATIVE_THREAD_SUPPORTED())
+ npollfds++;
+
+ pollfds = (struct pollfd *) PR_MALLOC(npollfds * sizeof(struct pollfd));
+ pollfdPtr = pollfds;
+
+ /*
+ * If we need to poll the pipe for waking up a native thread,
+ * the pipe's fd is the first element in the pollfds array.
+ */
+ if (_PR_IS_NATIVE_THREAD_SUPPORTED()) {
+ pollfdPtr->fd = _pr_md_pipefd[0];
+ pollfdPtr->events = PR_POLL_READ;
+ pollfdPtr++;
+ }
+
+ min_timeout = PR_INTERVAL_NO_TIMEOUT;
+ for (q = _PR_IOQ(me->cpu).next; q != &_PR_IOQ(me->cpu); q = q->next) {
+ PRPollQueue *pq = _PR_POLLQUEUE_PTR(q);
+ _PRUnixPollDesc *pds = pq->pds;
+ _PRUnixPollDesc *epds = pds + pq->npds;
+
+ if (pq->timeout < min_timeout) {
+ min_timeout = pq->timeout;
+ }
+ for (; pds < epds; pds++, pollfdPtr++) {
+ /*
+ * Assert that the pollfdPtr pointer does not go
+ * beyond the end of the pollfds array
+ */
+ PR_ASSERT(pollfdPtr < pollfds + npollfds);
+ pollfdPtr->fd = pds->osfd;
+ /* direct copy of poll flags */
+ pollfdPtr->events = pds->in_flags;
+ }
+ }
+#else
+ /*
+     * assignment of fd_sets
+ */
+ r = _PR_FD_READ_SET(me->cpu);
+ w = _PR_FD_WRITE_SET(me->cpu);
+ e = _PR_FD_EXCEPTION_SET(me->cpu);
+
+ rp = &r;
+ wp = &w;
+ ep = &e;
+
+ max_osfd = _PR_IOQ_MAX_OSFD(me->cpu) + 1;
+ min_timeout = _PR_IOQ_TIMEOUT(me->cpu);
+#endif /* _PR_USE_POLL */
+ /*
+ ** Compute the minimum timeout value: make it the smaller of the
+ ** timeouts specified by the i/o pollers or the timeout of the first
+ ** sleeping thread.
+ */
+ q = _PR_SLEEPQ(me->cpu).next;
+
+ if (q != &_PR_SLEEPQ(me->cpu)) {
+ PRThread *t = _PR_THREAD_PTR(q);
+
+ if (t->sleep < min_timeout) {
+ min_timeout = t->sleep;
+ }
+ }
+ if (min_timeout > ticks) {
+ min_timeout = ticks;
+ }
+
+#ifdef _PR_USE_POLL
+ if (min_timeout == PR_INTERVAL_NO_TIMEOUT)
+ timeout = -1;
+ else
+ timeout = PR_IntervalToMilliseconds(min_timeout);
+#else
+ if (min_timeout == PR_INTERVAL_NO_TIMEOUT) {
+ tvp = NULL;
+ } else {
+ timeout.tv_sec = PR_IntervalToSeconds(min_timeout);
+ timeout.tv_usec = PR_IntervalToMicroseconds(min_timeout)
+ % PR_USEC_PER_SEC;
+ tvp = &timeout;
+ }
+#endif /* _PR_USE_POLL */
+
+ _PR_MD_IOQ_UNLOCK();
+ _MD_CHECK_FOR_EXIT();
+ /*
+ * check for i/o operations
+ */
+#ifndef _PR_NO_CLOCK_TIMER
+ /*
+ * Disable the clock interrupts while we are in select, if clock interrupts
+ * are enabled. Otherwise, when the select/poll calls are interrupted, the
+ * timer value starts ticking from zero again when the system call is restarted.
+ */
+#ifdef IRIX
+ /*
+     * SIGCHLD signal is used on Irix to detect the termination of an
+ * sproc by SIGSEGV, SIGBUS or SIGABRT signals when
+ * _nspr_terminate_on_error is set.
+ */
+ if ((!_nspr_noclock) || (_nspr_terminate_on_error))
+#else
+ if (!_nspr_noclock)
+#endif /* IRIX */
+#ifdef IRIX
+ sigprocmask(SIG_BLOCK, &ints_off, &oldset);
+#else
+ PR_ASSERT(sigismember(&timer_set, SIGALRM));
+ sigprocmask(SIG_BLOCK, &timer_set, &oldset);
+#endif /* IRIX */
+#endif /* !_PR_NO_CLOCK_TIMER */
+
+#ifndef _PR_USE_POLL
+ PR_ASSERT(FD_ISSET(_pr_md_pipefd[0],rp));
+ nfd = _MD_SELECT(max_osfd, rp, wp, ep, tvp);
+#else
+ nfd = _MD_POLL(pollfds, npollfds, timeout);
+#endif /* !_PR_USE_POLL */
+
+#ifndef _PR_NO_CLOCK_TIMER
+#ifdef IRIX
+ if ((!_nspr_noclock) || (_nspr_terminate_on_error))
+#else
+ if (!_nspr_noclock)
+#endif /* IRIX */
+ sigprocmask(SIG_SETMASK, &oldset, 0);
+#endif /* !_PR_NO_CLOCK_TIMER */
+
+ _MD_CHECK_FOR_EXIT();
+ _PR_MD_IOQ_LOCK();
+ /*
+ ** Notify monitors that are associated with the selected descriptors.
+ */
+#ifdef _PR_USE_POLL
+ if (nfd > 0) {
+ pollfdPtr = pollfds;
+ if (_PR_IS_NATIVE_THREAD_SUPPORTED()) {
+ /*
+ * Assert that the pipe is the first element in the
+ * pollfds array.
+ */
+ PR_ASSERT(pollfds[0].fd == _pr_md_pipefd[0]);
+ if ((pollfds[0].revents & PR_POLL_READ) && (nfd == 1)) {
+ /*
+ * woken up by another thread; read all the data
+ * in the pipe to empty the pipe
+ */
+ while ((rv = read(_pr_md_pipefd[0], _pr_md_pipebuf,
+ PIPE_BUF)) == PIPE_BUF){
+ }
+ PR_ASSERT((rv > 0) || ((rv == -1) && (errno == EAGAIN)));
+ }
+ pollfdPtr++;
+ }
+ for (q = _PR_IOQ(me->cpu).next; q != &_PR_IOQ(me->cpu); q = q->next) {
+ PRPollQueue *pq = _PR_POLLQUEUE_PTR(q);
+ PRBool notify = PR_FALSE;
+ _PRUnixPollDesc *pds = pq->pds;
+ _PRUnixPollDesc *epds = pds + pq->npds;
+
+ for (; pds < epds; pds++, pollfdPtr++) {
+ /*
+ * Assert that the pollfdPtr pointer does not go beyond
+ * the end of the pollfds array.
+ */
+ PR_ASSERT(pollfdPtr < pollfds + npollfds);
+ /*
+ * Assert that the fd's in the pollfds array (stepped
+ * through by pollfdPtr) are in the same order as
+ * the fd's in _PR_IOQ() (stepped through by q and pds).
+ * This is how the pollfds array was created earlier.
+ */
+ PR_ASSERT(pollfdPtr->fd == pds->osfd);
+ pds->out_flags = pollfdPtr->revents;
+ /* Negative fd's are ignored by poll() */
+ if (pds->osfd >= 0 && pds->out_flags) {
+ notify = PR_TRUE;
+ }
+ }
+ if (notify) {
+ PRIntn pri;
+ PRThread *thred;
+
+ PR_REMOVE_LINK(&pq->links);
+ pq->on_ioq = PR_FALSE;
+
+ /*
+ * Because this thread can run on a different cpu right
+ * after being added to the run queue, do not dereference
+ * pq
+ */
+ thred = pq->thr;
+ _PR_THREAD_LOCK(thred);
+ if (pq->thr->flags & (_PR_ON_PAUSEQ|_PR_ON_SLEEPQ)) {
+ _PRCPU *cpu = pq->thr->cpu;
+ _PR_SLEEPQ_LOCK(pq->thr->cpu);
+ _PR_DEL_SLEEPQ(pq->thr, PR_TRUE);
+ _PR_SLEEPQ_UNLOCK(pq->thr->cpu);
+
+ pri = pq->thr->priority;
+ pq->thr->state = _PR_RUNNABLE;
+
+ _PR_RUNQ_LOCK(cpu);
+ _PR_ADD_RUNQ(pq->thr, cpu, pri);
+ _PR_RUNQ_UNLOCK(cpu);
+ if (_pr_md_idle_cpus > 1)
+ _PR_MD_WAKEUP_WAITER(thred);
+ }
+ _PR_THREAD_UNLOCK(thred);
+ }
+ }
+ } else if (nfd == -1) {
+ PR_LOG(_pr_io_lm, PR_LOG_MAX, ("poll() failed with errno %d", errno));
+ }
+
+ /* done with pollfds */
+ PR_DELETE(pollfds);
+#else
+ if (nfd > 0) {
+ q = _PR_IOQ(me->cpu).next;
+ _PR_IOQ_MAX_OSFD(me->cpu) = -1;
+ _PR_IOQ_TIMEOUT(me->cpu) = PR_INTERVAL_NO_TIMEOUT;
+ while (q != &_PR_IOQ(me->cpu)) {
+ PRPollQueue *pq = _PR_POLLQUEUE_PTR(q);
+ PRBool notify = PR_FALSE;
+ _PRUnixPollDesc *pds = pq->pds;
+ _PRUnixPollDesc *epds = pds + pq->npds;
+ PRInt32 pq_max_osfd = -1;
+
+ q = q->next;
+ for (; pds < epds; pds++) {
+ PRInt32 osfd = pds->osfd;
+ PRInt16 in_flags = pds->in_flags;
+ PRInt16 out_flags = 0;
+ PR_ASSERT(osfd >= 0 || in_flags == 0);
+ if ((in_flags & PR_POLL_READ) && FD_ISSET(osfd, rp)) {
+ out_flags |= PR_POLL_READ;
+ }
+ if ((in_flags & PR_POLL_WRITE) && FD_ISSET(osfd, wp)) {
+ out_flags |= PR_POLL_WRITE;
+ }
+ if ((in_flags & PR_POLL_EXCEPT) && FD_ISSET(osfd, ep)) {
+ out_flags |= PR_POLL_EXCEPT;
+ }
+ pds->out_flags = out_flags;
+ if (out_flags) {
+ notify = PR_TRUE;
+ }
+ if (osfd > pq_max_osfd) {
+ pq_max_osfd = osfd;
+ }
+ }
+ if (notify == PR_TRUE) {
+ PRIntn pri;
+ PRThread *thred;
+
+ PR_REMOVE_LINK(&pq->links);
+ pq->on_ioq = PR_FALSE;
+
+ /*
+                 * Decrement the count of descriptors for each descriptor/event
+ * because this I/O request is being removed from the
+ * ioq
+ */
+ pds = pq->pds;
+ for (; pds < epds; pds++) {
+ PRInt32 osfd = pds->osfd;
+ PRInt16 in_flags = pds->in_flags;
+ PR_ASSERT(osfd >= 0 || in_flags == 0);
+ if (in_flags & PR_POLL_READ) {
+ if (--(_PR_FD_READ_CNT(me->cpu))[osfd] == 0)
+ FD_CLR(osfd, &_PR_FD_READ_SET(me->cpu));
+ }
+ if (in_flags & PR_POLL_WRITE) {
+ if (--(_PR_FD_WRITE_CNT(me->cpu))[osfd] == 0)
+ FD_CLR(osfd, &_PR_FD_WRITE_SET(me->cpu));
+ }
+ if (in_flags & PR_POLL_EXCEPT) {
+ if (--(_PR_FD_EXCEPTION_CNT(me->cpu))[osfd] == 0)
+ FD_CLR(osfd, &_PR_FD_EXCEPTION_SET(me->cpu));
+ }
+ }
+
+ /*
+ * Because this thread can run on a different cpu right
+ * after being added to the run queue, do not dereference
+ * pq
+ */
+ thred = pq->thr;
+ _PR_THREAD_LOCK(thred);
+ if (pq->thr->flags & (_PR_ON_PAUSEQ|_PR_ON_SLEEPQ)) {
+ _PRCPU *cpu = thred->cpu;
+ _PR_SLEEPQ_LOCK(pq->thr->cpu);
+ _PR_DEL_SLEEPQ(pq->thr, PR_TRUE);
+ _PR_SLEEPQ_UNLOCK(pq->thr->cpu);
+
+ pri = pq->thr->priority;
+ pq->thr->state = _PR_RUNNABLE;
+
+ pq->thr->cpu = cpu;
+ _PR_RUNQ_LOCK(cpu);
+ _PR_ADD_RUNQ(pq->thr, cpu, pri);
+ _PR_RUNQ_UNLOCK(cpu);
+ if (_pr_md_idle_cpus > 1)
+ _PR_MD_WAKEUP_WAITER(thred);
+ }
+ _PR_THREAD_UNLOCK(thred);
+ } else {
+ if (pq->timeout < _PR_IOQ_TIMEOUT(me->cpu))
+ _PR_IOQ_TIMEOUT(me->cpu) = pq->timeout;
+ if (_PR_IOQ_MAX_OSFD(me->cpu) < pq_max_osfd)
+ _PR_IOQ_MAX_OSFD(me->cpu) = pq_max_osfd;
+ }
+ }
+ if (_PR_IS_NATIVE_THREAD_SUPPORTED()) {
+ if ((FD_ISSET(_pr_md_pipefd[0], rp)) && (nfd == 1)) {
+ /*
+ * woken up by another thread; read all the data
+ * in the pipe to empty the pipe
+ */
+ while ((rv =
+ read(_pr_md_pipefd[0], _pr_md_pipebuf, PIPE_BUF))
+ == PIPE_BUF){
+ }
+ PR_ASSERT((rv > 0) ||
+ ((rv == -1) && (errno == EAGAIN)));
+ }
+ if (_PR_IOQ_MAX_OSFD(me->cpu) < _pr_md_pipefd[0])
+ _PR_IOQ_MAX_OSFD(me->cpu) = _pr_md_pipefd[0];
+ }
+ } else if (nfd < 0) {
+ if (errno == EBADF) {
+ FindBadFDs();
+ } else {
+ PR_LOG(_pr_io_lm, PR_LOG_MAX, ("select() failed with errno %d",
+ errno));
+ }
+ } else {
+ PR_ASSERT(nfd == 0);
+ /*
+ * compute the new value of _PR_IOQ_TIMEOUT
+ */
+ q = _PR_IOQ(me->cpu).next;
+ _PR_IOQ_MAX_OSFD(me->cpu) = -1;
+ _PR_IOQ_TIMEOUT(me->cpu) = PR_INTERVAL_NO_TIMEOUT;
+ while (q != &_PR_IOQ(me->cpu)) {
+ PRPollQueue *pq = _PR_POLLQUEUE_PTR(q);
+ _PRUnixPollDesc *pds = pq->pds;
+ _PRUnixPollDesc *epds = pds + pq->npds;
+ PRInt32 pq_max_osfd = -1;
+
+ q = q->next;
+ for (; pds < epds; pds++) {
+ if (pds->osfd > pq_max_osfd) {
+ pq_max_osfd = pds->osfd;
+ }
+ }
+ if (pq->timeout < _PR_IOQ_TIMEOUT(me->cpu))
+ _PR_IOQ_TIMEOUT(me->cpu) = pq->timeout;
+ if (_PR_IOQ_MAX_OSFD(me->cpu) < pq_max_osfd)
+ _PR_IOQ_MAX_OSFD(me->cpu) = pq_max_osfd;
+ }
+ if (_PR_IS_NATIVE_THREAD_SUPPORTED()) {
+ if (_PR_IOQ_MAX_OSFD(me->cpu) < _pr_md_pipefd[0])
+ _PR_IOQ_MAX_OSFD(me->cpu) = _pr_md_pipefd[0];
+ }
+ }
+#endif /* _PR_USE_POLL */
+ _PR_MD_IOQ_UNLOCK();
+}
+
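+/*
+** Wake up CPUs blocked in select()/poll() by writing a byte to the wakeup
+** pipe. If the pipe is full, drain it and try the write again.
+*/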
+void _MD_Wakeup_CPUs()
+{
+ PRInt32 rv, data;
+
+ data = 0;
+ rv = write(_pr_md_pipefd[1], &data, 1);
+
+ while ((rv < 0) && (errno == EAGAIN)) {
+ /*
+ * pipe full, read all data in pipe to empty it
+ */
+ while ((rv =
+ read(_pr_md_pipefd[0], _pr_md_pipebuf, PIPE_BUF))
+ == PIPE_BUF) {
+ }
+ PR_ASSERT((rv > 0) ||
+ ((rv == -1) && (errno == EAGAIN)));
+ rv = write(_pr_md_pipefd[1], &data, 1);
+ }
+}
+
+
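+/*
+** Create the wakeup pipe used by _MD_Wakeup_CPUs, add its read end to the
+** current CPU's read fd_set, and make both ends nonblocking.
+*/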
+void _MD_InitCPUS()
+{
+ PRInt32 rv, flags;
+ PRThread *me = _MD_CURRENT_THREAD();
+
+ rv = pipe(_pr_md_pipefd);
+ PR_ASSERT(rv == 0);
+ _PR_IOQ_MAX_OSFD(me->cpu) = _pr_md_pipefd[0];
+ FD_SET(_pr_md_pipefd[0], &_PR_FD_READ_SET(me->cpu));
+
+ flags = fcntl(_pr_md_pipefd[0], F_GETFL, 0);
+ fcntl(_pr_md_pipefd[0], F_SETFL, flags | O_NONBLOCK);
+ flags = fcntl(_pr_md_pipefd[1], F_GETFL, 0);
+ fcntl(_pr_md_pipefd[1], F_SETFL, flags | O_NONBLOCK);
+}
+
+/*
+** Unix SIGALRM (clock) signal handler
+*/
+static void ClockInterruptHandler()
+{
+ int olderrno;
+ PRUintn pri;
+ _PRCPU *cpu = _PR_MD_CURRENT_CPU();
+ PRThread *me = _MD_CURRENT_THREAD();
+
+#ifdef SOLARIS
+ if (!me || _PR_IS_NATIVE_THREAD(me)) {
+ _pr_primordialCPU->u.missed[_pr_primordialCPU->where] |= _PR_MISSED_CLOCK;
+ return;
+ }
+#endif
+
+ if (_PR_MD_GET_INTSOFF() != 0) {
+ cpu->u.missed[cpu->where] |= _PR_MISSED_CLOCK;
+ return;
+ }
+ _PR_MD_SET_INTSOFF(1);
+
+ olderrno = errno;
+ _PR_ClockInterrupt();
+ errno = olderrno;
+
+ /*
+ ** If the interrupt wants a resched or if some other thread at
+ ** the same priority needs the cpu, reschedule.
+ */
+ pri = me->priority;
+ if ((cpu->u.missed[3] || (_PR_RUNQREADYMASK(me->cpu) >> pri))) {
+#ifdef _PR_NO_PREEMPT
+ cpu->resched = PR_TRUE;
+ if (pr_interruptSwitchHook) {
+ (*pr_interruptSwitchHook)(pr_interruptSwitchHookArg);
+ }
+#else /* _PR_NO_PREEMPT */
+ /*
+ ** Re-enable unix interrupts (so that we can use
+ ** setjmp/longjmp for context switching without having to
+ ** worry about the signal state)
+ */
+ sigprocmask(SIG_SETMASK, &empty_set, 0);
+ PR_LOG(_pr_sched_lm, PR_LOG_MIN, ("clock caused context switch"));
+
+ if(!(me->flags & _PR_IDLE_THREAD)) {
+ _PR_THREAD_LOCK(me);
+ me->state = _PR_RUNNABLE;
+ me->cpu = cpu;
+ _PR_RUNQ_LOCK(cpu);
+ _PR_ADD_RUNQ(me, cpu, pri);
+ _PR_RUNQ_UNLOCK(cpu);
+ _PR_THREAD_UNLOCK(me);
+ } else
+ me->state = _PR_RUNNABLE;
+ _MD_SWITCH_CONTEXT(me);
+ PR_LOG(_pr_sched_lm, PR_LOG_MIN, ("clock back from context switch"));
+#endif /* _PR_NO_PREEMPT */
+ }
+ /*
+ * Because this thread could be running on a different cpu after
+ * a context switch the current cpu should be accessed and the
+ * value of the 'cpu' variable should not be used.
+ */
+ _PR_MD_SET_INTSOFF(0);
+}
+
+/* # of milliseconds per clock tick that we will use */
+#define MSEC_PER_TICK 50
+
+
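+/*
+** Install the SIGALRM virtual clock handler and, unless disabled via the
+** NSPR_NOCLOCK environment variable, arm a recurring ITIMER_REAL timer
+** that fires every MSEC_PER_TICK milliseconds.
+*/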
+void _MD_StartInterrupts()
+{
+ struct itimerval itval;
+ char *eval;
+ struct sigaction vtact;
+
+ vtact.sa_handler = (void (*)()) ClockInterruptHandler;
+ vtact.sa_flags = SA_RESTART;
+ vtact.sa_mask = timer_set;
+ sigaction(SIGALRM, &vtact, 0);
+
+ if ((eval = getenv("NSPR_NOCLOCK")) != NULL) {
+ if (atoi(eval) == 0)
+ _nspr_noclock = 0;
+ else
+ _nspr_noclock = 1;
+ }
+
+#ifndef _PR_NO_CLOCK_TIMER
+ if (!_nspr_noclock) {
+ itval.it_interval.tv_sec = 0;
+ itval.it_interval.tv_usec = MSEC_PER_TICK * PR_USEC_PER_MSEC;
+ itval.it_value = itval.it_interval;
+ setitimer(ITIMER_REAL, &itval, 0);
+ }
+#endif
+}
+
+void _MD_StopInterrupts()
+{
+ sigprocmask(SIG_BLOCK, &timer_set, 0);
+}
+
+void _MD_DisableClockInterrupts()
+{
+ struct itimerval itval;
+ extern PRUintn _pr_numCPU;
+
+ PR_ASSERT(_pr_numCPU == 1);
+ if (!_nspr_noclock) {
+ itval.it_interval.tv_sec = 0;
+ itval.it_interval.tv_usec = 0;
+ itval.it_value = itval.it_interval;
+ setitimer(ITIMER_REAL, &itval, 0);
+ }
+}
+
+void _MD_BlockClockInterrupts()
+{
+ sigprocmask(SIG_BLOCK, &timer_set, 0);
+}
+
+void _MD_UnblockClockInterrupts()
+{
+ sigprocmask(SIG_UNBLOCK, &timer_set, 0);
+}
+
+void _MD_MakeNonblock(PRFileDesc *fd)
+{
+ PRInt32 osfd = fd->secret->md.osfd;
+ int flags;
+
+ if (osfd <= 2) {
+ /* Don't mess around with stdin, stdout or stderr */
+ return;
+ }
+ flags = fcntl(osfd, F_GETFL, 0);
+
+ /*
+ * Use O_NONBLOCK (POSIX-style non-blocking I/O) whenever possible.
+ * On SunOS 4, we must use FNDELAY (BSD-style non-blocking I/O),
+ * otherwise connect() still blocks and can be interrupted by SIGALRM.
+ */
+
+#ifdef SUNOS4
+ fcntl(osfd, F_SETFL, flags | FNDELAY);
+#else
+ fcntl(osfd, F_SETFL, flags | O_NONBLOCK);
+#endif
+}
+
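+/*
+** Translate NSPR open flags (PR_RDWR, PR_APPEND, PR_CREATE_FILE, ...)
+** into native O_* flags and open the file. Creations are serialized with
+** PR_Rename via _pr_rename_lock.
+*/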
+PRInt32 _MD_open(const char *name, PRIntn flags, PRIntn mode)
+{
+ PRInt32 osflags;
+ PRInt32 rv, err;
+
+ if (flags & PR_RDWR) {
+ osflags = O_RDWR;
+ } else if (flags & PR_WRONLY) {
+ osflags = O_WRONLY;
+ } else {
+ osflags = O_RDONLY;
+ }
+
+ if (flags & PR_APPEND)
+ osflags |= O_APPEND;
+ if (flags & PR_TRUNCATE)
+ osflags |= O_TRUNC;
+ if (flags & PR_SYNC) {
+#if defined(FREEBSD)
+ osflags |= O_FSYNC;
+#else
+ osflags |= O_SYNC;
+#endif
+ }
+
+ /*
+ ** On creations we hold the 'create' lock in order to enforce
+ ** the semantics of PR_Rename. (see the latter for more details)
+ */
+ if (flags & PR_CREATE_FILE)
+ {
+ osflags |= O_CREAT ;
+ if (NULL !=_pr_rename_lock)
+ PR_Lock(_pr_rename_lock);
+ }
+
+ rv = open(name, osflags, mode);
+
+ if (rv < 0) {
+ err = _MD_ERRNO();
+ _PR_MD_MAP_OPEN_ERROR(err);
+ }
+
+ if ((flags & PR_CREATE_FILE) && (NULL !=_pr_rename_lock))
+ PR_Unlock(_pr_rename_lock);
+ return rv;
+}
+
+PRIntervalTime intr_timeout_ticks;
+
+static void sigsegvhandler() {
+ fprintf(stderr,"Received SIGSEGV\n");
+ fflush(stderr);
+ pause();
+}
+
+static void sigaborthandler() {
+ fprintf(stderr,"Received SIGABRT\n");
+ fflush(stderr);
+ pause();
+}
+
+static void sigbushandler() {
+ fprintf(stderr,"Received SIGBUS\n");
+ fflush(stderr);
+ pause();
+}
+
+#endif /* !defined(_PR_PTHREADS) */
+
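+/*
+** Process-wide Unix initialization: set up the timer signal set, optionally
+** install the debugging signal handlers, ignore SIGPIPE, and create the
+** rename lock and the Xlib monitor.
+*/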
+void _PR_UnixInit()
+{
+ struct sigaction sigact;
+ int rv;
+
+ sigemptyset(&timer_set);
+
+#if !defined(_PR_PTHREADS)
+
+ sigaddset(&timer_set, SIGALRM);
+ sigemptyset(&empty_set);
+ intr_timeout_ticks =
+ PR_SecondsToInterval(_PR_INTERRUPT_CHECK_INTERVAL_SECS);
+
+#if defined(SOLARIS) || defined(IRIX)
+
+ if (getenv("NSPR_SIGSEGV_HANDLE")) {
+ sigact.sa_handler = sigsegvhandler;
+ sigact.sa_flags = SA_RESTART;
+ sigact.sa_mask = timer_set;
+ sigaction(SIGSEGV, &sigact, 0);
+ }
+
+ if (getenv("NSPR_SIGABRT_HANDLE")) {
+ sigact.sa_handler = sigaborthandler;
+ sigact.sa_flags = SA_RESTART;
+ sigact.sa_mask = timer_set;
+ sigaction(SIGABRT, &sigact, 0);
+ }
+
+ if (getenv("NSPR_SIGBUS_HANDLE")) {
+ sigact.sa_handler = sigbushandler;
+ sigact.sa_flags = SA_RESTART;
+ sigact.sa_mask = timer_set;
+ sigaction(SIGBUS, &sigact, 0);
+ }
+
+#endif
+#endif /* !defined(_PR_PTHREADS) */
+
+ /*
+ * Under HP-UX DCE threads, sigaction() installs a per-thread
+ * handler, so we use sigvector() to install a process-wide
+ * handler.
+ */
+#if defined(HPUX) && defined(_PR_DCETHREADS)
+ {
+ struct sigvec vec;
+
+ vec.sv_handler = SIG_IGN;
+ vec.sv_mask = 0;
+ vec.sv_flags = 0;
+ rv = sigvector(SIGPIPE, &vec, NULL);
+ PR_ASSERT(0 == rv);
+ }
+#else
+ sigact.sa_handler = SIG_IGN;
+ sigemptyset(&sigact.sa_mask);
+ sigact.sa_flags = 0;
+ rv = sigaction(SIGPIPE, &sigact, 0);
+ PR_ASSERT(0 == rv);
+#endif /* HPUX && _PR_DCETHREADS */
+
+ _pr_rename_lock = PR_NewLock();
+ PR_ASSERT(NULL != _pr_rename_lock);
+ _pr_Xfe_mon = PR_NewMonitor();
+ PR_ASSERT(NULL != _pr_Xfe_mon);
+
+}
+
+/*
+ * _MD_InitSegs --
+ *
+ * This is Unix's version of _PR_MD_INIT_SEGS(), which is
+ * called by _PR_InitSegs(), which in turn is called by
+ * PR_Init().
+ */
+void _MD_InitSegs()
+{
+#ifdef DEBUG
+ /*
+ ** Disable using mmap(2) if NSPR_NO_MMAP is set
+ */
+ if (getenv("NSPR_NO_MMAP")) {
+ _pr_zero_fd = -2;
+ return;
+ }
+#endif
+ _pr_zero_fd = open("/dev/zero",O_RDWR , 0);
+ _pr_md_lock = PR_NewLock();
+}
+
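+/*
+** Allocate a stack segment. If /dev/zero was opened successfully, the
+** segment is mmap()'d (at the requested address, or at successive addresses
+** from _PR_STACK_VMBASE); otherwise it is taken from the heap.
+*/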
+PRStatus _MD_AllocSegment(PRSegment *seg, PRUint32 size, void *vaddr)
+{
+ static char *lastaddr = (char*) _PR_STACK_VMBASE;
+ PRStatus retval = PR_SUCCESS;
+ int prot;
+ void *rv;
+
+ PR_ASSERT(seg != 0);
+ PR_ASSERT(size != 0);
+
+ PR_Lock(_pr_md_lock);
+ if (_pr_zero_fd < 0) {
+from_heap:
+ seg->vaddr = PR_MALLOC(size);
+ if (!seg->vaddr) {
+ retval = PR_FAILURE;
+ }
+ else {
+ seg->size = size;
+ seg->access = PR_SEGMENT_RDWR;
+ }
+ goto exit;
+ }
+
+ prot = PROT_READ|PROT_WRITE;
+ rv = mmap((vaddr != 0) ? vaddr : lastaddr, size, prot,
+ _MD_MMAP_FLAGS,
+ _pr_zero_fd, 0);
+ if (rv == (void*)-1) {
+ goto from_heap;
+ }
+ lastaddr += size;
+ seg->vaddr = rv;
+ seg->size = size;
+ seg->access = PR_SEGMENT_RDWR;
+ seg->flags = _PR_SEG_VM;
+
+exit:
+ PR_Unlock(_pr_md_lock);
+ return retval;
+}
+
+void _MD_FreeSegment(PRSegment *seg)
+{
+ if (seg->flags & _PR_SEG_VM)
+ (void) munmap(seg->vaddr, seg->size);
+ else
+ PR_DELETE(seg->vaddr);
+}
+
+/*
+ *-----------------------------------------------------------------------
+ *
+ * PR_Now --
+ *
+ * Returns the current time in microseconds since the epoch.
+ * The epoch is midnight January 1, 1970 GMT.
+ * The implementation is machine dependent. This is the Unix
+ * implementation.
+ * Cf. time_t time(time_t *tp)
+ *
+ *-----------------------------------------------------------------------
+ */
+
+PR_IMPLEMENT(PRTime)
+PR_Now(void)
+{
+ struct timeval tv;
+ PRInt64 s, us, s2us;
+
+#if (defined(SOLARIS) && defined(_SVID_GETTOD)) || defined(SONY)
+ gettimeofday(&tv);
+#else
+ gettimeofday(&tv, 0);
+#endif
+ LL_I2L(s2us, PR_USEC_PER_SEC);
+ LL_I2L(s, tv.tv_sec);
+ LL_I2L(us, tv.tv_usec);
+ LL_MUL(s, s, s2us);
+ LL_ADD(s, s, us);
+ return s;
+}
+
+PR_IMPLEMENT(PRIntervalTime) _PR_UNIX_GetInterval()
+{
+ struct timeval time;
+ PRIntervalTime ticks;
+
+#if defined(_SVID_GETTOD) || defined(SONY)
+    (void)gettimeofday(&time); /* fallacy of course */
+#else
+    (void)gettimeofday(&time, NULL); /* fallacy of course */
+#endif
+ ticks = (PRUint32)time.tv_sec * PR_MSEC_PER_SEC; /* that's in milliseconds */
+ ticks += (PRUint32)time.tv_usec / PR_USEC_PER_MSEC; /* so's that */
+ return ticks;
+} /* _PR_UNIX_GetInterval */
+
+PR_IMPLEMENT(PRIntervalTime) _PR_UNIX_TicksPerSecond()
+{
+ return 1000; /* this needs some work :) */
+}
+
+/*
+ * _PR_UnixTransmitFile
+ *
+ * Send file fd across socket sd. If headers is non-NULL, 'hlen'
+ * bytes of headers are sent before sending the file.
+ *
+ * PR_TRANSMITFILE_CLOSE_SOCKET flag - close socket after sending file
+ *
+ * return number of bytes sent or -1 on error
+ *
+ */
+#define TRANSMITFILE_MMAP_CHUNK (256 * 1024)
+PR_IMPLEMENT(PRInt32) _PR_UnixTransmitFile(PRFileDesc *sd, PRFileDesc *fd,
+const void *headers, PRInt32 hlen, PRTransmitFileFlags flags,
+PRIntervalTime timeout)
+{
+ PRInt32 rv, count = 0;
+ PRInt32 len, index = 0;
+ struct stat statbuf;
+ struct PRIOVec iov[2];
+ void *addr;
+ PRInt32 err;
+
+ /* Get file size */
+ if (fstat(fd->secret->md.osfd, &statbuf) == -1) {
+ err = _MD_ERRNO();
+ switch (err) {
+ case EBADF:
+ PR_SetError(PR_BAD_DESCRIPTOR_ERROR, err);
+ break;
+ case EFAULT:
+ PR_SetError(PR_ACCESS_FAULT_ERROR, err);
+ break;
+ case EINTR:
+ PR_SetError(PR_PENDING_INTERRUPT_ERROR, err);
+ break;
+ case ETIMEDOUT:
+#ifdef ENOLINK
+ case ENOLINK:
+#endif
+ PR_SetError(PR_REMOTE_FILE_ERROR, err);
+ break;
+ default:
+ PR_SetError(PR_UNKNOWN_ERROR, err);
+ break;
+ }
+ count = -1;
+ goto done;
+ }
+ /*
+ * If the file is large, mmap and send the file in chunks so as
+ * to not consume too much virtual address space
+ */
+ len = statbuf.st_size < TRANSMITFILE_MMAP_CHUNK ? statbuf.st_size :
+ TRANSMITFILE_MMAP_CHUNK;
+ /*
+ * Map in (part of) file. Take care of zero-length files.
+ */
+ if (len) {
+ addr = mmap((caddr_t) 0, len, PROT_READ, MAP_PRIVATE,
+ fd->secret->md.osfd, 0);
+
+ if (addr == (void*)-1) {
+ _PR_MD_MAP_MMAP_ERROR(_MD_ERRNO());
+ count = -1;
+ goto done;
+ }
+ }
+ /*
+ * send headers, first, followed by the file
+ */
+ if (hlen) {
+ iov[index].iov_base = (char *) headers;
+ iov[index].iov_len = hlen;
+ index++;
+ }
+ iov[index].iov_base = (char*)addr;
+ iov[index].iov_len = len;
+ index++;
+ rv = PR_Writev(sd, iov, index, timeout);
+ if (len)
+ munmap(addr,len);
+ if (rv >= 0) {
+ PR_ASSERT(rv == hlen + len);
+ statbuf.st_size -= len;
+ count += rv;
+ } else {
+ count = -1;
+ goto done;
+ }
+ /*
+ * send remaining bytes of the file, if any
+ */
+ len = statbuf.st_size < TRANSMITFILE_MMAP_CHUNK ? statbuf.st_size :
+ TRANSMITFILE_MMAP_CHUNK;
+ while (len > 0) {
+ /*
+ * Map in (part of) file
+ */
+ PR_ASSERT((count - hlen) % TRANSMITFILE_MMAP_CHUNK == 0);
+ addr = mmap((caddr_t) 0, len, PROT_READ, MAP_PRIVATE,
+ fd->secret->md.osfd, count - hlen);
+
+ if (addr == (void*)-1) {
+ _PR_MD_MAP_MMAP_ERROR(_MD_ERRNO());
+ count = -1;
+ goto done;
+ }
+ rv = PR_Send(sd, addr, len, 0, timeout);
+ munmap(addr,len);
+ if (rv >= 0) {
+ PR_ASSERT(rv == len);
+ statbuf.st_size -= rv;
+ count += rv;
+ len = statbuf.st_size < TRANSMITFILE_MMAP_CHUNK ?
+ statbuf.st_size : TRANSMITFILE_MMAP_CHUNK;
+ } else {
+ count = -1;
+ goto done;
+ }
+ }
+done:
+ if ((count >= 0) && (flags & PR_TRANSMITFILE_CLOSE_SOCKET))
+ PR_Close(sd);
+ return count;
+}
+
+#if defined(HPUX11) && !defined(_PR_PTHREADS)
+
+/*
+ * _PR_HPUXTransmitFile
+ *
+ * Send file fd across socket sd. If headers is non-NULL, 'hlen'
+ * bytes of headers are sent before sending the file.
+ *
+ * PR_TRANSMITFILE_CLOSE_SOCKET flag - close socket after sending file
+ *
+ * return number of bytes sent or -1 on error
+ *
+ * This implementation takes advantage of the sendfile() system
+ * call available in HP-UX B.11.00.
+ *
+ * Known problem: sendfile() does not work with NSPR's malloc()
+ * functions. The reason is unknown. So if you want to use
+ * _PR_HPUXTransmitFile(), you must not override the native malloc()
+ * functions.
+ */
+
+PRInt32
+_PR_HPUXTransmitFile(PRFileDesc *sd, PRFileDesc *fd,
+ const void *headers, PRInt32 hlen, PRTransmitFileFlags flags,
+ PRIntervalTime timeout)
+{
+ struct stat statbuf;
+ PRInt32 nbytes_to_send;
+ off_t offset;
+ struct iovec hdtrl[2]; /* optional header and trailer buffers */
+ int send_flags;
+ PRInt32 count;
+ PRInt32 rv, err;
+ PRThread *me = _PR_MD_CURRENT_THREAD();
+
+ /* Get file size */
+ if (fstat(fd->secret->md.osfd, &statbuf) == -1) {
+ _PR_MD_MAP_FSTAT_ERROR(errno);
+ return -1;
+ }
+ nbytes_to_send = hlen + statbuf.st_size;
+ offset = 0;
+
+ hdtrl[0].iov_base = (void *) headers; /* cast away the 'const' */
+ hdtrl[0].iov_len = hlen;
+ hdtrl[1].iov_base = NULL;
+    hdtrl[1].iov_len = 0;
+ /*
+ * SF_DISCONNECT seems to disconnect the socket even if sendfile()
+ * only does a partial send on a nonblocking socket. This
+ * would prevent the subsequent sendfile() calls on that socket
+     * from working. So we don't use the SF_DISCONNECT flag.
+ */
+ send_flags = 0;
+ rv = 0;
+
+ while (1) {
+ count = sendfile(sd->secret->md.osfd, fd->secret->md.osfd,
+ offset, 0, hdtrl, send_flags);
+ PR_ASSERT(count <= nbytes_to_send);
+ if (count == -1) {
+ err = errno;
+ if (err == EINTR) {
+ if (_PR_PENDING_INTERRUPT(me)) {
+ me->flags &= ~_PR_INTERRUPT;
+ PR_SetError( PR_PENDING_INTERRUPT_ERROR, 0);
+ return -1;
+ }
+ continue; /* retry */
+ }
+ if (err != EAGAIN && err != EWOULDBLOCK) {
+ _MD_hpux_map_sendfile_error(err);
+ return -1;
+ }
+ count = 0;
+ }
+ rv += count;
+
+ if (count < nbytes_to_send) {
+ /*
+ * Optimization: if bytes sent is less than requested, call
+ * select before returning. This is because it is likely that
+ * the next sendfile() call will return EWOULDBLOCK.
+ */
+ if (!_PR_IS_NATIVE_THREAD(me)) {
+ if (_PR_WaitForFD(sd->secret->md.osfd,
+ PR_POLL_WRITE, timeout) == 0) {
+ if (_PR_PENDING_INTERRUPT(me)) {
+ me->flags &= ~_PR_INTERRUPT;
+ PR_SetError( PR_PENDING_INTERRUPT_ERROR, 0);
+ } else {
+ PR_SetError(PR_IO_TIMEOUT_ERROR, 0);
+ }
+ return -1;
+ } else if (_PR_PENDING_INTERRUPT(me)) {
+ me->flags &= ~_PR_INTERRUPT;
+ PR_SetError( PR_PENDING_INTERRUPT_ERROR, 0);
+ return -1;
+ }
+ } else {
+ if (socket_io_wait(sd->secret->md.osfd, WRITE_FD, timeout)< 0) {
+ return -1;
+ }
+ }
+
+ if (hdtrl[0].iov_len == 0) {
+ PR_ASSERT(hdtrl[0].iov_base == NULL);
+ offset += count;
+ } else if (count < hdtrl[0].iov_len) {
+ PR_ASSERT(offset == 0);
+ hdtrl[0].iov_base = (char *) hdtrl[0].iov_base + count;
+ hdtrl[0].iov_len -= count;
+ } else {
+ offset = count - hdtrl[0].iov_len;
+ hdtrl[0].iov_base = NULL;
+ hdtrl[0].iov_len = 0;
+ }
+ nbytes_to_send -= count;
+ } else {
+ break; /* done */
+ }
+ }
+
+ if (flags & PR_TRANSMITFILE_CLOSE_SOCKET) {
+ PR_Close(sd);
+ }
+ return rv;
+}
+
+#endif /* HPUX11 && !_PR_PTHREADS */
+
+#if !defined(_PR_PTHREADS)
+/*
+** Wait for I/O on a single descriptor.
+**
+** Return 0 if timed out or interrupted, else return 1.
+*/
+PRInt32 _PR_WaitForFD(PRInt32 osfd, PRUintn how, PRIntervalTime timeout)
+{
+ _PRUnixPollDesc pd;
+ PRPollQueue pq;
+ PRIntn is;
+ PRInt32 rv = 1;
+ _PRCPU *io_cpu;
+ PRThread *me = _PR_MD_CURRENT_THREAD();
+
+ PR_ASSERT(!(me->flags & _PR_IDLE_THREAD));
+ PR_LOG(_pr_io_lm, PR_LOG_MIN,
+ ("waiting to %s on osfd=%d",
+ (how == PR_POLL_READ) ? "read" : "write",
+ osfd));
+
+ if (timeout == PR_INTERVAL_NO_WAIT) return 0;
+
+ pd.osfd = osfd;
+ pd.in_flags = how;
+ pd.out_flags = 0;
+
+ pq.pds = &pd;
+ pq.npds = 1;
+
+ _PR_INTSOFF(is);
+ _PR_MD_IOQ_LOCK();
+ _PR_THREAD_LOCK(me);
+
+ if (_PR_PENDING_INTERRUPT(me)) {
+ _PR_THREAD_UNLOCK(me);
+ _PR_MD_IOQ_UNLOCK();
+ _PR_FAST_INTSON(is);
+ return 0;
+ }
+
+ pq.thr = me;
+ io_cpu = me->cpu;
+ pq.on_ioq = PR_TRUE;
+ pq.timeout = timeout;
+ _PR_ADD_TO_IOQ(pq, me->cpu);
+ if (how == PR_POLL_READ) {
+ FD_SET(osfd, &_PR_FD_READ_SET(me->cpu));
+ (_PR_FD_READ_CNT(me->cpu))[osfd]++;
+ } else if (how == PR_POLL_WRITE) {
+ FD_SET(osfd, &_PR_FD_WRITE_SET(me->cpu));
+ (_PR_FD_WRITE_CNT(me->cpu))[osfd]++;
+ } else {
+ FD_SET(osfd, &_PR_FD_EXCEPTION_SET(me->cpu));
+ (_PR_FD_EXCEPTION_CNT(me->cpu))[osfd]++;
+ }
+ if (_PR_IOQ_MAX_OSFD(me->cpu) < osfd)
+ _PR_IOQ_MAX_OSFD(me->cpu) = osfd;
+ if (_PR_IOQ_TIMEOUT(me->cpu) > timeout)
+ _PR_IOQ_TIMEOUT(me->cpu) = timeout;
+
+
+ _PR_SLEEPQ_LOCK(me->cpu);
+ _PR_ADD_SLEEPQ(me, timeout);
+ me->state = _PR_IO_WAIT;
+ me->io_pending = PR_TRUE;
+ me->io_suspended = PR_FALSE;
+ _PR_SLEEPQ_UNLOCK(me->cpu);
+ _PR_THREAD_UNLOCK(me);
+ _PR_MD_IOQ_UNLOCK();
+
+ _PR_MD_WAIT(me, timeout);
+ me->io_pending = PR_FALSE;
+ me->io_suspended = PR_FALSE;
+
+ /*
+ * This thread should run on the same cpu on which it was blocked; when
+ * the IO request times out the fd sets and fd counts for the
+ * cpu are updated below.
+ */
+ PR_ASSERT(me->cpu == io_cpu);
+
+ /*
+ ** If we timed out the pollq might still be on the ioq. Remove it
+ ** before continuing.
+ */
+ if (pq.on_ioq) {
+ _PR_MD_IOQ_LOCK();
+ /*
+ * Need to check pq.on_ioq again
+ */
+ if (pq.on_ioq) {
+ PR_REMOVE_LINK(&pq.links);
+ if (how == PR_POLL_READ) {
+ if ((--(_PR_FD_READ_CNT(me->cpu))[osfd]) == 0)
+ FD_CLR(osfd, &_PR_FD_READ_SET(me->cpu));
+
+ } else if (how == PR_POLL_WRITE) {
+ if ((--(_PR_FD_WRITE_CNT(me->cpu))[osfd]) == 0)
+ FD_CLR(osfd, &_PR_FD_WRITE_SET(me->cpu));
+ } else {
+ if ((--(_PR_FD_EXCEPTION_CNT(me->cpu))[osfd]) == 0)
+ FD_CLR(osfd, &_PR_FD_EXCEPTION_SET(me->cpu));
+ }
+ }
+ _PR_MD_IOQ_UNLOCK();
+ rv = 0;
+ }
+ _PR_FAST_INTSON(is);
+ return(rv);
+}
+
+/*
+ * Unblock threads waiting for I/O
+ * used when interrupting threads
+ *
+ * NOTE: The thread lock should be held when this function is called.
+ * On return, the thread lock is released.
+ */
+void _PR_Unblock_IO_Wait(PRThread *thr)
+{
+ int pri = thr->priority;
+ _PRCPU *cpu = thr->cpu;
+
+ /*
+ * GLOBAL threads wakeup periodically to check for interrupt
+ */
+ if (_PR_IS_NATIVE_THREAD(thr)) {
+ _PR_THREAD_UNLOCK(thr);
+ return;
+ }
+
+ PR_ASSERT(thr->flags & (_PR_ON_SLEEPQ | _PR_ON_PAUSEQ));
+ _PR_SLEEPQ_LOCK(cpu);
+ _PR_DEL_SLEEPQ(thr, PR_TRUE);
+ _PR_SLEEPQ_UNLOCK(cpu);
+
+ PR_ASSERT(!(thr->flags & _PR_IDLE_THREAD));
+ thr->state = _PR_RUNNABLE;
+ _PR_RUNQ_LOCK(cpu);
+ _PR_ADD_RUNQ(thr, cpu, pri);
+ _PR_RUNQ_UNLOCK(cpu);
+ _PR_THREAD_UNLOCK(thr);
+ _PR_MD_WAKEUP_WAITER(thr);
+}
+#endif /* !defined(_PR_PTHREADS) */
+
+/*
+ * When a nonblocking connect has completed, determine whether it
+ * succeeded or failed, and if it failed, what the error code is.
+ *
+ * The function returns the error code. An error code of 0 means
+ * that the nonblocking connect succeeded.
+ */
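+/*
+ * A sketch of typical (hypothetical) use: once select()/poll() reports the
+ * socket writable, call this routine to decide whether the pending
+ * connect() succeeded:
+ *
+ *     err = _MD_unix_get_nonblocking_connect_error(osfd);
+ *     if (err != 0) {
+ *         ... map err onto an NSPR error code and fail the connect ...
+ *     }
+ */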
+
+int _MD_unix_get_nonblocking_connect_error(int osfd)
+{
+#if defined(NCR) || defined(UNIXWARE) || defined(SNI) || defined(NEC)
+ /*
+ * getsockopt() fails with EPIPE, so use getmsg() instead.
+ */
+
+ int rv;
+ int flags = 0;
+ rv = getmsg(osfd, NULL, NULL, &flags);
+ PR_ASSERT(-1 == rv || 0 == rv);
+ if (-1 == rv && errno != EAGAIN && errno != EWOULDBLOCK) {
+ return errno;
+ }
+ return 0; /* no error */
+#else
+ int err;
+ _PRSockLen_t optlen = sizeof(err);
+ if (getsockopt(osfd, SOL_SOCKET, SO_ERROR, (char *) &err, &optlen) == -1) {
+ return errno;
+ } else {
+ return err;
+ }
+#endif
+}
+
+/************************************************************************/
+
+/*
+** Special hacks for xlib. Xlib/Xt/Xm is not re-entrant nor is it thread
+** safe. Unfortunately, neither is mozilla. To make these programs work
+** in a pre-emptive threaded environment, we need to use a lock.
+*/
+
+void PR_XLock()
+{
+ PR_EnterMonitor(_pr_Xfe_mon);
+}
+
+void PR_XUnlock()
+{
+ PR_ExitMonitor(_pr_Xfe_mon);
+}
+
+PRBool PR_XIsLocked()
+{
+ return (PR_InMonitor(_pr_Xfe_mon)) ? PR_TRUE : PR_FALSE;
+}
+
+void PR_XWait(int ms)
+{
+ PR_Wait(_pr_Xfe_mon, PR_MillisecondsToInterval(ms));
+}
+
+void PR_XNotify(void)
+{
+ PR_Notify(_pr_Xfe_mon);
+}
+
+void PR_XNotifyAll(void)
+{
+ PR_NotifyAll(_pr_Xfe_mon);
+}
+
+#ifdef HAVE_BSD_FLOCK
+
+#include <sys/file.h>
+
+PR_IMPLEMENT(PRStatus)
+_MD_LockFile(PRInt32 f)
+{
+ PRInt32 rv;
+ rv = flock(f, LOCK_EX);
+ if (rv == 0)
+ return PR_SUCCESS;
+ _PR_MD_MAP_FLOCK_ERROR(_MD_ERRNO());
+ return PR_FAILURE;
+}
+
+PR_IMPLEMENT(PRStatus)
+_MD_TLockFile(PRInt32 f)
+{
+ PRInt32 rv;
+ rv = flock(f, LOCK_EX|LOCK_NB);
+ if (rv == 0)
+ return PR_SUCCESS;
+ _PR_MD_MAP_FLOCK_ERROR(_MD_ERRNO());
+ return PR_FAILURE;
+}
+
+PR_IMPLEMENT(PRStatus)
+_MD_UnlockFile(PRInt32 f)
+{
+ PRInt32 rv;
+ rv = flock(f, LOCK_UN);
+ if (rv == 0)
+ return PR_SUCCESS;
+ _PR_MD_MAP_FLOCK_ERROR(_MD_ERRNO());
+ return PR_FAILURE;
+}
+#else
+
+PR_IMPLEMENT(PRStatus)
+_MD_LockFile(PRInt32 f)
+{
+ PRInt32 rv;
+ rv = lockf(f, F_LOCK, 0);
+ if (rv == 0)
+ return PR_SUCCESS;
+ _PR_MD_MAP_LOCKF_ERROR(_MD_ERRNO());
+ return PR_FAILURE;
+}
+
+PR_IMPLEMENT(PRStatus)
+_MD_TLockFile(PRInt32 f)
+{
+ PRInt32 rv;
+ rv = lockf(f, F_TLOCK, 0);
+ if (rv == 0)
+ return PR_SUCCESS;
+ _PR_MD_MAP_LOCKF_ERROR(_MD_ERRNO());
+ return PR_FAILURE;
+}
+
+PR_IMPLEMENT(PRStatus)
+_MD_UnlockFile(PRInt32 f)
+{
+ PRInt32 rv;
+ rv = lockf(f, F_ULOCK, 0);
+ if (rv == 0)
+ return PR_SUCCESS;
+ _PR_MD_MAP_LOCKF_ERROR(_MD_ERRNO());
+ return PR_FAILURE;
+}
+#endif
+
+PR_IMPLEMENT(PRStatus) _MD_gethostname(char *name, PRUint32 namelen)
+{
+ PRIntn rv;
+
+ rv = gethostname(name, namelen);
+ if (0 == rv) {
+ return PR_SUCCESS;
+ }
+ _PR_MD_MAP_GETHOSTNAME_ERROR(_MD_ERRNO());
+ return PR_FAILURE;
+}
+
+/*
+ *******************************************************************
+ *
+ * Memory-mapped files
+ *
+ *******************************************************************
+ */
+
+PRStatus _MD_CreateFileMap(PRFileMap *fmap, PRInt64 size)
+{
+ PRFileInfo info;
+ PRUint32 sz;
+
+ LL_L2UI(sz, size);
+ if (sz) {
+ if (PR_GetOpenFileInfo(fmap->fd, &info) == PR_FAILURE) {
+ return PR_FAILURE;
+ }
+ if (sz > info.size) {
+ /*
+ * Need to extend the file
+ */
+ if (fmap->prot != PR_PROT_READWRITE) {
+ PR_SetError(PR_NO_ACCESS_RIGHTS_ERROR, 0);
+ return PR_FAILURE;
+ }
+ if (PR_Seek(fmap->fd, sz - 1, PR_SEEK_SET) == -1) {
+ return PR_FAILURE;
+ }
+ if (PR_Write(fmap->fd, "", 1) != 1) {
+ return PR_FAILURE;
+ }
+ }
+ }
+ if (fmap->prot == PR_PROT_READONLY) {
+ fmap->md.prot = PROT_READ;
+ fmap->md.flags = 0;
+ } else if (fmap->prot == PR_PROT_READWRITE) {
+ fmap->md.prot = PROT_READ | PROT_WRITE;
+ fmap->md.flags = MAP_SHARED;
+ } else {
+ PR_ASSERT(fmap->prot == PR_PROT_WRITECOPY);
+ fmap->md.prot = PROT_READ | PROT_WRITE;
+ fmap->md.flags = MAP_PRIVATE;
+ }
+ return PR_SUCCESS;
+}
+
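+/*
+** Map 'len' bytes of the file at 'offset', using the protection and flags
+** computed in _MD_CreateFileMap. Returns NULL on failure.
+*/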
+void * _MD_MemMap(
+ PRFileMap *fmap,
+ PRInt64 offset,
+ PRUint32 len)
+{
+ PRInt32 off;
+ void *addr;
+
+ LL_L2I(off, offset);
+ if ((addr = mmap(0, len, fmap->md.prot, fmap->md.flags,
+ fmap->fd->secret->md.osfd, off)) == (void *) -1) {
+ _PR_MD_MAP_MMAP_ERROR(_MD_ERRNO());
+ addr = NULL;
+ }
+ return addr;
+}
+
+PRStatus _MD_MemUnmap(void *addr, PRUint32 len)
+{
+ if (munmap(addr, len) == 0) {
+ return PR_SUCCESS;
+ } else {
+ if (errno == EINVAL) {
+ PR_SetError(PR_INVALID_ARGUMENT_ERROR, errno);
+ } else {
+ PR_SetError(PR_UNKNOWN_ERROR, errno);
+ }
+ return PR_FAILURE;
+ }
+}
+
+PRStatus _MD_CloseFileMap(PRFileMap *fmap)
+{
+ PR_DELETE(fmap);
+ return PR_SUCCESS;
+}
diff --git a/pr/src/md/unix/unix_errors.c b/pr/src/md/unix/unix_errors.c
new file mode 100644
index 00000000..92d96c73
--- /dev/null
+++ b/pr/src/md/unix/unix_errors.c
@@ -0,0 +1,1484 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/*
+ * The contents of this file are subject to the Netscape Public License
+ * Version 1.0 (the "NPL"); you may not use this file except in
+ * compliance with the NPL. You may obtain a copy of the NPL at
+ * http://www.mozilla.org/NPL/
+ *
+ * Software distributed under the NPL is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the NPL
+ * for the specific language governing rights and limitations under the
+ * NPL.
+ *
+ * The Initial Developer of this code under the NPL is Netscape
+ * Communications Corporation. Portions created by Netscape are
+ * Copyright (C) 1998 Netscape Communications Corporation. All Rights
+ * Reserved.
+ */
+
+#include "prtypes.h"
+#include "md/_unix_errors.h"
+#include "prerror.h"
+#include <errno.h>
+
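+/*
+** Each routine below maps the errno value from a particular Unix system
+** call onto the corresponding NSPR error code and records it with
+** PR_SetError().
+*/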
+void _MD_unix_map_opendir_error(int err)
+{
+ switch (err) {
+ case ENOTDIR:
+ PR_SetError(PR_NOT_DIRECTORY_ERROR, err);
+ break;
+ case EACCES:
+ PR_SetError(PR_NO_ACCESS_RIGHTS_ERROR, err);
+ break;
+ case EMFILE:
+ PR_SetError(PR_PROC_DESC_TABLE_FULL_ERROR, err);
+ break;
+ case ENFILE:
+ PR_SetError(PR_SYS_DESC_TABLE_FULL_ERROR, err);
+ break;
+ case EFAULT:
+ PR_SetError(PR_ACCESS_FAULT_ERROR, err);
+ break;
+ case ELOOP:
+ PR_SetError(PR_LOOP_ERROR, err);
+ break;
+ case ENAMETOOLONG:
+ PR_SetError(PR_NAME_TOO_LONG_ERROR, err);
+ break;
+ case ENOENT:
+ PR_SetError(PR_FILE_NOT_FOUND_ERROR, err);
+ break;
+ default:
+ PR_SetError(PR_UNKNOWN_ERROR, err);
+ break;
+ }
+}
+
+void _MD_unix_map_closedir_error(int err)
+{
+ switch (err) {
+ case EINVAL:
+ case EBADF:
+ PR_SetError(PR_BAD_DESCRIPTOR_ERROR, err);
+ break;
+ default:
+ PR_SetError(PR_UNKNOWN_ERROR, err);
+ break;
+ }
+}
+
+void _MD_unix_readdir_error(int err)
+{
+
+ switch (err) {
+ case ENOENT:
+ PR_SetError(PR_NO_MORE_FILES_ERROR, err);
+ break;
+ case EBADF:
+ PR_SetError(PR_BAD_DESCRIPTOR_ERROR, err);
+ break;
+#ifdef IRIX
+#ifdef IRIX5_3
+#else
+ case EDIRCORRUPTED:
+ PR_SetError(PR_DIRECTORY_CORRUPTED_ERROR, err);
+ break;
+#endif
+#endif
+#ifdef EOVERFLOW
+ case EOVERFLOW:
+ PR_SetError(PR_IO_ERROR, err);
+ break;
+#endif
+ case EINVAL:
+ PR_SetError(PR_IO_ERROR, err);
+ break;
+#ifdef EBADMSG
+ case EBADMSG:
+ PR_SetError(PR_IO_ERROR, err);
+ break;
+#endif
+ case EDEADLK:
+ PR_SetError(PR_DEADLOCK_ERROR, err);
+ break;
+ case EFAULT:
+ PR_SetError(PR_ACCESS_FAULT_ERROR, err);
+ break;
+ case EINTR:
+ PR_SetError(PR_PENDING_INTERRUPT_ERROR, err);
+ break;
+ case EIO:
+ PR_SetError(PR_IO_ERROR, err);
+ break;
+ case ENOLCK:
+ PR_SetError(PR_FILE_IS_LOCKED_ERROR, err);
+ break;
+#ifdef ENOLINK
+ case ENOLINK:
+ PR_SetError(PR_REMOTE_FILE_ERROR, err);
+ break;
+#endif
+ case ENXIO:
+ PR_SetError(PR_IO_ERROR, err);
+ break;
+ default:
+ PR_SetError(PR_UNKNOWN_ERROR, err);
+ break;
+ }
+}
+
+void _MD_unix_map_unlink_error(int err)
+{
+ switch (err) {
+ case EACCES:
+ PR_SetError(PR_NO_ACCESS_RIGHTS_ERROR, err);
+ break;
+ case EBUSY:
+ PR_SetError(PR_FILESYSTEM_MOUNTED_ERROR, err);
+ break;
+ case EFAULT:
+ PR_SetError(PR_ACCESS_FAULT_ERROR, err);
+ break;
+ case EINTR:
+ PR_SetError(PR_PENDING_INTERRUPT_ERROR, err);
+ break;
+ case ELOOP:
+ PR_SetError(PR_LOOP_ERROR, err);
+ break;
+ case ENAMETOOLONG:
+ PR_SetError(PR_NAME_TOO_LONG_ERROR, err);
+ break;
+ case ENOENT:
+ PR_SetError(PR_FILE_NOT_FOUND_ERROR, err);
+ break;
+ case ENOTDIR:
+ PR_SetError(PR_NOT_DIRECTORY_ERROR, err);
+ break;
+ case EPERM:
+ PR_SetError(PR_IS_DIRECTORY_ERROR, err);
+ break;
+ case EROFS:
+ PR_SetError(PR_READ_ONLY_FILESYSTEM_ERROR, err);
+ break;
+#if !defined(OSF1) && !defined(FREEBSD) && !defined(BSDI)
+ case EMULTIHOP:
+ case ENOLINK:
+ PR_SetError(PR_REMOTE_FILE_ERROR, err);
+ break;
+#endif
+ default:
+ PR_SetError(PR_UNKNOWN_ERROR, err);
+ break;
+ }
+}
+
+void _MD_unix_map_stat_error(int err)
+{
+ switch (err) {
+ case EACCES:
+ PR_SetError(PR_NO_ACCESS_RIGHTS_ERROR, err);
+ break;
+ case EFAULT:
+ PR_SetError(PR_ACCESS_FAULT_ERROR, err);
+ break;
+ case EINTR:
+ PR_SetError(PR_PENDING_INTERRUPT_ERROR, err);
+ break;
+ case ETIMEDOUT:
+#if !defined(OSF1) && !defined(FREEBSD) && !defined(BSDI)
+ case EMULTIHOP:
+ case ENOLINK:
+#endif
+ PR_SetError(PR_REMOTE_FILE_ERROR, err);
+ break;
+ case ELOOP:
+ PR_SetError(PR_LOOP_ERROR, err);
+ break;
+ case ENAMETOOLONG:
+ PR_SetError(PR_NAME_TOO_LONG_ERROR, err);
+ break;
+ case ENOENT:
+ PR_SetError(PR_FILE_NOT_FOUND_ERROR, err);
+ break;
+ case ENOTDIR:
+ PR_SetError(PR_NOT_DIRECTORY_ERROR, err);
+ break;
+#ifdef EOVERFLOW
+ case EOVERFLOW:
+ PR_SetError(PR_BUFFER_OVERFLOW_ERROR, err);
+ break;
+#endif
+ default:
+ PR_SetError(PR_UNKNOWN_ERROR, err);
+ break;
+ }
+}
+
+void _MD_unix_map_fstat_error(int err)
+{
+ switch (err) {
+ case EBADF:
+ PR_SetError(PR_BAD_DESCRIPTOR_ERROR, err);
+ break;
+ case EFAULT:
+ PR_SetError(PR_ACCESS_FAULT_ERROR, err);
+ break;
+ case EINTR:
+ PR_SetError(PR_PENDING_INTERRUPT_ERROR, err);
+ break;
+ case ETIMEDOUT:
+#ifdef ENOLINK
+ case ENOLINK:
+#endif
+ PR_SetError(PR_REMOTE_FILE_ERROR, err);
+ break;
+#ifdef EOVERFLOW
+ case EOVERFLOW:
+ PR_SetError(PR_BUFFER_OVERFLOW_ERROR, err);
+ break;
+#endif
+ default:
+ PR_SetError(PR_UNKNOWN_ERROR, err);
+ break;
+ }
+}
+
+void _MD_unix_map_rename_error(int err)
+{
+ switch (err) {
+ case EACCES:
+ PR_SetError(PR_NO_ACCESS_RIGHTS_ERROR, err);
+ break;
+ case EBUSY:
+ PR_SetError(PR_FILESYSTEM_MOUNTED_ERROR, err);
+ break;
+#ifdef EDQUOT
+ case EDQUOT:
+ PR_SetError(PR_NO_DEVICE_SPACE_ERROR, err);
+ break;
+#endif
+ case EEXIST:
+ PR_SetError(PR_DIRECTORY_NOT_EMPTY_ERROR, err);
+ break;
+ case EFAULT:
+ PR_SetError(PR_ACCESS_FAULT_ERROR, err);
+ break;
+ case EINTR:
+ PR_SetError(PR_PENDING_INTERRUPT_ERROR, err);
+ break;
+ case EINVAL:
+ PR_SetError(PR_INVALID_ARGUMENT_ERROR, err);
+ break;
+ case EIO:
+ PR_SetError(PR_IO_ERROR, err);
+ break;
+ case EISDIR:
+ PR_SetError(PR_IS_DIRECTORY_ERROR, err);
+ break;
+ case ELOOP:
+ PR_SetError(PR_LOOP_ERROR, err);
+ break;
+#if !defined(OSF1) && !defined(FREEBSD) && !defined(BSDI)
+ case EMULTIHOP:
+ case ENOLINK:
+ PR_SetError(PR_REMOTE_FILE_ERROR, err);
+ break;
+#endif
+ case ENAMETOOLONG:
+ PR_SetError(PR_NAME_TOO_LONG_ERROR, err);
+ break;
+ case ENOENT:
+ PR_SetError(PR_FILE_NOT_FOUND_ERROR, err);
+ break;
+ case ENOSPC:
+ PR_SetError(PR_NO_DEVICE_SPACE_ERROR, err);
+ break;
+ case ENOTDIR:
+ PR_SetError(PR_NOT_DIRECTORY_ERROR, err);
+ break;
+ case EROFS:
+ PR_SetError(PR_READ_ONLY_FILESYSTEM_ERROR, err);
+ break;
+ case EXDEV:
+ PR_SetError(PR_NOT_SAME_DEVICE_ERROR, err);
+ break;
+ case EMLINK:
+ PR_SetError(PR_MAX_DIRECTORY_ENTRIES_ERROR, err);
+ break;
+ default:
+ PR_SetError(PR_UNKNOWN_ERROR, err);
+ break;
+ }
+}
+
+void _MD_unix_map_access_error(int err)
+{
+ switch (err) {
+ case EACCES:
+ PR_SetError(PR_NO_ACCESS_RIGHTS_ERROR, err);
+ break;
+ case EFAULT:
+ PR_SetError(PR_ACCESS_FAULT_ERROR, err);
+ break;
+ case EINTR:
+ PR_SetError(PR_PENDING_INTERRUPT_ERROR, err);
+ break;
+ case EINVAL:
+ PR_SetError(PR_INVALID_ARGUMENT_ERROR, err);
+ break;
+ case ELOOP:
+ PR_SetError(PR_LOOP_ERROR, err);
+ break;
+#if !defined(OSF1) && !defined(FREEBSD) && !defined(BSDI)
+ case EMULTIHOP:
+ case ENOLINK:
+#endif
+ case ETIMEDOUT:
+ PR_SetError(PR_REMOTE_FILE_ERROR, err);
+ break;
+ case ENAMETOOLONG:
+ PR_SetError(PR_NAME_TOO_LONG_ERROR, err);
+ break;
+ case ENOENT:
+ PR_SetError(PR_FILE_NOT_FOUND_ERROR, err);
+ break;
+ case ENOTDIR:
+ PR_SetError(PR_NOT_DIRECTORY_ERROR, err);
+ break;
+ case EROFS:
+ PR_SetError(PR_READ_ONLY_FILESYSTEM_ERROR, err);
+ break;
+ default:
+ PR_SetError(PR_UNKNOWN_ERROR, err);
+ break;
+ }
+}
+
+void _MD_unix_map_mkdir_error(int err)
+{
+ switch (err) {
+ case ENOTDIR:
+ PR_SetError(PR_NOT_DIRECTORY_ERROR, err);
+ break;
+ case ENOENT:
+ PR_SetError(PR_FILE_NOT_FOUND_ERROR, err);
+ break;
+ case ENAMETOOLONG:
+ PR_SetError(PR_NAME_TOO_LONG_ERROR, err);
+ break;
+ case EACCES:
+ PR_SetError(PR_NO_ACCESS_RIGHTS_ERROR, err);
+ break;
+ case EEXIST:
+ PR_SetError(PR_FILE_EXISTS_ERROR, err);
+ break;
+ case EROFS:
+ PR_SetError(PR_READ_ONLY_FILESYSTEM_ERROR, err);
+ break;
+ case EFAULT:
+ PR_SetError(PR_ACCESS_FAULT_ERROR, err);
+ break;
+ case ELOOP:
+ PR_SetError(PR_LOOP_ERROR, err);
+ break;
+ case EMLINK:
+ PR_SetError(PR_MAX_DIRECTORY_ENTRIES_ERROR, err);
+ break;
+ case ENOSPC:
+ PR_SetError(PR_NO_DEVICE_SPACE_ERROR, err);
+ break;
+#ifdef EDQUOT
+ case EDQUOT:
+ PR_SetError(PR_NO_DEVICE_SPACE_ERROR, err);
+ break;
+#endif
+ case EIO:
+ PR_SetError(PR_IO_ERROR, err);
+ break;
+#if !defined(OSF1) && !defined(FREEBSD) && !defined(BSDI)
+ case EMULTIHOP:
+ case ENOLINK:
+ PR_SetError(PR_REMOTE_FILE_ERROR, err);
+ break;
+#endif
+ default:
+ PR_SetError(PR_UNKNOWN_ERROR, err);
+ break;
+ }
+}
+
+void _MD_unix_map_rmdir_error(int err)
+{
+
+ switch (err) {
+ case EACCES:
+ PR_SetError(PR_NO_ACCESS_RIGHTS_ERROR, err);
+ break;
+ case EBUSY:
+ PR_SetError(PR_FILESYSTEM_MOUNTED_ERROR, err);
+ break;
+ case EEXIST:
+ PR_SetError(PR_DIRECTORY_NOT_EMPTY_ERROR, err);
+ break;
+ case EFAULT:
+ PR_SetError(PR_ACCESS_FAULT_ERROR, err);
+ break;
+ case EINVAL:
+ PR_SetError(PR_DIRECTORY_NOT_EMPTY_ERROR, err);
+ break;
+ case EIO:
+ PR_SetError(PR_IO_ERROR, err);
+ break;
+ case ELOOP:
+ PR_SetError(PR_LOOP_ERROR, err);
+ break;
+#if !defined(OSF1) && !defined(FREEBSD) && !defined(BSDI)
+ case EMULTIHOP:
+ case ENOLINK:
+#endif
+ case ETIMEDOUT:
+ PR_SetError(PR_REMOTE_FILE_ERROR, err);
+ break;
+ case ENAMETOOLONG:
+ PR_SetError(PR_NAME_TOO_LONG_ERROR, err);
+ break;
+ case ENOENT:
+ PR_SetError(PR_FILE_NOT_FOUND_ERROR, err);
+ break;
+ case ENOTDIR:
+ PR_SetError(PR_NOT_DIRECTORY_ERROR, err);
+ break;
+ case EROFS:
+ PR_SetError(PR_READ_ONLY_FILESYSTEM_ERROR, err);
+ break;
+ default:
+ PR_SetError(PR_UNKNOWN_ERROR, err);
+ break;
+ }
+}
+
+void _MD_unix_map_read_error(int err)
+{
+ switch (err) {
+ case EACCES:
+ PR_SetError(PR_NO_ACCESS_RIGHTS_ERROR, err);
+ break;
+ case EAGAIN:
+#if EWOULDBLOCK != EAGAIN
+ case EWOULDBLOCK:
+#endif
+ PR_SetError(PR_WOULD_BLOCK_ERROR, err);
+ break;
+ case EBADF:
+ PR_SetError(PR_BAD_DESCRIPTOR_ERROR, err);
+ break;
+#ifdef EBADMSG
+ case EBADMSG:
+ PR_SetError(PR_IO_ERROR, err);
+ break;
+#endif
+ case EDEADLK:
+ PR_SetError(PR_DEADLOCK_ERROR, err);
+ break;
+ case EFAULT:
+ PR_SetError(PR_ACCESS_FAULT_ERROR, err);
+ break;
+ case EINTR:
+ PR_SetError(PR_PENDING_INTERRUPT_ERROR, err);
+ break;
+ case EINVAL:
+ PR_SetError(PR_INVALID_METHOD_ERROR, err);
+ break;
+ case EIO:
+ PR_SetError(PR_IO_ERROR, err);
+ break;
+ case ENOLCK:
+ PR_SetError(PR_FILE_IS_LOCKED_ERROR, err);
+ break;
+ case ENXIO:
+ PR_SetError(PR_INVALID_ARGUMENT_ERROR, err);
+ break;
+ case EISDIR:
+ PR_SetError(PR_IS_DIRECTORY_ERROR, err);
+ break;
+ case ECONNRESET:
+ case EPIPE:
+ PR_SetError(PR_CONNECT_RESET_ERROR, err);
+ break;
+#ifdef ENOLINK
+ case ENOLINK:
+ PR_SetError(PR_REMOTE_FILE_ERROR, err);
+ break;
+#endif
+ default:
+ PR_SetError(PR_UNKNOWN_ERROR, err);
+ break;
+ }
+}
+
+void _MD_unix_map_write_error(int err)
+{
+ switch (err) {
+ case EAGAIN:
+#if EWOULDBLOCK != EAGAIN
+ case EWOULDBLOCK:
+#endif
+ PR_SetError(PR_WOULD_BLOCK_ERROR, err);
+ break;
+ case EBADF:
+ PR_SetError(PR_BAD_DESCRIPTOR_ERROR, err);
+ break;
+ case EDEADLK:
+ PR_SetError(PR_DEADLOCK_ERROR, err);
+ break;
+ case EFAULT:
+ PR_SetError(PR_ACCESS_FAULT_ERROR, err);
+ break;
+ case EFBIG:
+ PR_SetError(PR_FILE_TOO_BIG_ERROR, err);
+ break;
+ case EINTR:
+ PR_SetError(PR_PENDING_INTERRUPT_ERROR, err);
+ break;
+ case EINVAL:
+ PR_SetError(PR_INVALID_METHOD_ERROR, err);
+ break;
+ case EIO:
+ PR_SetError(PR_IO_ERROR, err);
+ break;
+ case ENOLCK:
+ PR_SetError(PR_FILE_IS_LOCKED_ERROR, err);
+ break;
+#ifdef ENOSR
+ case ENOSR:
+ PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, err);
+ break;
+#endif
+ case ENOSPC:
+ PR_SetError(PR_NO_DEVICE_SPACE_ERROR, err);
+ break;
+ case ENXIO:
+ PR_SetError(PR_INVALID_METHOD_ERROR, err);
+ break;
+ case ERANGE:
+ PR_SetError(PR_INVALID_METHOD_ERROR, err);
+ break;
+ case ETIMEDOUT:
+ PR_SetError(PR_REMOTE_FILE_ERROR, err);
+ break;
+ case ECONNRESET:
+ case EPIPE:
+ PR_SetError(PR_CONNECT_RESET_ERROR, err);
+ break;
+#ifdef EDQUOT
+ case EDQUOT:
+ PR_SetError(PR_NO_DEVICE_SPACE_ERROR, err);
+ break;
+#endif
+#ifdef ENOLINK
+ case ENOLINK:
+ PR_SetError(PR_REMOTE_FILE_ERROR, err);
+ break;
+#endif
+ default:
+ PR_SetError(PR_UNKNOWN_ERROR, err);
+ break;
+ }
+}
+
+void _MD_unix_map_lseek_error(int err)
+{
+ switch (err) {
+ case EBADF:
+ PR_SetError(PR_BAD_DESCRIPTOR_ERROR, err);
+ break;
+ case ESPIPE:
+ PR_SetError(PR_INVALID_METHOD_ERROR, err);
+ break;
+ case EINVAL:
+ PR_SetError(PR_INVALID_ARGUMENT_ERROR, err);
+ break;
+ default:
+ PR_SetError(PR_UNKNOWN_ERROR, err);
+ break;
+ }
+}
+
+void _MD_unix_map_fsync_error(int err)
+{
+ switch (err) {
+ case EBADF:
+ PR_SetError(PR_BAD_DESCRIPTOR_ERROR, err);
+ break;
+#ifdef ENOLINK
+ case ENOLINK:
+#endif
+ case ETIMEDOUT:
+ PR_SetError(PR_REMOTE_FILE_ERROR, err);
+ break;
+ case EINTR:
+ PR_SetError(PR_PENDING_INTERRUPT_ERROR, err);
+ break;
+ case EIO:
+ PR_SetError(PR_IO_ERROR, err);
+ break;
+ case EINVAL:
+ PR_SetError(PR_INVALID_METHOD_ERROR, err);
+ break;
+ default:
+ PR_SetError(PR_UNKNOWN_ERROR, err);
+ break;
+ }
+}
+
+void _MD_unix_map_close_error(int err)
+{
+ switch (err) {
+ case EBADF:
+ PR_SetError(PR_BAD_DESCRIPTOR_ERROR, err);
+ break;
+ case EINTR:
+ PR_SetError(PR_PENDING_INTERRUPT_ERROR, err);
+ break;
+#ifdef ENOLINK
+ case ENOLINK:
+#endif
+ case ETIMEDOUT:
+ PR_SetError(PR_REMOTE_FILE_ERROR, err);
+ break;
+ case EIO:
+ PR_SetError(PR_IO_ERROR, err);
+ break;
+ default:
+ PR_SetError(PR_UNKNOWN_ERROR, err);
+ break;
+ }
+}
+
+void _MD_unix_map_socket_error(int err)
+{
+ switch (err) {
+ case EPROTONOSUPPORT:
+ PR_SetError(PR_PROTOCOL_NOT_SUPPORTED_ERROR, err);
+ break;
+ case EMFILE:
+ PR_SetError(PR_PROC_DESC_TABLE_FULL_ERROR, err);
+ break;
+ case ENFILE:
+ PR_SetError(PR_SYS_DESC_TABLE_FULL_ERROR, err);
+ break;
+ case EACCES:
+ PR_SetError(PR_NO_ACCESS_RIGHTS_ERROR, err);
+ break;
+#if !defined(SCO_SV)
+ case ENOBUFS:
+#endif /* !defined(SCO_SV) */
+ case ENOMEM:
+#ifdef ENOSR
+    case ENOSR:
+#endif
+        PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, err);
+        break;
+ default:
+ PR_SetError(PR_UNKNOWN_ERROR, err);
+ break;
+ }
+}
+
+void _MD_unix_map_socketavailable_error(int err)
+{
+ PR_SetError(PR_BAD_DESCRIPTOR_ERROR, err);
+}
+
+void _MD_unix_map_recv_error(int err)
+{
+ switch (err) {
+ case EAGAIN:
+#if EWOULDBLOCK != EAGAIN
+ case EWOULDBLOCK:
+#endif
+ PR_SetError(PR_WOULD_BLOCK_ERROR, err);
+ break;
+ case EBADF:
+ PR_SetError(PR_BAD_DESCRIPTOR_ERROR, err);
+ break;
+ case ENOTSOCK:
+ PR_SetError(PR_NOT_SOCKET_ERROR, err);
+ break;
+ case EINTR:
+ PR_SetError(PR_PENDING_INTERRUPT_ERROR, err);
+ break;
+ case EFAULT:
+ PR_SetError(PR_ACCESS_FAULT_ERROR, err);
+ break;
+ case ENOMEM:
+ PR_SetError(PR_OUT_OF_MEMORY_ERROR, err);
+ break;
+ case ECONNRESET:
+ case EPIPE:
+ PR_SetError(PR_CONNECT_RESET_ERROR, err);
+ break;
+#ifdef ENOSR
+ case ENOSR:
+ PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, err);
+ break;
+#endif
+ default:
+ PR_SetError(PR_UNKNOWN_ERROR, err);
+ break;
+ }
+}
+
+void _MD_unix_map_recvfrom_error(int err)
+{
+ switch (err) {
+ case EAGAIN:
+#if EWOULDBLOCK != EAGAIN
+ case EWOULDBLOCK:
+#endif
+ PR_SetError(PR_WOULD_BLOCK_ERROR, err);
+ break;
+ case EBADF:
+ PR_SetError(PR_BAD_DESCRIPTOR_ERROR, err);
+ break;
+ case ENOTSOCK:
+ PR_SetError(PR_NOT_SOCKET_ERROR, err);
+ break;
+ case EINTR:
+ PR_SetError(PR_PENDING_INTERRUPT_ERROR, err);
+ break;
+ case EFAULT:
+ PR_SetError(PR_ACCESS_FAULT_ERROR, err);
+ break;
+ case ENOMEM:
+ PR_SetError(PR_OUT_OF_MEMORY_ERROR, err);
+ break;
+#ifdef ENOSR
+ case ENOSR:
+ PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, err);
+ break;
+#endif
+ case ECONNRESET:
+ PR_SetError(PR_CONNECT_RESET_ERROR, err);
+ break;
+ default:
+ PR_SetError(PR_UNKNOWN_ERROR, err);
+ break;
+ }
+}
+
+void _MD_unix_map_send_error(int err)
+{
+ switch (err) {
+ case EAGAIN:
+#if EWOULDBLOCK != EAGAIN
+ case EWOULDBLOCK:
+#endif
+ PR_SetError(PR_WOULD_BLOCK_ERROR, err);
+ break;
+ case EBADF:
+ PR_SetError(PR_BAD_DESCRIPTOR_ERROR, err);
+ break;
+ case ENOTSOCK:
+ PR_SetError(PR_NOT_SOCKET_ERROR, err);
+ break;
+ case EMSGSIZE:
+ case EINVAL:
+ PR_SetError(PR_INVALID_ARGUMENT_ERROR, err);
+ break;
+#if !defined(SCO_SV)
+ case ENOBUFS:
+ PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, err);
+ break;
+#endif /* !defined(SCO_SV) */
+ case ECONNREFUSED:
+ PR_SetError(PR_CONNECT_REFUSED_ERROR, err);
+ break;
+ case EISCONN:
+ PR_SetError(PR_IS_CONNECTED_ERROR, err);
+ break;
+ case EFAULT:
+ PR_SetError(PR_ACCESS_FAULT_ERROR, err);
+ break;
+ case EINTR:
+ PR_SetError(PR_PENDING_INTERRUPT_ERROR, err);
+ break;
+ case ENOMEM:
+ PR_SetError(PR_OUT_OF_MEMORY_ERROR, err);
+ break;
+#ifdef ENOSR
+ case ENOSR:
+ PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, err);
+ break;
+#endif
+ case ECONNRESET:
+ case EPIPE:
+ PR_SetError(PR_CONNECT_RESET_ERROR, err);
+ break;
+ default:
+ PR_SetError(PR_UNKNOWN_ERROR, err);
+ break;
+ }
+}
+
+void _MD_unix_map_sendto_error(int err)
+{
+ switch (err) {
+ case EAGAIN:
+#if EWOULDBLOCK != EAGAIN
+ case EWOULDBLOCK:
+#endif
+ PR_SetError(PR_WOULD_BLOCK_ERROR, err);
+ break;
+ case EBADF:
+ PR_SetError(PR_BAD_DESCRIPTOR_ERROR, err);
+ break;
+ case ENOTSOCK:
+ PR_SetError(PR_NOT_SOCKET_ERROR, err);
+ break;
+ case EMSGSIZE:
+ case EINVAL:
+ PR_SetError(PR_INVALID_ARGUMENT_ERROR, err);
+ break;
+#if !defined(SCO_SV)
+ case ENOBUFS:
+ PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, err);
+ break;
+#endif /* !defined(SCO_SV) */
+ case ECONNREFUSED:
+ PR_SetError(PR_CONNECT_REFUSED_ERROR, err);
+ break;
+ case EISCONN:
+ PR_SetError(PR_IS_CONNECTED_ERROR, err);
+ break;
+ case EFAULT:
+ PR_SetError(PR_ACCESS_FAULT_ERROR, err);
+ break;
+ case EINTR:
+ PR_SetError(PR_PENDING_INTERRUPT_ERROR, err);
+ break;
+ case ENOMEM:
+ PR_SetError(PR_OUT_OF_MEMORY_ERROR, err);
+ break;
+#ifdef ENOSR
+ case ENOSR:
+ PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, err);
+ break;
+#endif
+ case ECONNRESET:
+ case EPIPE:
+ PR_SetError(PR_CONNECT_RESET_ERROR, err);
+ break;
+ default:
+ PR_SetError(PR_UNKNOWN_ERROR, err);
+ break;
+ }
+}
+
+void _MD_unix_map_writev_error(int err)
+{
+ switch (err) {
+ case EAGAIN:
+#if EWOULDBLOCK != EAGAIN
+ case EWOULDBLOCK:
+#endif
+ PR_SetError(PR_WOULD_BLOCK_ERROR, err);
+ break;
+ case EBADF:
+ PR_SetError(PR_BAD_DESCRIPTOR_ERROR, err);
+ break;
+ case EFAULT:
+ PR_SetError(PR_ACCESS_FAULT_ERROR, err);
+ break;
+ case EINTR:
+ PR_SetError(PR_PENDING_INTERRUPT_ERROR, err);
+ break;
+#ifdef ENOSR
+ case ENOSR:
+ PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, err);
+ break;
+#endif
+ case EINVAL:
+ PR_SetError(PR_INVALID_ARGUMENT_ERROR, err);
+ break;
+ case ECONNRESET:
+ case EPIPE:
+ PR_SetError(PR_CONNECT_RESET_ERROR, err);
+ break;
+ default:
+ PR_SetError(PR_UNKNOWN_ERROR, err);
+ break;
+ }
+}
+
+void _MD_unix_map_accept_error(int err)
+{
+ switch (err) {
+ case EAGAIN:
+#if EWOULDBLOCK != EAGAIN
+ case EWOULDBLOCK:
+#endif
+ PR_SetError(PR_WOULD_BLOCK_ERROR, err);
+ break;
+ case EBADF:
+ PR_SetError(PR_BAD_DESCRIPTOR_ERROR, err);
+ break;
+ case ENOTSOCK:
+ PR_SetError(PR_NOT_SOCKET_ERROR, err);
+ break;
+ case EOPNOTSUPP:
+ case ENODEV:
+ PR_SetError(PR_NOT_TCP_SOCKET_ERROR, err);
+ break;
+ case EFAULT:
+ PR_SetError(PR_ACCESS_FAULT_ERROR, err);
+ break;
+ case EMFILE:
+ PR_SetError(PR_PROC_DESC_TABLE_FULL_ERROR, err);
+ break;
+ case ENFILE:
+ PR_SetError(PR_SYS_DESC_TABLE_FULL_ERROR, err);
+ break;
+ case EINTR:
+ PR_SetError(PR_PENDING_INTERRUPT_ERROR, err);
+ break;
+ case ENOMEM:
+ PR_SetError(PR_OUT_OF_MEMORY_ERROR, err);
+ break;
+#ifdef ENOSR
+ case ENOSR:
+ PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, err);
+ break;
+#endif
+#ifdef EPROTO
+ case EPROTO:
+ PR_SetError(PR_IO_ERROR, err);
+ break;
+#endif
+ default:
+ PR_SetError(PR_UNKNOWN_ERROR, err);
+ break;
+ }
+}
+
+void _MD_unix_map_connect_error(int err)
+{
+ switch (err) {
+ case EBADF:
+ PR_SetError(PR_BAD_DESCRIPTOR_ERROR, err);
+ break;
+ case EADDRNOTAVAIL:
+ PR_SetError(PR_ADDRESS_NOT_AVAILABLE_ERROR, err);
+ break;
+ case EINPROGRESS:
+ PR_SetError(PR_IN_PROGRESS_ERROR, err);
+ break;
+ case EALREADY:
+ PR_SetError(PR_ALREADY_INITIATED_ERROR, err);
+ break;
+ case ENOTSOCK:
+ PR_SetError(PR_NOT_SOCKET_ERROR, err);
+ break;
+ case EAFNOSUPPORT:
+ PR_SetError(PR_ADDRESS_NOT_SUPPORTED_ERROR, err);
+ break;
+ case EISCONN:
+ PR_SetError(PR_IS_CONNECTED_ERROR, err);
+ break;
+ case ETIMEDOUT:
+ PR_SetError(PR_IO_TIMEOUT_ERROR, err);
+ break;
+ case ECONNREFUSED:
+ PR_SetError(PR_CONNECT_REFUSED_ERROR, err);
+ break;
+ case ENETUNREACH:
+ PR_SetError(PR_NETWORK_UNREACHABLE_ERROR, err);
+ break;
+ case EADDRINUSE:
+ PR_SetError(PR_ADDRESS_IN_USE_ERROR, err);
+ break;
+ case EFAULT:
+ PR_SetError(PR_ACCESS_FAULT_ERROR, err);
+ break;
+ /*
+ * UNIX domain sockets are not supported in NSPR
+ */
+ case EACCES:
+ PR_SetError(PR_ADDRESS_NOT_SUPPORTED_ERROR, err);
+ break;
+ case EINTR:
+ PR_SetError(PR_PENDING_INTERRUPT_ERROR, err);
+ break;
+ case EINVAL:
+ PR_SetError(PR_INVALID_ARGUMENT_ERROR, err);
+ break;
+ case EIO:
+#if defined(UNIXWARE) || defined(SNI) || defined(NEC)
+ /*
+ * On some platforms, if we connect to a port on
+ * the local host (the loopback address) that no
+ * process is listening on, we get EIO instead
+ * of ECONNREFUSED.
+ */
+ PR_SetError(PR_CONNECT_REFUSED_ERROR, err);
+#else
+ PR_SetError(PR_IO_ERROR, err);
+#endif
+ break;
+ case ELOOP:
+ PR_SetError(PR_ADDRESS_NOT_SUPPORTED_ERROR, err);
+ break;
+ case ENOENT:
+ PR_SetError(PR_ADDRESS_NOT_SUPPORTED_ERROR, err);
+ break;
+#ifdef ENOSR
+ case ENOSR:
+ PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, err);
+ break;
+#endif
+ case ENXIO:
+ PR_SetError(PR_IO_ERROR, err);
+ break;
+ case EPROTOTYPE:
+ PR_SetError(PR_ADDRESS_NOT_SUPPORTED_ERROR, err);
+ break;
+ default:
+ PR_SetError(PR_UNKNOWN_ERROR, err);
+ break;
+ }
+}
+
+void _MD_unix_map_bind_error(int err)
+{
+ switch (err) {
+ case EBADF:
+ PR_SetError(PR_BAD_DESCRIPTOR_ERROR, err);
+ break;
+ case ENOTSOCK:
+ PR_SetError(PR_NOT_SOCKET_ERROR, err);
+ break;
+ case EFAULT:
+ PR_SetError(PR_ACCESS_FAULT_ERROR, err);
+ break;
+ case EADDRNOTAVAIL:
+ PR_SetError(PR_ADDRESS_NOT_AVAILABLE_ERROR, err);
+ break;
+ case EADDRINUSE:
+ PR_SetError(PR_ADDRESS_IN_USE_ERROR, err);
+ break;
+ case EACCES:
+ PR_SetError(PR_NO_ACCESS_RIGHTS_ERROR, err);
+ break;
+ case EINVAL:
+ PR_SetError(PR_SOCKET_ADDRESS_IS_BOUND_ERROR, err);
+ break;
+#ifdef ENOSR
+ case ENOSR:
+ PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, err);
+ break;
+#endif
+ /*
+ * UNIX domain sockets are not supported in NSPR
+ */
+ case EIO:
+ case EISDIR:
+ case ELOOP:
+ case ENOENT:
+ case ENOTDIR:
+ case EROFS:
+ PR_SetError(PR_ADDRESS_NOT_SUPPORTED_ERROR, err);
+ break;
+ default:
+ PR_SetError(PR_UNKNOWN_ERROR, err);
+ break;
+ }
+}
+
+void _MD_unix_map_listen_error(int err)
+{
+ switch (err) {
+ case EBADF:
+ PR_SetError(PR_BAD_DESCRIPTOR_ERROR, err);
+ break;
+ case ENOTSOCK:
+ PR_SetError(PR_NOT_SOCKET_ERROR, err);
+ break;
+ case EOPNOTSUPP:
+ PR_SetError(PR_NOT_TCP_SOCKET_ERROR, err);
+ break;
+ default:
+ PR_SetError(PR_UNKNOWN_ERROR, err);
+ break;
+ }
+}
+
+void _MD_unix_map_shutdown_error(int err)
+{
+ switch (err) {
+ case EBADF:
+ PR_SetError(PR_BAD_DESCRIPTOR_ERROR, err);
+ break;
+ case ENOTSOCK:
+ PR_SetError(PR_NOT_SOCKET_ERROR, err);
+ break;
+ case ENOTCONN:
+ PR_SetError(PR_NOT_CONNECTED_ERROR, err);
+ break;
+ case ENOMEM:
+ PR_SetError(PR_OUT_OF_MEMORY_ERROR, err);
+ break;
+#ifdef ENOSR
+ case ENOSR:
+ PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, err);
+ break;
+#endif
+ default:
+ PR_SetError(PR_UNKNOWN_ERROR, err);
+ break;
+ }
+}
+
+void _MD_unix_map_socketpair_error(int err)
+{
+ switch (err) {
+ case EMFILE:
+ PR_SetError(PR_PROC_DESC_TABLE_FULL_ERROR, err);
+ break;
+ case EFAULT:
+ PR_SetError(PR_ACCESS_FAULT_ERROR, err);
+ break;
+ case ENOMEM:
+#ifdef ENOSR
+ case ENOSR:
+#endif
+ PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, err);
+ break;
+ case EAFNOSUPPORT:
+ case EPROTONOSUPPORT:
+ case EOPNOTSUPP:
+ default:
+ PR_SetError(PR_UNKNOWN_ERROR, err);
+ break;
+ }
+}
+
+void _MD_unix_map_getsockname_error(int err)
+{
+ switch (err) {
+ case EBADF:
+ PR_SetError(PR_BAD_DESCRIPTOR_ERROR, err);
+ break;
+ case ENOTSOCK:
+ PR_SetError(PR_NOT_SOCKET_ERROR, err);
+ break;
+ case EFAULT:
+ PR_SetError(PR_ACCESS_FAULT_ERROR, err);
+ break;
+#if !defined(SCO_SV)
+ case ENOBUFS:
+#endif /* !defined(SCO_SV) */
+ case ENOMEM:
+#ifdef ENOSR
+ case ENOSR:
+#endif
+ PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, err);
+ break;
+ default:
+ PR_SetError(PR_UNKNOWN_ERROR, err);
+ break;
+ }
+}
+
+void _MD_unix_map_getpeername_error(int err)
+{
+
+ switch (err) {
+ case EBADF:
+ PR_SetError(PR_BAD_DESCRIPTOR_ERROR, err);
+ break;
+ case ENOTSOCK:
+ PR_SetError(PR_NOT_SOCKET_ERROR, err);
+ break;
+ case ENOTCONN:
+ PR_SetError(PR_NOT_CONNECTED_ERROR, err);
+ break;
+ case EFAULT:
+ PR_SetError(PR_ACCESS_FAULT_ERROR, err);
+ break;
+#if !defined(SCO_SV)
+ case ENOBUFS:
+#endif /* !defined(SCO_SV) */
+ case ENOMEM:
+#ifdef ENOSR
+ case ENOSR:
+#endif
+ PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, err);
+ break;
+ default:
+ PR_SetError(PR_UNKNOWN_ERROR, err);
+ break;
+ }
+}
+
+void _MD_unix_map_getsockopt_error(int err)
+{
+ switch (err) {
+ case EBADF:
+ PR_SetError(PR_BAD_DESCRIPTOR_ERROR, err);
+ break;
+ case ENOTSOCK:
+ PR_SetError(PR_NOT_SOCKET_ERROR, err);
+ break;
+ case ENOPROTOOPT:
+ PR_SetError(PR_INVALID_ARGUMENT_ERROR, err);
+ break;
+ case EFAULT:
+ PR_SetError(PR_ACCESS_FAULT_ERROR, err);
+ break;
+ case EINVAL:
+ PR_SetError(PR_BUFFER_OVERFLOW_ERROR, err);
+ break;
+ case ENOMEM:
+#ifdef ENOSR
+ case ENOSR:
+#endif
+ PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, err);
+ break;
+ default:
+ PR_SetError(PR_UNKNOWN_ERROR, err);
+ break;
+ }
+}
+
+void _MD_unix_map_setsockopt_error(int err)
+{
+ switch (err) {
+ case EBADF:
+ PR_SetError(PR_BAD_DESCRIPTOR_ERROR, err);
+ break;
+ case ENOTSOCK:
+ PR_SetError(PR_NOT_SOCKET_ERROR, err);
+ break;
+ case ENOPROTOOPT:
+ PR_SetError(PR_INVALID_ARGUMENT_ERROR, err);
+ break;
+ case EFAULT:
+ PR_SetError(PR_ACCESS_FAULT_ERROR, err);
+ break;
+ case EINVAL:
+ PR_SetError(PR_BUFFER_OVERFLOW_ERROR, err);
+ break;
+ case ENOMEM:
+#ifdef ENOSR
+ case ENOSR:
+#endif
+ PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, err);
+ break;
+ default:
+ PR_SetError(PR_UNKNOWN_ERROR, err);
+ break;
+ }
+}
+
+void _MD_unix_map_open_error(int err)
+{
+ switch (err) {
+ case EACCES:
+ PR_SetError(PR_NO_ACCESS_RIGHTS_ERROR, err);
+ break;
+ case EAGAIN:
+ PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, err);
+ break;
+ case EBUSY:
+ PR_SetError(PR_IO_ERROR, err);
+ break;
+ case EEXIST:
+ PR_SetError(PR_FILE_EXISTS_ERROR, err);
+ break;
+ case EFAULT:
+ PR_SetError(PR_ACCESS_FAULT_ERROR, err);
+ break;
+ case EINTR:
+ PR_SetError(PR_PENDING_INTERRUPT_ERROR, err);
+ break;
+ case EINVAL:
+ PR_SetError(PR_INVALID_ARGUMENT_ERROR, err);
+ break;
+ case EIO:
+ PR_SetError(PR_IO_ERROR, err);
+ break;
+ case EISDIR:
+ PR_SetError(PR_IS_DIRECTORY_ERROR, err);
+ break;
+ case ELOOP:
+ PR_SetError(PR_LOOP_ERROR, err);
+ break;
+ case EMFILE:
+ PR_SetError(PR_PROC_DESC_TABLE_FULL_ERROR, err);
+ break;
+ case ENAMETOOLONG:
+ PR_SetError(PR_NAME_TOO_LONG_ERROR, err);
+ break;
+ case ENFILE:
+ PR_SetError(PR_SYS_DESC_TABLE_FULL_ERROR, err);
+ break;
+ case ENODEV:
+ case ENOENT:
+ case ENXIO:
+ PR_SetError(PR_FILE_NOT_FOUND_ERROR, err);
+ break;
+ case ENOMEM:
+ PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, err);
+ break;
+ case ENOSPC:
+ PR_SetError(PR_NO_DEVICE_SPACE_ERROR, err);
+ break;
+#ifdef ENOSR
+ case ENOSR:
+#endif
+ PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, err);
+ break;
+ case ENOTDIR:
+ PR_SetError(PR_NOT_DIRECTORY_ERROR, err);
+ break;
+ case EPERM:
+ PR_SetError(PR_NO_ACCESS_RIGHTS_ERROR, err);
+ break;
+ case ETIMEDOUT:
+#if !defined(OSF1) && !defined(FREEBSD) && !defined(BSDI)
+ case EMULTIHOP:
+ case ENOLINK:
+#endif
+ PR_SetError(PR_REMOTE_FILE_ERROR, err);
+ break;
+ case EROFS:
+ PR_SetError(PR_READ_ONLY_FILESYSTEM_ERROR, err);
+ break;
+ default:
+ PR_SetError(PR_UNKNOWN_ERROR, err);
+ break;
+ }
+}
+
+void _MD_unix_map_mmap_error(int err)
+{
+
+ switch (err) {
+ case EBADF:
+ PR_SetError(PR_BAD_DESCRIPTOR_ERROR, err);
+ break;
+ case EAGAIN:
+ PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, err);
+ break;
+ case EACCES:
+ PR_SetError(PR_NO_ACCESS_RIGHTS_ERROR, err);
+ break;
+ case ENOMEM:
+ PR_SetError(PR_OUT_OF_MEMORY_ERROR, err);
+ break;
+ default:
+ PR_SetError(PR_UNKNOWN_ERROR, err);
+ break;
+ }
+}
+
+void _MD_unix_map_gethostname_error(int err)
+{
+ switch (err) {
+ case EFAULT:
+ PR_SetError(PR_ACCESS_FAULT_ERROR, err);
+ break;
+ default:
+ PR_SetError(PR_UNKNOWN_ERROR, err);
+ break;
+ }
+}
+
+void _MD_unix_map_select_error(int err)
+{
+ switch (err) {
+ case EBADF:
+ PR_SetError(PR_BAD_DESCRIPTOR_ERROR, err);
+ break;
+ case EINTR:
+ PR_SetError(PR_PENDING_INTERRUPT_ERROR, err);
+ break;
+ case EINVAL:
+ PR_SetError(PR_INVALID_ARGUMENT_ERROR, err);
+ break;
+ default:
+ PR_SetError(PR_UNKNOWN_ERROR, err);
+ break;
+ }
+}
+
+void _MD_unix_map_flock_error(int err)
+{
+ switch (err) {
+ case EBADF:
+ case EINVAL:
+ PR_SetError(PR_BAD_DESCRIPTOR_ERROR, err);
+ break;
+ case EWOULDBLOCK:
+ PR_SetError(PR_FILE_IS_LOCKED_ERROR, err);
+ break;
+ default:
+ PR_SetError(PR_UNKNOWN_ERROR, err);
+ break;
+ }
+}
+
+void _MD_unix_map_lockf_error(int err)
+{
+ switch (err) {
+ case EBADF:
+ PR_SetError(PR_BAD_DESCRIPTOR_ERROR, err);
+ break;
+ case EACCES:
+ PR_SetError(PR_FILE_IS_LOCKED_ERROR, err);
+ break;
+ case EDEADLK:
+ PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, err);
+ break;
+ default:
+ PR_SetError(PR_UNKNOWN_ERROR, err);
+ break;
+ }
+}
+
+#ifdef HPUX11
+void _MD_hpux_map_sendfile_error(int oserror)
+{
+ PRErrorCode prerror;
+
+ switch (oserror) {
+ case ENOTSOCK:
+ prerror = PR_NOT_SOCKET_ERROR;
+ break;
+ case EFAULT:
+ prerror = PR_ACCESS_FAULT_ERROR;
+ break;
+ case ENOBUFS:
+ prerror = PR_INSUFFICIENT_RESOURCES_ERROR;
+ break;
+ case EINVAL:
+ prerror = PR_INVALID_ARGUMENT_ERROR;
+ break;
+ case ENOTCONN:
+ prerror = PR_NOT_CONNECTED_ERROR;
+ break;
+ case EPIPE:
+ prerror = PR_CONNECT_RESET_ERROR;
+ break;
+ case ENOMEM:
+ prerror = PR_OUT_OF_MEMORY_ERROR;
+ break;
+ case EOPNOTSUPP:
+ prerror = PR_NOT_TCP_SOCKET_ERROR;
+ break;
+ default:
+ prerror = PR_UNKNOWN_ERROR;
+ }
+ PR_SetError(prerror, oserror);
+}
+#endif /* HPUX11 */
diff --git a/pr/src/md/unix/unixware.c b/pr/src/md/unix/unixware.c
new file mode 100644
index 00000000..4bc410bf
--- /dev/null
+++ b/pr/src/md/unix/unixware.c
@@ -0,0 +1,548 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/*
+ * The contents of this file are subject to the Netscape Public License
+ * Version 1.0 (the "NPL"); you may not use this file except in
+ * compliance with the NPL. You may obtain a copy of the NPL at
+ * http://www.mozilla.org/NPL/
+ *
+ * Software distributed under the NPL is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the NPL
+ * for the specific language governing rights and limitations under the
+ * NPL.
+ *
+ * The Initial Developer of this code under the NPL is Netscape
+ * Communications Corporation. Portions created by Netscape are
+ * Copyright (C) 1998 Netscape Communications Corporation. All Rights
+ * Reserved.
+ */
+#include "primpl.h"
+
+#if !defined (USE_SVR4_THREADS)
+
+/*
+ * using only NSPR threads here
+ */
+
+#include <setjmp.h>
+
+void _MD_EarlyInit(void)
+{
+}
+
+PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
+{
+ if (isCurrent) {
+ (void) setjmp(CONTEXT(t));
+ }
+ *np = sizeof(CONTEXT(t)) / sizeof(PRWord);
+ return (PRWord *) CONTEXT(t);
+}
+
+#ifdef ALARMS_BREAK_TCP /* I don't think they do */
+
+PRInt32 _MD_connect(PRInt32 osfd, const PRNetAddr *addr, PRInt32 addrlen,
+ PRIntervalTime timeout)
+{
+ PRInt32 rv;
+
+ _MD_BLOCK_CLOCK_INTERRUPTS();
+ rv = _connect(osfd,addr,addrlen);
+    _MD_UNBLOCK_CLOCK_INTERRUPTS();
+    return(rv);
+}
+
+PRInt32 _MD_accept(PRInt32 osfd, PRNetAddr *addr, PRInt32 addrlen,
+ PRIntervalTime timeout)
+{
+ PRInt32 rv;
+
+ _MD_BLOCK_CLOCK_INTERRUPTS();
+ rv = _accept(osfd,addr,addrlen);
+ _MD_UNBLOCK_CLOCK_INTERRUPTS();
+ return(rv);
+}
+#endif
+
+/*
+ * These are also implemented in pratom.c using NSPR locks. Any reason
+ * this might be better or worse? If you like this better, define
+ * _PR_HAVE_ATOMIC_OPS in include/md/unixware.h
+ */
+#ifdef _PR_HAVE_ATOMIC_OPS
+/* Atomic operations */
+#include <stdio.h>
+static FILE *_uw_semf;
+
+void
+_MD_INIT_ATOMIC(void)
+{
+ /* Sigh. Sure wish SYSV semaphores weren't such a pain to use */
+ if ((_uw_semf = tmpfile()) == NULL)
+ PR_ASSERT(0);
+
+ return;
+}
+
+void
+_MD_ATOMIC_INCREMENT(PRInt32 *val)
+{
+ flockfile(_uw_semf);
+ (*val)++;
+    funlockfile(_uw_semf);
+}
+
+void
+_MD_ATOMIC_DECREMENT(PRInt32 *val)
+{
+ flockfile(_uw_semf);
+ (*val)--;
+    funlockfile(_uw_semf);
+}
+
+void
+_MD_ATOMIC_SET(PRInt32 *val, PRInt32 newval)
+{
+ flockfile(_uw_semf);
+ *val = newval;
+    funlockfile(_uw_semf);
+}
+#endif
+
+void
+_MD_SET_PRIORITY(_MDThread *thread, PRUintn newPri)
+{
+ return;
+}
+
+PRStatus
+_MD_InitializeThread(PRThread *thread)
+{
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAIT(PRThread *thread, PRIntervalTime ticks)
+{
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ _PR_MD_SWITCH_CONTEXT(thread);
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAKEUP_WAITER(PRThread *thread)
+{
+ if (thread) {
+ PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
+ }
+ return PR_SUCCESS;
+}
+
+/* These functions should not be called for Unixware */
+void
+_MD_YIELD(void)
+{
+ PR_NOT_REACHED("_MD_YIELD should not be called for Unixware.");
+}
+
+PRStatus
+_MD_CREATE_THREAD(
+ PRThread *thread,
+ void (*start) (void *),
+ PRThreadPriority priority,
+ PRThreadScope scope,
+ PRThreadState state,
+ PRUint32 stackSize)
+{
+    PR_NOT_REACHED("_MD_CREATE_THREAD should not be called for Unixware.");
+    return PR_FAILURE;
+}
+
+#else /* USE_SVR4_THREADS */
+
+/* NOTE:
+ * SPARC v9 (Ultras) do have an atomic test-and-set operation. But
+ * SPARC v8 doesn't. We should detect in the init if we are running on
+ * v8 or v9, and then use assembly where we can.
+ */
+
+#include <thread.h>
+#include <synch.h>
+
+static mutex_t _unixware_atomic = DEFAULTMUTEX;
+
+#define TEST_THEN_ADD(where, inc) \
+ if (mutex_lock(&_unixware_atomic) != 0)\
+ PR_ASSERT(0);\
+ *where += inc;\
+ if (mutex_unlock(&_unixware_atomic) != 0)\
+ PR_ASSERT(0);
+
+#define TEST_THEN_SET(where, val) \
+ if (mutex_lock(&_unixware_atomic) != 0)\
+ PR_ASSERT(0);\
+ *where = val;\
+ if (mutex_unlock(&_unixware_atomic) != 0)\
+ PR_ASSERT(0);
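+/*
+ * Note: these macros emulate atomic operations by serializing every update
+ * through the single process-wide mutex above. For example,
+ *   _MD_ATOMIC_INCREMENT(&count)
+ * expands to mutex_lock(&_unixware_atomic); *(&count) += 1;
+ * mutex_unlock(&_unixware_atomic); so correctness relies on all updaters
+ * going through these same macros.
+ */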
+
+void
+_MD_INIT_ATOMIC(void)
+{
+}
+
+void
+_MD_ATOMIC_INCREMENT(PRInt32 *val)
+{
+ TEST_THEN_ADD(val, 1);
+}
+
+void
+_MD_ATOMIC_DECREMENT(PRInt32 *val)
+{
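+    /* Adding 0xffffffff to a 32-bit value wraps around, which is the same
+     * as subtracting 1. */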
+ TEST_THEN_ADD(val, 0xffffffff);
+}
+
+void
+_MD_ATOMIC_SET(PRInt32 *val, PRInt32 newval)
+{
+ TEST_THEN_SET(val, newval);
+}
+
+#include <signal.h>
+#include <errno.h>
+#include <fcntl.h>
+
+#include <sys/lwp.h>
+#include <sys/procfs.h>
+#include <sys/syscall.h>
+
+
+THREAD_KEY_T threadid_key;
+THREAD_KEY_T cpuid_key;
+THREAD_KEY_T last_thread_key;
+static sigset_t set, oldset;
+
+void _MD_EarlyInit(void)
+{
+ THR_KEYCREATE(&threadid_key, NULL);
+ THR_KEYCREATE(&cpuid_key, NULL);
+ THR_KEYCREATE(&last_thread_key, NULL);
+ sigemptyset(&set);
+ sigaddset(&set, SIGALRM);
+}
+
+PRStatus _MD_CREATE_THREAD(PRThread *thread,
+ void (*start)(void *),
+ PRThreadPriority priority,
+ PRThreadScope scope,
+ PRThreadState state,
+ PRUint32 stackSize)
+{
+ long flags;
+
+ /* mask out SIGALRM for native thread creation */
+ thr_sigsetmask(SIG_BLOCK, &set, &oldset);
+
+ flags = (state == PR_JOINABLE_THREAD ? THR_SUSPENDED/*|THR_NEW_LWP*/
+ : THR_SUSPENDED|THR_DETACHED/*|THR_NEW_LWP*/);
+ if (thread->flags & _PR_GCABLE_THREAD)
+ flags |= THR_BOUND;
+
+ if (thr_create(NULL, thread->stack->stackSize,
+ (void *(*)(void *)) start, (void *) thread,
+ flags,
+ &thread->md.handle)) {
+ thr_sigsetmask(SIG_SETMASK, &oldset, NULL);
+ return PR_FAILURE;
+ }
+
+
+ /* When the thread starts running, then the lwpid is set to the right
+ * value. Until then we want to mark this as 'uninit' so that
+ * its register state is initialized properly for GC */
+
+ thread->md.lwpid = -1;
+ thr_sigsetmask(SIG_SETMASK, &oldset, NULL);
+ _MD_NEW_SEM(&thread->md.waiter_sem, 0);
+
+ if (scope == PR_GLOBAL_THREAD) {
+ thread->flags |= _PR_GLOBAL_SCOPE;
+ }
+
+ /*
+ ** Set the thread priority. This will also place the thread on
+ ** the runQ.
+ **
+ ** Force PR_SetThreadPriority to set the priority by
+ ** setting thread->priority to 100.
+ */
+ {
+ int pri;
+ pri = thread->priority;
+ thread->priority = 100;
+ PR_SetThreadPriority( thread, pri );
+
+ PR_LOG(_pr_thread_lm, PR_LOG_MIN,
+ ("(0X%x)[Start]: on to runq at priority %d",
+ thread, thread->priority));
+ }
+
+ /* Activate the thread */
+ if (thr_continue( thread->md.handle ) ) {
+ return PR_FAILURE;
+ }
+ return PR_SUCCESS;
+}
+
+void _MD_cleanup_thread(PRThread *thread)
+{
+ thread_t hdl;
+ PRMonitor *mon;
+
+ hdl = thread->md.handle;
+
+ /*
+ ** First, suspend the thread (unless it's the active one)
+ ** Because we suspend it first, we don't have to use LOCK_SCHEDULER to
+ ** prevent both of us modifying the thread structure at the same time.
+ */
+ if ( thread != _PR_MD_CURRENT_THREAD() ) {
+ thr_suspend(hdl);
+ }
+ PR_LOG(_pr_thread_lm, PR_LOG_MIN,
+ ("(0X%x)[DestroyThread]\n", thread));
+
+ _MD_DESTROY_SEM(&thread->md.waiter_sem);
+}
+
+void _MD_SET_PRIORITY(_MDThread *md_thread, PRUintn newPri)
+{
+ if(thr_setprio((thread_t)md_thread->handle, newPri)) {
+ PR_LOG(_pr_thread_lm, PR_LOG_MIN,
+ ("_PR_SetThreadPriority: can't set thread priority\n"));
+ }
+}
+
+void _MD_WAIT_CV(
+ struct _MDCVar *md_cv, struct _MDLock *md_lock, PRIntervalTime timeout)
+{
+ struct timespec tt;
+ PRUint32 msec;
+ int rv;
+ PRThread *me = _PR_MD_CURRENT_THREAD();
+
+ msec = PR_IntervalToMilliseconds(timeout);
+
+ GETTIME (&tt);
+
+ tt.tv_sec += msec / PR_MSEC_PER_SEC;
+ tt.tv_nsec += (msec % PR_MSEC_PER_SEC) * PR_NSEC_PER_MSEC;
+ /* Check for nsec overflow - otherwise we'll get an EINVAL */
+ if (tt.tv_nsec >= PR_NSEC_PER_SEC) {
+ tt.tv_sec++;
+ tt.tv_nsec -= PR_NSEC_PER_SEC;
+ }
+ me->md.sp = unixware_getsp();
+
+
+ /* XXX Solaris 2.5.x gives back EINTR occasionally for no reason
+ * hence ignore EINTR for now */
+
+ COND_TIMEDWAIT(&md_cv->cv, &md_lock->lock, &tt);
+}
+
+void _MD_lock(struct _MDLock *md_lock)
+{
+ mutex_lock(&md_lock->lock);
+}
+
+void _MD_unlock(struct _MDLock *md_lock)
+{
+ mutex_unlock(&((md_lock)->lock));
+}
+
+
+PRThread *_pr_current_thread_tls()
+{
+ PRThread *ret;
+
+ thr_getspecific(threadid_key, (void **)&ret);
+ return ret;
+}
+
+PRStatus
+_MD_WAIT(PRThread *thread, PRIntervalTime ticks)
+{
+ _MD_WAIT_SEM(&thread->md.waiter_sem);
+ return PR_SUCCESS;
+}
+
+PRStatus
+_MD_WAKEUP_WAITER(PRThread *thread)
+{
+ if (thread == NULL) {
+ return PR_SUCCESS;
+ }
+ _MD_POST_SEM(&thread->md.waiter_sem);
+ return PR_SUCCESS;
+}
+
+_PRCPU *_pr_current_cpu_tls()
+{
+ _PRCPU *ret;
+
+ thr_getspecific(cpuid_key, (void **)&ret);
+ return ret;
+}
+
+PRThread *_pr_last_thread_tls()
+{
+ PRThread *ret;
+
+ thr_getspecific(last_thread_key, (void **)&ret);
+ return ret;
+}
+
+_MDLock _pr_ioq_lock;
+
+void _MD_INIT_IO (void)
+{
+ _MD_NEW_LOCK(&_pr_ioq_lock);
+}
+
+PRStatus _MD_InitializeThread(PRThread *thread)
+{
+ if (!_PR_IS_NATIVE_THREAD(thread))
+        return PR_SUCCESS;
+    /* prime the sp; subtract 4 so we don't hit the assert that
+ * curr sp > base_stack
+ */
+ thread->md.sp = (uint_t) thread->stack->allocBase - sizeof(long);
+ thread->md.lwpid = _lwp_self();
+ thread->md.handle = THR_SELF();
+
+ /* all threads on Solaris are global threads from NSPR's perspective
+ * since all of them are mapped to Solaris threads.
+ */
+ thread->flags |= _PR_GLOBAL_SCOPE;
+
+    /* For the primordial/attached thread, we don't create an underlying native
+     * thread, so _MD_CREATE_THREAD() does not get called. We do that
+     * initialization here instead: allocating the thread's synchronization
+     * variables and setting the underlying native thread's priority.
+ */
+ if (thread->flags & (_PR_PRIMORDIAL | _PR_ATTACHED)) {
+ _MD_NEW_SEM(&thread->md.waiter_sem, 0);
+ _MD_SET_PRIORITY(&(thread->md), thread->priority);
+ }
+ return PR_SUCCESS;
+}
+
+static sigset_t old_mask; /* store away original gc thread sigmask */
+static int gcprio; /* store away original gc thread priority */
+static lwpid_t *all_lwps=NULL; /* list of lwps that we suspended */
+static int num_lwps ;
+static int suspendAllOn = 0;
+
+#define VALID_SP(sp, bottom, top) \
+ (((uint_t)(sp)) > ((uint_t)(bottom)) && ((uint_t)(sp)) < ((uint_t)(top)))
+
+void unixware_preempt_off()
+{
+ sigset_t set;
+ (void)sigfillset(&set);
+ sigprocmask (SIG_SETMASK, &set, &old_mask);
+}
+
+void unixware_preempt_on()
+{
+ sigprocmask (SIG_SETMASK, &old_mask, NULL);
+}
+
+void _MD_Begin_SuspendAll()
+{
+ unixware_preempt_off();
+
+ PR_LOG(_pr_gc_lm, PR_LOG_ALWAYS, ("Begin_SuspendAll\n"));
+ /* run at highest prio so I cannot be preempted */
+ thr_getprio(thr_self(), &gcprio);
+ thr_setprio(thr_self(), 0x7fffffff);
+ suspendAllOn = 1;
+}
+
+void _MD_End_SuspendAll()
+{
+}
+
+void _MD_End_ResumeAll()
+{
+ PR_LOG(_pr_gc_lm, PR_LOG_ALWAYS, ("End_ResumeAll\n"));
+ thr_setprio(thr_self(), gcprio);
+ unixware_preempt_on();
+ suspendAllOn = 0;
+}
+
+void _MD_Suspend(PRThread *thr)
+{
+ int lwp_fd, result;
+ int lwp_main_proc_fd = 0;
+
+ thr_suspend(thr->md.handle);
+ if (!(thr->flags & _PR_GCABLE_THREAD))
+ return;
+ /* XXX Primordial thread can't be bound to an lwp, hence there is no
+ * way we can assume that we can get the lwp status for primordial
+ * thread reliably. Hence we skip this for primordial thread, hoping
+ * that the SP is saved during lock and cond. wait.
+ * XXX - Again this is concern only for java interpreter, not for the
+ * server, 'cause primordial thread in the server does not do java work
+ */
+ if (thr->flags & _PR_PRIMORDIAL)
+ return;
+
+ /* if the thread is not started yet then don't do anything */
+ if (!suspendAllOn || thr->md.lwpid == -1)
+ return;
+
+}
+void _MD_Resume(PRThread *thr)
+{
+ if (!(thr->flags & _PR_GCABLE_THREAD) || !suspendAllOn){
+        /* XXX While suspendAllOn is set, we may be in the middle of lwp_suspend
+         * calls, and during that window we cannot make any thread-library or
+         * libc calls. Hence make sure that no resume is requested for a
+         * non-GCable thread while suspendAllOn is set. */
+ PR_ASSERT(!suspendAllOn);
+ thr_continue(thr->md.handle);
+ return;
+ }
+ if (thr->md.lwpid == -1)
+ return;
+
+ if ( _lwp_continue(thr->md.lwpid) < 0) {
+ PR_ASSERT(0); /* ARGH, we are hosed! */
+ }
+}
+
+
+PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
+{
+ if (isCurrent) {
+ (void) getcontext(CONTEXT(t)); /* XXX tune me: set md_IRIX.c */
+ }
+ *np = NGREG;
+ if (t->md.lwpid == -1)
+ memset(&t->md.context.uc_mcontext.gregs[0], 0, NGREG * sizeof(PRWord));
+ return (PRWord*) &t->md.context.uc_mcontext.gregs[0];
+}
+
+int
+_pr_unixware_clock_gettime (struct timespec *tp)
+{
+ struct timeval tv;
+
+ gettimeofday(&tv, NULL);
+ tp->tv_sec = tv.tv_sec;
+ tp->tv_nsec = tv.tv_usec * 1000;
+ return 0;
+}
+
+
+#endif /* USE_SVR4_THREADS */
diff --git a/pr/src/md/unix/uxproces.c b/pr/src/md/unix/uxproces.c
new file mode 100644
index 00000000..7f8ab887
--- /dev/null
+++ b/pr/src/md/unix/uxproces.c
@@ -0,0 +1,715 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/*
+ * The contents of this file are subject to the Netscape Public License
+ * Version 1.0 (the "NPL"); you may not use this file except in
+ * compliance with the NPL. You may obtain a copy of the NPL at
+ * http://www.mozilla.org/NPL/
+ *
+ * Software distributed under the NPL is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the NPL
+ * for the specific language governing rights and limitations under the
+ * NPL.
+ *
+ * The Initial Developer of this code under the NPL is Netscape
+ * Communications Corporation. Portions created by Netscape are
+ * Copyright (C) 1998 Netscape Communications Corporation. All Rights
+ * Reserved.
+ */
+
+#include "primpl.h"
+
+#include <sys/types.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <sys/wait.h>
+
+/*
+ **********************************************************************
+ *
+ * The Unix process routines
+ *
+ **********************************************************************
+ */
+
+#define _PR_SIGNALED_EXITSTATUS 256
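+/*
+ * WEXITSTATUS() yields values in the range 0-255, so 256 is used as an
+ * out-of-band exit status meaning "the child was terminated by a signal"
+ * (see ExtractExitStatus below).
+ */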
+
+typedef enum pr_PidState {
+ _PR_PID_DETACHED,
+ _PR_PID_REAPED,
+ _PR_PID_WAITING
+} pr_PidState;
+
+typedef struct pr_PidRecord {
+ pid_t pid;
+ int exitStatus;
+ pr_PidState state;
+ PRCondVar *reapedCV;
+ struct pr_PidRecord *next;
+} pr_PidRecord;
+
+/*
+ * Irix sprocs and LinuxThreads are actually a kind of processes
+ * that can share the virtual address space and file descriptors.
+ */
+#if (defined(IRIX) && !defined(_PR_PTHREADS)) \
+ || (defined(LINUX) && defined(_PR_PTHREADS))
+#define _PR_SHARE_CLONES
+#endif
+
+/*
+ * The macro _PR_NATIVE_THREADS indicates that we are
+ * using native threads only, so waitpid() blocks just the
+ * calling thread, not the process. In this case, the waitpid
+ * daemon thread can safely block in waitpid(). So we don't
+ * need to catch SIGCHLD, and the pipe to unblock PR_Poll() is
+ * also not necessary.
+ */
+
+#if defined(_PR_GLOBAL_THREADS_ONLY) \
+ || (defined(_PR_PTHREADS) && !defined(LINUX))
+#define _PR_NATIVE_THREADS
+#endif
+
+/*
+ * All the static variables used by the Unix process routines are
+ * collected in this structure.
+ */
+
+static struct {
+ PRCallOnceType once;
+ PRThread *thread;
+ PRLock *ml;
+#if defined(_PR_NATIVE_THREADS)
+ PRInt32 numProcs;
+ PRCondVar *cv;
+#else
+ int pipefd[2];
+#endif
+ pr_PidRecord **pidTable;
+
+#ifdef _PR_SHARE_CLONES
+ struct pr_CreateProcOp *opHead, *opTail;
+#endif
+} pr_wp;
+
+#ifdef _PR_SHARE_CLONES
+static int pr_waitpid_daemon_exit;
+
+void
+_MD_unix_terminate_waitpid_daemon(void)
+{
+ if (pr_wp.thread) {
+ pr_waitpid_daemon_exit = 1;
+ write(pr_wp.pipefd[1], "", 1);
+ PR_JoinThread(pr_wp.thread);
+ }
+}
+#endif
+
+static PRStatus _MD_InitProcesses(void);
+#if !defined(_PR_NATIVE_THREADS)
+static void pr_InstallSigchldHandler(void);
+#endif
+
+static PRProcess *
+ForkAndExec(
+ const char *path,
+ char *const *argv,
+ char *const *envp,
+ const PRProcessAttr *attr)
+{
+ PRProcess *process;
+
+ process = PR_NEW(PRProcess);
+ if (!process) {
+ PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
+ return NULL;
+ }
+
+ process->md.pid = fork();
+ if ((pid_t) -1 == process->md.pid) {
+ PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, errno);
+ PR_DELETE(process);
+ return NULL;
+ } else if (0 == process->md.pid) { /* the child process */
+ /*
+ * If the child process needs to exit, it must call _exit().
+ * Do not call exit(), because exit() will flush and close
+ * the standard I/O file descriptors, and hence corrupt
+ * the parent process's standard I/O data structures.
+ */
+
+ if (attr) {
+ if (attr->stdinFd
+ && attr->stdinFd->secret->md.osfd != 0) {
+ if (dup2(attr->stdinFd->secret->md.osfd, 0) != 0) {
+ _exit(1); /* failed */
+ }
+ close(attr->stdinFd->secret->md.osfd);
+ }
+ if (attr->stdoutFd
+ && attr->stdoutFd->secret->md.osfd != 1) {
+ if (dup2(attr->stdoutFd->secret->md.osfd, 1) != 1) {
+ _exit(1); /* failed */
+ }
+ close(attr->stdoutFd->secret->md.osfd);
+ }
+ if (attr->stderrFd
+ && attr->stderrFd->secret->md.osfd != 2) {
+ if (dup2(attr->stderrFd->secret->md.osfd, 2) != 2) {
+ _exit(1); /* failed */
+ }
+ close(attr->stderrFd->secret->md.osfd);
+ }
+ }
+
+ (void)execve(path, argv, envp);
+ /* Whoops! It returned. That's a bad sign. */
+ _exit(1);
+ }
+
+#if defined(_PR_NATIVE_THREADS)
+ PR_Lock(pr_wp.ml);
+ if (0 == pr_wp.numProcs++) {
+ PR_NotifyCondVar(pr_wp.cv);
+ }
+ PR_Unlock(pr_wp.ml);
+#endif
+ return process;
+}
+
+#ifdef _PR_SHARE_CLONES
+
+struct pr_CreateProcOp {
+ const char *path;
+ char *const *argv;
+ char *const *envp;
+ const PRProcessAttr *attr;
+ PRProcess *process;
+ PRErrorCode prerror;
+ PRInt32 oserror;
+ PRBool done;
+ PRCondVar *doneCV;
+ struct pr_CreateProcOp *next;
+};
+
+PRProcess *
+_MD_CreateUnixProcess(
+ const char *path,
+ char *const *argv,
+ char *const *envp,
+ const PRProcessAttr *attr)
+{
+ struct pr_CreateProcOp *op;
+ PRProcess *proc;
+ int rv;
+
+ if (PR_CallOnce(&pr_wp.once, _MD_InitProcesses) == PR_FAILURE) {
+ return NULL;
+ }
+
+ op = PR_NEW(struct pr_CreateProcOp);
+ if (NULL == op) {
+ PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
+ return NULL;
+ }
+ op->path = path;
+ op->argv = argv;
+ op->envp = envp;
+ op->attr = attr;
+ op->done = PR_FALSE;
+ op->doneCV = PR_NewCondVar(pr_wp.ml);
+ if (NULL == op->doneCV) {
+ PR_DELETE(op);
+ return NULL;
+ }
+ PR_Lock(pr_wp.ml);
+
+ /* add to the tail of op queue */
+ op->next = NULL;
+ if (pr_wp.opTail) {
+ pr_wp.opTail->next = op;
+ pr_wp.opTail = op;
+ } else {
+ PR_ASSERT(NULL == pr_wp.opHead);
+ pr_wp.opHead = pr_wp.opTail = op;
+ }
+
+ /* wake up the daemon thread */
+ do {
+ rv = write(pr_wp.pipefd[1], "", 1);
+ } while (-1 == rv && EINTR == errno);
+
+ while (op->done == PR_FALSE) {
+ PR_WaitCondVar(op->doneCV, PR_INTERVAL_NO_TIMEOUT);
+ }
+ PR_Unlock(pr_wp.ml);
+ PR_DestroyCondVar(op->doneCV);
+ proc = op->process;
+ if (!proc) {
+ PR_SetError(op->prerror, op->oserror);
+ }
+ PR_DELETE(op);
+ return proc;
+}
+
+#else /* ! _PR_SHARE_CLONES */
+
+PRProcess *
+_MD_CreateUnixProcess(
+ const char *path,
+ char *const *argv,
+ char *const *envp,
+ const PRProcessAttr *attr)
+{
+ if (PR_CallOnce(&pr_wp.once, _MD_InitProcesses) == PR_FAILURE) {
+ return NULL;
+ }
+ return ForkAndExec(path, argv, envp, attr);
+} /* _MD_CreateUnixProcess */
+
+#endif /* _PR_SHARE_CLONES */
+
+/*
+ * The pid table is a hashtable.
+ *
+ * The number of buckets in the hashtable (NBUCKETS) must be a power of 2.
+ */
+#define NBUCKETS_LOG2 6
+#define NBUCKETS (1 << NBUCKETS_LOG2)
+#define PID_HASH_MASK ((pid_t) (NBUCKETS - 1))
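+/*
+ * A pid is hashed by masking off its low NBUCKETS_LOG2 bits. For example,
+ * with NBUCKETS_LOG2 == 6 there are 64 buckets, PID_HASH_MASK == 63, and
+ * pid 12345 lands in bucket (12345 & 63) == 57.
+ */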
+
+static pr_PidRecord *
+FindPidTable(pid_t pid)
+{
+ pr_PidRecord *pRec;
+ int keyHash = (int) (pid & PID_HASH_MASK);
+
+ pRec = pr_wp.pidTable[keyHash];
+ while (pRec) {
+ if (pRec->pid == pid) {
+ break;
+ }
+ pRec = pRec->next;
+ }
+ return pRec;
+}
+
+static void
+InsertPidTable(pr_PidRecord *pRec)
+{
+ int keyHash = (int) (pRec->pid & PID_HASH_MASK);
+
+ pRec->next = pr_wp.pidTable[keyHash];
+ pr_wp.pidTable[keyHash] = pRec;
+}
+
+static void
+DeletePidTable(pr_PidRecord *pRec)
+{
+ int keyHash = (int) (pRec->pid & PID_HASH_MASK);
+
+ if (pr_wp.pidTable[keyHash] == pRec) {
+ pr_wp.pidTable[keyHash] = pRec->next;
+ } else {
+ pr_PidRecord *pred, *cur; /* predecessor and current */
+
+ pred = pr_wp.pidTable[keyHash];
+ cur = pred->next;
+ while (cur) {
+ if (cur == pRec) {
+ pred->next = cur->next;
+ break;
+ }
+ pred = cur;
+ cur = cur->next;
+ }
+ PR_ASSERT(cur != NULL);
+ }
+}
+
+static int
+ExtractExitStatus(int rawExitStatus)
+{
+ /*
+ * We did not specify the WCONTINUED and WUNTRACED options
+ * for waitpid, so these two events should not be reported.
+ */
+ PR_ASSERT(!WIFSTOPPED(rawExitStatus));
+#ifdef WIFCONTINUED
+ PR_ASSERT(!WIFCONTINUED(rawExitStatus));
+#endif
+ if (WIFEXITED(rawExitStatus)) {
+ return WEXITSTATUS(rawExitStatus);
+ } else {
+ PR_ASSERT(WIFSIGNALED(rawExitStatus));
+ return _PR_SIGNALED_EXITSTATUS;
+ }
+}
+
+static void
+ProcessReapedChildInternal(pid_t pid, int status)
+{
+ pr_PidRecord *pRec;
+
+ pRec = FindPidTable(pid);
+ if (NULL == pRec) {
+ pRec = PR_NEW(pr_PidRecord);
+ pRec->pid = pid;
+ pRec->state = _PR_PID_REAPED;
+ pRec->exitStatus = ExtractExitStatus(status);
+ pRec->reapedCV = NULL;
+ InsertPidTable(pRec);
+ } else {
+ PR_ASSERT(pRec->state != _PR_PID_REAPED);
+ if (_PR_PID_DETACHED == pRec->state) {
+ PR_ASSERT(NULL == pRec->reapedCV);
+ DeletePidTable(pRec);
+ PR_DELETE(pRec);
+ } else {
+ PR_ASSERT(_PR_PID_WAITING == pRec->state);
+ PR_ASSERT(NULL != pRec->reapedCV);
+ pRec->exitStatus = ExtractExitStatus(status);
+ pRec->state = _PR_PID_REAPED;
+ PR_NotifyCondVar(pRec->reapedCV);
+ }
+ }
+}
+
+#if defined(_PR_NATIVE_THREADS)
+
+/*
+ * If all the threads are native threads, the daemon thread is
+ * simpler. We don't need to catch the SIGCHLD signal. We can
+ * just have the daemon thread block in waitpid().
+ */
+
+static void WaitPidDaemonThread(void *unused)
+{
+ pid_t pid;
+ int status;
+
+ while (1) {
+ PR_Lock(pr_wp.ml);
+ while (0 == pr_wp.numProcs) {
+ PR_WaitCondVar(pr_wp.cv, PR_INTERVAL_NO_TIMEOUT);
+ }
+ PR_Unlock(pr_wp.ml);
+
+ while (1) {
+ do {
+ pid = waitpid((pid_t) -1, &status, 0);
+ } while ((pid_t) -1 == pid && EINTR == errno);
+
+ /*
+ * waitpid() cannot return 0 because we did not invoke it
+ * with the WNOHANG option.
+ */
+ PR_ASSERT(0 != pid);
+
+ /*
+ * The only possible error code is ECHILD. But if we do
+ * our accounting correctly, we should only call waitpid()
+ * when there is a child process to wait for.
+ */
+ PR_ASSERT((pid_t) -1 != pid);
+ if ((pid_t) -1 == pid) {
+ break;
+ }
+
+ PR_Lock(pr_wp.ml);
+ ProcessReapedChildInternal(pid, status);
+ pr_wp.numProcs--;
+ while (0 == pr_wp.numProcs) {
+ PR_WaitCondVar(pr_wp.cv, PR_INTERVAL_NO_TIMEOUT);
+ }
+ PR_Unlock(pr_wp.ml);
+ }
+ }
+}
+
+#else /* _PR_NATIVE_THREADS */
+
+static void WaitPidDaemonThread(void *unused)
+{
+ PRPollDesc pd;
+ PRFileDesc *fd;
+ int rv;
+ char buf[128];
+ pid_t pid;
+ int status;
+#ifdef _PR_SHARE_CLONES
+ struct pr_CreateProcOp *op;
+#endif
+
+#ifdef _PR_SHARE_CLONES
+ pr_InstallSigchldHandler();
+#endif
+
+ fd = PR_ImportFile(pr_wp.pipefd[0]);
+ PR_ASSERT(NULL != fd);
+ pd.fd = fd;
+ pd.in_flags = PR_POLL_READ;
+
+ while (1) {
+ rv = PR_Poll(&pd, 1, PR_INTERVAL_NO_TIMEOUT);
+ PR_ASSERT(1 == rv);
+
+#ifdef _PR_SHARE_CLONES
+ if (pr_waitpid_daemon_exit) {
+ return;
+ }
+ PR_Lock(pr_wp.ml);
+#endif
+
+ do {
+ rv = read(pr_wp.pipefd[0], buf, sizeof(buf));
+ } while (sizeof(buf) == rv || (-1 == rv && EINTR == errno));
+
+#ifdef _PR_SHARE_CLONES
+ PR_Unlock(pr_wp.ml);
+ while ((op = pr_wp.opHead) != NULL) {
+ op->process = ForkAndExec(op->path, op->argv,
+ op->envp, op->attr);
+ if (NULL == op->process) {
+ op->prerror = PR_GetError();
+ op->oserror = PR_GetOSError();
+ }
+ PR_Lock(pr_wp.ml);
+ pr_wp.opHead = op->next;
+ if (NULL == pr_wp.opHead) {
+ pr_wp.opTail = NULL;
+ }
+ op->done = PR_TRUE;
+ PR_NotifyCondVar(op->doneCV);
+ PR_Unlock(pr_wp.ml);
+ }
+#endif
+
+ while (1) {
+ do {
+ pid = waitpid((pid_t) -1, &status, WNOHANG);
+ } while ((pid_t) -1 == pid && EINTR == errno);
+ if (0 == pid) break;
+ if ((pid_t) -1 == pid) {
+ /* must be because we have no child processes */
+ PR_ASSERT(ECHILD == errno);
+ break;
+ }
+
+ PR_Lock(pr_wp.ml);
+ ProcessReapedChildInternal(pid, status);
+ PR_Unlock(pr_wp.ml);
+ }
+ }
+}
+
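+/*
+ * SIGCHLD handler: wake up the waitpid daemon thread by writing one byte to
+ * the pipe it is polling on. Only async-signal-safe calls (write) are made
+ * here, and errno is saved and restored around them.
+ */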
+static void pr_SigchldHandler(int sig)
+{
+ int errnoCopy;
+ int rv;
+
+ errnoCopy = errno;
+
+ do {
+ rv = write(pr_wp.pipefd[1], "", 1);
+ } while (-1 == rv && EINTR == errno);
+
+#ifdef DEBUG
+ if (-1 == rv && EAGAIN != errno && EWOULDBLOCK != errno) {
+ char *msg = "cannot write to pipe\n";
+ write(2, msg, strlen(msg) + 1);
+ _exit(1);
+ }
+#endif
+
+ errno = errnoCopy;
+}
+
+static void pr_InstallSigchldHandler()
+{
+#if defined(HPUX) && defined(_PR_DCETHREADS)
+#error "HP-UX DCE threads have their own SIGCHLD handler"
+#endif
+
+ struct sigaction act, oact;
+ int rv;
+
+ act.sa_handler = pr_SigchldHandler;
+ sigemptyset(&act.sa_mask);
+ act.sa_flags = SA_NOCLDSTOP | SA_RESTART;
+ rv = sigaction(SIGCHLD, &act, &oact);
+ PR_ASSERT(0 == rv);
+ /* Make sure we are not overriding someone else's SIGCHLD handler */
+#ifndef _PR_SHARE_CLONES
+ PR_ASSERT(oact.sa_handler == SIG_DFL);
+#endif
+}
+
+#endif /* !defined(_PR_NATIVE_THREADS) */
+
+static PRStatus _MD_InitProcesses()
+{
+#if !defined(_PR_NATIVE_THREADS)
+ int rv;
+ int flags;
+#endif
+#ifdef SUNOS4
+#define _PR_NBIO_FLAG FNDELAY
+#else
+#define _PR_NBIO_FLAG O_NONBLOCK
+#endif
+
+ pr_wp.ml = PR_NewLock();
+ PR_ASSERT(NULL != pr_wp.ml);
+
+#if defined(_PR_NATIVE_THREADS)
+ pr_wp.numProcs = 0;
+ pr_wp.cv = PR_NewCondVar(pr_wp.ml);
+ PR_ASSERT(NULL != pr_wp.cv);
+#else
+ rv = pipe(pr_wp.pipefd);
+ PR_ASSERT(0 == rv);
+ flags = fcntl(pr_wp.pipefd[0], F_GETFL, 0);
+ fcntl(pr_wp.pipefd[0], F_SETFL, flags | _PR_NBIO_FLAG);
+ flags = fcntl(pr_wp.pipefd[1], F_GETFL, 0);
+ fcntl(pr_wp.pipefd[1], F_SETFL, flags | _PR_NBIO_FLAG);
+
+#ifndef _PR_SHARE_CLONES
+ pr_InstallSigchldHandler();
+#endif
+#endif /* !_PR_NATIVE_THREADS */
+
+ pr_wp.thread = PR_CreateThread(PR_SYSTEM_THREAD,
+ WaitPidDaemonThread, NULL, PR_PRIORITY_NORMAL,
+#ifdef _PR_SHARE_CLONES
+ PR_GLOBAL_THREAD,
+#else
+ PR_LOCAL_THREAD,
+#endif
+ PR_JOINABLE_THREAD, 0);
+ PR_ASSERT(NULL != pr_wp.thread);
+
+ pr_wp.pidTable = (pr_PidRecord**)PR_CALLOC(NBUCKETS * sizeof(pr_PidRecord *));
+ PR_ASSERT(NULL != pr_wp.pidTable);
+ return PR_SUCCESS;
+}
+
+PRStatus _MD_DetachUnixProcess(PRProcess *process)
+{
+ PRStatus retVal = PR_SUCCESS;
+ pr_PidRecord *pRec;
+
+ PR_Lock(pr_wp.ml);
+ pRec = FindPidTable(process->md.pid);
+ if (NULL == pRec) {
+ pRec = PR_NEW(pr_PidRecord);
+ if (NULL == pRec) {
+ PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
+ retVal = PR_FAILURE;
+ goto done;
+ }
+ pRec->pid = process->md.pid;
+ pRec->state = _PR_PID_DETACHED;
+ pRec->reapedCV = NULL;
+ InsertPidTable(pRec);
+ } else {
+ PR_ASSERT(_PR_PID_REAPED == pRec->state);
+ if (_PR_PID_REAPED != pRec->state) {
+ PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
+ retVal = PR_FAILURE;
+ } else {
+ DeletePidTable(pRec);
+ PR_ASSERT(NULL == pRec->reapedCV);
+ PR_DELETE(pRec);
+ }
+ }
+
+done:
+ PR_Unlock(pr_wp.ml);
+ return retVal;
+}
+
+PRStatus _MD_WaitUnixProcess(
+ PRProcess *process,
+ PRInt32 *exitCode)
+{
+ pr_PidRecord *pRec;
+ PRStatus retVal = PR_SUCCESS;
+ PRBool interrupted = PR_FALSE;
+
+ PR_Lock(pr_wp.ml);
+ pRec = FindPidTable(process->md.pid);
+ if (NULL == pRec) {
+ pRec = PR_NEW(pr_PidRecord);
+ if (NULL == pRec) {
+ PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
+ retVal = PR_FAILURE;
+ goto done;
+ }
+ pRec->pid = process->md.pid;
+ pRec->state = _PR_PID_WAITING;
+ pRec->reapedCV = PR_NewCondVar(pr_wp.ml);
+ if (NULL == pRec->reapedCV) {
+ PR_DELETE(pRec);
+ retVal = PR_FAILURE;
+ goto done;
+ }
+ InsertPidTable(pRec);
+ while (!interrupted && _PR_PID_REAPED != pRec->state) {
+ if (PR_WaitCondVar(pRec->reapedCV,
+ PR_INTERVAL_NO_TIMEOUT) == PR_FAILURE
+ && PR_GetError() == PR_PENDING_INTERRUPT_ERROR) {
+ interrupted = PR_TRUE;
+ }
+ }
+ if (_PR_PID_REAPED == pRec->state) {
+ if (exitCode) {
+ *exitCode = pRec->exitStatus;
+ }
+ } else {
+ PR_ASSERT(interrupted);
+ retVal = PR_FAILURE;
+ }
+ DeletePidTable(pRec);
+ PR_DestroyCondVar(pRec->reapedCV);
+ PR_DELETE(pRec);
+ } else {
+ PR_ASSERT(_PR_PID_REAPED == pRec->state);
+ PR_ASSERT(NULL == pRec->reapedCV);
+ DeletePidTable(pRec);
+ if (exitCode) {
+ *exitCode = pRec->exitStatus;
+ }
+ PR_DELETE(pRec);
+ }
+
+done:
+ PR_Unlock(pr_wp.ml);
+ return retVal;
+} /* _MD_WaitUnixProcess */
+
+PRStatus _MD_KillUnixProcess(PRProcess *process)
+{
+ PRErrorCode prerror;
+ PRInt32 oserror;
+
+ if (kill(process->md.pid, SIGKILL) == 0) {
+ return PR_SUCCESS;
+ }
+ oserror = errno;
+ switch (oserror) {
+    /* When the thread starts running, the lwpid is set to the right
+ prerror = PR_NO_ACCESS_RIGHTS_ERROR;
+ break;
+ case ESRCH:
+ prerror = PR_INVALID_ARGUMENT_ERROR;
+ break;
+ default:
+ prerror = PR_UNKNOWN_ERROR;
+ break;
+ }
+ PR_SetError(prerror, oserror);
+ return PR_FAILURE;
+} /* _MD_KillUnixProcess */
diff --git a/pr/src/md/unix/uxwrap.c b/pr/src/md/unix/uxwrap.c
new file mode 100644
index 00000000..9fc0fd3d
--- /dev/null
+++ b/pr/src/md/unix/uxwrap.c
@@ -0,0 +1,518 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/*
+ * The contents of this file are subject to the Netscape Public License
+ * Version 1.0 (the "NPL"); you may not use this file except in
+ * compliance with the NPL. You may obtain a copy of the NPL at
+ * http://www.mozilla.org/NPL/
+ *
+ * Software distributed under the NPL is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the NPL
+ * for the specific language governing rights and limitations under the
+ * NPL.
+ *
+ * The Initial Developer of this code under the NPL is Netscape
+ * Communications Corporation. Portions created by Netscape are
+ * Copyright (C) 1998 Netscape Communications Corporation. All Rights
+ * Reserved.
+ */
+
+/*
+ *------------------------------------------------------------------------
+ * File: uxwrap.c
+ *
+ * Our wrapped versions of the Unix select() and poll() system calls.
+ *
+ *------------------------------------------------------------------------
+ */
+
+#include "primpl.h"
+
+#if defined(_PR_PTHREADS) || defined(_PR_GLOBAL_THREADS_ONLY)
+/* Do not wrap select() and poll(). */
+#else /* defined(_PR_PTHREADS) || defined(_PR_GLOBAL_THREADS_ONLY) */
+/* The include files for select() */
+#ifdef IRIX
+#include <unistd.h>
+#include <bstring.h>
+#endif
+
+#include <string.h>
+#include <sys/types.h>
+#include <sys/time.h>
+
+#define ZAP_SET(_to, _width) \
+ PR_BEGIN_MACRO \
+ memset(_to, 0, \
+ ((_width + 8*sizeof(int)-1) / (8*sizeof(int))) \
+ * sizeof(int) \
+ ); \
+ PR_END_MACRO
+
+#define COPY_SET(_to, _from, _width) \
+ PR_BEGIN_MACRO \
+ memcpy(_to, _from, \
+ ((_width + 8*sizeof(int)-1) / (8*sizeof(int))) \
+ * sizeof(int) \
+ ); \
+ PR_END_MACRO
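+/*
+ * Both macros round the fd_set width (a number of file descriptors) up to a
+ * whole number of ints. For example, with 32-bit ints a width of 65 fds is
+ * rounded to ((65 + 31) / 32) * sizeof(int) == 12 bytes.
+ */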
+
+/* An internal global variable defined in prfile.c */
+extern PRIOMethods _pr_fileMethods;
+
+
+/* see comments in ns/cmd/xfe/mozilla.c (look for "PR_XGetXtHackFD") */
+static int _pr_xt_hack_fd = -1;
+
+int PR_XGetXtHackFD(void)
+{
+ int fds[2];
+
+ if (_pr_xt_hack_fd == -1) {
+ if (!pipe(fds)) {
+ _pr_xt_hack_fd = fds[0];
+ }
+ }
+ return _pr_xt_hack_fd;
+}
+
+static int (*_pr_xt_hack_okayToReleaseXLock)(void) = 0;
+
+void PR_SetXtHackOkayToReleaseXLockFn(int (*fn)(void))
+{
+ _pr_xt_hack_okayToReleaseXLock = fn;
+}
+
+
+/*
+ *-----------------------------------------------------------------------
+ * select() --
+ *
+ * Wrap up the select system call so that we can deschedule
+ * a thread that tries to wait for i/o.
+ *
+ *-----------------------------------------------------------------------
+ */
+
+#if defined(HPUX9)
+int select(size_t width, int *rl, int *wl, int *el, const struct timeval *tv)
+#elif defined(AIX4_1)
+int wrap_select(unsigned long width, void *rl, void *wl, void *el,
+ struct timeval *tv)
+#elif (defined(BSDI) && !defined(BSDI_2))
+int select(int width, fd_set *rd, fd_set *wr, fd_set *ex,
+ const struct timeval *tv)
+#else
+int select(int width, fd_set *rd, fd_set *wr, fd_set *ex, struct timeval *tv)
+#endif
+{
+ int i;
+ int nfds;
+ int npds;
+ void *pollset;
+ PRPollDesc *pd;
+ PRFileDesc *prfd;
+ PRFilePrivate *secret;
+ PRIntervalTime timeout;
+ int retVal;
+#if defined(HPUX9) || defined(AIX4_1)
+ fd_set *rd = (fd_set*) rl;
+ fd_set *wr = (fd_set*) wl;
+ fd_set *ex = (fd_set*) el;
+#endif
+ fd_set r, w, x;
+
+#if 0
+ /*
+ * Easy special case: zero timeout. Simply call the native
+ * select() with no fear of blocking.
+ */
+ if (tv != NULL && tv->tv_sec == 0 && tv->tv_usec == 0) {
+#if defined(HPUX9) || defined(AIX4_1)
+ return _MD_SELECT(width, rl, wl, el, tv);
+#else
+ return _MD_SELECT(width, rd, wr, ex, tv);
+#endif
+ }
+#endif
+
+ if (!_pr_initialized)
+ _PR_ImplicitInitialization();
+
+#ifndef _PR_LOCAL_THREADS_ONLY
+ if (_PR_IS_NATIVE_THREAD(_PR_MD_CURRENT_THREAD())) {
+ nfds = _MD_SELECT(width, rd, wr, ex, tv);
+ return(nfds);
+ }
+#endif
+
+ if (width < 0 || width > FD_SETSIZE) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ /* Compute timeout */
+ if (tv) {
+ /*
+         * These acceptable ranges for tv_sec and tv_usec are taken
+ * from the select() man pages.
+ */
+ if (tv->tv_sec < 0 || tv->tv_sec > 100000000
+ || tv->tv_usec < 0 || tv->tv_usec >= 1000000) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ /* Convert microseconds to ticks */
+ timeout = PR_MicrosecondsToInterval(1000000*tv->tv_sec + tv->tv_usec);
+ } else {
+ /* tv being a NULL pointer means blocking indefinitely */
+ timeout = PR_INTERVAL_NO_TIMEOUT;
+ }
+
+ /* Check for no descriptors case (just doing a timeout) */
+ if ((!rd && !wr && !ex) || !width) {
+ PR_Sleep(timeout);
+ return 0;
+ }
+
+ if (rd) { COPY_SET(&r, rd, width); }
+ if (wr) { COPY_SET(&w, wr, width); }
+ if (ex) { COPY_SET(&x, ex, width); }
+
+ /*
+ * Set up for PR_Poll(). The PRPollDesc array is allocated
+ * dynamically. If this turns out to have high performance
+ * penalty, one can change to use a large PRPollDesc array
+ * on the stack, and allocate dynamically only when it turns
+ * out to be not large enough.
+ *
+ * I allocate an array of size 'width', which is the maximum
+ * number of fds we may need to poll.
+ */
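+    /*
+     * The single allocation below is carved into three consecutive arrays:
+     * 'width' PRPollDesc entries, then 'width' PRFileDesc entries, then
+     * 'width' PRFilePrivate entries (see the pointer arithmetic that follows).
+     */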
+ pollset = PR_CALLOC(width *
+ (sizeof(PRPollDesc) + sizeof(PRFileDesc) + sizeof(PRFilePrivate)));
+ if (!pollset) {
+ errno = ENOMEM;
+ return -1;
+ }
+ pd = (PRPollDesc*)pollset;
+ prfd = (PRFileDesc*)(&pd[width]);
+ secret = (PRFilePrivate*)(&prfd[width]);
+
+ for (npds = 0, i = 0; i < width; i++) {
+ int in_flags = 0;
+ if (rd && FD_ISSET(i, &r)) {
+ in_flags |= PR_POLL_READ;
+ }
+ if (wr && FD_ISSET(i, &w)) {
+ in_flags |= PR_POLL_WRITE;
+ }
+ if (ex && FD_ISSET(i, &x)) {
+ in_flags |= PR_POLL_EXCEPT;
+ }
+ if (in_flags) {
+ prfd[npds].secret = &secret[npds];
+ prfd[npds].secret->state = _PR_FILEDESC_OPEN;
+ prfd[npds].secret->md.osfd = i;
+ prfd[npds].methods = &_pr_fileMethods;
+
+ pd[npds].fd = &prfd[npds];
+ pd[npds].in_flags = in_flags;
+ pd[npds].out_flags = 0;
+ npds += 1;
+ }
+ }
+
+ /* see comments in ns/cmd/xfe/mozilla.c (look for "PR_XGetXtHackFD") */
+ {
+
+ int needToLockXAgain;
+
+ needToLockXAgain = 0;
+ if (rd && (_pr_xt_hack_fd != -1) &&
+ FD_ISSET(_pr_xt_hack_fd, &r) && PR_XIsLocked() &&
+ (!_pr_xt_hack_okayToReleaseXLock || _pr_xt_hack_okayToReleaseXLock())) {
+ PR_XUnlock();
+ needToLockXAgain = 1;
+ }
+
+ /* This is the potentially blocking step */
+ retVal = PR_Poll(pd, npds, timeout);
+
+ if (needToLockXAgain) {
+ PR_XLock();
+ }
+ }
+
+ if (retVal > 0)
+ {
+ /* Compute select results */
+ if (rd) ZAP_SET(rd, width);
+ if (wr) ZAP_SET(wr, width);
+ if (ex) ZAP_SET(ex, width);
+
+ /*
+ * The return value can be either the number of ready file
+ * descriptors or the number of set bits in the three fd_set's.
+ */
+ retVal = 0; /* we're going to recompute */
+ for (i = 0; i < npds; ++i, pd++)
+ {
+ if (pd->out_flags) {
+ int nbits = 0; /* The number of set bits on for this fd */
+
+ if (pd->out_flags & PR_POLL_NVAL) {
+ errno = EBADF;
+ PR_LOG(_pr_io_lm, PR_LOG_ERROR,
+                        ("select returns EBADF for %d", pd->fd->secret->md.osfd));
+ retVal = -1;
+ break;
+ }
+ if (rd && (pd->out_flags & PR_POLL_READ)) {
+ FD_SET(pd->fd->secret->md.osfd, rd);
+ nbits++;
+ }
+ if (wr && (pd->out_flags & PR_POLL_WRITE)) {
+ FD_SET(pd->fd->secret->md.osfd, wr);
+ nbits++;
+ }
+ if (ex && (pd->out_flags & PR_POLL_EXCEPT)) {
+ FD_SET(pd->fd->secret->md.osfd, ex);
+ nbits++;
+ }
+ PR_ASSERT(nbits > 0);
+#if defined(HPUX) || defined(SOLARIS) || defined(SUNOS4) || defined(OSF1) || defined(AIX)
+ retVal += nbits;
+#else /* IRIX */
+ retVal += 1;
+#endif
+ }
+ }
+ }
+
+ PR_ASSERT(tv || retVal != 0);
+ PR_LOG(_pr_io_lm, PR_LOG_MIN, ("select returns %d", retVal));
+ PR_DELETE(pollset);
+
+ return retVal;
+}
+
+/*
+ * Linux, BSDI, and FreeBSD don't have poll()
+ */
+
+#if !defined(LINUX) && !defined(FREEBSD) && !defined(BSDI)
+
+/*
+ *-----------------------------------------------------------------------
+ * poll() --
+ *
+ * RETURN VALUES:
+ * -1: fails, errno indicates the error.
+ * 0: timed out, the revents bitmasks are not set.
+ * positive value: the number of file descriptors for which poll()
+ * has set the revents bitmask.
+ *
+ *-----------------------------------------------------------------------
+ */
+
+#include <poll.h>
+
+#if defined(AIX4_1)
+int wrap_poll(void *listptr, unsigned long nfds, long timeout)
+#elif (defined(AIX) && !defined(AIX4_1))
+int poll(void *listptr, unsigned long nfds, long timeout)
+#elif defined(OSF1) || (defined(HPUX) && !defined(HPUX9))
+int poll(struct pollfd filedes[], unsigned int nfds, int timeout)
+#elif defined(HPUX9)
+int poll(struct pollfd filedes[], int nfds, int timeout)
+#else
+int poll(struct pollfd *filedes, unsigned long nfds, int timeout)
+#endif
+{
+#ifdef AIX
+ struct pollfd *filedes = (struct pollfd *) listptr;
+#endif
+ void *pollset;
+ PRPollDesc *pd;
+ PRFileDesc *prfd;
+ PRFilePrivate *secret;
+ int i;
+ PRUint32 ticks;
+ PRInt32 retVal;
+
+ /*
+ * Easy special case: zero timeout. Simply call the native
+ * poll() with no fear of blocking.
+ */
+ if (timeout == 0) {
+#if defined(AIX)
+ return _MD_POLL(listptr, nfds, timeout);
+#else
+ return _MD_POLL(filedes, nfds, timeout);
+#endif
+ }
+
+ if (!_pr_initialized) {
+ _PR_ImplicitInitialization();
+ }
+
+#ifndef _PR_LOCAL_THREADS_ONLY
+ if (_PR_IS_NATIVE_THREAD(_PR_MD_CURRENT_THREAD())) {
+ retVal = _MD_POLL(filedes, nfds, timeout);
+ return(retVal);
+ }
+#endif
+
+ /* We do not support the pollmsg structures on AIX */
+#ifdef AIX
+ PR_ASSERT((nfds & 0xff00) == 0);
+#endif
+
+ if (timeout < 0 && timeout != -1) {
+ errno = EINVAL;
+ return -1;
+ }
+
+    /* Convert timeout from milliseconds to ticks */
+ if (timeout == -1) {
+ ticks = PR_INTERVAL_NO_TIMEOUT;
+ } else if (timeout == 0) {
+ ticks = PR_INTERVAL_NO_WAIT;
+ } else {
+ ticks = PR_MillisecondsToInterval(timeout);
+ }
+
+ /* Check for no descriptor case (just do a timeout) */
+ if (nfds == 0) {
+ PR_Sleep(ticks);
+ return 0;
+ }
+
+ pollset = PR_CALLOC(nfds *
+ (sizeof(PRPollDesc) + sizeof(PRFileDesc) + sizeof(PRFilePrivate)));
+ if (!pollset) {
+ errno = EAGAIN;
+ return -1;
+ }
+ pd = (PRPollDesc*)pollset;
+ prfd = (PRFileDesc*)(&pd[nfds]);
+ secret = (PRFilePrivate*)(&prfd[nfds]);
+
+ for (i = 0; i < nfds; i++) {
+ prfd[i].secret = &secret[i];
+ prfd[i].secret->state = _PR_FILEDESC_OPEN;
+ prfd[i].secret->md.osfd = filedes[i].fd;
+ prfd[i].methods = &_pr_fileMethods;
+
+ pd[i].fd = &prfd[i];
+ pd[i].out_flags = 0;
+
+ /*
+ * poll() ignores negative fd's. We emulate this behavior
+ * by making sure the in_flags for a negative fd is zero.
+ */
+ if (filedes[i].fd < 0) {
+ pd[i].in_flags = 0;
+ continue;
+ }
+#ifdef _PR_USE_POLL
+ pd[i].in_flags = filedes[i].events;
+#else
+ /*
+ * Map the native poll flags to nspr20 poll flags.
+ * POLLIN, POLLRDNORM ===> PR_POLL_READ
+ * POLLOUT, POLLWRNORM ===> PR_POLL_WRITE
+ * POLLPRI, POLLRDBAND ===> PR_POLL_EXCEPT
+ * POLLNORM, POLLWRBAND (and POLLMSG on some platforms)
+ * are ignored.
+ *
+ * The output events POLLERR and POLLHUP are never turned on.
+ * POLLNVAL may be turned on.
+ */
+ pd[i].in_flags = 0;
+ if (filedes[i].events & (POLLIN
+#ifdef POLLRDNORM
+ | POLLRDNORM
+#endif
+ )) {
+ pd[i].in_flags |= PR_POLL_READ;
+ }
+ if (filedes[i].events & (POLLOUT
+#ifdef POLLWRNORM
+ | POLLWRNORM
+#endif
+ )) {
+ pd[i].in_flags |= PR_POLL_WRITE;
+ }
+ if (filedes[i].events & (POLLPRI
+#ifdef POLLRDBAND
+ | POLLRDBAND
+#endif
+ )) {
+ pd[i].in_flags |= PR_POLL_EXCEPT;
+ }
+#endif /* _PR_USE_POLL */
+ }
+
+ retVal = PR_Poll(pd, nfds, ticks);
+
+ if (retVal > 0) {
+ /* Set the revents bitmasks */
+ for (i = 0; i < nfds; i++) {
+ PR_ASSERT(filedes[i].fd >= 0 || pd[i].in_flags == 0);
+ if (filedes[i].fd < 0) {
+ continue; /* skip negative fd's */
+ }
+#ifdef _PR_USE_POLL
+ filedes[i].revents = pd[i].out_flags;
+#else
+ filedes[i].revents = 0;
+ if (0 == pd[i].out_flags) {
+ continue;
+ }
+ if (pd[i].out_flags & PR_POLL_READ) {
+ if (filedes[i].events & POLLIN)
+ filedes[i].revents |= POLLIN;
+#ifdef POLLRDNORM
+ if (filedes[i].events & POLLRDNORM)
+ filedes[i].revents |= POLLRDNORM;
+#endif
+ }
+ if (pd[i].out_flags & PR_POLL_WRITE) {
+ if (filedes[i].events & POLLOUT)
+ filedes[i].revents |= POLLOUT;
+#ifdef POLLWRNORM
+ if (filedes[i].events & POLLWRNORM)
+ filedes[i].revents |= POLLWRNORM;
+#endif
+ }
+ if (pd[i].out_flags & PR_POLL_EXCEPT) {
+ if (filedes[i].events & POLLPRI)
+ filedes[i].revents |= POLLPRI;
+#ifdef POLLRDBAND
+ if (filedes[i].events & POLLRDBAND)
+ filedes[i].revents |= POLLRDBAND;
+#endif
+ }
+ if (pd[i].out_flags & PR_POLL_ERR) {
+ filedes[i].revents |= POLLERR;
+ }
+ if (pd[i].out_flags & PR_POLL_NVAL) {
+ filedes[i].revents |= POLLNVAL;
+ }
+#endif /* _PR_USE_POLL */
+ }
+ }
+
+ PR_DELETE(pollset);
+
+ return retVal;
+}
+
+#endif /* !defined(LINUX) && !defined(FREEBSD) && !defined(BSDI) */
+
+#endif /* defined(_PR_PTHREADS) || defined(_PR_GLOBAL_THREADS_ONLY) */
+
+/* uxwrap.c */
+