author     bk@work.mysql.com <>  2000-07-31 21:29:14 +0200
committer  bk@work.mysql.com <>  2000-07-31 21:29:14 +0200
commit     f4c589ff6c653d1d2a09c26e46ead3c8a15655d8 (patch)
tree       d253a359142dfc1ed247d5d4365d86972ea31109 /mit-pthreads/pthreads
parent     7eec25e393727b16bb916b50d82b0aa3084e065c (diff)
download   mariadb-git-f4c589ff6c653d1d2a09c26e46ead3c8a15655d8.tar.gz
Import changeset
Diffstat (limited to 'mit-pthreads/pthreads')
-rw-r--r--  mit-pthreads/pthreads/GNUmakefile.inc     46
-rw-r--r--  mit-pthreads/pthreads/Makefile.inc        75
-rw-r--r--  mit-pthreads/pthreads/_exit.c             80
-rw-r--r--  mit-pthreads/pthreads/cleanup.c           84
-rw-r--r--  mit-pthreads/pthreads/cond.c              437
-rw-r--r--  mit-pthreads/pthreads/condattr.c          90
-rw-r--r--  mit-pthreads/pthreads/dump_state.c        88
-rw-r--r--  mit-pthreads/pthreads/errno.c             53
-rw-r--r--  mit-pthreads/pthreads/fd.c                1083
-rw-r--r--  mit-pthreads/pthreads/fd_kern.c           1950
-rw-r--r--  mit-pthreads/pthreads/fd_pipe.c           257
-rw-r--r--  mit-pthreads/pthreads/fd_sysv.c           897
-rw-r--r--  mit-pthreads/pthreads/file.c              129
-rw-r--r--  mit-pthreads/pthreads/globals.c           85
-rw-r--r--  mit-pthreads/pthreads/info.c              77
-rw-r--r--  mit-pthreads/pthreads/init.cc             9
-rw-r--r--  mit-pthreads/pthreads/malloc.c            383
-rw-r--r--  mit-pthreads/pthreads/mutex.c             371
-rw-r--r--  mit-pthreads/pthreads/mutexattr.c         90
-rw-r--r--  mit-pthreads/pthreads/panic.c             58
-rw-r--r--  mit-pthreads/pthreads/prio_queue.c        176
-rw-r--r--  mit-pthreads/pthreads/process.c           208
-rw-r--r--  mit-pthreads/pthreads/pthread.c           293
-rw-r--r--  mit-pthreads/pthreads/pthread_attr.c      255
-rw-r--r--  mit-pthreads/pthreads/pthread_cancel.c    258
-rw-r--r--  mit-pthreads/pthreads/pthread_detach.c    92
-rw-r--r--  mit-pthreads/pthreads/pthread_init.c      135
-rw-r--r--  mit-pthreads/pthreads/pthread_join.c      139
-rw-r--r--  mit-pthreads/pthreads/pthread_kill.c      93
-rw-r--r--  mit-pthreads/pthreads/pthread_once.c      59
-rw-r--r--  mit-pthreads/pthreads/queue.c             143
-rw-r--r--  mit-pthreads/pthreads/readv.c             85
-rw-r--r--  mit-pthreads/pthreads/schedparam.c        170
-rw-r--r--  mit-pthreads/pthreads/select.c            255
-rw-r--r--  mit-pthreads/pthreads/sig.c               452
-rw-r--r--  mit-pthreads/pthreads/signal.c            653
-rw-r--r--  mit-pthreads/pthreads/sleep.c             367
-rw-r--r--  mit-pthreads/pthreads/specific.c          198
-rw-r--r--  mit-pthreads/pthreads/stat.c              116
-rw-r--r--  mit-pthreads/pthreads/wait.c              159
-rw-r--r--  mit-pthreads/pthreads/wrapper.c           149
-rw-r--r--  mit-pthreads/pthreads/writev.c            89
42 files changed, 10886 insertions, 0 deletions
diff --git a/mit-pthreads/pthreads/GNUmakefile.inc b/mit-pthreads/pthreads/GNUmakefile.inc
new file mode 100644
index 00000000000..c8621495bac
--- /dev/null
+++ b/mit-pthreads/pthreads/GNUmakefile.inc
@@ -0,0 +1,46 @@
+# from: @(#)Makefile.inc 5.6 (Berkeley) 6/4/91
+
+# pthread sources
+VPATH := $(VPATH):${srcdir}/pthreads
+
+SRCS:= cleanup.c cond.c fd.c fd_kern.c fd_pipe.c fd_sysv.c file.c globals.c \
+ malloc.c mutex.c pthread.c pthread_attr.c queue.c signal.c machdep.c \
+ syscall.S pthread_join.c pthread_detach.c pthread_once.c sleep.c \
+ specific.c process.c wait.c errno.c schedparam.c _exit.c prio_queue.c \
+ pthread_init.c init.cc sig.c info.c mutexattr.c select.c wrapper.c \
+ dump_state.c pthread_kill.c stat.c readv.c writev.c condattr.c \
+ pthread_cancel.c panic.c $(SRCS)
+
+ifeq ($(HAVE_SYSCALL_TEMPLATE),yes)
+SYSCALL_FILTER_RULE= for s in $(AVAILABLE_SYSCALLS) ; do \
+ case " $(SYSCALL_EXCEPTIONS) " in \
+ *" "$$s" "*) ;; \
+ *) echo $$s ;; \
+ esac ; \
+ done
+STD_SYSCALLS:=$(shell $(SYSCALL_FILTER_RULE))
+STD_SYSCALL_FILES:= $(addprefix S,$(addsuffix .o,$(STD_SYSCALLS)))
+EXTRA_OBJS := $(EXTRA_OBJS) syscalls.o
+# EXTRA_OBJS := $(EXTRA_OBJS) $(STD_SYSCALL_FILES)
+
+ifndef SYSCALL_PIC_COMPILE
+SYSCALL_PIC_COMPILE=true
+endif
+
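+# Build one S<syscall>.o from syscall-template.S for each syscall selected
+# above, then merge the objects into a single syscalls.o with ld -r.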
+obj/syscalls.o: syscall-template.S
+ -rm -rf obj/syscalls
+ mkdir obj/syscalls
+ for syscall in $(STD_SYSCALLS) ; do \
+ echo $$syscall ; \
+ $(CC) $(CFLAGS) -DSYSCALL_NAME=$$syscall -c syscall-template.S -o obj/syscalls/S$$syscall.o ; \
+ $(SYSCALL_PIC_COMPILE) ; \
+ done
+ x=`pwd` && cd obj/syscalls && ld -r -o ../syscalls.o S*.o && cd $$x
+ rm -r obj/syscalls
+endif
+
+syscall.o: ${.CURDIR}/pthreads/syscall.S
+ cpp ${CPPFLAGS} ${.CURDIR}/pthreads/syscall.S > syscall.i
+ as syscall.i
+ rm syscall.i
+ mv a.out syscall.o
diff --git a/mit-pthreads/pthreads/Makefile.inc b/mit-pthreads/pthreads/Makefile.inc
new file mode 100644
index 00000000000..3939d57de6e
--- /dev/null
+++ b/mit-pthreads/pthreads/Makefile.inc
@@ -0,0 +1,75 @@
+# from: @(#)Makefile.inc 5.6 (Berkeley) 6/4/91
+
+# pthread sources
+.PATH: ${srcdir}/pthreads
+
+SRCS+= cleanup.c cond.c fd.c fd_kern.c fd_pipe.c file.c globals.c malloc.c \
+ mutex.c pthread.c pthread_attr.c queue.c signal.c machdep.c syscall.S \
+ pthread_join.c pthread_detach.c pthread_once.c sleep.c specific.c \
+ process.c wait.c errno.c schedparam.c _exit.c prio_queue.c \
+ pthread_init.c init.cc sig.c info.c mutexattr.c select.c wrapper.c \
+ dump_state.c pthread_kill.c condattr.c pthread_cancel.c panic.c
+
+.if $(HAVE_SYSCALL_TEMPLATE) == yes
+OBJS+= syscalls.o
+.if !defined(NOPIC)
+SOBJS+= syscalls.so
+SYSCALL_PIC_COMPILE= $(CC) $(CFLAGS) -DSYSCALL_NAME=$$syscall -DPIC -c ${.CURDIR}/syscall-template.S -o ${.OBJDIR}/syscalls/S$$syscall.so
+.else
+SYSCALL_PIC_COMPILE= true
+.endif
+.if !defined(NOPROFILE)
+POBJS+= syscalls.po
+SYSCALL_PROF_COMPILE= $(CC) $(CFLAGS) -DSYSCALL_NAME=$$syscall -pg -c ${.CURDIR}/syscall-template.S -o ${.OBJDIR}/syscalls/S$$syscall.po
+.else
+SYSCALL_PROF_COMPILE= true
+.endif
+
+OPSYS!= uname -s
+
+syscalls.o syscalls.so syscalls.po : syscall-template.S
+ -rm -rf ${.OBJDIR}/syscalls
+ mkdir ${.OBJDIR}/syscalls
+ for syscall in $(AVAILABLE_SYSCALLS) ; do \
+ case " $(SYSCALL_EXCEPTIONS) " in \
+ *" "$$syscall" "*) ;; \
+ *) echo $$syscall ; \
+ $(CC) $(CFLAGS) -DSYSCALL_NAME=$$syscall -c ${.CURDIR}/syscall-template.S -o ${.OBJDIR}/syscalls/S$$syscall.o ; \
+ $(SYSCALL_PIC_COMPILE) ; \
+ $(SYSCALL_PROF_COMPILE) ;; \
+ esac ; \
+ done
+ x=`pwd` && cd ${.OBJDIR}/syscalls && ld -r -o ../syscalls.o *.o && cd $$x
+.if !defined(NOPIC)
+ x=`pwd` && cd ${.OBJDIR}/syscalls && ld -r -o ../syscalls.so *.so && cd $$x
+.endif
+.if !defined(NOPROFILE)
+ x=`pwd` && cd ${.OBJDIR}/syscalls && ld -r -o ../syscalls.po *.po && cd $$x
+.endif
+ rm -r ${.OBJDIR}/syscalls
+.endif
+
+syscall.o: syscall.S
+.if (${OPSYS} == "FreeBSD")
+ $(CC) -c -x assembler-with-cpp -o syscall.o ${.CURDIR}/syscall.S
+.else
+ cpp ${CPPFLAGS} ${.CURDIR}/syscall.S > syscall.i
+ as syscall.i
+ rm syscall.i
+ mv a.out syscall.o
+.endif
+
+syscall.po: syscall.S
+.if (${OPSYS} == "FreeBSD")
+ $(CC) -c -x assembler-with-cpp -o syscall.po ${.CURDIR}/syscall.S
+.else
+ cpp ${CPPFLAGS} ${.CURDIR}/syscall.S > syscall.i
+ as syscall.i
+ rm syscall.i
+ mv a.out syscall.po
+.endif
+
+MAN2+=
+
+MAN3+=
+
diff --git a/mit-pthreads/pthreads/_exit.c b/mit-pthreads/pthreads/_exit.c
new file mode 100644
index 00000000000..fde795011ce
--- /dev/null
+++ b/mit-pthreads/pthreads/_exit.c
@@ -0,0 +1,80 @@
+/* ==== _exit.c ============================================================
+ * Copyright (c) 1994 by Chris Provenzano, proven@mit.edu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Chris Provenzano.
+ * 4. The name of Chris Provenzano may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CHRIS PROVENZANO ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL CHRIS PROVENZANO BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Description : Reset file descriptors to their original state, then exit.
+ *
+ * 1.00 94/09/04 proven
+ * -Started coding this file.
+ */
+
+#ifndef lint
+static const char rcsid[] = "$Id$";
+#endif
+
+#include <pthread.h>
+#include <fcntl.h>
+
+/* ==========================================================================
+ * _exit()
+ *
+ * Change all file descriptors back to their original state,
+ * before exiting for good.
+ */
+void _exit(int status)
+{
+ int fd;
+
+ pthread_sched_prevent();
+
+ for (fd = 0; fd < dtablesize; fd++) {
+ if (fd_table[fd] == NULL) {
+ continue;
+ }
+ /* Is it a kernel fd ? */
+ if ((!fd_table[fd]->ops) || (fd_table[fd]->ops->use_kfds != 1)) {
+ continue;
+ }
+ switch (fd_table[fd]->type) {
+ case FD_HALF_DUPLEX:
+ machdep_sys_fcntl(fd_table[fd]->fd.i, F_SETFL, fd_table[fd]->flags);
+ fd_table[fd]->type = FD_TEST_HALF_DUPLEX;
+ break;
+ case FD_FULL_DUPLEX:
+ machdep_sys_fcntl(fd_table[fd]->fd.i, F_SETFL, fd_table[fd]->flags);
+ fd_table[fd]->type = FD_TEST_FULL_DUPLEX;
+ break;
+ default:
+ break;
+ }
+ }
+ machdep_sys_exit(status);
+}
+
diff --git a/mit-pthreads/pthreads/cleanup.c b/mit-pthreads/pthreads/cleanup.c
new file mode 100644
index 00000000000..3eb096b8337
--- /dev/null
+++ b/mit-pthreads/pthreads/cleanup.c
@@ -0,0 +1,84 @@
+/* ==== cleanup.c =======================================================
+ * Copyright (c) 1994 by Chris Provenzano, proven@mit.edu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Chris Provenzano.
+ * 4. The name of Chris Provenzano may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CHRIS PROVENZANO ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL CHRIS PROVENZANO BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Description : Pthread cleanup handler functions.
+ *
+ * 1.20 94/02/13 proven
+ * -Started coding this file.
+ */
+
+#ifndef lint
+static const char rcsid[] = "$Id$";
+#endif
+
+#include <pthread.h>
+#include <errno.h>
+#include <stdlib.h>
+
+/* ==========================================================================
+ * pthread_cleanup_push()
+ */
+int pthread_cleanup_push(void (*routine)(void *), void *routine_arg)
+{
+ struct pthread_cleanup *new;
+ int ret;
+
+ if ((new = (struct pthread_cleanup*)malloc(sizeof(struct pthread_cleanup))))
+ {
+ new->routine = routine;
+ new->routine_arg = routine_arg;
+ new->next = pthread_run->cleanup;
+
+ pthread_run->cleanup = new;
+ ret = OK;
+ } else {
+ ret = ENOMEM;
+ }
+ return(ret);
+}
+
+/* ==========================================================================
+ * pthread_cleanup_pop()
+ */
+void pthread_cleanup_pop(int execute)
+{
+ struct pthread_cleanup *old;
+
+ if ((old = pthread_run->cleanup))
+ {
+ pthread_run->cleanup = old->next;
+ if (execute) {
+ old->routine(old->routine_arg);
+ }
+ free(old);
+ }
+}
+
diff --git a/mit-pthreads/pthreads/cond.c b/mit-pthreads/pthreads/cond.c
new file mode 100644
index 00000000000..8dacd0397ce
--- /dev/null
+++ b/mit-pthreads/pthreads/cond.c
@@ -0,0 +1,437 @@
+/* ==== cond.c ============================================================
+ * Copyright (c) 1993, 1994 by Chris Provenzano, proven@mit.edu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Chris Provenzano.
+ * 4. The name of Chris Provenzano may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CHRIS PROVENZANO ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL CHRIS PROVENZANO BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Description : Condition variable functions.
+ *
+ * 1.00 93/10/28 proven
+ * -Started coding this file.
+ */
+
+#ifndef lint
+static const char rcsid[] = "$Id$";
+#endif
+
+#include <pthread.h>
+#include <sys/time.h>
+#include <stdlib.h>
+#include <timers.h>
+#include <errno.h>
+
+#ifndef ETIME
+#define ETIME ETIMEDOUT
+#endif
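+/* Systems without ETIME report a timed-out condition wait as ETIMEDOUT. */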
+
+/* ==========================================================================
+ * pthread_cond_is_debug()
+ *
+ * Check that cond is a debug cond and, if so, return its entry number in the
+ * array of debug conds.
+ */
+static int pthread_cond_debug_count = 0;
+static pthread_cond_t ** pthread_cond_debug_ptrs = NULL;
+static pthread_mutex_t pthread_cond_debug_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+static inline int pthread_cond_is_debug(pthread_cond_t * cond)
+{
+ int i;
+
+ for (i = 0; i < pthread_cond_debug_count; i++) {
+ if (pthread_cond_debug_ptrs[i] == cond) {
+ return(i);
+ }
+ }
+ return(NOTOK);
+}
+/* ==========================================================================
+ * pthread_cond_init()
+ *
+ * In this implementation no memory needs to be allocated, so ENOMEM and
+ * EAGAIN should never be returned. Architectures with unusual constraints
+ * may need special coding.
+ */
+int pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
+{
+ enum pthread_condtype type;
+
+ /* Only check if attr specifies some cond type other than fast */
+ if ((cond_attr) && (cond_attr->c_type != COND_TYPE_FAST)) {
+ if (cond_attr->c_type >= COND_TYPE_MAX) {
+ return(EINVAL);
+ }
+ type = cond_attr->c_type;
+ } else {
+ type = COND_TYPE_FAST;
+ }
+
+ switch (type) {
+ case COND_TYPE_FAST:
+ case COND_TYPE_COUNTING_FAST:
+ break;
+ case COND_TYPE_DEBUG:
+ pthread_mutex_lock(&pthread_cond_debug_mutex);
+ if (pthread_cond_is_debug(cond) == NOTOK) {
+ pthread_cond_t ** new;
+
+ if ((new = (pthread_cond_t **)realloc(pthread_cond_debug_ptrs,
+ (pthread_cond_debug_count + 1) * (sizeof(void *)))) == NULL) {
+ pthread_mutex_unlock(&pthread_cond_debug_mutex);
+ return(ENOMEM);
+ }
+ pthread_cond_debug_ptrs = new;
+ pthread_cond_debug_ptrs[pthread_cond_debug_count++] = cond;
+ } else {
+ pthread_mutex_unlock(&pthread_cond_debug_mutex);
+ return(EBUSY);
+ }
+ pthread_mutex_unlock(&pthread_cond_debug_mutex);
+ break;
+ case COND_TYPE_STATIC_FAST:
+ default:
+ return(EINVAL);
+ break;
+ }
+
+ /* Set all other parameters */
+ pthread_queue_init(&cond->c_queue);
+ cond->c_flags |= COND_FLAGS_INITED;
+ cond->c_type = type;
+ return(OK);
+}
+
+/* ==========================================================================
+ * pthread_cond_destroy()
+ */
+int pthread_cond_destroy(pthread_cond_t *cond)
+{
+ int i;
+
+ /* Only check if cond is of type other than fast */
+ switch(cond->c_type) {
+ case COND_TYPE_FAST:
+ case COND_TYPE_COUNTING_FAST:
+ break;
+ case COND_TYPE_DEBUG:
+ if (pthread_queue_get(&(cond->c_queue))) {
+ return(EBUSY);
+ }
+ pthread_mutex_lock(&pthread_cond_debug_mutex);
+ if ((i = pthread_cond_is_debug(cond)) == NOTOK) {
+ pthread_mutex_unlock(&pthread_cond_debug_mutex);
+ return(EINVAL);
+ }
+
+ /* Remove the cond from the list of debug condition variables */
+ pthread_cond_debug_ptrs[i] =
+ pthread_cond_debug_ptrs[--pthread_cond_debug_count];
+ pthread_cond_debug_ptrs[pthread_cond_debug_count] = NULL;
+ pthread_mutex_unlock(&pthread_cond_debug_mutex);
+ break;
+ case COND_TYPE_STATIC_FAST:
+ default:
+ return(EINVAL);
+ break;
+ }
+
+ /* Cleanup cond, others might want to use it. */
+ pthread_queue_init(&cond->c_queue);
+ cond->c_flags = 0;
+ return(OK);
+}
+
+/* ==========================================================================
+ * pthread_cond_wait()
+ */
+int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
+{
+ int rval;
+
+ pthread_sched_prevent();
+ switch (cond->c_type) {
+ case COND_TYPE_DEBUG:
+ pthread_mutex_lock(&pthread_cond_debug_mutex);
+ if (pthread_cond_is_debug(cond) == NOTOK) {
+ pthread_mutex_unlock(&pthread_cond_debug_mutex);
+ pthread_sched_resume();
+ return(EINVAL);
+ }
+ pthread_mutex_unlock(&pthread_cond_debug_mutex);
+
+ /*
+ * Fast condition variables do not check for any error conditions.
+ */
+ case COND_TYPE_FAST:
+ case COND_TYPE_STATIC_FAST:
+ pthread_queue_enq(&cond->c_queue, pthread_run);
+ pthread_mutex_unlock(mutex);
+
+ pthread_run->data.mutex = mutex;
+
+ SET_PF_WAIT_EVENT(pthread_run);
+ SET_PF_AT_CANCEL_POINT(pthread_run); /* This is a cancel point */
+ /* Reschedule will unlock pthread_run */
+ pthread_resched_resume(PS_COND_WAIT);
+ CLEAR_PF_AT_CANCEL_POINT(pthread_run); /* No longer at cancel point */
+ CLEAR_PF_DONE_EVENT(pthread_run);
+
+ pthread_run->data.mutex = NULL;
+
+ rval = pthread_mutex_lock(mutex);
+ return(rval);
+ break;
+ case COND_TYPE_COUNTING_FAST:
+ {
+ int count = mutex->m_data.m_count;
+
+ pthread_queue_enq(&cond->c_queue, pthread_run);
+ pthread_mutex_unlock(mutex);
+ mutex->m_data.m_count = 1;
+
+ pthread_run->data.mutex = mutex;
+
+ SET_PF_WAIT_EVENT(pthread_run);
+ SET_PF_AT_CANCEL_POINT(pthread_run); /* This is a cancel point */
+ /* Reschedule will unlock pthread_run */
+ pthread_resched_resume(PS_COND_WAIT);
+ CLEAR_PF_AT_CANCEL_POINT(pthread_run); /* No longer at cancel point */
+ CLEAR_PF_DONE_EVENT(pthread_run);
+
+ pthread_run->data.mutex = NULL;
+
+ rval = pthread_mutex_lock(mutex);
+ mutex->m_data.m_count = count;
+ return(rval);
+ break;
+ }
+ default:
+ rval = EINVAL;
+ break;
+ }
+ pthread_sched_resume();
+ return(rval);
+}
+
+/* ==========================================================================
+ * pthread_cond_timedwait()
+ */
+int pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
+ const struct timespec * abstime)
+{
+ struct timespec current_time, new_time;
+ int rval = OK;
+
+ pthread_sched_prevent();
+ machdep_gettimeofday(& current_time);
+
+ switch (cond->c_type) {
+ case COND_TYPE_DEBUG:
+ pthread_mutex_lock(&pthread_cond_debug_mutex);
+ if (pthread_cond_is_debug(cond) == NOTOK) {
+ pthread_mutex_unlock(&pthread_cond_debug_mutex);
+ pthread_sched_resume();
+ return(EINVAL);
+ }
+ pthread_mutex_unlock(&pthread_cond_debug_mutex);
+
+ /*
+ * Fast condition variables do not check for any error conditions.
+ */
+ case COND_TYPE_FAST:
+ case COND_TYPE_STATIC_FAST:
+
+ /* Set pthread wakeup time*/
+ pthread_run->wakeup_time = *abstime;
+
+ /* Install us on the sleep queue */
+ sleep_schedule (&current_time, &(pthread_run->wakeup_time));
+
+ pthread_queue_enq(&cond->c_queue, pthread_run);
+ SET_PF_WAIT_EVENT(pthread_run);
+ pthread_mutex_unlock(mutex);
+
+ pthread_run->data.mutex = mutex;
+
+ SET_PF_AT_CANCEL_POINT(pthread_run); /* This is a cancel point */
+ /* Reschedule will unlock pthread_run */
+ pthread_resched_resume(PS_COND_WAIT);
+ CLEAR_PF_AT_CANCEL_POINT(pthread_run); /* No longer at cancel point */
+
+ pthread_run->data.mutex = NULL;
+
+ /* Remove ourselves from sleep queue. If we fail then we timedout */
+ if (sleep_cancel(pthread_run) == NOTOK) {
+ SET_ERRNO(ETIME);
+ rval = ETIME;
+ }
+
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ pthread_mutex_lock(mutex);
+ return(rval);
+ break;
+ case COND_TYPE_COUNTING_FAST:
+ {
+ int count = mutex->m_data.m_count;
+
+ /* Set pthread wakeup time*/
+ pthread_run->wakeup_time = *abstime;
+
+ /* Install us on the sleep queue */
+ sleep_schedule (&current_time, &(pthread_run->wakeup_time));
+
+ pthread_queue_enq(&cond->c_queue, pthread_run);
+ SET_PF_WAIT_EVENT(pthread_run);
+ pthread_mutex_unlock(mutex);
+
+ pthread_run->data.mutex = mutex;
+
+ SET_PF_AT_CANCEL_POINT(pthread_run); /* This is a cancel point */
+ /* Reschedule will unlock pthread_run */
+ pthread_resched_resume(PS_COND_WAIT);
+ CLEAR_PF_AT_CANCEL_POINT(pthread_run); /* No longer at cancel point */
+
+ pthread_run->data.mutex = NULL;
+
+ /* Remove ourselves from sleep queue. If we fail then we timedout */
+ if (sleep_cancel(pthread_run) == NOTOK) {
+ SET_ERRNO(ETIME);
+ rval = ETIME;
+ }
+
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ pthread_mutex_lock(mutex);
+ mutex->m_data.m_count = count;
+ return(rval);
+ break;
+ }
+ default:
+ rval = EINVAL;
+ break;
+ }
+ pthread_sched_resume();
+ return(rval);
+}
+
+/* ==========================================================================
+ * pthread_cond_signal()
+ */
+int pthread_cond_signal(pthread_cond_t *cond)
+{
+ struct pthread *pthread;
+ int rval;
+
+ pthread_sched_prevent();
+ switch (cond->c_type) {
+ case COND_TYPE_DEBUG:
+ pthread_mutex_lock(&pthread_cond_debug_mutex);
+ if (pthread_cond_is_debug(cond) == NOTOK) {
+ pthread_mutex_unlock(&pthread_cond_debug_mutex);
+ pthread_sched_resume();
+ return(EINVAL);
+ }
+ pthread_mutex_unlock(&pthread_cond_debug_mutex);
+
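+ /* FALLTHROUGH: a valid debug cond is signalled the same way as a fast cond */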
+ case COND_TYPE_FAST:
+ case COND_TYPE_STATIC_FAST:
+ if (pthread = pthread_queue_deq(&cond->c_queue)) {
+ if ((SET_PF_DONE_EVENT(pthread)) == OK) {
+ pthread_sched_other_resume(pthread);
+ } else {
+ pthread_sched_resume();
+ }
+ return(OK);
+ }
+ rval = OK;
+ break;
+ default:
+ rval = EINVAL;
+ break;
+ }
+ pthread_sched_resume();
+ return(rval);
+}
+
+/* ==========================================================================
+ * pthread_cond_broadcast()
+ *
+ * Not much different than the above routine.
+ */
+int pthread_cond_broadcast(pthread_cond_t *cond)
+{
+ struct pthread * pthread, * high_pthread, * low_pthread;
+ int rval;
+
+ pthread_sched_prevent();
+ switch (cond->c_type) {
+ case COND_TYPE_DEBUG:
+ pthread_mutex_lock(&pthread_cond_debug_mutex);
+ if (pthread_cond_is_debug(cond) == NOTOK) {
+ pthread_mutex_unlock(&pthread_cond_debug_mutex);
+ pthread_sched_resume();
+ return(EINVAL);
+ }
+ pthread_mutex_unlock(&pthread_cond_debug_mutex);
+
+ case COND_TYPE_FAST:
+ case COND_TYPE_STATIC_FAST:
+ if (pthread = pthread_queue_deq(&cond->c_queue)) {
+ pthread->state = PS_RUNNING;
+ high_pthread = pthread;
+
+ while (pthread = pthread_queue_deq(&cond->c_queue)) {
+ if (pthread->pthread_priority >
+ high_pthread->pthread_priority) {
+ low_pthread = high_pthread;
+ high_pthread = pthread;
+ } else {
+ low_pthread = pthread;
+ }
+ if ((SET_PF_DONE_EVENT(low_pthread)) == OK) {
+ pthread_prio_queue_enq(pthread_current_prio_queue,
+ low_pthread);
+ low_pthread->state = PS_RUNNING;
+ }
+ }
+ if ((SET_PF_DONE_EVENT(high_pthread)) == OK) {
+ pthread_sched_other_resume(high_pthread);
+ } else {
+ pthread_sched_resume();
+ }
+ return(OK);
+ }
+ rval = OK;
+ break;
+ default:
+ rval = EINVAL;
+ break;
+ }
+ pthread_sched_resume();
+ return(rval);
+}
+
diff --git a/mit-pthreads/pthreads/condattr.c b/mit-pthreads/pthreads/condattr.c
new file mode 100644
index 00000000000..ac010bdf4b1
--- /dev/null
+++ b/mit-pthreads/pthreads/condattr.c
@@ -0,0 +1,90 @@
+/* ==== condattr.c ===========================================================
+ * Copyright (c) 1995 by Chris Provenzano, proven@mit.edu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Chris Provenzano.
+ * 4. The name of Chris Provenzano may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CHRIS PROVENZANO ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL CHRIS PROVENZANO BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Description : Condition variable attribute functions.
+ *
+ * 1.00 95/08/22 proven
+ * -Started coding this file.
+ */
+
+#ifndef lint
+static const char rcsid[] = "$Id$";
+#endif
+
+#include <pthread.h>
+#include <errno.h>
+
+/* ==========================================================================
+ * pthread_condattr_init()
+ */
+int pthread_condattr_init(pthread_condattr_t *attr)
+{
+ attr->c_type = COND_TYPE_FAST;
+ return(OK);
+}
+
+/* ==========================================================================
+ * pthread_condattr_destroy()
+ */
+int pthread_condattr_destroy(pthread_condattr_t *attr)
+{
+ return(OK);
+}
+
+/* ==========================================================================
+ * pthread_condattr_settype()
+ */
+int pthread_condattr_settype(pthread_condattr_t *attr, unsigned int type)
+{
+ switch(type) {
+ case PTHREAD_CONDTYPE_FAST:
+ attr->c_type = COND_TYPE_FAST;
+ break;
+ case PTHREAD_CONDTYPE_RECURSIVE:
+ attr->c_type = COND_TYPE_COUNTING_FAST;
+ break;
+ case PTHREAD_CONDTYPE_DEBUG:
+ attr->c_type = COND_TYPE_DEBUG;
+ break;
+ default:
+ return(EINVAL);
+ }
+ return(OK);
+}
+
+/* ==========================================================================
+ * pthread_condattr_gettype()
+ */
+int pthread_condattr_gettype(pthread_condattr_t *attr, unsigned int * type)
+{
+ *type = (unsigned int)attr->c_type;
+ return(OK);
+}
diff --git a/mit-pthreads/pthreads/dump_state.c b/mit-pthreads/pthreads/dump_state.c
new file mode 100644
index 00000000000..3d9840bad64
--- /dev/null
+++ b/mit-pthreads/pthreads/dump_state.c
@@ -0,0 +1,88 @@
+/* ==== dump_state.c ============================================================
+ * Copyright (c) 1993, 1994 by Chris Provenzano, proven@mit.edu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Chris Provenzano.
+ * 4. The name of Chris Provenzano may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CHRIS PROVENZANO ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL CHRIS PROVENZANO BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id$
+ *
+ * Description : Bogus debugging output routines.
+ *
+ * 1.00 95/02/08 snl
+ * -Started coding this file.
+ */
+
+#include <pthread.h>
+#include <stdio.h>
+
+/* ==========================================================================
+ * pthread_dump_state()
+ *
+ * Totally, totally bogus routine to dump the state of pthreads.
+ */
+
+void
+pthread_dump_state()
+{
+ pthread_t thread;
+
+ for (thread = pthread_link_list; thread; thread = thread->pll) {
+ printf("Thread %lx", thread);
+ if (thread == pthread_initial)
+ printf("*");
+ if (thread == pthread_run)
+ printf("^");
+ printf(" ");
+ switch (thread->state) {
+ case PS_RUNNING: printf("RUNNING "); break;
+ case PS_MUTEX_WAIT: printf("MUTEX_WAIT "); break;
+ case PS_COND_WAIT: printf("COND_WAIT "); break;
+ case PS_FDLR_WAIT: printf("FDLR_WAIT "); break;
+ case PS_FDLW_WAIT: printf("FDLW_WAIT "); break;
+ case PS_FDR_WAIT: printf("FDR_WAIT "); break;
+ case PS_FDW_WAIT: printf("FDW_WAIT "); break;
+ case PS_SELECT_WAIT: printf("SELECT "); break;
+ case PS_SLEEP_WAIT: printf("SLEEP_WAIT "); break;
+ case PS_WAIT_WAIT: printf("WAIT_WAIT "); break;
+ case PS_SIGWAIT: printf("SIGWAIT "); break;
+ case PS_JOIN: printf("JOIN "); break;
+ case PS_DEAD: printf("DEAD "); break;
+ default: printf("*UNKNOWN %d* ", thread->state);
+ break;
+ }
+ switch (thread->attr.schedparam_policy) {
+ case SCHED_RR: printf("RR "); break;
+ case SCHED_IO: printf("IO "); break;
+ case SCHED_FIFO: printf("FIFO "); break;
+ case SCHED_OTHER: printf("OTHER "); break;
+ default: printf("*UNKNOWN %d* ",
+ thread->attr.schedparam_policy);
+ break;
+ }
+ }
+}
diff --git a/mit-pthreads/pthreads/errno.c b/mit-pthreads/pthreads/errno.c
new file mode 100644
index 00000000000..bc680235424
--- /dev/null
+++ b/mit-pthreads/pthreads/errno.c
@@ -0,0 +1,53 @@
+/* ==== errno.c ============================================================
+ * Copyright (c) 1994 by Chris Provenzano, proven@mit.edu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Chris Provenzano.
+ * 4. The name of Chris Provenzano may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CHRIS PROVENZANO ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL CHRIS PROVENZANO BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Description : Return the pointer to the threads errno address.
+ *
+ * 1.32 94/05/25 proven
+ * -Started coding this file.
+ */
+
+#ifndef lint
+static const char rcsid[] = "$Id$";
+#endif
+
+#include <pthread.h>
+
+/* ==========================================================================
+ * __error()
+ */
+int * __error()
+{
+ if (!pthread_run->error_p) {
+ pthread_run->error_p = &pthread_run->error;
+ }
+ return(pthread_run->error_p);
+}
diff --git a/mit-pthreads/pthreads/fd.c b/mit-pthreads/pthreads/fd.c
new file mode 100644
index 00000000000..3eb59c11bd1
--- /dev/null
+++ b/mit-pthreads/pthreads/fd.c
@@ -0,0 +1,1083 @@
+/* ==== fd.c ============================================================
+ * Copyright (c) 1993, 1994 by Chris Provenzano, proven@mit.edu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Chris Provenzano.
+ * 4. The name of Chris Provenzano may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CHRIS PROVENZANO ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL CHRIS PROVENZANO BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Description : All the syscalls dealing with fds.
+ *
+ * 1.00 93/08/14 proven
+ * -Started coding this file.
+ *
+ * 1.01 93/11/13 proven
+ * -The functions readv() and writev() added.
+ */
+
+#ifndef lint
+static const char rcsid[] = "$Id$";
+#endif
+
+#include "config.h"
+#include <pthread.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/uio.h>
+#include <sys/ioctl.h>
+#ifdef HAVE_SYS_FILIO_H
+#include <sys/filio.h> /* For ioctl */
+#endif
+#if __STDC__
+#include <stdarg.h>
+#else
+#include <varargs.h>
+#endif
+#include <fcntl.h>
+#include <errno.h>
+#include <pthread/posix.h>
+
+/*
+ * These first functions really should not be called by the user.
+ *
+ * I really should dynamically figure out what the table size is.
+ */
+static pthread_mutex_t fd_table_mutex = PTHREAD_MUTEX_INITIALIZER;
+static const int dtablecount = 4096/sizeof(struct fd_table_entry);
+int dtablesize;
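+/* dtablesize is the kernel descriptor table size (clamped to FD_SETSIZE in
+ * fd_init); dtablecount is how many entries fd_init_entry() allocates at once. */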
+
+static int fd_get_pthread_fd_from_kernel_fd( int );
+
+/* ==========================================================================
+ * Allocate dtablecount entries at once and populate the fd_table.
+ *
+ * fd_init_entry()
+ */
+int fd_init_entry(int entry)
+{
+ struct fd_table_entry *fd_entry;
+ int i, round;
+
+ if (fd_table[entry] == NULL) {
+ round = entry - entry % dtablecount;
+
+ if ((fd_entry = (struct fd_table_entry *)malloc(
+ sizeof(struct fd_table_entry) * dtablecount)) == NULL) {
+ return(NOTOK);
+ }
+
+ for (i = 0; i < dtablecount && round+i < dtablesize; i++) {
+ fd_table[round + i] = &fd_entry[i];
+
+ fd_table[round + i]->ops = NULL;
+ fd_table[round + i]->type = FD_NT;
+ fd_table[round + i]->fd.i = NOTOK;
+ fd_table[round + i]->flags = 0;
+ fd_table[round + i]->count = 0;
+
+ pthread_mutex_init(&(fd_table[round + i]->mutex), NULL);
+ pthread_queue_init(&(fd_table[round + i]->r_queue));
+ pthread_queue_init(&(fd_table[round + i]->w_queue));
+ fd_table[round + i]->r_owner = NULL;
+ fd_table[round + i]->w_owner = NULL;
+ fd_table[round + i]->r_lockcount= 0;
+ fd_table[round + i]->w_lockcount= 0;
+
+ fd_table[round + i]->next = NULL;
+ }
+ }
+ return(OK);
+}
+
+/* ==========================================================================
+ * fd_check_entry()
+ */
+int fd_check_entry(unsigned int entry)
+{
+ int ret = OK;
+
+ pthread_mutex_lock(&fd_table_mutex);
+
+ if (entry < dtablesize) {
+ if (fd_table[entry] == NULL) {
+ if (fd_init_entry(entry)) {
+ SET_ERRNO(EBADF);
+ ret = -EBADF;
+ }
+ }
+ } else {
+ SET_ERRNO(EBADF);
+ ret = -EBADF;
+ }
+
+ pthread_mutex_unlock(&fd_table_mutex);
+ return(ret);
+}
+
+/* ==========================================================================
+ * fd_init()
+ */
+void fd_init(void)
+{
+ int i;
+
+ if ((dtablesize = machdep_sys_getdtablesize()) < 0) {
+ /* Can't figure out the table size. */
+ PANIC();
+ }
+
+ /* select() can only handle FD_SETSIZE descriptors, so our inner loop will
+ * break if dtablesize is higher than that. This should be removed if and
+ * when the inner loop is rewritten to use poll(). */
+ if (dtablesize > FD_SETSIZE) {
+ dtablesize = FD_SETSIZE;
+ }
+
+ if (fd_table = (struct fd_table_entry **)malloc(
+ sizeof(struct fd_table_entry) * dtablesize)) {
+ memset(fd_table, 0, sizeof(struct fd_table_entry) * dtablesize);
+ if (fd_check_entry(0) == OK) {
+ return;
+ }
+ }
+
+ /*
+ * There isn't enough memory to allocate a fd table at init time.
+ * This is a problem.
+ */
+ PANIC();
+
+}
+
+/* ==========================================================================
+ * fd_allocate()
+ */
+int fd_allocate()
+{
+ pthread_mutex_t * mutex;
+ int i;
+
+ for (i = 0; i < dtablesize; i++) {
+ if (fd_check_entry(i) == OK) {
+ mutex = &(fd_table[i]->mutex);
+ if (pthread_mutex_trylock(mutex)) {
+ continue;
+ }
+ if (fd_table[i]->count || fd_table[i]->r_owner
+ || fd_table[i]->w_owner) {
+ pthread_mutex_unlock(mutex);
+ continue;
+ }
+ if (fd_table[i]->type == FD_NT) {
+ /* Test to see if the kernel version is in use */
+ if ((machdep_sys_fcntl(i, F_GETFL, NULL)) >= OK) {
+ /* If so continue; */
+ pthread_mutex_unlock(mutex);
+ continue;
+ }
+ }
+ fd_table[i]->count++;
+ pthread_mutex_unlock(mutex);
+ return(i);
+ }
+ }
+ SET_ERRNO(ENFILE);
+ return(NOTOK);
+}
+
+/*----------------------------------------------------------------------
+ * Function: fd_get_pthread_fd_from_kernel_fd
+ * Purpose: get the fd_table index of a kernel fd
+ * Args: fd = kernel fd to convert
+ * Returns: fd_table index, -1 if not found
+ * Notes:
+ *----------------------------------------------------------------------*/
+static int
+fd_get_pthread_fd_from_kernel_fd( int kfd )
+{
+ int j;
+
+ /* This is *SICK*, but unless there is a faster way to
+ * turn a kernel fd into an fd_table index, this has to do.
+ */
+ for( j=0; j < dtablesize; j++ ) {
+ if( fd_table[j] &&
+ fd_table[j]->type != FD_NT &&
+ fd_table[j]->type != FD_NIU &&
+ fd_table[j]->fd.i == kfd ) {
+ return j;
+ }
+ }
+
+ /* Not listed by fd; check for kernel fd == pthread fd */
+ if( fd_table[kfd] == NULL || fd_table[kfd]->type == FD_NT ) {
+ /* Assume that the kernel fd is the same */
+ return kfd;
+ }
+
+ return NOTOK; /* Not found */
+}
+
+/* ==========================================================================
+ * fd_basic_basic_unlock()
+ *
+ * The real work of unlock without the locking of fd_table[fd].lock.
+ */
+void fd_basic_basic_unlock(struct fd_table_entry * entry, int lock_type)
+{
+ struct pthread *pthread;
+
+ if (entry->r_owner == pthread_run) {
+ if ((entry->type == FD_HALF_DUPLEX) ||
+ (entry->type == FD_TEST_HALF_DUPLEX) ||
+ (lock_type == FD_READ) || (lock_type == FD_RDWR)) {
+ if (entry->r_lockcount == 0) {
+ if (pthread = pthread_queue_deq(&entry->r_queue)) {
+ pthread_sched_prevent();
+ entry->r_owner = pthread;
+ if ((SET_PF_DONE_EVENT(pthread)) == OK) {
+ pthread_sched_other_resume(pthread);
+ } else {
+ pthread_sched_resume();
+ }
+ } else {
+ entry->r_owner = NULL;
+ }
+ } else {
+ entry->r_lockcount--;
+ }
+ }
+ }
+
+ if (entry->w_owner == pthread_run) {
+ if ((entry->type != FD_HALF_DUPLEX) &&
+ (entry->type != FD_TEST_HALF_DUPLEX) &&
+ ((lock_type == FD_WRITE) || (lock_type == FD_RDWR))) {
+ if (entry->w_lockcount == 0) {
+ if (pthread = pthread_queue_deq(&entry->w_queue)) {
+ pthread_sched_prevent();
+ entry->w_owner = pthread;
+ if ((SET_PF_DONE_EVENT(pthread)) == OK) {
+ pthread_sched_other_resume(pthread);
+ } else {
+ pthread_sched_resume();
+ }
+ } else {
+ entry->w_owner = NULL;
+ }
+ } else {
+ entry->w_lockcount--;
+ }
+ }
+ }
+}
+
+/* ==========================================================================
+ * fd_basic_unlock()
+ */
+void fd_basic_unlock(int fd, int lock_type)
+{
+ fd_basic_basic_unlock(fd_table[fd], lock_type);
+}
+
+/* ==========================================================================
+ * fd_unlock()
+ */
+void fd_unlock(int fd, int lock_type)
+{
+ pthread_mutex_t *mutex;
+
+ mutex = &(fd_table[fd]->mutex);
+ pthread_mutex_lock(mutex);
+ fd_basic_basic_unlock(fd_table[fd], lock_type);
+ pthread_mutex_unlock(mutex);
+}
+
+/* ==========================================================================
+ * fd_basic_lock()
+ *
+ * The real work of lock without the locking of fd_table[fd].lock.
+ * Be sure to leave the lock the same way you found it. i.e. locked.
+ */
+int fd_basic_lock(unsigned int fd, int lock_type, pthread_mutex_t * mutex,
+ struct timespec * timeout)
+{
+ semaphore *plock;
+
+ switch (fd_table[fd]->type) {
+ case FD_NIU:
+ /* If not in use return EBADF error */
+ SET_ERRNO(EBADF);
+ return(NOTOK);
+ break;
+ case FD_NT:
+ /*
+ * If not tested, test it and see if it is valid
+ * If not ok return EBADF error
+ */
+ fd_kern_init(fd);
+ if (fd_table[fd]->type == FD_NIU) {
+ SET_ERRNO(EBADF);
+ return(NOTOK);
+ }
+ break;
+ case FD_TEST_HALF_DUPLEX:
+ case FD_TEST_FULL_DUPLEX:
+ /* If a parent process reset the fd to its proper state */
+ if (!fork_lock) {
+ /* It had better be a kernel fd */
+ fd_kern_reset(fd);
+ }
+ break;
+ default:
+ break;
+ }
+
+ if ((fd_table[fd]->type == FD_HALF_DUPLEX) ||
+ (fd_table[fd]->type == FD_TEST_HALF_DUPLEX) ||
+ (lock_type == FD_READ) || (lock_type == FD_RDWR)) {
+ if (fd_table[fd]->r_owner) {
+ if (fd_table[fd]->r_owner != pthread_run) {
+ pthread_sched_prevent();
+ pthread_queue_enq(&fd_table[fd]->r_queue, pthread_run);
+ SET_PF_WAIT_EVENT(pthread_run);
+ pthread_mutex_unlock(mutex);
+
+ if (timeout) {
+ /* get current time */
+ struct timespec current_time;
+ machdep_gettimeofday(&current_time);
+ sleep_schedule(&current_time, timeout);
+
+ /* Reschedule will unlock pthread_run */
+ pthread_run->data.fd.fd = fd;
+ pthread_run->data.fd.branch = __LINE__;
+ pthread_resched_resume(PS_FDLR_WAIT);
+ pthread_mutex_lock(mutex);
+
+ /* If we're the owner then we have to cancel the sleep */
+ if (fd_table[fd]->r_owner != pthread_run) {
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ SET_ERRNO(ETIMEDOUT);
+ return(NOTOK);
+ }
+ sleep_cancel(pthread_run);
+ } else {
+ /* Reschedule will unlock pthread_run */
+ pthread_run->data.fd.fd = fd;
+ pthread_run->data.fd.branch = __LINE__;
+ pthread_resched_resume(PS_FDLR_WAIT);
+ pthread_mutex_lock(mutex);
+ }
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ } else {
+ fd_table[fd]->r_lockcount++;
+ }
+ }
+ fd_table[fd]->r_owner = pthread_run;
+ }
+ if ((fd_table[fd]->type != FD_HALF_DUPLEX) &&
+ (fd_table[fd]->type != FD_TEST_HALF_DUPLEX) &&
+ ((lock_type == FD_WRITE) || (lock_type == FD_RDWR))) {
+ if (fd_table[fd]->w_owner) {
+ if (fd_table[fd]->w_owner != pthread_run) {
+ pthread_sched_prevent();
+ pthread_queue_enq(&fd_table[fd]->w_queue, pthread_run);
+ SET_PF_WAIT_EVENT(pthread_run);
+ pthread_mutex_unlock(mutex);
+
+ if (timeout) {
+ /* get current time */
+ struct timespec current_time;
+ machdep_gettimeofday(&current_time);
+ sleep_schedule(&current_time, timeout);
+
+ /* Reschedule will unlock pthread_run */
+ pthread_run->data.fd.fd = fd;
+ pthread_run->data.fd.branch = __LINE__;
+ pthread_resched_resume(PS_FDLR_WAIT);
+ pthread_mutex_lock(mutex);
+
+ /* If we're the owner then we have to cancel the sleep */
+ if (fd_table[fd]->w_owner != pthread_run) {
+ if (lock_type == FD_RDWR) {
+ /* Unlock current thread */
+ fd_basic_unlock(fd, FD_READ);
+ }
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ SET_ERRNO(ETIMEDOUT);
+ return(NOTOK);
+ }
+ sleep_cancel(pthread_run);
+ } else {
+ /* Reschedule will unlock pthread_run */
+ pthread_run->data.fd.fd = fd;
+ pthread_run->data.fd.branch = __LINE__;
+ pthread_resched_resume(PS_FDLR_WAIT);
+ pthread_mutex_lock(mutex);
+ }
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ } else {
+ fd_table[fd]->w_lockcount++;
+ }
+ }
+ fd_table[fd]->w_owner = pthread_run;
+ }
+ if (!fd_table[fd]->count) {
+ fd_basic_unlock(fd, lock_type);
+ return(NOTOK);
+ }
+ return(OK);
+}
+
+/*----------------------------------------------------------------------
+ * Function: fd_unlock_for_cancel
+ * Purpose: Unlock all fd locks held prior to being cancelled
+ * Args: void
+ * Returns:
+ * OK or NOTOK
+ * Notes:
+ * Assumes the kernel is locked on entry
+ *----------------------------------------------------------------------*/
+int
+fd_unlock_for_cancel( void )
+{
+ int i, fd;
+ struct pthread_select_data *data;
+ int rdlk, wrlk, lktype;
+ int found;
+
+ /* What we do depends on the previous state of the thread */
+ switch( pthread_run->old_state ) {
+ case PS_RUNNING:
+ case PS_JOIN:
+ case PS_SLEEP_WAIT:
+ case PS_WAIT_WAIT:
+ case PS_SIGWAIT:
+ case PS_FDLR_WAIT:
+ case PS_FDLW_WAIT:
+ case PS_DEAD:
+ case PS_UNALLOCED:
+ break; /* Nothing to do */
+
+ case PS_COND_WAIT:
+ CLEAR_PF_GROUP( pthread_run, PF_EVENT_GROUP );
+ /* Must reacquire the mutex according to the standard */
+ if( pthread_run->data.mutex == NULL ) {
+ PANIC();
+ }
+ pthread_mutex_lock( pthread_run->data.mutex );
+ break;
+
+ case PS_FDR_WAIT:
+ CLEAR_PF_GROUP( pthread_run, PF_EVENT_GROUP);
+ /* Free the lock on the fd being used */
+ fd = fd_get_pthread_fd_from_kernel_fd( pthread_run->data.fd.fd );
+ if( fd == NOTOK ) {
+ PANIC(); /* Can't find fd */
+ }
+ fd_unlock( fd, FD_READ );
+ break;
+
+ case PS_FDW_WAIT: /* Waiting on i/o */
+ CLEAR_PF_GROUP( pthread_run, PF_EVENT_GROUP);
+ /* Free the lock on the fd being used */
+ fd = fd_get_pthread_fd_from_kernel_fd( pthread_run->data.fd.fd );
+ if( fd == NOTOK ) {
+ PANIC(); /* Can't find fd */
+ }
+ fd_unlock( fd, FD_WRITE );
+ break;
+
+ case PS_SELECT_WAIT:
+ data = pthread_run->data.select_data;
+
+ CLEAR_PF_GROUP( pthread_run, PF_EVENT_GROUP);
+
+ for( i = 0; i < data->nfds; i++) {
+ rdlk =(FD_ISSET(i,&data->readfds)
+ || FD_ISSET(i,&data->exceptfds));
+ wrlk = FD_ISSET(i, &data->writefds);
+ lktype = rdlk ? (wrlk ? FD_RDWR : FD_READ) : FD_WRITE;
+
+ if( ! (rdlk || wrlk) )
+ continue; /* No locks, no unlock */
+
+ if( (fd = fd_get_pthread_fd_from_kernel_fd( i )) == NOTOK ) {
+ PANIC(); /* Can't find fd */
+ }
+
+ fd_unlock( fd, lktype );
+ }
+ break;
+
+ case PS_MUTEX_WAIT:
+ PANIC(); /* Should never cancel a mutex wait */
+
+ default:
+ PANIC(); /* Unknown thread status */
+ }
+ return( OK );
+}
+
+/* ==========================================================================
+ * fd_lock()
+ */
+#define pthread_mutex_lock_timedwait(a, b) pthread_mutex_lock(a)
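+/* Note: no timed mutex lock is implemented, so the timeout argument is
+ * ignored while acquiring the fd_table entry mutex itself. */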
+
+int fd_lock(unsigned int fd, int lock_type, struct timespec * timeout)
+{
+ struct timespec current_time;
+ pthread_mutex_t *mutex;
+ int error;
+
+ if ((error = fd_check_entry(fd)) == OK) {
+ mutex = &(fd_table[fd]->mutex);
+ if (pthread_mutex_lock_timedwait(mutex, timeout)) {
+ SET_ERRNO(ETIMEDOUT);
+ return(-ETIMEDOUT);
+ }
+ error = fd_basic_lock(fd, lock_type, mutex, timeout);
+ pthread_mutex_unlock(mutex);
+ }
+ return(error);
+}
+
+/* ==========================================================================
+ * fd_free()
+ *
+ * Assumes fd is locked and owned by pthread_run
+ * Don't clear the queues, fd_unlock will do that.
+ */
+struct fd_table_entry * fd_free(int fd)
+{
+ struct fd_table_entry *fd_valid;
+
+ fd_valid = NULL;
+ fd_table[fd]->r_lockcount = 0;
+ fd_table[fd]->w_lockcount = 0;
+ if (--fd_table[fd]->count) {
+ fd_valid = fd_table[fd];
+ fd_table[fd] = fd_table[fd]->next;
+ fd_valid->next = fd_table[fd]->next;
+ /* Don't touch queues of fd_valid */
+ }
+
+ fd_table[fd]->type = FD_NIU;
+ fd_table[fd]->fd.i = NOTOK;
+ fd_table[fd]->next = NULL;
+ fd_table[fd]->flags = 0;
+ fd_table[fd]->count = 0;
+ return(fd_valid);
+}
+
+
+/* ==========================================================================
+ * ======================================================================= */
+
+/* ==========================================================================
+ * read_timedwait()
+ */
+ssize_t read_timedwait(int fd, void *buf, size_t nbytes,
+ struct timespec * timeout)
+{
+ int ret;
+
+ if ((ret = fd_lock(fd, FD_READ, NULL)) == OK) {
+ ret = fd_table[fd]->ops->read(fd_table[fd]->fd,
+ fd_table[fd]->flags, buf, nbytes, timeout);
+ fd_unlock(fd, FD_READ);
+ }
+ return(ret);
+}
+
+/* ==========================================================================
+ * read()
+ */
+ssize_t read(int fd, void *buf, size_t nbytes)
+{
+ return(read_timedwait(fd, buf, nbytes, NULL));
+}
+
+/* ==========================================================================
+ * readv_timedwait()
+ */
+int readv_timedwait(int fd, const struct iovec *iov, int iovcnt,
+ struct timespec * timeout)
+{
+ int ret;
+
+ if ((ret = fd_lock(fd, FD_READ, NULL)) == OK) {
+ ret = fd_table[fd]->ops->readv(fd_table[fd]->fd,
+ fd_table[fd]->flags, iov, iovcnt, timeout);
+ fd_unlock(fd, FD_READ);
+ }
+ return(ret);
+}
+
+/* ==========================================================================
+ * readv()
+ */
+ssize_t readv(int fd, const struct iovec *iov, int iovcnt)
+{
+ return(readv_timedwait(fd, iov, iovcnt, NULL));
+}
+
+/* ==========================================================================
+ * write()
+ */
+ssize_t write_timedwait(int fd, const void *buf, size_t nbytes,
+ struct timespec * timeout)
+{
+ int ret;
+
+ if ((ret = fd_lock(fd, FD_WRITE, NULL)) == OK)
+ {
+ ret = fd_table[fd]->ops->write(fd_table[fd]->fd,
+ fd_table[fd]->flags, buf, nbytes,
+ timeout);
+ fd_unlock(fd, FD_WRITE);
+ }
+ return(ret);
+}
+
+/* ==========================================================================
+ * write()
+ */
+ssize_t write(int fd, const void * buf, size_t nbytes)
+{
+ return(write_timedwait(fd, buf, nbytes, NULL));
+}
+
+/* ==========================================================================
+ * writev_timedwait()
+ */
+int writev_timedwait(int fd, const struct iovec *iov, int iovcnt,
+ struct timespec * timeout)
+{
+ int ret;
+
+ if ((ret = fd_lock(fd, FD_WRITE, NULL)) == OK) {
+ ret = fd_table[fd]->ops->writev(fd_table[fd]->fd,
+ fd_table[fd]->flags, iov, iovcnt, timeout);
+ fd_unlock(fd, FD_WRITE);
+ }
+ return(ret);
+}
+
+/* ==========================================================================
+ * writev()
+ */
+ssize_t writev(int fd, const struct iovec *iov, int iovcnt)
+{
+ return(writev_timedwait(fd, iov, iovcnt, NULL));
+}
+
+/* ==========================================================================
+ * lseek()
+ */
+off_t lseek(int fd, off_t offset, int whence)
+{
+ off_t ret;
+
+ if ((ret = fd_lock(fd, FD_RDWR, NULL)) == OK) {
+ ret = fd_table[fd]->ops->seek(fd_table[fd]->fd,
+ fd_table[fd]->flags, offset, whence);
+ fd_unlock(fd, FD_RDWR);
+ }
+ return(ret);
+}
+
+/* ==========================================================================
+ * close()
+ *
+ * The whole close procedure is a bit odd and needs a bit of a rethink.
+ * For now close() locks the fd and calls fd_free(), which checks whether any
+ * other fd values point to the same real fd. If so, it breaks the wait queue
+ * into two sections: those that are waiting on fd and those waiting on other
+ * fd's. Those waiting on fd are connected to the fd_table[fd] queue, and the
+ * count is set to zero (BUT THE LOCK IS NOT RELEASED). close() then calls
+ * fd_unlock, which gives the fd to the next queued element, which determines
+ * that the fd is closed and then calls fd_unlock etc...
+ *
+ * XXX close() is even uglier now. You may assume that the kernel fd is the
+ * same as fd if fd_table[fd] == NULL or if fd_table[fd]->type == FD_NT.
+ * This is true because before any fd_table[fd] is allocated the corresponding
+ * kernel fd must be checked to see if it's valid.
+ */
+int close(int fd)
+{
+ struct fd_table_entry * entry;
+ pthread_mutex_t *mutex;
+ union fd_data realfd;
+ int ret, flags;
+
+ if(fd < 0 || fd >= dtablesize)
+ {
+ SET_ERRNO(EBADF);
+ return -1;
+ }
+ /* Need to lock the newfd by hand */
+ pthread_mutex_lock(&fd_table_mutex);
+ if (fd_table[fd]) {
+ pthread_mutex_unlock(&fd_table_mutex);
+ mutex = &(fd_table[fd]->mutex);
+ pthread_mutex_lock(mutex);
+
+ /*
+ * XXX Gross hack ... because of fork(), any fd closed by the
+ * parent should not change the fd of the child, unless it owns it.
+ */
+ switch(fd_table[fd]->type) {
+ case FD_NIU:
+ pthread_mutex_unlock(mutex);
+ ret = -EBADF;
+ break;
+ case FD_NT:
+ /*
+ * If it's not tested then the only valid possibility is it's
+ * kernel fd.
+ */
+ ret = machdep_sys_close(fd);
+ fd_table[fd]->type = FD_NIU;
+ pthread_mutex_unlock(mutex);
+ break;
+ case FD_TEST_FULL_DUPLEX:
+ case FD_TEST_HALF_DUPLEX:
+ realfd = fd_table[fd]->fd;
+ flags = fd_table[fd]->flags;
+ if ((entry = fd_free(fd)) == NULL) {
+ ret = fd_table[fd]->ops->close(realfd, flags);
+ } else {
+ /* There can't be any others waiting for fd. */
+ pthread_mutex_unlock(&entry->mutex);
+ /* Note: entry->mutex = mutex */
+ mutex = &(fd_table[fd]->mutex);
+ }
+ pthread_mutex_unlock(mutex);
+ break;
+ default:
+ ret = fd_basic_lock(fd, FD_RDWR, mutex, NULL);
+ if (ret == OK) {
+ realfd = fd_table[fd]->fd;
+ flags = fd_table[fd]->flags;
+ pthread_mutex_unlock(mutex);
+ if ((entry = fd_free(fd)) == NULL) {
+ ret = fd_table[fd]->ops->close(realfd, flags);
+ } else {
+ fd_basic_basic_unlock(entry, FD_RDWR);
+ pthread_mutex_unlock(&entry->mutex);
+ /* Note: entry->mutex = mutex */
+ }
+ fd_unlock(fd, FD_RDWR);
+ } else {
+ pthread_mutex_unlock(mutex);
+ }
+ break;
+ }
+ } else {
+ /* Don't bother creating a table entry */
+ pthread_mutex_unlock(&fd_table_mutex);
+ ret = machdep_sys_close(fd);
+ }
+ if( ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return(ret);
+}
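+
+/*
+ * Illustrative sketch (not part of the original code): how shared entries
+ * behave across dup()/close(). The descriptor numbers and path are made up.
+ *
+ *	int a = open("/tmp/f", O_RDONLY);   -- say a == 3, kernel fd 5
+ *	int b = dup(a);                     -- b == 4, shares entry with a
+ *	close(a);                           -- fd_free() finds another reference,
+ *	                                       so the real kernel fd stays open
+ *	close(b);                           -- last reference: ops->close() is
+ *	                                       finally called on the kernel fd
+ */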
+
+/* ==========================================================================
+ * fd_basic_dup()
+ *
+ *
+ * This is a MAJOR guess!! I don't know if the mutex unlock is valid
+ * in the BIG picture. But it seems to be needed to avoid deadlocking
+ * with ourselves when we try to close the duped file descriptor.
+ */
+static inline void fd_basic_dup(int fd, int newfd)
+{
+ fd_table[newfd]->next = fd_table[fd]->next;
+ fd_table[fd]->next = fd_table[newfd];
+ fd_table[newfd] = fd_table[fd];
+ fd_table[fd]->count++;
+ pthread_mutex_unlock(&fd_table[newfd]->next->mutex);
+
+}
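+
+/*
+ * Illustrative sketch (added for clarity, not in the original source):
+ * fd_basic_dup(fd, newfd) makes newfd share fd's entry and keeps newfd's
+ * old entry reachable on fd's "next" list so fd_free() can find it later:
+ *
+ *	before:  fd_table[fd] -> E1 (count 1),  fd_table[newfd] -> E2
+ *	after:   fd_table[fd] -> E1 (count 2),  fd_table[newfd] -> E1,
+ *	         E1->next -> E2
+ */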
+
+/* ==========================================================================
+ * dup2()
+ *
+ * Note: Always lock the lower number fd first to avoid deadlocks.
+ * Note: Leave the newfd locked. It will be unlocked at close() time.
+ * Note: newfd must be locked by hand so it can be closed if it is open,
+ * or it won't be opened while dup is in progress.
+ */
+int dup2(int fd, int newfd)
+{
+ struct fd_table_entry * entry;
+ pthread_mutex_t *mutex;
+ union fd_data realfd;
+ int ret, flags;
+
+ if ((ret = fd_check_entry(newfd)) != OK)
+ return ret;
+
+ if (newfd < dtablesize) {
+ if (fd < newfd) {
+ if ((ret = fd_lock(fd, FD_RDWR, NULL)) == OK) {
+ /* Need to lock the newfd by hand */
+ mutex = &(fd_table[newfd]->mutex);
+ pthread_mutex_lock(mutex);
+
+ /* Is it in use? */
+ if (fd_basic_lock(newfd, FD_RDWR, mutex, NULL) == OK) {
+ realfd = fd_table[newfd]->fd;
+ flags = fd_table[newfd]->flags;
+ /* free it and check close status */
+ if ((entry = fd_free(newfd)) == NULL) {
+ entry = fd_table[newfd];
+ entry->ops->close(realfd, flags);
+ if (entry->r_queue.q_next) {
+ if (fd_table[fd]->next) {
+ fd_table[fd]->r_queue.q_last->next =
+ entry->r_queue.q_next;
+ } else {
+ fd_table[fd]->r_queue.q_next =
+ entry->r_queue.q_next;
+ }
+ fd_table[fd]->r_queue.q_last =
+ entry->r_queue.q_last;
+ }
+ if (entry->w_queue.q_next) {
+ if (fd_table[fd]->next) {
+ fd_table[fd]->w_queue.q_last->next =
+ entry->w_queue.q_next;
+ } else {
+ fd_table[fd]->w_queue.q_next =
+ entry->w_queue.q_next;
+ }
+ fd_table[fd]->w_queue.q_last =
+ entry->w_queue.q_last;
+ }
+ entry->r_queue.q_next = NULL;
+ entry->w_queue.q_next = NULL;
+ entry->r_queue.q_last = NULL;
+ entry->w_queue.q_last = NULL;
+ entry->r_owner = NULL;
+ entry->w_owner = NULL;
+ ret = OK;
+ } else {
+ fd_basic_basic_unlock(entry, FD_RDWR);
+ pthread_mutex_unlock(&entry->mutex);
+ /* Note: entry->mutex = mutex */
+ }
+ }
+ fd_basic_dup(fd, newfd);
+ }
+ fd_unlock(fd, FD_RDWR);
+ } else {
+ /* Need to lock the newfd by hand */
+ mutex = &(fd_table[newfd]->mutex);
+ pthread_mutex_lock(mutex);
+
+ if ((ret = fd_lock(fd, FD_RDWR, NULL)) == OK) {
+ /* Is newfd in use? */
+ if ((ret = fd_basic_lock(newfd, FD_RDWR, mutex, NULL)) == OK) {
+ realfd = fd_table[newfd]->fd;
+ flags = fd_table[newfd]->flags;
+ /* free it and check close status */
+ if ((entry = fd_free(newfd)) == NULL) {
+ entry = fd_table[newfd];
+ entry->ops->close(realfd, flags);
+ if (entry->r_queue.q_next) {
+ if (fd_table[fd]->next) {
+ fd_table[fd]->r_queue.q_last->next =
+ entry->r_queue.q_next;
+ } else {
+ fd_table[fd]->r_queue.q_next =
+ entry->r_queue.q_next;
+ }
+ fd_table[fd]->r_queue.q_last =
+ entry->r_queue.q_last;
+ }
+ if (entry->w_queue.q_next) {
+ if (fd_table[fd]->next) {
+ fd_table[fd]->w_queue.q_last->next =
+ entry->w_queue.q_next;
+ } else {
+ fd_table[fd]->w_queue.q_next =
+ entry->w_queue.q_next;
+ }
+ fd_table[fd]->w_queue.q_last =
+ entry->w_queue.q_last;
+ }
+ entry->r_queue.q_next = NULL;
+ entry->w_queue.q_next = NULL;
+ entry->r_queue.q_last = NULL;
+ entry->w_queue.q_last = NULL;
+ entry->r_owner = NULL;
+ entry->w_owner = NULL;
+ ret = OK;
+ } else {
+ fd_basic_basic_unlock(entry, FD_RDWR);
+ pthread_mutex_unlock(&entry->mutex);
+ /* Note: entry->mutex = mutex */
+ }
+ fd_basic_dup(fd, newfd);
+ }
+ fd_unlock(fd, FD_RDWR);
+ }
+ }
+ } else {
+ ret = NOTOK;
+ }
+ return(ret);
+
+}
+
+/* ==========================================================================
+ * dup()
+ */
+int dup(int fd)
+{
+ int ret;
+
+ if ((ret = fd_lock(fd, FD_RDWR, NULL)) == OK) {
+ ret = fd_allocate();
+ fd_basic_dup(fd, ret);
+ fd_unlock(fd, FD_RDWR);
+ }
+ return(ret);
+}
+
+/* ==========================================================================
+ * fcntl()
+ */
+int fcntl(int fd, int cmd, ...)
+{
+ int ret, realfd, flags;
+ struct flock *flock;
+ semaphore *plock;
+ va_list ap;
+
+ flags = 0;
+ if ((ret = fd_lock(fd, FD_RDWR, NULL)) == OK) {
+ va_start(ap, cmd);
+ switch(cmd) {
+ case F_DUPFD:
+ ret = fd_allocate();
+ fd_basic_dup(va_arg(ap, int), ret);
+ break;
+ case F_SETFD:
+ break;
+ case F_GETFD:
+ break;
+ case F_GETFL:
+ ret = fd_table[fd]->flags;
+ break;
+ case F_SETFL:
+ flags = va_arg(ap, int);
+ if ((ret = fd_table[fd]->ops->fcntl(fd_table[fd]->fd,
+ fd_table[fd]->flags, cmd, flags | __FD_NONBLOCK)) == OK) {
+ fd_table[fd]->flags = flags;
+ }
+ break;
+/* case F_SETLKW: */
+ /*
+ * Do the same as SETLK but if it fails with EACCES or EAGAIN
+ * block the thread and try again later, not implemented yet
+ */
+/* case F_SETLK: */
+/* case F_GETLK:
+ flock = va_arg(ap, struct flock*);
+ ret = fd_table[fd]->ops->fcntl(fd_table[fd]->fd,
+ fd_table[fd]->flags, cmd, flock);
+ break; */
+ default:
+ /* Might want to make va_arg use a union */
+ ret = fd_table[fd]->ops->fcntl(fd_table[fd]->fd,
+ fd_table[fd]->flags, cmd, va_arg(ap, void*));
+ break;
+ }
+ va_end(ap);
+ fd_unlock(fd, FD_RDWR);
+ }
+ return(ret);
+}
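+
+/*
+ * Illustrative note (not from the original source): F_SETFL stores the
+ * caller's flags in fd_table[fd]->flags but always ORs __FD_NONBLOCK into
+ * what the kernel sees, so blocking threads can be multiplexed over
+ * non-blocking kernel fds. With a hypothetical descriptor fd:
+ *
+ *	fcntl(fd, F_SETFL, 0);   -- caller sees a blocking fd, F_GETFL returns 0,
+ *	                            but the kernel fd stays non-blocking
+ */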
+
+/* ==========================================================================
+ * getdtablesize()
+ */
+int getdtablesize()
+{
+ return dtablesize;
+}
+
+/* ==========================================================================
+ * ioctl()
+ *
+ * Really want to do a real implementation of this that parses the args ala
+ * fcntl(), above, but it will have to be a totally platform-specific,
+ * nightmare-on-elm-st-style sort of thing. Might even deserve its own file
+ * ala select()... --SNL
+ */
+#ifndef ioctl_request_type
+#define ioctl_request_type unsigned long /* Dummy patch by Monty */
+#endif
+
+int
+ioctl(int fd, ioctl_request_type request, ...)
+{
+ int ret;
+ pthread_va_list ap;
+ caddr_t arg;
+
+ va_start( ap, request ); /* Get the arg */
+ arg = va_arg(ap,caddr_t);
+ va_end( ap );
+
+ if (fd < 0 || fd >= dtablesize)
+ ret = NOTOK;
+ else if (fd_table[fd] == NULL || fd_table[fd]->fd.i == NOTOK)
+ ret = machdep_sys_ioctl(fd, request, arg);
+ else if ((ret = fd_lock(fd, FD_RDWR, NULL)) == OK) {
+ ret = machdep_sys_ioctl(fd_table[fd]->fd.i, request, arg);
+ if( ret == 0 && request == FIONBIO ) {
+ /* Properly set NONBLOCK flag */
+ int v = *(int *)arg;
+ if( v )
+ fd_table[fd]->flags |= __FD_NONBLOCK;
+ else
+ fd_table[fd]->flags &= ~__FD_NONBLOCK;
+ }
+ fd_unlock(fd, FD_RDWR);
+ }
+ return ret;
+}
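+
+/*
+ * Illustrative note (added for clarity): FIONBIO is intercepted so the
+ * library's record of the blocking mode stays in sync with the caller.
+ * Hypothetical usage:
+ *
+ *	int on = 1;
+ *	ioctl(fd, FIONBIO, &on);   -- sets __FD_NONBLOCK in fd_table[fd]->flags
+ *	on = 0;
+ *	ioctl(fd, FIONBIO, &on);   -- clears it again
+ */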
+
diff --git a/mit-pthreads/pthreads/fd_kern.c b/mit-pthreads/pthreads/fd_kern.c
new file mode 100644
index 00000000000..f4ada4e4fd4
--- /dev/null
+++ b/mit-pthreads/pthreads/fd_kern.c
@@ -0,0 +1,1950 @@
+/* ==== fd_kern.c ============================================================
+ * Copyright (c) 1993, 1994 by Chris Provenzano, proven@mit.edu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Chris Provenzano.
+ * 4. The name of Chris Provenzano may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CHRIS PROVENZANO ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL CHRIS PROVENZANO BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Description : Deals with the valid kernel fds.
+ *
+ * 1.00 93/09/27 proven
+ * -Started coding this file.
+ *
+ * 1.01 93/11/13 proven
+ * -The functions readv() and writev() added.
+ */
+
+#ifndef lint
+static const char rcsid[] = "$Id$";
+#endif
+
+#include "config.h"
+#include <pthread.h>
+#include <unistd.h>
+#include <sys/compat.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/uio.h>
+#include <stdarg.h>
+#include <signal.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <pthread/posix.h>
+#include <string.h>
+
+#if defined (HAVE_SYSCALL_SENDTO) && !defined (HAVE_SYSCALL_SEND)
+
+pthread_ssize_t machdep_sys_send (int fd, const void *msg, size_t len,
+ int flags)
+{
+ return machdep_sys_sendto (fd, msg, len, flags,
+ (const struct sockaddr *) 0, 0);
+}
+
+#endif
+
+#if defined (HAVE_SYSCALL_RECVFROM) && !defined (HAVE_SYSCALL_RECV)
+
+pthread_ssize_t machdep_sys_recv (int fd, void *buf, size_t len, int flags)
+{
+ return machdep_sys_recvfrom (fd, buf, len, flags,
+ (struct sockaddr *) 0, (int *) 0);
+}
+
+#endif
+
+/* ==========================================================================
+ * Check if there is any signal that must be handled. Added by Monty.
+ * This could be somewhat system dependent but it should work.
+ */
+
+static int fd_check_if_pending_signal(struct pthread *pthread)
+{
+ int i;
+ unsigned long *pending,*mask;
+ if (!pthread->sigcount)
+ return 0;
+ pending= (unsigned long*) &pthread->sigpending;
+ mask= (unsigned long*) &pthread->sigmask;
+
+ for (i=0 ; i < sizeof(pthread->sigpending)/sizeof(unsigned long); i++)
+ {
+ if (*pending && (*mask ^ (unsigned) ~0L))
+ return 1;
+ pending++;
+ mask++;
+ }
+ return 0;
+}
+
+/* ==========================================================================
+ * Variables used by both fd_kern_poll and fd_kern_wait
+ */
+struct pthread_queue fd_wait_read = PTHREAD_QUEUE_INITIALIZER;
+struct pthread_queue fd_wait_write = PTHREAD_QUEUE_INITIALIZER;
+struct pthread_queue fd_wait_select = PTHREAD_QUEUE_INITIALIZER;
+
+static struct timeval __fd_kern_poll_timeout = { 0, 0 }; /* Moved by monty */
+extern struct timeval __fd_kern_wait_timeout;
+extern volatile sig_atomic_t sig_to_process;
+
+/*
+ * ==========================================================================
+ * Do a select if there is something to wait for.
+ * This is a combination of the old fd_kern_poll() and fd_kern_wait().
+ * Return 1 if there is nothing to do.
+ */
+
+static int fd_kern_select(struct timeval *timeout)
+{
+ fd_set fd_set_read, fd_set_write, fd_set_except;
+ struct pthread *pthread, *deq;
+ int count, i;
+
+ if (!fd_wait_read.q_next && !fd_wait_write.q_next && !fd_wait_select.q_next)
+ return 1; /* Nothing to do */
+
+ FD_ZERO(&fd_set_read);
+ FD_ZERO(&fd_set_write);
+ FD_ZERO(&fd_set_except);
+ for (pthread = fd_wait_read.q_next; pthread; pthread = pthread->next)
+ FD_SET(pthread->data.fd.fd, &fd_set_read);
+ for (pthread = fd_wait_write.q_next; pthread; pthread = pthread->next)
+ FD_SET(pthread->data.fd.fd, &fd_set_write);
+ for (pthread = fd_wait_select.q_next; pthread; pthread = pthread->next)
+ {
+ for (i = 0; i < pthread->data.select_data->nfds; i++) {
+ if (FD_ISSET(i, &pthread->data.select_data->exceptfds))
+ FD_SET(i, &fd_set_except);
+ if (FD_ISSET(i, &pthread->data.select_data->writefds))
+ FD_SET(i, &fd_set_write);
+ if (FD_ISSET(i, &pthread->data.select_data->readfds))
+ FD_SET(i, &fd_set_read);
+ }
+ }
+
+ /* Turn off interrupts for real while we set the timer. */
+
+ if (timeout == &__fd_kern_wait_timeout)
+ { /* from fd_kern_wait() */
+ sigset_t sig_to_block, oset;
+ sigfillset(&sig_to_block);
+ machdep_sys_sigprocmask(SIG_BLOCK, &sig_to_block, &oset);
+
+ machdep_unset_thread_timer(NULL);
+ __fd_kern_wait_timeout.tv_usec = 0;
+ __fd_kern_wait_timeout.tv_sec = (sig_to_process) ? 0 : 3600;
+
+ machdep_sys_sigprocmask(SIG_UNBLOCK, &sig_to_block, &oset);
+ }
+ /*
+ * There is a small but finite chance that an interrupt will
+ * occur between the unblock and the select. Because of this,
+ * sig_handler_real() sets the value of __fd_kern_wait_timeout
+ * to zero causing the select to do a poll instead of a wait.
+ */
+
+ while ((count = machdep_sys_select(dtablesize, &fd_set_read,
+ &fd_set_write, &fd_set_except,
+ timeout)) < OK)
+ {
+ if (count == -EINTR)
+ return 0;
+ PANIC();
+ }
+
+ for (pthread = fd_wait_read.q_next; pthread; ) {
+ if ((count && FD_ISSET(pthread->data.fd.fd, &fd_set_read)) ||
+ fd_check_if_pending_signal(pthread))
+ {
+ if (FD_ISSET(pthread->data.fd.fd, &fd_set_read))
+ count--;
+ deq = pthread;
+ pthread = pthread->next;
+ pthread_queue_remove(&fd_wait_read, deq);
+ if (SET_PF_DONE_EVENT(deq) == OK) {
+ pthread_prio_queue_enq(pthread_current_prio_queue, deq);
+ deq->state = PS_RUNNING;
+ }
+ continue;
+ }
+ pthread = pthread->next;
+ }
+
+ for (pthread = fd_wait_write.q_next; pthread; ) {
+ if ((count && FD_ISSET(pthread->data.fd.fd, &fd_set_write)) ||
+ fd_check_if_pending_signal(pthread))
+ {
+ if (FD_ISSET(pthread->data.fd.fd, &fd_set_write))
+ count--;
+ deq = pthread;
+ pthread = pthread->next;
+ pthread_queue_remove(&fd_wait_write, deq);
+ if (SET_PF_DONE_EVENT(deq) == OK) {
+ pthread_prio_queue_enq(pthread_current_prio_queue, deq);
+ deq->state = PS_RUNNING;
+ }
+ continue;
+ }
+ pthread = pthread->next;
+ }
+
+ for (pthread = fd_wait_select.q_next; pthread; )
+ {
+ int found_one=0; /* Loop fixed by monty */
+ if (count)
+ {
+ fd_set tmp_readfds, tmp_writefds, tmp_exceptfds;
+ memcpy(&tmp_readfds, &pthread->data.select_data->readfds,
+ sizeof(fd_set));
+ memcpy(&tmp_writefds, &pthread->data.select_data->writefds,
+ sizeof(fd_set));
+ memcpy(&tmp_exceptfds, &pthread->data.select_data->exceptfds,
+ sizeof(fd_set));
+
+ for (i = 0; i < pthread->data.select_data->nfds; i++) {
+ if (FD_ISSET(i, &tmp_exceptfds))
+ {
+ if (! FD_ISSET(i, &fd_set_except))
+ FD_CLR(i, &tmp_exceptfds);
+ else
+ found_one=1;
+ }
+ if (FD_ISSET(i, &tmp_writefds))
+ {
+ if (! FD_ISSET(i, &fd_set_write))
+ FD_CLR(i, &tmp_writefds);
+ else
+ found_one=1;
+ }
+ if (FD_ISSET(i, &tmp_readfds))
+ {
+ if (! FD_ISSET(i, &fd_set_read))
+ FD_CLR(i, &tmp_readfds);
+ else
+ found_one=1;
+ }
+ }
+ if (found_one)
+ {
+ memcpy(&pthread->data.select_data->readfds, &tmp_readfds,
+ sizeof(fd_set));
+ memcpy(&pthread->data.select_data->writefds, &tmp_writefds,
+ sizeof(fd_set));
+ memcpy(&pthread->data.select_data->exceptfds, &tmp_exceptfds,
+ sizeof(fd_set));
+ }
+ }
+ if (found_one || fd_check_if_pending_signal(pthread))
+ {
+ deq = pthread;
+ pthread = pthread->next;
+ pthread_queue_remove(&fd_wait_select, deq);
+ if (SET_PF_DONE_EVENT(deq) == OK) {
+ pthread_prio_queue_enq(pthread_current_prio_queue, deq);
+ deq->state = PS_RUNNING;
+ }
+ } else {
+ pthread = pthread->next;
+ }
+ }
+ return 0;
+}
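+
+/*
+ * Note (added for clarity): the three queues above hold threads blocked in
+ * the fd wrappers. A thread that got -EWOULDBLOCK from machdep_sys_read()
+ * enqueues itself roughly like this before rescheduling:
+ *
+ *	pthread_run->data.fd.fd = fd;
+ *	pthread_queue_enq(&fd_wait_read, pthread_run);
+ *	pthread_resched_resume(PS_FDR_WAIT);
+ *
+ * fd_kern_select() moves it back to the run queue once select() reports the
+ * fd ready or a handled signal is pending.
+ */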
+
+
+/* ==========================================================================
+ * fd_kern_poll()
+ *
+ * Called only from context_switch(). The kernel must be locked.
+ *
+ * This function uses a linked list of waiting pthreads, NOT a queue.
+ */
+
+void fd_kern_poll()
+{
+ fd_kern_select(&__fd_kern_poll_timeout);
+}
+
+
+/* ==========================================================================
+ * fd_kern_wait()
+ *
+ * Called when there is no active thread to run.
+ */
+
+void fd_kern_wait()
+{
+ if (fd_kern_select(&__fd_kern_wait_timeout))
+ /* No threads waiting on I/O; do a sigsuspend */
+ sig_handler_pause();
+}
+
+
+/* ==========================================================================
+ * Special Note: All operations return errors as the negative of the errno
+ * value listed in errno.h.
+ * ======================================================================= */
+
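+/*
+ * Illustrative sketch of that convention (not part of the original code):
+ * a machdep_sys_*() result is either the successful value or a negated
+ * errno, so the wrappers below typically do something like
+ *
+ *	if ((ret = machdep_sys_read(fd, buf, nbytes)) < OK) {
+ *		SET_ERRNO(-ret);   -- e.g. -EBADF becomes errno EBADF
+ *		ret = NOTOK;
+ *	}
+ */
+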
+/* ==========================================================================
+ * read()
+ */
+pthread_ssize_t __fd_kern_read(union fd_data fd_data, int flags, void *buf,
+ size_t nbytes, struct timespec * timeout)
+{
+ int fd = fd_data.i;
+ int ret;
+
+ pthread_run->sighandled=0; /* Added by monty */
+ while ((ret = machdep_sys_read(fd, buf, nbytes)) < OK) {
+ if (!(flags & __FD_NONBLOCK) &&
+ ((ret == -EWOULDBLOCK) || (ret == -EAGAIN))) {
+ pthread_sched_prevent();
+
+ /* queue pthread for a FDR_WAIT */
+ SET_PF_WAIT_EVENT(pthread_run);
+ pthread_run->data.fd.fd = fd;
+ pthread_queue_enq(&fd_wait_read, pthread_run);
+
+ if (timeout) {
+ /* get current time */
+ struct timespec current_time;
+ machdep_gettimeofday(&current_time);
+ sleep_schedule(&current_time, timeout);
+
+ SET_PF_AT_CANCEL_POINT(pthread_run);
+ pthread_resched_resume(PS_FDR_WAIT);
+ CLEAR_PF_AT_CANCEL_POINT(pthread_run);
+
+ /* We're awake */
+ pthread_sched_prevent();
+ if (sleep_cancel(pthread_run) == NOTOK) {
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ pthread_sched_resume();
+ SET_ERRNO(ETIMEDOUT);
+ ret= NOTOK;
+ break;
+ }
+ pthread_sched_resume();
+ } else {
+ SET_PF_AT_CANCEL_POINT(pthread_run);
+ pthread_resched_resume(PS_FDR_WAIT);
+ CLEAR_PF_AT_CANCEL_POINT(pthread_run);
+ }
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ if (pthread_run->sighandled) /* Added by monty */
+ { /* We were aborted */
+ SET_ERRNO(EINTR);
+ ret= NOTOK;
+ break;
+ }
+ } else {
+ SET_ERRNO(-ret);
+ ret = NOTOK;
+ break;
+ }
+ }
+ return(ret);
+}
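+
+/*
+ * Note (added for clarity; the same pattern repeats in the readv/write/
+ * writev variants below): on -EWOULDBLOCK the thread parks itself on
+ * fd_wait_read and, if a timeout was given, also on the sleep queue;
+ * whichever fires first wins. Rough sketch of the timed branch:
+ *
+ *	sleep_schedule(&current_time, timeout);  -- arm the wakeup
+ *	pthread_resched_resume(PS_FDR_WAIT);     -- block until fd or timer
+ *	if (sleep_cancel(pthread_run) == NOTOK)  -- timer fired first
+ *		=> ETIMEDOUT; otherwise loop and retry the read
+ */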
+
+/* ==========================================================================
+ * readv()
+ */
+int __fd_kern_readv(union fd_data fd_data, int flags, const struct iovec *iov,
+ int iovcnt, struct timespec * timeout)
+{
+ int fd = fd_data.i;
+ int ret;
+
+ pthread_run->sighandled=0; /* Added by monty */
+ while ((ret = machdep_sys_readv(fd, iov, iovcnt)) < OK) {
+ if (!(flags & __FD_NONBLOCK) &&
+ ((ret == -EWOULDBLOCK) || (ret == -EAGAIN))) {
+ pthread_sched_prevent();
+
+ /* queue pthread for a FDR_WAIT */
+ pthread_run->data.fd.fd = fd;
+ SET_PF_WAIT_EVENT(pthread_run);
+ pthread_queue_enq(&fd_wait_read, pthread_run);
+
+ if (timeout) {
+ /* get current time */
+ struct timespec current_time;
+ machdep_gettimeofday(&current_time);
+ sleep_schedule(&current_time, timeout);
+
+ SET_PF_AT_CANCEL_POINT(pthread_run);
+ pthread_resched_resume(PS_FDW_WAIT);
+ CLEAR_PF_AT_CANCEL_POINT(pthread_run);
+
+ /* We're awake */
+ pthread_sched_prevent();
+ if (sleep_cancel(pthread_run) == NOTOK) {
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ pthread_sched_resume();
+ SET_ERRNO(ETIMEDOUT);
+ ret = NOTOK;
+ break;
+ }
+ pthread_sched_resume();
+ } else {
+ SET_PF_AT_CANCEL_POINT(pthread_run);
+ pthread_resched_resume(PS_FDW_WAIT);
+ CLEAR_PF_AT_CANCEL_POINT(pthread_run);
+ }
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ if (pthread_run->sighandled) /* Added by monty */
+ { /* We were aborted */
+ SET_ERRNO(EINTR);
+ ret= NOTOK;
+ break;
+ }
+ } else {
+ SET_ERRNO(-ret);
+ ret = NOTOK;
+ break;
+ }
+ }
+ return(ret);
+}
+
+/* ==========================================================================
+ * write()
+ */
+pthread_ssize_t __fd_kern_write(union fd_data fd_data, int flags,
+ const void *buf, size_t nbytes, struct timespec * timeout)
+{
+ int fd = fd_data.i;
+ int ret;
+
+ pthread_run->sighandled=0; /* Added by monty */
+ while ((ret = machdep_sys_write(fd, buf, nbytes)) < OK) {
+ if (!(flags & __FD_NONBLOCK) &&
+ ((ret == -EWOULDBLOCK) || (ret == -EAGAIN))) {
+ pthread_sched_prevent();
+
+ /* queue pthread for a FDW_WAIT */
+ pthread_run->data.fd.fd = fd;
+ SET_PF_WAIT_EVENT(pthread_run);
+ pthread_queue_enq(&fd_wait_write, pthread_run);
+
+ if (timeout) {
+ /* get current time */
+ struct timespec current_time;
+ machdep_gettimeofday(&current_time);
+ sleep_schedule(&current_time, timeout);
+
+ pthread_resched_resume(PS_FDW_WAIT);
+
+ /* We're awake */
+ pthread_sched_prevent();
+ if (sleep_cancel(pthread_run) == NOTOK) {
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ pthread_sched_resume();
+ SET_ERRNO(ETIMEDOUT);
+ ret = NOTOK;
+ break;
+ }
+ pthread_sched_resume();
+ } else {
+ pthread_resched_resume(PS_FDW_WAIT);
+ }
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ if (pthread_run->sighandled) /* Added by monty */
+ { /* We were aborted */
+ SET_ERRNO(EINTR);
+ ret= NOTOK;
+ break;
+ }
+ } else {
+ SET_ERRNO(-ret);
+ ret = NOTOK;
+ break;
+ }
+ }
+ return(ret);
+}
+
+/* ==========================================================================
+ * writev()
+ */
+int __fd_kern_writev(union fd_data fd_data, int flags, const struct iovec *iov,
+ int iovcnt, struct timespec * timeout)
+{
+ int fd = fd_data.i;
+ int ret;
+
+ pthread_run->sighandled=0; /* Added by monty */
+ while ((ret = machdep_sys_writev(fd, iov, iovcnt)) < OK) {
+ if (!(flags & __FD_NONBLOCK) &&
+ ((ret == -EWOULDBLOCK) || (ret == -EAGAIN))) {
+ pthread_sched_prevent();
+
+ /* queue pthread for a FDW_WAIT */
+ pthread_run->data.fd.fd = fd;
+ SET_PF_WAIT_EVENT(pthread_run);
+ pthread_queue_enq(&fd_wait_write, pthread_run);
+
+ if (timeout) {
+ /* get current time */
+ struct timespec current_time;
+ machdep_gettimeofday(&current_time);
+ sleep_schedule(&current_time, timeout);
+
+ pthread_resched_resume(PS_FDW_WAIT);
+
+ /* We're awake */
+ pthread_sched_prevent();
+ if (sleep_cancel(pthread_run) == NOTOK) {
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ pthread_sched_resume();
+ SET_ERRNO(ETIMEDOUT);
+ ret = NOTOK;
+ break;
+ }
+ pthread_sched_resume();
+ } else {
+ pthread_resched_resume(PS_FDW_WAIT);
+ }
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ if (pthread_run->sighandled) /* Added by monty */
+ { /* We were aborted */
+ SET_ERRNO(EINTR);
+ ret= NOTOK;
+ break;
+ }
+ } else {
+ break;
+ }
+ }
+ return(ret);
+}
+
+/* ==========================================================================
+ * fcntl()
+ *
+ * For the blocking version we really should set an interrupt.
+ */
+int __fd_kern_fcntl(union fd_data fd_data, int flags, int cmd, int arg)
+{
+ int fd = fd_data.i;
+
+ return(machdep_sys_fcntl(fd, cmd, arg));
+}
+
+/* ==========================================================================
+ * close()
+ */
+int __fd_kern_close(union fd_data fd_data, int flags)
+{
+ int fd = fd_data.i;
+
+ return(machdep_sys_close(fd));
+}
+
+/* ==========================================================================
+ * lseek()
+ * Assume that error numbers are in the range 0-255, to allow a bigger
+ * range of seek offsets. ; Monty
+ */
+off_t __fd_kern_lseek(union fd_data fd_data, int f, off_t offset, int whence)
+{
+ int fd = fd_data.i;
+ extern off_t machdep_sys_lseek(int, off_t, int);
+ off_t ret=machdep_sys_lseek(fd, offset, whence);
+ if ((long) ret < 0L && (long) ret >= -255L)
+ {
+ SET_ERRNO(-ret);
+ ret= NOTOK;
+ }
+ return ret;
+}
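+
+/*
+ * Illustrative note (not in the original source): because lseek() returns
+ * an offset rather than a byte count, only results in [-255, -1] are taken
+ * to be negated errnos; anything else is returned as a valid offset.
+ * For example, lseek(fd, 0, 42) yields -EINVAL, which maps to errno EINVAL,
+ * while a very large offset passes through untouched.
+ */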
+
+/*
+ * File descriptor operations
+ */
+extern int machdep_sys_close();
+
+/* Normal file operations */
+static struct fd_ops __fd_kern_ops = {
+ __fd_kern_write, __fd_kern_read, __fd_kern_close, __fd_kern_fcntl,
+ __fd_kern_writev, __fd_kern_readv, __fd_kern_lseek, 1
+};
+
+/* NFS file operations */
+
+/* FIFO file operations */
+
+/* Device operations */
+
+/* ==========================================================================
+ * open()
+ *
+ * Because open could potentially block opening a file from a remote
+ * system, we want to make sure the call will time out. We then try to open
+ * the file, and stat it to determine what operations we should
+ * associate with the fd.
+ *
+ * This is not done yet.
+ *
+ * A regular file on the local system needs no special treatment.
+ */
+int open(const char *path, int flags, ...)
+{
+ int fd, mode, fd_kern;
+ struct stat stat_buf;
+ va_list ap;
+
+ /* If pthread scheduling == FIFO set a virtual timer */
+ if (flags & O_CREAT) {
+ va_start(ap, flags);
+ mode = va_arg(ap, int);
+ va_end(ap);
+ } else {
+ mode = 0;
+ }
+
+ if (!((fd = fd_allocate()) < OK)) {
+ fd_table[fd]->flags = flags;
+ flags |= __FD_NONBLOCK;
+
+ if (!((fd_kern = machdep_sys_open(path, flags, mode)) < OK)) {
+
+ /* fstat the file to determine what type it is */
+ if (machdep_sys_fstat(fd_kern, &stat_buf)) {
+ PANIC();
+ }
+ if (S_ISREG(stat_buf.st_mode)) {
+ fd_table[fd]->ops = &(__fd_kern_ops);
+ fd_table[fd]->type = FD_HALF_DUPLEX;
+ } else {
+ fd_table[fd]->ops = &(__fd_kern_ops);
+ fd_table[fd]->type = FD_FULL_DUPLEX;
+ }
+ fd_table[fd]->fd.i = fd_kern;
+ return(fd);
+ }
+
+ fd_table[fd]->count = 0;
+ SET_ERRNO(-fd_kern);
+ }
+ return(NOTOK);
+}
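+
+/*
+ * Illustrative sketch (added for clarity; the numbers are hypothetical):
+ * the value handed back to the caller is a user-level slot, not the kernel
+ * descriptor.
+ *
+ *	int fd = open("/etc/passwd", O_RDONLY);  -- caller sees e.g. fd == 4,
+ *	                                            while fd_table[4]->fd.i may
+ *	                                            be 7, the real non-blocking
+ *	                                            kernel fd
+ */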
+
+/* ==========================================================================
+ * create()
+ */
+int create(const char *path, mode_t mode)
+{
+ return creat (path, mode);
+}
+
+/* ==========================================================================
+ * creat()
+ */
+#undef creat
+
+int creat(const char *path, mode_t mode)
+{
+ return open (path, O_CREAT | O_TRUNC | O_WRONLY, mode);
+}
+
+/* ==========================================================================
+ * fchown()
+ */
+int fchown(int fd, uid_t owner, gid_t group)
+{
+ int ret;
+
+ if ((ret = fd_lock(fd, FD_WRITE, NULL)) == OK) {
+ if ((ret = machdep_sys_fchown(fd_table[fd]->fd.i, owner, group)) < OK) {
+ SET_ERRNO(-ret);
+ ret = NOTOK;
+ }
+ fd_unlock(fd, FD_WRITE);
+ }
+ return(ret);
+}
+
+/* ==========================================================================
+ * fchmod()
+ */
+int fchmod(int fd, mode_t mode)
+{
+ int ret;
+
+ if ((ret = fd_lock(fd, FD_WRITE, NULL)) == OK) {
+ if ((ret = machdep_sys_fchmod(fd_table[fd]->fd.i, mode)) < OK) {
+ SET_ERRNO(-ret);
+ ret = NOTOK;
+ }
+ fd_unlock(fd, FD_WRITE);
+ }
+ return(ret);
+}
+
+/* ==========================================================================
+ * ftruncate()
+ */
+int ftruncate(int fd, off_t length)
+{
+ int ret;
+
+ if ((ret = fd_lock(fd, FD_WRITE, NULL)) == OK) {
+ if ((ret = machdep_sys_ftruncate(fd_table[fd]->fd.i, length)) < OK) {
+ SET_ERRNO(-ret);
+ ret = NOTOK;
+ }
+ fd_unlock(fd, FD_WRITE);
+ }
+ return(ret);
+}
+
+#if defined (HAVE_SYSCALL_FLOCK)
+/* ==========================================================================
+ * flock()
+ *
+ * Added (mevans)
+ */
+int flock(int fd, int operation)
+{
+ int ret;
+
+ if ((ret = fd_lock(fd, FD_RDWR, NULL)) == OK) {
+ if ((ret = machdep_sys_flock(fd_table[fd]->fd.i,
+ operation)) < OK) {
+ SET_ERRNO(-ret);
+ ret = NOTOK;
+ }
+ fd_unlock(fd, FD_RDWR);
+ }
+ return(ret);
+}
+#endif
+
+/* ==========================================================================
+ * pipe()
+ */
+int pipe(int fds[2])
+{
+ int kfds[2];
+ int ret;
+
+ if ((fds[0] = fd_allocate()) >= OK) {
+ if ((fds[1] = fd_allocate()) >= OK) {
+ if ((ret = machdep_sys_pipe(kfds)) >= OK) {
+ fd_table[fds[0]]->flags = machdep_sys_fcntl(kfds[0], F_GETFL, NULL);
+ machdep_sys_fcntl(kfds[0], F_SETFL, fd_table[fds[0]]->flags | __FD_NONBLOCK);
+ fd_table[fds[1]]->flags = machdep_sys_fcntl(kfds[1], F_GETFL, NULL);
+ machdep_sys_fcntl(kfds[1], F_SETFL, fd_table[fds[1]]->flags | __FD_NONBLOCK);
+
+ fd_table[fds[0]]->ops = &(__fd_kern_ops);
+ fd_table[fds[1]]->ops = &(__fd_kern_ops);
+
+ /* Not really full duplex but ... */
+ fd_table[fds[0]]->type = FD_FULL_DUPLEX;
+ fd_table[fds[1]]->type = FD_FULL_DUPLEX;
+
+ fd_table[fds[0]]->fd.i = kfds[0];
+ fd_table[fds[1]]->fd.i = kfds[1];
+
+ return(OK);
+ } else {
+ SET_ERRNO(-ret);
+ }
+ fd_table[fds[1]]->count = 0;
+ }
+ fd_table[fds[0]]->count = 0;
+ }
+ return(NOTOK);
+}
+
+/* ==========================================================================
+ * fd_kern_reset()
+ * Change the fcntl blocking flag back to NONBLOCKING. This should only
+ * be called after a fork.
+ */
+void fd_kern_reset(int fd)
+{
+ switch (fd_table[fd]->type) {
+ case FD_TEST_HALF_DUPLEX:
+ machdep_sys_fcntl(fd_table[fd]->fd.i, F_SETFL,
+ fd_table[fd]->flags | __FD_NONBLOCK);
+ fd_table[fd]->type = FD_HALF_DUPLEX;
+ break;
+ case FD_TEST_FULL_DUPLEX:
+ machdep_sys_fcntl(fd_table[fd]->fd.i, F_SETFL,
+ fd_table[fd]->flags | __FD_NONBLOCK);
+ fd_table[fd]->type = FD_FULL_DUPLEX;
+ break;
+ default:
+ break;
+ }
+}
+
+/* ==========================================================================
+ * fd_kern_init()
+ *
+ * Assume the entry is locked before this routine is invoked.
+ *
+ * This may change. The problem is that setting the fd to nonblocking changes
+ * the parent's fd too, which may not be the desired result.
+ *
+ * Newly added feature: if the fd in question is a tty then we open it again
+ * and close the original; this way we don't have to worry about the
+ * fd being NONBLOCKING to the outside world.
+ */
+void fd_kern_init(int fd)
+{
+ if ((fd_table[fd]->flags = machdep_sys_fcntl(fd, F_GETFL, NULL)) >= OK) {
+ if (isatty_basic(fd)) {
+ int new_fd;
+
+ if ((new_fd = machdep_sys_open(__ttyname_basic(fd), O_RDWR)) >= OK){
+ if (machdep_sys_dup2(new_fd, fd) == OK) {
+ /* Should print a warning */
+
+ /* Should also set the flags to that of opened outside of
+ process */
+ }
+ machdep_sys_close(new_fd);
+ }
+ }
+ /* We do these things regardless of the above results */
+ machdep_sys_fcntl(fd, F_SETFL, fd_table[fd]->flags | __FD_NONBLOCK);
+ fd_table[fd]->ops = &(__fd_kern_ops);
+ fd_table[fd]->type = FD_HALF_DUPLEX;
+ fd_table[fd]->fd.i = fd;
+ fd_table[fd]->count = 1;
+
+ }
+}
+
+/* ==========================================================================
+ * fd_kern_gettableentry()
+ *
+ * Remember: only return a file descriptor that I will modify later.
+ * Don't return file descriptors that aren't owned by the child, or that
+ * don't have kernel operations.
+ */
+static int fd_kern_gettableentry(const int child, int fd)
+{
+ int i;
+
+ for (i = 0; i < dtablesize; i++) {
+ if (fd_table[i]) {
+ if (fd_table[i]->fd.i == fd) {
+ if (child) {
+ if ((fd_table[i]->type != FD_TEST_HALF_DUPLEX) &&
+ (fd_table[i]->type != FD_TEST_FULL_DUPLEX)) {
+ continue;
+ }
+ } else {
+ if ((fd_table[i]->type == FD_NT) ||
+ (fd_table[i]->type == FD_NIU)) {
+ continue;
+ }
+ }
+ /* Is it a kernel fd ? */
+ if ((!fd_table[i]->ops) ||
+ (fd_table[i]->ops->use_kfds != 1)) {
+ continue;
+ }
+ return(i);
+ }
+ }
+ }
+ return(NOTOK);
+}
+
+/* ==========================================================================
+ * fd_kern_exec()
+ *
+ * Fix up the fd_table such that (fd == fd_table[fd]->fd.i); this way
+ * the new image will be OK.
+ *
+ * Only touch those that won't be used by the parent if we're in a child,
+ * otherwise fix up all of them.
+ *
+ * Returns:
+ * 0 no fixup necessary
+ * 1 fixup without problems
+ * 2 failed fixup on some descriptors, and clobbered them.
+ */
+int fd_kern_exec(const int child)
+{
+ int ret = 0;
+ int fd, i;
+
+ for (fd = 0; fd < dtablesize; fd++) {
+ if (fd_table[fd] == NULL) {
+ continue;
+ }
+ /* Is the fd already in use ? */
+ if (child) {
+ if ((fd_table[fd]->type != FD_TEST_HALF_DUPLEX) &&
+ (fd_table[fd]->type != FD_TEST_FULL_DUPLEX)) {
+ continue;
+ }
+ } else {
+ if ((fd_table[fd]->type == FD_NT) ||
+ (fd_table[fd]->type == FD_NIU)) {
+ continue;
+ }
+ }
+ /* Is it a kernel fd ? */
+ if ((!fd_table[fd]->ops) ||
+ (fd_table[fd]->ops->use_kfds != 1)) {
+ continue;
+ }
+ /* Does it match ? */
+ if (fd_table[fd]->fd.i == fd) {
+ continue;
+ }
+ /* OK, fixup entry: Read comments before changing. This isn't obvious */
+
+ /* i is the real file descriptor fd currently represents */
+ if (((i = fd_table[fd]->fd.i) >= dtablesize) || (i < 0)) {
+ /* This should never happen */
+ PANIC();
+ }
+
+ /*
+ * if the real file descriptor with the same number as the fake file
+ * descriptor number fd is actually in use by the program, we have
+ * to move it out of the way
+ */
+ if ((machdep_sys_fcntl(fd, F_GETFL, NULL)) >= OK) {
+ /* fd is busy */
+ int j;
+
+ /*
+ * j is the fake file descriptor that represents the real file
+ * descriptor that we want to move. This way the fake file
+ * descriptor fd can move its real file descriptor i such that
+ * fd == i.
+ */
+ if ((j = fd_kern_gettableentry(child, fd)) >= OK) {
+
+ /*
+ * Both j and fd represent fake file descriptors. If j < fd
+ * then a previous pass should have set fd_table[j]->fd.i == j.
+ */
+ if (fd < j) {
+ if ((fd_table[j]->fd.i = machdep_sys_dup(fd)) < OK) {
+ /* Close j, there is nothing else we can do */
+ fd_table[j]->type = FD_NIU;
+ ret = 2;
+ }
+ } else {
+ /* This implies fd_table[j]->fd.i != j */
+ PANIC();
+ }
+ }
+ }
+
+ /*
+ * Here the real file descriptor i is set to equal the fake file
+ * descriptor fd.
+ */
+ machdep_sys_dup2(i, fd);
+
+ /*
+ * Now comes the really complicated part: UNDERSTAND before changing
+ *
+ * Here are the things this routine wants to do ...
+ *
+ * Case 1. The real file descriptor has only one fake file descriptor
+ * representing it.
+ * fd -> i, fd != i ===> fd -> fd, close(i)
+ * Example fd = 4, i = 2: then close(2), set fd -> i = 4
+ *
+ * Case 2. The real file descriptor has more than one fake file
+ * descriptor representing it, and this is the first fake file
+ * descriptor representing the real file descriptor
+ * fd -> i, fd' -> i, fd != i ===> fd -> fd, fd' -> fd, close(i)
+ *
+ * The problem is that achieving the above is very messy and difficult,
+ * but I should be able to take a short cut. If fd > i then there
+ * will be no need to ever move i; this is because the fake file
+ * descriptor foo that we would have wanted to represent the real
+ * file descriptor i has already been processed. If fd < i then by
+ * moving i to fd all subsequent fake file descriptors fd' should fall
+ * into the previous case and won't need additional adjusting.
+ *
+ * Does this break the above fd < j check? It shouldn't, because j
+ * is a fake file descriptor, and if j < fd then j has already moved
+ * its real file descriptor foo such that foo <= j, therefore foo < fd
+ * and not foo == fd; therefore j cannot represent the real
+ * file descriptor that fd wants to move to and also be less than fd.
+ */
+ if (fd < i) {
+ fd_table[fd]->fd.i = fd;
+ machdep_sys_close(i);
+ }
+ if (ret < 1) {
+ ret = 1;
+ }
+ }
+ return(ret);
+}
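+
+/*
+ * Worked example (added for clarity; the descriptor numbers are made up):
+ * suppose fd_table[1]->fd.i == 5 and fd_table[5]->fd.i == 1 before exec.
+ * Pass fd == 1: kernel fd 1 is busy, but it belongs to fake fd 5 (j == 5 >
+ * fd), so kernel fd 1 is first dup()'d aside for entry 5 (say onto 6);
+ * then dup2(5, 1) and close(5) leave fd_table[1]->fd.i == 1.
+ * Pass fd == 5: kernel fd 5 is now free, so dup2(6, 5) and close(6) finish
+ * the job, and fd == fd_table[fd]->fd.i holds everywhere.
+ */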
+
+/* ==========================================================================
+ * fd_kern_fork()
+ */
+void fd_kern_fork()
+{
+ pthread_mutex_t *mutex;
+ int fd;
+
+ for (fd = 0; fd < dtablesize; fd++) {
+ if (fd_table[fd] == NULL) {
+ continue;
+ }
+ mutex = & (fd_table[fd]->mutex);
+ if (pthread_mutex_trylock(mutex)) {
+ continue;
+ }
+ if ((fd_table[fd]->r_owner) || (fd_table[fd]->w_owner)) {
+ pthread_mutex_unlock(mutex);
+ continue;
+ }
+ /* Is it a kernel fd ? */
+ if ((!fd_table[fd]->ops) || (fd_table[fd]->ops->use_kfds != 1)) {
+ pthread_mutex_unlock(mutex);
+ continue;
+ }
+ switch (fd_table[fd]->type) {
+ case FD_HALF_DUPLEX:
+ machdep_sys_fcntl(fd_table[fd]->fd.i, F_SETFL, fd_table[fd]->flags);
+ fd_table[fd]->type = FD_TEST_HALF_DUPLEX;
+ break;
+ case FD_FULL_DUPLEX:
+ machdep_sys_fcntl(fd_table[fd]->fd.i, F_SETFL, fd_table[fd]->flags);
+ fd_table[fd]->type = FD_TEST_FULL_DUPLEX;
+ break;
+ default:
+ break;
+ }
+ pthread_mutex_unlock(mutex);
+ }
+}
+
+/* ==========================================================================
+ * Here are the berkeley socket functions. These are not POSIX.
+ * ======================================================================= */
+
+#if defined (HAVE_SYSCALL_SOCKET) || defined (HAVE_SYSCALL_SOCKETCALL)
+
+/* ==========================================================================
+ * socket()
+ */
+int socket(int af, int type, int protocol)
+{
+ int fd, fd_kern;
+
+ if (!((fd = fd_allocate()) < OK)) {
+
+ if (!((fd_kern = machdep_sys_socket(af, type, protocol)) < OK)) {
+ int tmp_flags;
+
+ tmp_flags = machdep_sys_fcntl(fd_kern, F_GETFL, 0);
+ machdep_sys_fcntl(fd_kern, F_SETFL, tmp_flags | __FD_NONBLOCK);
+
+ /* Should fstat the file to determine what type it is */
+ fd_table[fd]->ops = & __fd_kern_ops;
+ fd_table[fd]->type = FD_FULL_DUPLEX;
+ fd_table[fd]->fd.i = fd_kern;
+ fd_table[fd]->flags = tmp_flags;
+ return(fd);
+ }
+
+ fd_table[fd]->count = 0;
+ SET_ERRNO(-fd_kern);
+ }
+ return(NOTOK);
+}
+
+#endif
+
+#if defined (HAVE_SYSCALL_BIND) || defined (HAVE_SYSCALL_SOCKETCALL)
+
+/* ==========================================================================
+ * bind()
+ */
+#ifdef _OS_HAS_SOCKLEN_T
+int bind(int fd, const struct sockaddr *name, socklen_t namelen)
+#else
+int bind(int fd, const struct sockaddr *name, int namelen)
+#endif
+{
+ /* Not much to do in bind */
+ int ret;
+
+ if ((ret = fd_lock(fd, FD_RDWR, NULL)) == OK) {
+ if ((ret = machdep_sys_bind(fd_table[fd]->fd.i, name, namelen)) < OK) {
+ SET_ERRNO(-ret);
+ ret = NOTOK;
+ }
+ fd_unlock(fd, FD_RDWR);
+ }
+ return(ret);
+}
+
+#endif
+
+#if defined (HAVE_SYSCALL_CONNECT) || defined (HAVE_SYSCALL_SOCKETCALL)
+
+/* ==========================================================================
+ * connect()
+ */
+#ifdef _OS_HAS_SOCKLEN_T
+int connect(int fd, const struct sockaddr *name, socklen_t namelen)
+#else
+int connect(int fd, const struct sockaddr *name, int namelen)
+#endif
+{
+ struct sockaddr tmpname;
+ int ret, tmpnamelen;
+
+ if ((ret = fd_lock(fd, FD_RDWR, NULL)) == OK) {
+ if ((ret = machdep_sys_connect(fd_table[fd]->fd.i, name, namelen)) < OK) {
+ if (!(fd_table[fd]->flags & __FD_NONBLOCK) &&
+ ((ret == -EWOULDBLOCK) || (ret == -EINPROGRESS) ||
+ (ret == -EALREADY) || (ret == -EAGAIN))) {
+ pthread_sched_prevent();
+
+ /* queue pthread for a FDW_WAIT */
+ SET_PF_WAIT_EVENT(pthread_run);
+ pthread_run->data.fd.fd = fd_table[fd]->fd.i;
+ pthread_queue_enq(&fd_wait_write, pthread_run);
+
+ pthread_resched_resume(PS_FDW_WAIT);
+ CLEAR_PF_DONE_EVENT(pthread_run);
+
+ tmpnamelen = sizeof(tmpname);
+ /* OK, now let's see if it really worked */
+ if (((ret = machdep_sys_getpeername(fd_table[fd]->fd.i,
+ &tmpname, &tmpnamelen)) < OK) &&
+ (ret == -ENOTCONN))
+ {
+ /* Get the error, this function should not fail */
+ machdep_sys_getsockopt(fd_table[fd]->fd.i, SOL_SOCKET,
+ SO_ERROR, &ret, &tmpnamelen);
+ SET_ERRNO(ret); /* ret is already positive (mevans) */
+ ret = NOTOK;
+ }
+ } else {
+ if (ret < 0)
+ {
+ SET_ERRNO(-ret);
+ ret = NOTOK;
+ }
+ }
+ }
+ fd_unlock(fd, FD_RDWR);
+ }
+ return(ret);
+}
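+
+/*
+ * Note (added for clarity): this is the usual non-blocking connect dance.
+ * connect() may come back with -EINPROGRESS or -EWOULDBLOCK; the thread then
+ * waits for the socket to become writable and checks the outcome, roughly:
+ *
+ *	if getpeername() fails with ENOTCONN, the connect failed, and
+ *	getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len) retrieves the real error
+ */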
+
+#endif
+
+#if defined (HAVE_SYSCALL_ACCEPT) || defined (HAVE_SYSCALL_SOCKETCALL)
+
+/* ==========================================================================
+ * accept()
+ */
+#ifdef _OS_HAS_SOCKLEN_T
+int accept(int fd, struct sockaddr *name, socklen_t *namelen)
+#else
+int accept(int fd, struct sockaddr *name, int *namelen)
+#endif
+{
+ int ret, fd_kern;
+
+ if ((ret = fd_lock(fd, FD_RDWR, NULL)) == OK) {
+ while ((fd_kern = machdep_sys_accept(fd_table[fd]->fd.i, name, namelen)) < OK) {
+ if (!(fd_table[fd]->flags & __FD_NONBLOCK) &&
+ ((fd_kern == -EWOULDBLOCK) || (fd_kern == -EAGAIN))) {
+ pthread_sched_prevent();
+
+ /* queue pthread for a FDR_WAIT */
+ SET_PF_WAIT_EVENT(pthread_run);
+ pthread_run->data.fd.fd = fd_table[fd]->fd.i;
+ pthread_queue_enq(&fd_wait_read, pthread_run);
+
+ pthread_resched_resume(PS_FDR_WAIT);
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ } else {
+ fd_unlock(fd, FD_RDWR);
+ SET_ERRNO(-fd_kern);
+ return(NOTOK);
+ }
+ }
+ fd_unlock(fd, FD_RDWR);
+
+ if (!((ret = fd_allocate()) < OK)) {
+
+ /* This may be unnecessary */
+ machdep_sys_fcntl(fd_kern, F_SETFL, __FD_NONBLOCK);
+
+ /* Should fstat the file to determine what type it is */
+ fd_table[ret]->ops = & __fd_kern_ops;
+ fd_table[ret]->type = FD_FULL_DUPLEX;
+ fd_table[ret]->fd.i = fd_kern;
+
+ /* XXX Flags should be the same as those on the listening fd */
+ fd_table[ret]->flags = fd_table[fd]->flags;
+ }
+ }
+ return(ret);
+}
+
+#endif
+
+#if defined (HAVE_SYSCALL_LISTEN) || defined (HAVE_SYSCALL_SOCKETCALL)
+
+/* ==========================================================================
+ * listen()
+ */
+int listen(int fd, int backlog)
+{
+ int ret;
+
+ if ((ret = fd_lock(fd, FD_RDWR, NULL)) == OK) {
+ if ((ret = machdep_sys_listen(fd_table[fd]->fd.i, backlog)) < OK) {
+ SET_ERRNO(-ret);
+ ret = NOTOK;
+ }
+ fd_unlock(fd, FD_RDWR);
+ }
+ return(ret);
+}
+
+#endif
+
+#if defined (HAVE_SYSCALL_SEND) || defined (HAVE_SYSCALL_SOCKETCALL)
+
+/* ==========================================================================
+ * send_timedwait()
+ */
+ssize_t send_timedwait(int fd, const void * msg, size_t len, int flags,
+ struct timespec * timeout)
+{
+ int ret;
+
+ pthread_run->sighandled=0; /* Added by monty */
+ if ((ret = fd_lock(fd, FD_WRITE, timeout)) == OK) {
+ while ((ret = machdep_sys_send(fd_table[fd]->fd.i,
+ msg, len, flags)) < OK)
+ {
+ if (!(fd_table[fd]->flags & __FD_NONBLOCK) &&
+ ((ret == -EWOULDBLOCK) || (ret == -EAGAIN)))
+ {
+ pthread_sched_prevent();
+
+ /* queue pthread for a FDW_WAIT */
+ SET_PF_WAIT_EVENT(pthread_run);
+ pthread_run->data.fd.fd = fd_table[fd]->fd.i;
+ pthread_queue_enq(&fd_wait_write, pthread_run);
+
+ if (timeout) {
+ /* get current time */
+ struct timespec current_time;
+ machdep_gettimeofday(&current_time);
+ sleep_schedule(&current_time, timeout);
+
+ pthread_resched_resume(PS_FDW_WAIT);
+
+ /* We're awake */
+ pthread_sched_prevent();
+ if (sleep_cancel(pthread_run) == NOTOK) {
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ pthread_sched_resume();
+ ret = -ETIMEDOUT;
+ break;
+ }
+ pthread_sched_resume();
+ } else {
+ pthread_resched_resume(PS_FDW_WAIT);
+ }
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ if (pthread_run->sighandled) /* Added by monty */
+ { /* We were aborted */
+ ret= -EINTR;
+ break;
+ }
+ } else {
+ break;
+ }
+ }
+ fd_unlock(fd, FD_WRITE);
+ }
+ if (ret < 0)
+ {
+ SET_ERRNO(-ret);
+ return(NOTOK);
+ }
+ return ret;
+}
+
+/* ==========================================================================
+ * send()
+ */
+ssize_t send(int fd, const void * msg, size_t len, int flags)
+{
+ return(send_timedwait(fd, msg, len, flags, NULL));
+}
+
+#endif
+
+#if defined (HAVE_SYSCALL_SENDTO) || defined (HAVE_SYSCALL_SOCKETCALL)
+
+/* ==========================================================================
+ * sendto_timedwait()
+ */
+ssize_t sendto_timedwait(int fd, const void * msg, size_t len,
+ int flags, const struct sockaddr *to, int to_len,
+ struct timespec * timeout)
+{
+ int ret;
+
+ pthread_run->sighandled=0; /* Added by monty */
+ if ((ret = fd_lock(fd, FD_WRITE, timeout)) == OK) {
+ while ((ret = machdep_sys_sendto(fd_table[fd]->fd.i,
+ msg, len, flags, to, to_len)) < OK) {
+ if (!(fd_table[fd]->flags & __FD_NONBLOCK) &&
+ ((ret == -EWOULDBLOCK) || (ret == -EAGAIN))) {
+ pthread_sched_prevent();
+
+ /* queue pthread for a FDW_WAIT */
+ SET_PF_WAIT_EVENT(pthread_run);
+ pthread_run->data.fd.fd = fd_table[fd]->fd.i;
+ pthread_queue_enq(&fd_wait_write, pthread_run);
+
+ if (timeout) {
+ /* get current time */
+ struct timespec current_time;
+ machdep_gettimeofday(&current_time);
+ sleep_schedule(&current_time, timeout);
+
+ pthread_resched_resume(PS_FDW_WAIT);
+
+ /* We're awake */
+ pthread_sched_prevent();
+ if (sleep_cancel(pthread_run) == NOTOK) {
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ pthread_sched_resume();
+ ret= -ETIMEDOUT;
+ break;
+ }
+ pthread_sched_resume();
+ } else {
+ pthread_resched_resume(PS_FDW_WAIT);
+ }
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ if (pthread_run->sighandled) /* Added by monty */
+ { /* We were aborted */
+ ret= -EINTR;
+ break;
+ }
+ }
+ else
+ break; /* ret contains the errorcode */
+ }
+ fd_unlock(fd, FD_WRITE);
+ }
+ if (ret < 0)
+ {
+ SET_ERRNO(-ret);
+ return(NOTOK);
+ }
+ return(ret);
+}
+
+/* ==========================================================================
+ * sendto()
+ */
+#ifdef _OS_HAS_SOCKLEN_T
+ssize_t sendto(int fd, const void * msg, size_t len, int flags,
+ const struct sockaddr *to, socklen_t to_len)
+#else
+ssize_t sendto(int fd, const void * msg, size_t len, int flags,
+ const struct sockaddr *to, int to_len)
+#endif
+{
+ return(sendto_timedwait(fd, msg, len, flags, to, to_len, NULL));
+}
+
+#endif
+
+#if defined (HAVE_SYSCALL_SENDMSG) || defined (HAVE_SYSCALL_SOCKETCALL)
+
+/* ==========================================================================
+ * sendmsg_timedwait()
+ */
+ssize_t sendmsg_timedwait(int fd, const struct msghdr *msg, int flags,
+ struct timespec * timeout)
+{
+ int passed_fd, ret, i;
+
+ /* Handle getting the real file descriptor */
+ for(i = 0; i < (((struct omsghdr *)msg)->msg_accrightslen/sizeof(i)); i++) {
+ passed_fd = *(((int *)((struct omsghdr *)msg)->msg_accrights) + i);
+ if ((ret = fd_lock(passed_fd, FD_RDWR, NULL)) == OK) {
+ *(((int *)((struct omsghdr *)msg)->msg_accrights) + i)
+ = fd_table[passed_fd]->fd.i;
+ machdep_sys_fcntl(fd_table[passed_fd]->fd.i, F_SETFL,
+ fd_table[passed_fd]->flags);
+ switch(fd_table[passed_fd]->type) {
+ case FD_TEST_FULL_DUPLEX:
+ case FD_TEST_HALF_DUPLEX:
+ break;
+ case FD_FULL_DUPLEX:
+ fd_table[passed_fd]->type = FD_TEST_FULL_DUPLEX;
+ break;
+ case FD_HALF_DUPLEX:
+ fd_table[passed_fd]->type = FD_TEST_HALF_DUPLEX;
+ break;
+ default:
+ PANIC();
+ }
+ } else {
+ SET_ERRNO(EBADF);
+ return(NOTOK);
+ }
+ fd_unlock(passed_fd, FD_RDWR);
+ }
+
+ pthread_run->sighandled=0; /* Added by monty */
+ if ((ret = fd_lock(fd, FD_WRITE, timeout)) == OK) {
+ while((ret = machdep_sys_sendmsg(fd_table[fd]->fd.i, msg, flags)) < OK){
+ if (!(fd_table[fd]->flags & __FD_NONBLOCK) &&
+ ((ret == -EWOULDBLOCK) || (ret == -EAGAIN))) {
+ pthread_sched_prevent();
+
+ /* queue pthread for a FDW_WAIT */
+ SET_PF_WAIT_EVENT(pthread_run);
+ pthread_run->data.fd.fd = fd_table[fd]->fd.i;
+ pthread_queue_enq(&fd_wait_write, pthread_run);
+
+ if (timeout) {
+ /* get current time */
+ struct timespec current_time;
+ machdep_gettimeofday(&current_time);
+ sleep_schedule(&current_time, timeout);
+
+ pthread_resched_resume(PS_FDW_WAIT);
+
+ /* We're awake */
+ pthread_sched_prevent();
+ if (sleep_cancel(pthread_run) == NOTOK) {
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ pthread_sched_resume();
+ SET_ERRNO(ETIMEDOUT);
+ ret = NOTOK;
+ break;
+ }
+ pthread_sched_resume();
+
+ } else {
+ pthread_resched_resume(PS_FDW_WAIT);
+ }
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ if (pthread_run->sighandled) /* Added by monty */
+ { /* We were aborted */
+ SET_ERRNO(EINTR);
+ ret= NOTOK;
+ break;
+ }
+
+ } else {
+ SET_ERRNO(-ret);
+ ret = NOTOK;
+ break;
+ }
+ }
+ fd_unlock(fd, FD_WRITE);
+ }
+ return(ret);
+}
+
+/* ==========================================================================
+ * sendmsg()
+ */
+ssize_t sendmsg(int fd, const struct msghdr *msg, int flags)
+{
+ return(sendmsg_timedwait(fd, msg, flags, NULL));
+}
+
+#endif
+
+#if defined (HAVE_SYSCALL_RECV) || defined (HAVE_SYSCALL_SOCKETCALL)
+
+/* ==========================================================================
+ * recv_timedwait()
+ */
+ssize_t recv_timedwait(int fd, void * buf, size_t len, int flags,
+ struct timespec * timeout)
+{
+ int ret;
+
+ pthread_run->sighandled=0; /* Added by monty */
+ if ((ret = fd_lock(fd, FD_READ, timeout)) == OK) {
+ while ((ret = machdep_sys_recv(fd_table[fd]->fd.i,
+ buf, len, flags)) < OK) {
+ if (!(fd_table[fd]->flags & __FD_NONBLOCK) &&
+ ((ret == -EWOULDBLOCK) || (ret == -EAGAIN))) {
+ pthread_sched_prevent();
+
+ /* queue pthread for a FDR_WAIT */
+ SET_PF_WAIT_EVENT(pthread_run);
+ pthread_run->data.fd.fd = fd_table[fd]->fd.i;
+ pthread_queue_enq(&fd_wait_read, pthread_run);
+
+ if (timeout) {
+ /* get current time */
+ struct timespec current_time;
+ machdep_gettimeofday(&current_time);
+ sleep_schedule(&current_time, timeout);
+
+ pthread_resched_resume(PS_FDR_WAIT);
+
+ /* We're awake */
+ pthread_sched_prevent();
+ if (sleep_cancel(pthread_run) == NOTOK) {
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ pthread_sched_resume();
+ ret = -ETIMEDOUT;
+ break;
+ }
+ pthread_sched_resume();
+ } else {
+ pthread_resched_resume(PS_FDR_WAIT);
+ }
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ if (pthread_run->sighandled) /* Added by monty */
+ { /* We were aborted */
+ ret= -EINTR;
+ break;
+ }
+
+ } else {
+ break;
+ }
+ }
+ fd_unlock(fd, FD_READ);
+ }
+ if (ret < 0)
+ {
+ SET_ERRNO(-ret);
+ return(NOTOK);
+ }
+ return(ret);
+}
+
+/* ==========================================================================
+ * recv()
+ */
+ssize_t recv(int fd, void * buf, size_t len, int flags)
+{
+ return(recv_timedwait(fd, buf, len, flags, NULL));
+}
+
+#endif
+
+#if defined (HAVE_SYSCALL_RECVFROM) || defined (HAVE_SYSCALL_SOCKETCALL)
+
+/* ==========================================================================
+ * recvfrom_timedwait()
+ */
+ssize_t recvfrom_timedwait(int fd, void * buf, size_t len, int flags,
+ struct sockaddr * from, int * from_len,
+ struct timespec * timeout)
+{
+ int ret;
+
+ pthread_run->sighandled=0; /* Added by monty */
+ if ((ret = fd_lock(fd, FD_READ, timeout)) == OK) {
+ while ((ret = machdep_sys_recvfrom(fd_table[fd]->fd.i,
+ buf, len, flags, from, from_len)) < OK) {
+ if (!(fd_table[fd]->flags & __FD_NONBLOCK) &&
+ ((ret == -EWOULDBLOCK) || (ret == -EAGAIN))) {
+ pthread_sched_prevent();
+
+ /* queue pthread for a FDR_WAIT */
+ SET_PF_WAIT_EVENT(pthread_run);
+ pthread_run->data.fd.fd = fd_table[fd]->fd.i;
+ pthread_queue_enq(&fd_wait_read, pthread_run);
+
+ if (timeout) {
+ /* get current time */
+ struct timespec current_time;
+ machdep_gettimeofday(&current_time);
+ sleep_schedule(&current_time, timeout);
+
+ pthread_resched_resume(PS_FDR_WAIT);
+
+ /* We're awake */
+ pthread_sched_prevent();
+ if (sleep_cancel(pthread_run) == NOTOK) {
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ pthread_sched_resume();
+ ret= -ETIMEDOUT;
+ break;
+ }
+ pthread_sched_resume();
+
+ } else {
+ pthread_resched_resume(PS_FDR_WAIT);
+ }
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ if (pthread_run->sighandled) /* Added by monty */
+ { /* We were aborted */
+ ret= -EINTR;
+ break;
+ }
+ } else {
+ break;
+ }
+ }
+ fd_unlock(fd, FD_READ);
+ }
+ if (ret < 0)
+ {
+ SET_ERRNO(-ret);
+ return(NOTOK);
+ }
+ return(ret);
+}
+
+/* ==========================================================================
+ * recvfrom()
+ */
+#ifdef _OS_HAS_SOCKLEN_T
+ssize_t recvfrom(int fd, void * buf, size_t len, int flags,
+ struct sockaddr * from, socklen_t * from_len)
+#else
+ssize_t recvfrom(int fd, void * buf, size_t len, int flags,
+ struct sockaddr * from, int * from_len)
+#endif
+{
+ return(recvfrom_timedwait(fd, buf, len, flags, from, from_len, NULL));
+}
+
+#endif
+
+#if defined (HAVE_SYSCALL_RECVMSG) || defined (HAVE_SYSCALL_SOCKETCALL)
+
+/* ==========================================================================
+ * recvmsg_timedwait()
+ */
+ssize_t recvmsg_timedwait(int fd, struct msghdr *msg, int flags,
+ struct timespec * timeout)
+{
+ struct stat stat_buf;
+ int passed_fd, ret, i;
+
+ pthread_run->sighandled=0; /* Added by monty */
+ if ((ret = fd_lock(fd, FD_READ, timeout)) == OK) {
+ while ((ret = machdep_sys_recvmsg(fd_table[fd]->fd.i, msg, flags)) < OK) {
+ if (!(fd_table[fd]->flags & __FD_NONBLOCK) &&
+ ((ret == -EWOULDBLOCK) || (ret == -EAGAIN))) {
+ pthread_sched_prevent();
+
+ /* queue pthread for a FDR_WAIT */
+ SET_PF_WAIT_EVENT(pthread_run);
+ pthread_run->data.fd.fd = fd_table[fd]->fd.i;
+ pthread_queue_enq(&fd_wait_read, pthread_run);
+
+ if (timeout) {
+ /* get current time */
+ struct timespec current_time;
+ machdep_gettimeofday(&current_time);
+ sleep_schedule(&current_time, timeout);
+
+ pthread_resched_resume(PS_FDR_WAIT);
+
+ /* We're awake */
+ pthread_sched_prevent();
+ if (sleep_cancel(pthread_run) == NOTOK) {
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ pthread_sched_resume();
+ SET_ERRNO(ETIMEDOUT);
+ ret = NOTOK;
+ break;
+ }
+ pthread_sched_resume();
+
+ } else {
+ pthread_resched_resume(PS_FDR_WAIT);
+ }
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ if (pthread_run->sighandled) /* Added by monty */
+ { /* We were aborted */
+ SET_ERRNO(EINTR);
+ ret= NOTOK;
+ break;
+ }
+ } else {
+ SET_ERRNO(-ret);
+ ret = NOTOK;
+ break;
+ }
+ }
+ fd_unlock(fd, FD_READ);
+
+ /* Handle getting the real file descriptor */
+ for (i = 0; i < (((struct omsghdr *)msg)->msg_accrightslen / sizeof(i));
+ i++) {
+ passed_fd = *(((int *)((struct omsghdr *)msg)->msg_accrights) + i);
+ if (!((fd = fd_allocate()) < OK)) {
+ fd_table[fd]->flags = machdep_sys_fcntl(passed_fd, F_GETFL);
+
+ if (!( fd_table[fd]->flags & __FD_NONBLOCK)) {
+ machdep_sys_fcntl(passed_fd, F_SETFL,
+ fd_table[fd]->flags | __FD_NONBLOCK);
+ }
+
+ /* fstat the file to determine what type it is */
+ machdep_sys_fstat(passed_fd, &stat_buf);
+ if (S_ISREG(stat_buf.st_mode)) {
+ fd_table[fd]->type = FD_HALF_DUPLEX;
+ } else {
+ fd_table[fd]->type = FD_FULL_DUPLEX;
+ }
+ *(((int *)((struct omsghdr *)msg)->msg_accrights) + i) = fd;
+ fd_table[fd]->ops = &(__fd_kern_ops);
+ fd_table[fd]->fd.i = passed_fd;
+ } else {
+ SET_ERRNO(EBADF);
+ return(NOTOK);
+ break;
+ }
+ }
+ }
+ return(ret);
+}
+
+/* ==========================================================================
+ * recvmsg()
+ */
+ssize_t recvmsg(int fd, struct msghdr *msg, int flags)
+{
+ return(recvmsg_timedwait(fd, msg, flags, NULL));
+}
+
+#endif
+
+#if defined (HAVE_SYSCALL_SHUTDOWN) || defined (HAVE_SYSCALL_SOCKETCALL)
+
+/* ==========================================================================
+ * shutdown()
+ */
+int shutdown(int fd, int how)
+{
+ int ret;
+
+ switch(how) {
+ case 0: /* Read */
+ if ((ret = fd_lock(fd, FD_READ, NULL)) == OK) {
+ if ((ret = machdep_sys_shutdown(fd_table[fd]->fd.i, how)) < OK) {
+ SET_ERRNO(-ret);
+ ret = NOTOK;
+ }
+ fd_unlock(fd, FD_READ);
+ }
+ break;
+ case 1: /* Write */
+ if ((ret = fd_lock(fd, FD_WRITE, NULL)) == OK) {
+ if ((ret = machdep_sys_shutdown(fd_table[fd]->fd.i, how)) < OK) {
+ SET_ERRNO(-ret);
+ ret = NOTOK;
+ }
+ fd_unlock(fd, FD_WRITE);
+ }
+ break;
+ case 2: /* Read-Write */
+ if ((ret = fd_lock(fd, FD_RDWR, NULL)) == OK) {
+ if ((ret = machdep_sys_shutdown(fd_table[fd]->fd.i, how)) < OK) {
+ SET_ERRNO(-ret);
+ ret = NOTOK;
+ }
+ fd_unlock(fd, FD_RDWR);
+ }
+ break;
+ default:
+ SET_ERRNO(EBADF);
+ ret = NOTOK;
+ break;
+ }
+ return(ret);
+}
+
+#endif
+
+#if defined (HAVE_SYSCALL_SETSOCKOPT) || defined (HAVE_SYSCALL_SOCKETCALL)
+
+/* ==========================================================================
+ * setsockopt()
+ */
+#ifdef _OS_HAS_SOCKLEN_T
+int setsockopt(int fd, int level, int optname, const void * optval, socklen_t optlen)
+#else
+int setsockopt(int fd, int level, int optname, const void * optval, int optlen)
+#endif
+{
+ int ret;
+
+ if ((ret = fd_lock(fd, FD_RDWR, NULL)) == OK) {
+ if ((ret = machdep_sys_setsockopt(fd_table[fd]->fd.i, level,
+ optname, optval, optlen)) < OK) {
+ SET_ERRNO(-ret);
+ ret = NOTOK;
+ }
+ fd_unlock(fd, FD_RDWR);
+ }
+ return ret;
+}
+
+#endif
+
+#if defined (HAVE_SYSCALL_GETSOCKOPT) || defined (HAVE_SYSCALL_SOCKETCALL)
+
+/* ==========================================================================
+ * getsockopt()
+ */
+#ifdef _OS_HAS_SOCKLEN_T
+int getsockopt(int fd, int level, int optname, void * optval, socklen_t * optlen)
+#else
+int getsockopt(int fd, int level, int optname, void * optval, int * optlen)
+#endif
+{
+ int ret;
+
+ if ((ret = fd_lock(fd, FD_READ, NULL)) == OK) {
+ if ((ret = machdep_sys_getsockopt(fd_table[fd]->fd.i, level,
+ optname, optval, optlen)) < OK) {
+ SET_ERRNO(-ret);
+ ret = NOTOK;
+ }
+ fd_unlock(fd, FD_READ);
+ }
+ return ret;
+}
+
+#endif
+
+#if defined (HAVE_SYSCALL_GETSOCKOPT) || defined (HAVE_SYSCALL_SOCKETCALL)
+
+/* ==========================================================================
+ * getsockname()
+ */
+#ifdef _OS_HAS_SOCKLEN_T
+int getsockname(int fd, struct sockaddr * name, socklen_t * naddrlen)
+#else
+int getsockname(int fd, struct sockaddr * name, int * naddrlen)
+#endif
+{
+ int ret;
+
+ if ((ret = fd_lock(fd, FD_READ, NULL)) == OK) {
+ if ((ret = machdep_sys_getsockname(fd_table[fd]->fd.i,
+ name, naddrlen)) < OK) {
+ SET_ERRNO(-ret);
+ ret = NOTOK;
+ }
+ fd_unlock(fd, FD_READ);
+ }
+ return ret;
+}
+
+#endif
+
+#if defined (HAVE_SYSCALL_GETPEERNAME) || defined (HAVE_SYSCALL_SOCKETCALL)
+
+/* ==========================================================================
+ * getpeername()
+ */
+#ifdef _OS_HAS_SOCKLEN_T
+int getpeername(int fd, struct sockaddr * peer, socklen_t * paddrlen)
+#else
+int getpeername(int fd, struct sockaddr * peer, int * paddrlen)
+#endif
+{
+ int ret;
+
+ if ((ret = fd_lock(fd, FD_READ, NULL)) == OK) {
+ if ((ret = machdep_sys_getpeername(fd_table[fd]->fd.i,
+ peer, paddrlen)) < OK) {
+ SET_ERRNO(-ret);
+ ret = NOTOK;
+ }
+ fd_unlock(fd, FD_READ);
+ }
+ return ret;
+}
+
+#endif
+
+#if defined (HAVE_SYSCALL_SOCKETPAIR) || defined (HAVE_SYSCALL_SOCKETCALL)
+
+/* ==========================================================================
+ * socketpair()
+ */
+int socketpair(int af, int type, int protocol, int pair[2])
+{
+ int ret, fd[2];
+
+ if (!((pair[0] = fd_allocate()) < OK)) {
+ if (!((pair[1] = fd_allocate()) < OK)) {
+ if (!((ret = machdep_sys_socketpair(af, type, protocol, fd)) < OK)){
+ int tmp_flags;
+
+ tmp_flags = machdep_sys_fcntl(fd[0], F_GETFL, 0);
+ machdep_sys_fcntl(fd[0], F_SETFL, tmp_flags | __FD_NONBLOCK);
+ fd_table[pair[0]]->ops = & __fd_kern_ops;
+ fd_table[pair[0]]->type = FD_FULL_DUPLEX;
+ fd_table[pair[0]]->flags = tmp_flags;
+ fd_table[pair[0]]->fd.i = fd[0];
+
+ tmp_flags = machdep_sys_fcntl(fd[1], F_GETFL, 0);
+ machdep_sys_fcntl(fd[1], F_SETFL, tmp_flags | __FD_NONBLOCK);
+ fd_table[pair[1]]->ops = & __fd_kern_ops;
+ fd_table[pair[1]]->type = FD_FULL_DUPLEX;
+ fd_table[pair[1]]->flags = tmp_flags;
+ fd_table[pair[1]]->fd.i = fd[1];
+
+ return(ret);
+ }
+			fd_table[pair[1]]->count = 0;
+			SET_ERRNO(-ret);
+		}
+		fd_table[pair[0]]->count = 0;
+ }
+ return(NOTOK);
+}
+
+#endif
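+
+/* --------------------------------------------------------------------------
+ * Illustrative sketch (not part of the imported sources): every socket
+ * wrapper above follows the same shape -- lock the user-level fd, translate
+ * it through fd_table[] to the kernel fd, make the machdep_sys_*() call,
+ * turn a negative return into errno plus NOTOK, and unlock.  The hypothetical
+ * machdep_sys_foo() below is a placeholder used only to show that pattern.
+ */
+#if 0
+int foo(int fd, int arg)
+{
+	int ret;
+
+	if ((ret = fd_lock(fd, FD_RDWR, NULL)) == OK) {
+		if ((ret = machdep_sys_foo(fd_table[fd]->fd.i, arg)) < OK) {
+			SET_ERRNO(-ret);
+			ret = NOTOK;
+		}
+		fd_unlock(fd, FD_RDWR);
+	}
+	return (ret);
+}
+#endif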
diff --git a/mit-pthreads/pthreads/fd_pipe.c b/mit-pthreads/pthreads/fd_pipe.c
new file mode 100644
index 00000000000..e8bc20857ed
--- /dev/null
+++ b/mit-pthreads/pthreads/fd_pipe.c
@@ -0,0 +1,257 @@
+/* ==== fd_pipe.c ============================================================
+ * Copyright (c) 1993, 1994 by Chris Provenzano, proven@mit.edu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Chris Provenzano.
+ * 4. The name of Chris Provenzano may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CHRIS PROVENZANO ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL CHRIS PROVENZANO BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Description : The new fast ITC pipe routines.
+ *
+ * 1.00 93/08/14 proven
+ * -Started coding this file.
+ *
+ * 1.01 93/11/13 proven
+ * -The functions readv() and writev() added.
+ */
+
+#ifndef lint
+static const char rcsid[] = "$Id$";
+#endif
+
+#include <pthread.h>
+#include <pthread/fd_pipe.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/time.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <pthread/posix.h>
+#include <string.h>
+#include <stdlib.h>
+
+#ifndef MIN
+#define MIN(a,b) ((a)<(b)?(a):(b))
+#endif
+
+/* ==========================================================================
+ * __pipe_read()
+ *
+ * The pipe lock is never unlocked until all pthreads waiting are done with it.
+ */
+pthread_ssize_t __pipe_read(union fd_data fd_data, int flags, void *buf,
+ size_t nbytes, struct timespec * timeout)
+{
+ struct __pipe *fd = (struct __pipe *)fd_data.ptr;
+ struct pthread * pthread;
+ int ret = 0;
+
+ if (flags & O_ACCMODE) { return(NOTOK); }
+
+ /* If there is nothing to read, go to sleep */
+ if (fd->count == 0) {
+		if (fd->flags & WR_CLOSED) {
+ return(0);
+ }
+
+ pthread_sched_prevent();
+
+ /* queue pthread for a FDR_WAIT */
+ pthread_run->next = NULL;
+ fd->wait = pthread_run;
+
+ pthread_resched_resume(PS_FDR_WAIT);
+ ret = fd->size;
+ } else {
+ ret = MIN(nbytes, fd->count);
+ memcpy(buf, fd->buf + fd->offset, ret);
+ if (!(fd->count -= ret)) {
+ fd->offset = 0;
+ }
+
+		if ((pthread = fd->wait) != NULL) {
+ fd->wait = NULL;
+ pthread_sched_prevent();
+ pthread_sched_other_resume(pthread);
+ }
+ }
+ return(ret);
+}
+
+/* ==========================================================================
+ * __pipe_write()
+ *
+ * First check to see if the read side is still open, then
+ * check to see if there is a thread in a read wait for this pipe, if so
+ * copy as much data as possible directly into the read-waiting thread's
+ * buffer. The write thread (whether or not there was a read thread)
+ * copies as much data as it can into the pipe buffer, and if there
+ * is still data left it goes to sleep.
+ */
+pthread_ssize_t __pipe_write(union fd_data fd_data, int flags, const void *buf,
+ size_t nbytes, struct timespec * timeout) {
+ struct __pipe *fd = (struct __pipe *)fd_data.ptr;
+ struct pthread * pthread;
+	int ret = 0, count;
+
+ if (!(flags & O_ACCMODE)) { return(NOTOK); }
+
+ while (fd->flags != RD_CLOSED) {
+		if ((pthread = fd->wait) != NULL) {
+
+ pthread_sched_prevent();
+
+ /* Copy data directly into waiting pthreads buf */
+ fd->wait_size = MIN(nbytes, fd->wait_size);
+ memcpy(fd->wait_buf, buf, fd->wait_size);
+ buf = (const char *)buf + fd->wait_size;
+ nbytes -= fd->wait_size;
+ ret = fd->wait_size;
+ fd->wait = NULL;
+
+ /* Wake up waiting pthread */
+ pthread_sched_other_resume(pthread);
+ }
+
+		if ((count = MIN(nbytes, fd->size - (fd->offset + fd->count))) != 0) {
+ memcpy(fd->buf + (fd->offset + fd->count), buf, count);
+ buf = (const char *)buf + count;
+ nbytes -= count;
+ ret += count;
+ }
+ if (nbytes) {
+ pthread_sched_prevent();
+ fd->wait = pthread_run;
+ pthread_resched_resume(PS_FDW_WAIT);
+ } else {
+ return(ret);
+ }
+ }
+ return(NOTOK);
+}
+
+/* ==========================================================================
+ * __pipe_close()
+ *
+ * The whole close procedure is a bit odd and needs a bit of a rethink.
+ * For now close() locks the fd and calls fd_free(), which checks to see if
+ * there are any other fd values pointing to the same real fd. If so,
+ * it breaks the wait queue into two sections: those that are waiting on fd
+ * and those waiting on other fds. Those that are waiting on fd are connected
+ * to the fd_table[fd] queue, and the count is set to zero (BUT THE LOCK IS NOT
+ * RELEASED). close() then calls fd_unlock, which gives the fd to the next queued
+ * element, which determines that the fd is closed and then calls fd_unlock, etc.
+ */
+int __pipe_close(struct __pipe *fd, int flags)
+{
+ struct pthread * pthread;
+
+	if (!(fd->flags)) {
+		if ((pthread = fd->wait) != NULL) {
+			if (flags & O_ACCMODE) {
+				fd->count = 0;
+				fd->wait = NULL;
+				fd->flags |= WR_CLOSED;
+				pthread_sched_prevent();
+				/* Wake the reader waiting on this pipe */
+				pthread_sched_other_resume(pthread);
+			} else {
+				/* Should send a signal */
+				fd->flags |= RD_CLOSED;
+			}
+		}
+	} else {
+		free(fd);
+	}
+	return(OK);
+}
+
+/* ==========================================================================
+ * For fcntl() which isn't implemented yet
+ * __pipe_enosys()
+ */
+static int __pipe_enosys()
+{
+ SET_ERRNO(ENOSYS);
+ return(NOTOK);
+}
+
+/* ==========================================================================
+ * For writev() and readv() which aren't implemented yet
+ * __pipe_enosys_v()
+ */
+static int __pipe_enosys_v(union fd_data fd, int flags,
+ const struct iovec *vec, int nvec,
+ struct timespec *timeout)
+{
+ SET_ERRNO(ENOSYS);
+ return(NOTOK);
+}
+
+/* ==========================================================================
+ * For lseek() which isn't implemented yet
+ * __pipe_enosys_o()
+ */
+static off_t __pipe_enosys_o()
+{
+ SET_ERRNO(ENOSYS);
+ return(NOTOK);
+}
+
+/*
+ * File descriptor operations
+ */
+struct fd_ops fd_ops[] = {
+{ __pipe_write, __pipe_read, __pipe_close, __pipe_enosys,
+ __pipe_enosys_v, __pipe_enosys_v, __pipe_enosys_o, 0 },
+};
+
+/* ==========================================================================
+ * open()
+ */
+/* int __pipe_open(const char *path, int flags, ...) */
+int newpipe(int fd[2])
+{
+ struct __pipe *fd_data;
+
+ if ((!((fd[0] = fd_allocate()) < OK)) && (!((fd[1] = fd_allocate()) < OK))) {
+ fd_data = malloc(sizeof(struct __pipe));
+ fd_data->buf = malloc(4096);
+ fd_data->size = 4096;
+ fd_data->count = 0;
+ fd_data->offset = 0;
+
+ fd_data->wait = NULL;
+ fd_data->flags = 0;
+
+ fd_table[fd[0]]->fd.ptr = fd_data;
+ fd_table[fd[0]]->flags = O_RDONLY;
+ fd_table[fd[1]]->fd.ptr = fd_data;
+ fd_table[fd[1]]->flags = O_WRONLY;
+
+ return(OK);
+ }
+ return(NOTOK);
+}
+
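+/* --------------------------------------------------------------------------
+ * Illustrative usage sketch (not part of the imported sources): once
+ * newpipe() has pointed both fd_table entries at the same struct __pipe,
+ * plain read()/write() on the returned descriptors are expected to be
+ * dispatched through the fd_ops table to __pipe_read()/__pipe_write() above.
+ */
+#if 0
+static void newpipe_example(void)
+{
+	int fds[2];
+	char answer[16];
+
+	if (newpipe(fds) == OK) {
+		write(fds[1], "ping", 4);		/* lands in fd_data->buf */
+		read(fds[0], answer, sizeof(answer));	/* drains fd_data->buf */
+	}
+}
+#endif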
diff --git a/mit-pthreads/pthreads/fd_sysv.c b/mit-pthreads/pthreads/fd_sysv.c
new file mode 100644
index 00000000000..6dc01a49aa4
--- /dev/null
+++ b/mit-pthreads/pthreads/fd_sysv.c
@@ -0,0 +1,897 @@
+/* ==== fd_sysv.c ============================================================
+ * Copyright (c) 1994 by Chris Provenzano, proven@mit.edu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Chris Provenzano.
+ * 4. The name of Chris Provenzano may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CHRIS PROVENZANO ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL CHRIS PROVENZANO BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Description : Transforms BSD socket calls to SYSV streams.
+ *
+ * 1.00 94/11/19 proven
+ * -Started coding this file.
+ */
+
+#ifndef lint
+static const char rcsid[] = "$Id$";
+#endif
+
+#include <config.h>
+#include <pthread.h>
+#include <fcntl.h>
+#include <errno.h>
+
+#if defined (HAVE_SYSCALL_PUTMSG) && defined (HAVE_SYSCALL_GETMSG) && !defined(HAVE_SYSCALL_SOCKETCALL) && !defined(HAVE_SYSCALL_SOCKET)
+#define HAVE_STREAMS 1
+
+#include <sys/types.h>
+#include <sys/uio.h>
+#include <sys/socket.h>
+#include <sys/stream.h>
+#include <sys/stropts.h>
+#include <tiuser.h>
+#include <sys/tihdr.h>
+#include <netinet/in.h>
+#include <sys/timod.h>
+
+#define STREAM_BUF_SIZE sizeof(union T_primitives) + sizeof(struct sockaddr)
+
+extern struct pthread_queue fd_wait_read, fd_wait_write;
+
+/* ==========================================================================
+ * putmsg_timedwait_basic()
+ */
+static int putmsg_timedwait_basic(int fd, struct strbuf * ctlptr,
+ struct strbuf * dataptr, int flags, struct timespec * timeout)
+{
+
+ int ret;
+
+ pthread_run->sighandled=0; /* Added by monty */
+ while ((ret = machdep_sys_putmsg(fd_table[fd]->fd.i,
+ ctlptr, dataptr, flags)) < OK) {
+ if (!(fd_table[fd]->flags & __FD_NONBLOCK) &&
+ ((ret == -EWOULDBLOCK) || (ret == -EAGAIN))) {
+ pthread_sched_prevent();
+
+ /* queue pthread for a FDW_WAIT */
+ SET_PF_WAIT_EVENT(pthread_run);
+ pthread_run->data.fd.fd = fd_table[fd]->fd.i;
+ pthread_queue_enq(&fd_wait_write, pthread_run);
+
+ if (timeout) {
+ /* get current time */
+ struct timespec current_time;
+ machdep_gettimeofday(&current_time);
+ sleep_schedule(& current_time, timeout);
+
+ pthread_resched_resume(PS_FDW_WAIT);
+
+ /* We're awake */
+ pthread_sched_prevent();
+ if (sleep_cancel(pthread_run) == NOTOK) {
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ pthread_sched_resume();
+ SET_ERRNO(ETIMEDOUT);
+ ret = -ETIMEDOUT;
+ break;
+ }
+ pthread_sched_resume();
+ } else {
+ pthread_resched_resume(PS_FDW_WAIT);
+ }
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ if (pthread_run->sighandled) /* Added by monty */
+		{ /* We were aborted */
+ SET_ERRNO(EINTR);
+ ret= -EINTR;
+ break;
+ }
+ } else {
+ SET_ERRNO(-ret);
+ break;
+ }
+ }
+ return(ret);
+}
+
+/* ==========================================================================
+ * putmsg_timedwait()
+ */
+int putmsg_timedwait(int fd, struct strbuf * ctlptr, struct strbuf * dataptr,
+ int flags, struct timespec * timeout)
+{
+ int ret;
+
+ if ((ret = fd_lock(fd, FD_WRITE, timeout)) == OK) {
+ ret = putmsg_timedwait_basic(fd, ctlptr, dataptr, flags, timeout);
+ fd_unlock(fd, FD_WRITE);
+ }
+ return(ret);
+}
+
+/* ==========================================================================
+ * putmsg()
+ */
+int putmsg(int fd, struct strbuf * ctlptr, struct strbuf * dataptr,
+ int flags)
+{
+ return(putmsg_timedwait(fd, ctlptr, dataptr, flags, NULL));
+}
+
+/* ==========================================================================
+ * getmsg_timedwait_basic()
+ */
+int getmsg_timedwait_basic(int fd, struct strbuf * ctlptr,
+ struct strbuf * dataptr, int * flags, struct timespec * timeout)
+{
+ int ret;
+
+ pthread_run->sighandled=0; /* Added by monty */
+ while ((ret = machdep_sys_getmsg(fd_table[fd]->fd.i,
+ ctlptr, dataptr, flags)) < OK) {
+ if (!(fd_table[fd]->flags & __FD_NONBLOCK) &&
+ ((ret == -EWOULDBLOCK) || (ret == -EAGAIN))) {
+ pthread_sched_prevent();
+
+ /* queue pthread for a FDR_WAIT */
+ SET_PF_WAIT_EVENT(pthread_run);
+ pthread_run->data.fd.fd = fd_table[fd]->fd.i;
+ pthread_queue_enq(&fd_wait_read, pthread_run);
+
+ if (timeout) {
+ /* get current time */
+ struct timespec current_time;
+ machdep_gettimeofday(&current_time);
+ sleep_schedule(& current_time, timeout);
+
+ pthread_resched_resume(PS_FDR_WAIT);
+
+ /* We're awake */
+ pthread_sched_prevent();
+ if (sleep_cancel(pthread_run) == NOTOK) {
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ pthread_sched_resume();
+ SET_ERRNO(ETIMEDOUT);
+ ret = -ETIMEDOUT;
+ break;
+ }
+ pthread_sched_resume();
+ } else {
+ pthread_resched_resume(PS_FDR_WAIT);
+ }
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ if (pthread_run->sighandled) /* Added by monty */
+		{ /* We were aborted */
+ SET_ERRNO(EINTR);
+ ret= -EINTR;
+ break;
+ }
+
+ } else {
+ SET_ERRNO(-ret);
+ break;
+ }
+ }
+ return(ret);
+}
+
+/* ==========================================================================
+ * getmsg_timedwait()
+ */
+int getmsg_timedwait(int fd, struct strbuf * ctlptr, struct strbuf * dataptr,
+ int * flags, struct timespec * timeout)
+{
+ int ret;
+
+ if ((ret = fd_lock(fd, FD_READ, timeout)) == OK) {
+ ret = getmsg_timedwait_basic(fd, ctlptr, dataptr, flags, timeout);
+ fd_unlock(fd, FD_READ);
+ }
+ return (ret);
+}
+
+/* ==========================================================================
+ * getmsg()
+ */
+int getmsg(int fd, struct strbuf * ctlptr, struct strbuf * dataptr,
+ int * flags)
+{
+ return(getmsg_timedwait(fd, ctlptr, dataptr, flags, NULL));
+}
+
+#endif
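+
+/* --------------------------------------------------------------------------
+ * Illustrative sketch (not part of the imported sources): callers that want
+ * a bounded wait hand putmsg_timedwait()/getmsg_timedwait() a timespec.
+ * Judging from sleep_schedule() being given the current time alongside it,
+ * the timespec is assumed here to be a relative delay.
+ */
+#if 0
+static int putmsg_within_2s(int fd, struct strbuf *ctlptr, struct strbuf *dataptr)
+{
+	struct timespec timeout;
+
+	timeout.tv_sec = 2;	/* give up after roughly two seconds */
+	timeout.tv_nsec = 0;
+	return (putmsg_timedwait(fd, ctlptr, dataptr, 0, &timeout));
+}
+#endif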
+
+/* ==========================================================================
+ * Here are the berkeley socket functions implemented with stream calls.
+ * These are not POSIX.
+ * ======================================================================= */
+
+#if (!defined (HAVE_SYSCALL_BIND)) && defined(HAVE_STREAMS)
+
+/* ==========================================================================
+ * bind()
+ */
+int bind(int fd, const struct sockaddr *name, int namelen)
+{
+ char buf[STREAM_BUF_SIZE];
+ union T_primitives * res;
+ struct T_bind_req * req;
+ struct T_bind_ack * ack;
+ struct strbuf strbuf;
+ int flags, ret;
+
+ if ((ret = fd_lock(fd, FD_RDWR, NULL)) == OK)
+ {
+ req = (struct T_bind_req *)buf;
+ req->PRIM_type = T_BIND_REQ;
+ req->ADDR_length = namelen;
+ req->ADDR_offset = sizeof(struct T_bind_req);
+ req->CONIND_number = 4;
+ memcpy(buf + sizeof(struct T_bind_req), name, namelen);
+
+ strbuf.len = sizeof(struct T_bind_req) + namelen;
+ strbuf.maxlen = STREAM_BUF_SIZE;
+ strbuf.buf = buf;
+
+ if ((ret=putmsg_timedwait_basic(fd, &strbuf, NULL, 0, NULL)) == OK)
+ {
+ memset(buf, 0, STREAM_BUF_SIZE);
+
+ strbuf.len = sizeof(struct T_bind_ack) + namelen;
+ strbuf.maxlen = STREAM_BUF_SIZE;
+ strbuf.buf = buf;
+ flags = 0;
+
+ if ((ret = getmsg_timedwait_basic(fd, &strbuf, NULL,
+ &flags, NULL)) >= OK)
+ {
+ res = (union T_primitives *)buf;
+
+ switch(res->type) {
+ case T_BIND_ACK:
+ ret = OK;
+ break;
+ default:
+ SET_ERRNO(EPROTO); /* What should this be? */
+ ret = NOTOK;
+ break;
+ }
+ }
+ else
+ {
+ SET_ERRNO(-ret);
+ ret = NOTOK;
+ }
+ }
+ else
+ {
+ SET_ERRNO(-ret);
+ ret = NOTOK;
+ }
+ fd_unlock(fd, FD_RDWR);
+ }
+ return(ret);
+}
+
+#endif
+
+#if (!defined (HAVE_SYSCALL_CONNECT)) && defined(HAVE_STREAMS)
+
+/* ==========================================================================
+ * connect()
+ */
+int connect(int fd, const struct sockaddr *name, int namelen)
+{
+ char buf[STREAM_BUF_SIZE];
+ union T_primitives * res;
+ struct T_conn_req * req;
+ struct T_conn_con * con;
+ struct T_ok_ack * ok;
+ struct strbuf strbuf;
+ int flags, ret;
+
+ if ((ret = fd_lock(fd, FD_RDWR, NULL)) == OK)
+ {
+ req = (struct T_conn_req *)buf;
+ req->PRIM_type = T_CONN_REQ;
+ req->DEST_length = namelen;
+ req->DEST_offset = sizeof(struct T_conn_req);
+ req->OPT_length = 0;
+ req->OPT_offset = 0;
+ memcpy(buf + sizeof(struct T_conn_req), name, namelen);
+
+ strbuf.len = sizeof(struct T_conn_req) + namelen;
+ strbuf.maxlen = STREAM_BUF_SIZE;
+ strbuf.buf = buf;
+
+ if ((ret=putmsg_timedwait_basic(fd, &strbuf, NULL, 0, NULL)) != OK)
+ goto err;
+
+ memset(buf, 0, STREAM_BUF_SIZE);
+ ok = (struct T_ok_ack *)buf;
+
+ strbuf.maxlen = STREAM_BUF_SIZE;
+ strbuf.len = STREAM_BUF_SIZE;
+ strbuf.buf = buf;
+ flags = 0;
+
+ if ((ret=getmsg_timedwait_basic(fd, &strbuf, NULL, &flags, NULL)) < OK)
+ goto err; /* Fixed by monty */
+ if (ok->PRIM_type != T_OK_ACK)
+ {
+ ret= -EPROTO; /* What should this be? */
+ goto err;
+ }
+
+ memset(buf, 0, STREAM_BUF_SIZE);
+ strbuf.maxlen = STREAM_BUF_SIZE;
+ strbuf.len = STREAM_BUF_SIZE;
+ strbuf.buf = buf;
+ flags = 0;
+
+    if ((ret=getmsg_timedwait_basic(fd, &strbuf, NULL, &flags, NULL)) < OK)
+ goto err;
+
+ res = (union T_primitives *) buf;
+ switch(res->type) {
+ case T_CONN_CON:
+ ret = OK;
+ break;
+ case T_DISCON_IND:
+ ret= -ECONNREFUSED;
+ goto err;
+ default:
+ ret= -EPROTO; /* What should this be? */
+ goto err;
+ }
+ fd_unlock(fd, FD_RDWR);
+ }
+ return(ret);
+
+ err:
+ fd_unlock(fd, FD_RDWR);
+  SET_ERRNO(-ret);			/* Probably not needed... */
+ return NOTOK;
+}
+
+#endif
+
+#if (!defined (HAVE_SYSCALL_LISTEN)) && defined(HAVE_STREAMS)
+
+/* ==========================================================================
+ * listen()
+ */
+int listen(int fd, int backlog)
+{
+ return(OK);
+}
+
+#endif
+
+#if (!defined (HAVE_SYSCALL_SOCKET)) && defined(HAVE_STREAMS)
+
+extern ssize_t __fd_kern_write();
+static pthread_ssize_t __fd_sysv_read();
+extern int __fd_kern_close();
+extern int __fd_kern_fcntl();
+extern int __fd_kern_writev();
+extern int __fd_kern_readv();
+extern off_t __fd_kern_lseek();
+
+/* Normal file operations */
+static struct fd_ops __fd_sysv_ops = {
+ __fd_kern_write, __fd_sysv_read, __fd_kern_close, __fd_kern_fcntl,
+ __fd_kern_writev, __fd_kern_readv, __fd_kern_lseek, 1
+};
+
+/* ==========================================================================
+ * read()
+ */
+static pthread_ssize_t __fd_sysv_read(union fd_data fd_data, int flags,
+ void *buf, size_t nbytes, struct timespec * timeout)
+{
+ struct strbuf dataptr;
+ int fd = fd_data.i;
+ int getmsg_flags;
+ int ret;
+
+ getmsg_flags = 0;
+ dataptr.len = 0;
+ dataptr.buf = buf;
+ dataptr.maxlen = nbytes;
+
+ pthread_run->sighandled=0; /* Added by monty */
+ while ((ret = machdep_sys_getmsg(fd, NULL, &dataptr, &getmsg_flags)) < OK) {
+ if (!(fd_table[fd]->flags & __FD_NONBLOCK) &&
+ ((ret == -EWOULDBLOCK) || (ret == -EAGAIN))) {
+ pthread_sched_prevent();
+
+ /* queue pthread for a FDR_WAIT */
+ pthread_run->data.fd.fd = fd;
+ SET_PF_WAIT_EVENT(pthread_run);
+ pthread_queue_enq(&fd_wait_read, pthread_run);
+
+ if (timeout) {
+ /* get current time */
+ struct timespec current_time;
+ machdep_gettimeofday(&current_time);
+ sleep_schedule(& current_time, timeout);
+
+ pthread_resched_resume(PS_FDR_WAIT);
+
+ /* We're awake */
+ pthread_sched_prevent();
+ if (sleep_cancel(pthread_run) == NOTOK) {
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ pthread_sched_resume();
+ SET_ERRNO(ETIMEDOUT);
+ ret = -ETIMEDOUT;
+ break;
+ }
+ pthread_sched_resume();
+ } else {
+ pthread_resched_resume(PS_FDR_WAIT);
+ }
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ if (pthread_run->sighandled) /* Added by monty */
+		{ /* We were aborted */
+ SET_ERRNO(EINTR);
+ return(NOTOK);
+ }
+ } else {
+ SET_ERRNO(-ret);
+ return(NOTOK);
+ break;
+ }
+ }
+ return(dataptr.len);
+}
+
+/* ==========================================================================
+ * socket_tcp()
+ */
+static int socket_tcp(int fd)
+{
+ int ret;
+
+ if ((ret = machdep_sys_open("/dev/tcp", O_RDWR | O_NONBLOCK, 0)) >= OK) {
+ /* Should fstat the file to determine what type it is */
+ fd_table[fd]->ops = & __fd_sysv_ops;
+ fd_table[fd]->type = FD_FULL_DUPLEX;
+ fd_table[fd]->fd.i = ret;
+ fd_table[fd]->flags = 0;
+ }
+ return(ret);
+}
+
+/* ==========================================================================
+ * socket()
+ */
+int socket(int af, int type, int protocol)
+{
+ int fd, fd_kern;
+
+ if ((fd = fd_allocate()) < OK)
+ return (fd);
+
+ switch(af) {
+ case AF_INET:
+ switch(type) {
+ case SOCK_STREAM:
+ if ((fd_kern = socket_tcp(fd)) >= OK)
+ return(fd);
+ SET_ERRNO(-fd_kern);
+ break;
+ case SOCK_DGRAM:
+ if ((fd_kern = machdep_sys_open("/dev/udp",
+ O_RDWR | O_NONBLOCK, 0)) >= OK) {
+ /* Should fstat the file to determine what type it is */
+ fd_table[fd]->ops = & __fd_sysv_ops;
+ fd_table[fd]->type = FD_FULL_DUPLEX;
+ fd_table[fd]->fd.i = fd_kern;
+ fd_table[fd]->flags = 0;
+ return(fd);
+ }
+ SET_ERRNO(-fd_kern);
+ break;
+ default:
+ SET_ERRNO(EPROTONOSUPPORT);
+ break;
+ }
+ break;
+ case AF_UNIX:
+ case AF_ISO:
+ case AF_NS:
+ default:
+ SET_ERRNO(EPROTONOSUPPORT);
+ break;
+ }
+ fd_table[fd]->count = 0;
+ return(NOTOK); /* Fixed by monty */
+}
+
+#endif
+
+#if (!defined (HAVE_SYSCALL_ACCEPT)) && defined(HAVE_STREAMS)
+
+/* ==========================================================================
+ * accept_fd()
+ */
+static int accept_fd(int fd, struct sockaddr *name, int *namelen, char * buf,
+ int SEQ_number)
+{
+ struct T_conn_res * res;
+ struct strbuf strbuf;
+ int fd_new, fd_kern;
+
+ /* Get a new table entry */
+ if ((fd_new = fd_allocate()) < OK)
+ return(NOTOK);
+
+ /* Get the new kernel entry */
+ if (!((fd_kern = socket_tcp(fd_new)) < OK)) {
+ res = (struct T_conn_res *)buf;
+ res->PRIM_type = T_CONN_RES;
+ /* res->QUEUE_ptr = (queue_t *)&fd_kern; */
+ res->OPT_length = 0;
+ res->OPT_offset = 0;
+ res->SEQ_number = SEQ_number;
+
+ strbuf.maxlen = sizeof(union T_primitives) +sizeof(struct sockaddr);
+ strbuf.len = sizeof(struct T_conn_ind) + (*namelen);
+ strbuf.buf = buf;
+
+ {
+ struct strfdinsert insert;
+
+ insert.ctlbuf.maxlen = (sizeof(union T_primitives) +
+ sizeof(struct sockaddr));
+ insert.ctlbuf.len = sizeof(struct T_conn_ind);
+ insert.ctlbuf.buf = buf;
+ insert.databuf.maxlen = 0;
+ insert.databuf.len = 0;
+ insert.databuf.buf = NULL;
+ insert.flags = 0;
+ insert.fildes = fd_kern;
+ insert.offset = 4;
+ /* Should the following be checked ? */
+ machdep_sys_ioctl(fd_table[fd]->fd.i, I_FDINSERT, &insert);
+ }
+
+		/*
+		 * if (putmsg_timedwait_basic(fd, &strbuf, NULL, 0, NULL) == OK)
+		 *	return(fd_new);
+		 */
+ {
+ int flags = 0;
+ int ret;
+
+ /* Should the following be checked ? */
+ ret = getmsg_timedwait_basic(fd, &strbuf, NULL, &flags, NULL);
+ return(fd_new);
+
+ }
+ machdep_sys_close(fd_kern);
+ }
+ fd_table[fd_new]->count = 0;
+ return(NOTOK);
+}
+
+
+/* ==========================================================================
+ * accept()
+ */
+int accept(int fd, struct sockaddr *name, int *namelen)
+{
+ char buf[sizeof(union T_primitives) + sizeof(struct sockaddr)];
+ struct T_conn_ind * ind;
+ struct strbuf strbuf;
+ int flags, ret;
+
+ if ((ret = fd_lock(fd, FD_RDWR, NULL)) == OK)
+ {
+ ind = (struct T_conn_ind *)buf;
+ ind->PRIM_type = T_CONN_IND;
+ ind->SRC_length = (*namelen);
+ ind->SRC_offset = sizeof(struct T_conn_ind);
+ ind->OPT_length = 0;
+ ind->OPT_offset = 0;
+ ind->SEQ_number = 0;
+
+ strbuf.maxlen = sizeof(union T_primitives) + sizeof(struct sockaddr);
+ strbuf.len = sizeof(struct T_conn_ind) + (*namelen);
+ strbuf.buf = buf;
+ flags = 0;
+
+ if ((ret=getmsg_timedwait_basic(fd, &strbuf, NULL, &flags, NULL)) < OK)
+ {
+ SET_ERRNO(-ret);
+ ret= NOTOK;
+ }
+ else
+ ret = accept_fd(fd, name, namelen, buf, ind->SEQ_number);
+ fd_unlock(fd, FD_RDWR);
+ }
+ return(ret);
+}
+
+#endif /* HAVE_SYSCALL_ACCEPT */
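+
+/* --------------------------------------------------------------------------
+ * Illustrative usage sketch (not part of the imported sources): the stream
+ * emulation above is meant to let the usual BSD-style server sequence work
+ * unchanged on SYSV hosts.
+ */
+#if 0
+static int tcp_server_once(struct sockaddr *addr, int addrlen)
+{
+	struct sockaddr peer;
+	int peerlen = sizeof(peer);
+	int s, c;
+
+	if ((s = socket(AF_INET, SOCK_STREAM, 0)) < 0)
+		return (NOTOK);
+	if (bind(s, addr, addrlen) < 0 || listen(s, 5) < 0)
+		return (NOTOK);
+	c = accept(s, &peer, &peerlen);	/* driven by getmsg/putmsg underneath */
+	return (c);
+}
+#endif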
+
+#if (!defined (HAVE_SYSCALL_SENDTO)) && defined (HAVE_STREAMS)
+
+/* ==========================================================================
+ * sendto_timedwait()
+ */
+ssize_t sendto_timedwait(int fd, const void * msg, size_t len, int flags,
+ const struct sockaddr *name, int namelen, struct timespec * timeout)
+{
+ char buf[STREAM_BUF_SIZE];
+ struct T_unitdata_req * req;
+ struct strbuf dataptr;
+ struct strbuf ctlptr;
+ ssize_t ret, prio;
+
+ req = (struct T_unitdata_req *)buf;
+ req->PRIM_type = T_UNITDATA_REQ;
+ req->DEST_length = namelen;
+ req->DEST_offset = sizeof(struct T_unitdata_req);
+ req->OPT_length = 0;
+ req->OPT_offset = 0;
+ memcpy(buf + sizeof(struct T_unitdata_req), name, namelen);
+
+ ctlptr.len = sizeof(struct T_unitdata_req) + namelen;
+ ctlptr.maxlen = STREAM_BUF_SIZE;
+ ctlptr.buf = buf;
+
+ dataptr.len = len;
+ dataptr.maxlen = len;
+ dataptr.buf = (void *)msg;
+
+ if ((ret = putmsg_timedwait(fd, &ctlptr, &dataptr, 0, timeout)) == OK) {
+ ret = len;
+ }
+ return(ret);
+}
+
+/* ==========================================================================
+ * sendto()
+ */
+ssize_t sendto(int fd, const void * msg, size_t len, int flags,
+ const struct sockaddr *to, int to_len)
+{
+ return(sendto_timedwait(fd, msg, len, flags, to, to_len, NULL));
+}
+
+#endif
+
+#if (!defined (HAVE_SYSCALL_SEND)) && defined (HAVE_STREAMS)
+
+/* ==========================================================================
+ * send_timedwait()
+ */
+ssize_t send_timedwait(int fd, const void * msg, size_t len, int flags,
+ struct timespec * timeout)
+{
+ char buf[STREAM_BUF_SIZE];
+ struct T_unitdata_req * req;
+ struct strbuf dataptr;
+ struct strbuf ctlptr;
+ ssize_t ret, prio;
+
+ req = (struct T_unitdata_req *)buf;
+ req->PRIM_type = T_UNITDATA_REQ;
+ req->DEST_length = 0;
+ req->DEST_offset = 0;
+ req->OPT_length = 0;
+ req->OPT_offset = 0;
+
+ ctlptr.len = sizeof(struct T_unitdata_req);
+ ctlptr.maxlen = STREAM_BUF_SIZE;
+ ctlptr.buf = buf;
+
+ dataptr.len = len;
+ dataptr.maxlen = len;
+ dataptr.buf = (void *)msg;
+
+ if ((ret = putmsg_timedwait(fd, &ctlptr, &dataptr, 0, timeout)) == OK) {
+ ret = len;
+ }
+ return(ret);
+}
+
+/* ==========================================================================
+ * send()
+ */
+ssize_t send(int fd, const void * msg, size_t len, int flags)
+{
+ return(send_timedwait(fd, msg, len, flags, NULL));
+}
+
+#endif
+
+#if (!defined (HAVE_SYSCALL_RECVFROM)) && defined(HAVE_STREAMS)
+
+/* ==========================================================================
+ * recvfrom_timedwait()
+ */
+ssize_t recvfrom_timedwait(int fd, void * msg, size_t len, int flags,
+ struct sockaddr * name, int * namelen, struct timespec * timeout)
+{
+ char buf[STREAM_BUF_SIZE];
+ struct T_unitdata_ind * ind;
+ struct strbuf dataptr;
+ struct strbuf ctlptr;
+ int ret, prio;
+
+ ctlptr.len = 0;
+ ctlptr.maxlen = STREAM_BUF_SIZE;
+ ctlptr.buf = buf;
+
+ dataptr.maxlen = len;
+ dataptr.len = 0;
+ dataptr.buf = msg;
+
+ prio = 0;
+
+ ret = getmsg_timedwait(fd, &ctlptr, &dataptr, &prio, timeout);
+ if (ret >= OK) {
+ if (name != NULL) {
+ ind = (struct T_unitdata_ind *)buf;
+
+ if (*namelen > ind->SRC_length)
+ *namelen = ind->SRC_length;
+ memcpy(name, buf + ind->SRC_offset, *namelen);
+ }
+ ret = dataptr.len;
+ }
+
+ return(ret);
+}
+
+/* ==========================================================================
+ * recvfrom()
+ */
+ssize_t recvfrom(int fd, void * buf, size_t len, int flags,
+ struct sockaddr * from, int * from_len)
+{
+ return(recvfrom_timedwait(fd, buf, len, flags, from, from_len, NULL));
+}
+
+#endif
+
+#if (!defined (HAVE_SYSCALL_RECV)) && defined(HAVE_STREAMS)
+
+/* ==========================================================================
+ * recv_timedwait()
+ */
+ssize_t recv_timedwait(int fd, void * msg, size_t len, int flags,
+ struct timespec * timeout)
+{
+ char buf[STREAM_BUF_SIZE];
+ struct T_unitdata_ind * ind;
+ struct strbuf dataptr;
+ struct strbuf ctlptr;
+ int ret, prio;
+
+ ctlptr.len = 0;
+ ctlptr.maxlen = STREAM_BUF_SIZE;
+ ctlptr.buf = buf;
+
+ dataptr.maxlen = len;
+ dataptr.len = 0;
+ dataptr.buf = msg;
+
+ prio = 0;
+
+ ret = getmsg_timedwait(fd, &ctlptr, &dataptr, &prio, timeout);
+ if (ret >= OK)
+ ret = dataptr.len;
+
+ return(ret);
+}
+
+/* ==========================================================================
+ * recv()
+ */
+ssize_t recv(int fd, void * buf, size_t len, int flags)
+{
+ return(recv_timedwait(fd, buf, len, flags, NULL));
+}
+
+#endif
+
+#if (!defined (HAVE_SYSCALL_SETSOCKOPT)) && defined(HAVE_STREAMS)
+/* ==========================================================================
+ * setsockopt()
+ */
+int setsockopt(int s, int level, int optname, const void *optval, int optlen)
+{
+ return(0);
+}
+#endif
+
+struct foo { /* Used by getsockname and getpeername */
+ long a;
+ int b;
+ struct sockaddr *name;
+};
+
+#if (!defined (HAVE_SYSCALL_GETSOCKNAME)) && defined(HAVE_STREAMS)
+/* ==========================================================================
+ * getsockname()
+ */
+
+
+int getsockname(int s, struct sockaddr *name, int *namelen)
+{
+ struct foo foo;
+ int i;
+ if (*namelen < sizeof(struct sockaddr)) {
+ SET_ERRNO(ENOMEM);
+ return(-1);
+ }
+ foo.a = 0x84;
+ foo.b = 0;
+ foo.name = name;
+ i = ioctl(s, TI_GETMYNAME, &foo);
+ *namelen = foo.b;
+ return(i);
+}
+#endif
+
+#if (!defined (HAVE_SYSCALL_GETPEERNAME)) && defined(HAVE_STREAMS)
+/* ==========================================================================
+ * getpeername() ; Added by Monty
+ */
+
+int getpeername(int s, struct sockaddr *name, int *namelen)
+{
+ struct foo foo;
+ int i;
+ if (*namelen < sizeof(struct sockaddr)) {
+ SET_ERRNO(ENOMEM);
+ return(-1);
+ }
+ foo.a = 0x84; /* Max length ? */
+ foo.b = 0; /* Return length */
+ foo.name = name; /* Return buffer */
+ i = ioctl(s, TI_GETPEERNAME, &foo);
+ *namelen = foo.b;
+ return(i);
+}
+#endif
+
+
+#if (!defined (HAVE_SYSCALL_SHUTDOWN)) && defined(HAVE_STREAMS)
+/* ==========================================================================
+ * shutdown()
+ */
+
+int shutdown(int s, int how)
+{
+ return(0);
+}
+#endif
diff --git a/mit-pthreads/pthreads/file.c b/mit-pthreads/pthreads/file.c
new file mode 100644
index 00000000000..4b8a8aad6db
--- /dev/null
+++ b/mit-pthreads/pthreads/file.c
@@ -0,0 +1,129 @@
+/* ==== file.c ============================================================
+ * Copyright (c) 1993, 1994 by Chris Provenzano, proven@mit.edu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Chris Provenzano.
+ * 4. The name of Chris Provenzano may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CHRIS PROVENZANO ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL CHRIS PROVENZANO BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Description : The locking functions for stdio.
+ *
+ * 1.00 93/09/04 proven
+ * -Started coding this file.
+ */
+
+#ifndef lint
+static const char rcsid[] = "$Id$";
+#endif
+
+#include <pthread.h>
+#include <stdio.h>
+
+/* ==========================================================================
+ * flockfile()
+ */
+void flockfile(FILE *fp)
+{
+ pthread_mutex_t *mutex;
+ int fd, flags;
+
+ if ((fd = fileno(fp)) >= 0) {
+ pthread_mutex_lock(mutex = &(fd_table[fd]->mutex));
+
+ if (fp->_flags & __SRW) {
+ flags = FD_READ | FD_WRITE;
+ } else {
+ if (fp->_flags & __SWR) {
+ flags = FD_WRITE;
+ } else {
+ flags = FD_READ;
+ }
+ }
+
+ /* This might fail but POSIX doesn't give a damn. */
+ fd_basic_lock(fd, flags, mutex, NULL);
+ pthread_mutex_unlock(mutex);
+ }
+}
+
+/* ==========================================================================
+ * ftrylockfile()
+ */
+int ftrylockfile(FILE *fp)
+{
+ pthread_mutex_t *mutex;
+ int fd, flags;
+
+ if ((fd = fileno(fp)) >= 0) {
+ pthread_mutex_lock(mutex = &(fd_table[fd]->mutex));
+
+ if (fp->_flags & __SRW) {
+ flags = FD_READ | FD_WRITE;
+ } else {
+ if (fp->_flags & __SWR) {
+ flags = FD_WRITE;
+ } else {
+ flags = FD_READ;
+ }
+ }
+ if (!(fd_table[fd]->r_owner && fd_table[fd]->w_owner)) {
+ fd_basic_lock(fd, flags, mutex, NULL);
+ fd = OK;
+ } else {
+ fd = NOTOK;
+ }
+ pthread_mutex_unlock(mutex);
+ } else {
+ fd = OK;
+ }
+ return(fd);
+}
+
+/* ==========================================================================
+ * funlockfile()
+ */
+void funlockfile(FILE *fp)
+{
+ pthread_mutex_t *mutex;
+ int fd, flags;
+
+ if ((fd = fileno(fp)) >= 0) {
+ pthread_mutex_lock(mutex = &(fd_table[fd]->mutex));
+
+ if (fp->_flags & __SRW) {
+ flags = FD_READ | FD_WRITE;
+ } else {
+ if (fp->_flags & __SWR) {
+ flags = FD_WRITE;
+ } else {
+ flags = FD_READ;
+ }
+ }
+ fd_basic_unlock(fd, flags);
+ pthread_mutex_unlock(mutex);
+ }
+}
+
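+/* --------------------------------------------------------------------------
+ * Illustrative usage sketch (not part of the imported sources): the intent
+ * of flockfile()/funlockfile() is to let a thread emit several stdio calls
+ * as one atomic unit with respect to other threads using the same FILE.
+ */
+#if 0
+static void report(FILE *fp, int code, const char *what)
+{
+	flockfile(fp);			/* takes FD_READ and/or FD_WRITE as needed */
+	fprintf(fp, "error %d: ", code);
+	fprintf(fp, "%s\n", what);	/* no other thread can interleave here */
+	funlockfile(fp);
+}
+#endif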
diff --git a/mit-pthreads/pthreads/globals.c b/mit-pthreads/pthreads/globals.c
new file mode 100644
index 00000000000..921588fb220
--- /dev/null
+++ b/mit-pthreads/pthreads/globals.c
@@ -0,0 +1,85 @@
+/* ==== globals.c ============================================================
+ * Copyright (c) 1993, 1994 by Chris Provenzano, proven@mit.edu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Chris Provenzano.
+ * 4. The name of Chris Provenzano may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CHRIS PROVENZANO ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL CHRIS PROVENZANO BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Description : Global variables.
+ *
+ * 1.00 93/07/26 proven
+ * -Started coding this file.
+ */
+
+#ifndef lint
+static const char rcsid[] = "$Id$";
+#endif
+
+#include <pthread.h>
+
+/*
+ * Initial thread, running thread, and top of link list of all threads.
+ */
+struct pthread *pthread_run=NULL;
+struct pthread *pthread_initial=NULL;
+struct pthread *pthread_link_list=NULL;
+
+sigset_t * uthread_sigmask; /* Current process signal mask */
+
+/*
+ * Dead thread queue, and queue of threads eligible to be allocated.
+ */
+struct pthread_queue pthread_dead_queue;
+struct pthread_queue pthread_alloc_queue;
+
+/*
+ * Queue of all threads eligible to run this scheduling round.
+ */
+struct pthread_prio_queue * pthread_current_prio_queue=NULL;
+
+/*
+ * default thread attributes
+ */
+pthread_attr_t pthread_attr_default = { SCHED_RR, PTHREAD_DEFAULT_PRIORITY,
+ PTHREAD_CREATE_JOINABLE, NULL, NULL, NULL, PTHREAD_STACK_DEFAULT };
+
+/*
+ * File table information
+ */
+struct fd_table_entry **fd_table=NULL;
+
+/*
+ * Are we a fork()ed process?
+ */
+volatile int fork_lock = 0;
+volatile int pthread_kernel_lock=0;
+
+/*
+ * The page size, as returned by getpagesize()
+ */
+size_t pthread_pagesize=0;
+
diff --git a/mit-pthreads/pthreads/info.c b/mit-pthreads/pthreads/info.c
new file mode 100644
index 00000000000..2b9722ba291
--- /dev/null
+++ b/mit-pthreads/pthreads/info.c
@@ -0,0 +1,77 @@
+/* hello */
+
+#include <stdio.h>
+#include <pthread.h>
+#include <signal.h>
+
+static const char *const state_names[] = {
+#define __pthread_defstate(S,NAME) NAME,
+#include "pthread/state.def"
+#undef __pthread_defstate
+ 0
+};
+
+void (*dump_thread_info_fn) (struct pthread *, FILE *);
+
+static void
+dump_thread_info (thread, file)
+ struct pthread *thread;
+ FILE *file;
+{
+ /* machdep */
+ /* attr */
+ /* signals */
+ /* wakeup_time */
+ /* join */
+ fprintf (file, " thread @%p prio %3d %s", thread,
+ thread->pthread_priority, state_names[(int) thread->state]);
+ switch (thread->state) {
+ case PS_FDLR_WAIT:
+ fprintf (file, " fd %d[%d]", thread->data.fd.fd,
+ thread->data.fd.branch);
+ fprintf (file, " owner %pr/%pw",
+ fd_table[thread->data.fd.fd]->r_owner,
+ fd_table[thread->data.fd.fd]->w_owner);
+ break;
+ }
+ /* show where the signal handler gets run */
+ if (thread == pthread_run)
+ fprintf (file, "\t\t[ME!]");
+ fprintf (file, "\n");
+ if (dump_thread_info_fn)
+ (*dump_thread_info_fn) (thread, file);
+}
+
+static void
+pthread_dump_info_to_file (file)
+ FILE *file;
+{
+ pthread_t t;
+ for (t = pthread_link_list; t; t = t->pll)
+ dump_thread_info (t, file);
+}
+
+void
+pthread_dump_info ()
+{
+ if (ftrylockfile (stderr) != 0)
+ return;
+ fprintf (stderr, "process id %ld:\n", (long) getpid ());
+ pthread_dump_info_to_file (stderr);
+ funlockfile (stderr);
+}
+
+#ifdef SIGINFO
+static void
+sig_handler (sig)
+ int sig;
+{
+ pthread_dump_info ();
+}
+
+void
+pthread_setup_siginfo ()
+{
+ (void) signal (SIGINFO, sig_handler);
+}
+#endif
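+
+/* --------------------------------------------------------------------------
+ * Illustrative usage sketch (not part of the imported sources): on systems
+ * that define SIGINFO (sent with ctrl-T on the BSDs), a program can install
+ * the dump handler once at startup; pthread_dump_info() may also be called
+ * directly, e.g. from a debugger.
+ */
+#if 0
+int main(void)
+{
+#ifdef SIGINFO
+	pthread_setup_siginfo();	/* SIGINFO now dumps the state of all threads */
+#endif
+	/* ... application code ... */
+	return 0;
+}
+#endif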
diff --git a/mit-pthreads/pthreads/init.cc b/mit-pthreads/pthreads/init.cc
new file mode 100644
index 00000000000..24a131a60a5
--- /dev/null
+++ b/mit-pthreads/pthreads/init.cc
@@ -0,0 +1,9 @@
+
+/*
+ * Do not delete this file. The hack here ensures that pthread_init() gets
+ * called before main() does. This doesn't fix everything; it is still
+ * possible for a C++ module to rely on constructors that need pthreads.
+ */
+#include <pthread.h>
+
+char __pthread_init_hack = 42;
diff --git a/mit-pthreads/pthreads/malloc.c b/mit-pthreads/pthreads/malloc.c
new file mode 100644
index 00000000000..76fe03824ac
--- /dev/null
+++ b/mit-pthreads/pthreads/malloc.c
@@ -0,0 +1,383 @@
+/* ==== malloc.c ============================================================
+ * Copyright (c) 1983 Regents of the University of California.
+ * Copyright (c) 1993, 1994 by Chris Provenzano, proven@mit.edu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Description : Malloc functions.
+ * This is a very fast storage allocator. It allocates blocks of a small
+ * number of different sizes, and keeps free lists of each size. Blocks that
+ * don't exactly fit are passed up to the next larger size. In this
+ * implementation, the available sizes are 2^n-4 (or 2^n-10) bytes long.
+ * This is designed for use in a virtual memory environment.
+ *
+ * 0.00 82/02/21 Chris Kingsley kingsley@cit-20
+ *
+ * 1.00 93/11/06 proven
+ * -Modified BSD libc malloc to be threadsafe.
+ *
+ */
+
+#ifndef lint
+static const char rcsid[] = "$Id$";
+#endif
+
+#include <pthread.h>
+#include <sys/types.h>
+#include <string.h>
+#include <pthread/posix.h>
+
+/*
+ * The overhead on a block is at least 4 bytes. When free, this space
+ * contains a pointer to the next free block, and the bottom two bits must
+ * be zero. When in use, the first byte is set to MAGIC, and the second
+ * byte is the size index. The remaining bytes are for alignment.
+ * If range checking is enabled then a second word holds the size of the
+ * requested block, less 1, rounded up to a multiple of sizeof(RMAGIC).
+ * The order of elements is critical: ov_magic must overlay the low order
+ * bits of ov_next, and ov_magic can not be a valid ov_next bit pattern.
+ */
+#ifdef __alpha
+#define _MOST_RESTRICTIVE_ALIGNMENT_TYPE char*
+#else
+#define _MOST_RESTRICTIVE_ALIGNMENT_TYPE double
+#endif /* __alpha */
+union overhead {
+ _MOST_RESTRICTIVE_ALIGNMENT_TYPE __alignment_pad0;
+ union overhead *ov_next; /* when free */
+ struct {
+ u_char ovu_magic; /* magic number */
+ u_char ovu_index; /* bucket # */
+#ifdef RCHECK
+ u_short ovu_rmagic; /* range magic number */
+ size_t ovu_size; /* actual block size */
+#endif
+ } ovu;
+#define ov_magic ovu.ovu_magic
+#define ov_index ovu.ovu_index
+#define ov_rmagic ovu.ovu_rmagic
+#define ov_size ovu.ovu_size
+};
+
+#define MAGIC 0xef /* magic # on accounting info */
+#define RMAGIC 0x5555 /* magic # on range info */
+
+#ifdef RCHECK
+#define RSLOP sizeof (u_short)
+#else
+#define RSLOP 0
+#endif
+
+/*
+ * nextf[i] is the pointer to the next free block of size 2^(i+3). The
+ * smallest allocatable block is 8 bytes. The overhead information
+ * precedes the data area returned to the user.
+ */
+#define NBUCKETS 30
+static union overhead *nextf[NBUCKETS];
+#ifndef hpux
+extern char *sbrk();
+#endif
+
+static size_t pagesz; /* page size */
+static int pagebucket; /* page size bucket */
+static pthread_mutex_t malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+#if defined(DEBUG) || defined(RCHECK)
+#define ASSERT(p) if (!(p)) botch("p")
+#include <stdio.h>
+static
+botch(s)
+ char *s;
+{
+ fprintf(stderr, "\r\nassertion botched: %s\r\n", s);
+ (void) fflush(stderr); /* just in case user buffered it */
+ abort();
+}
+#else
+#define ASSERT(p)
+#endif
+
+/* ==========================================================================
+ * morecore()
+ *
+ * Allocate more memory to the indicated bucket
+ */
+static inline void morecore(int bucket)
+{
+ register union overhead *op;
+ register size_t sz; /* size of desired block */
+ size_t amt; /* amount to allocate */
+ size_t nblks; /* how many blocks we get */
+
+ /*
+ * sbrk_size <= 0 only for big, FLUFFY, requests (about
+ * 2^30 bytes on a VAX, I think) or for a negative arg.
+ */
+ sz = 1L << (bucket + 3);
+#ifdef DEBUG
+ ASSERT(sz > 0);
+#else
+ if (sz <= 0)
+ return;
+#endif
+ if (sz < pagesz) {
+ amt = pagesz;
+ nblks = amt / sz;
+ } else {
+ amt = sz + pagesz;
+ nblks = 1;
+ }
+ op = (union overhead *)sbrk(amt);
+ /* no more room! */
+ if (op == (union overhead *) -1)
+ return;
+ /*
+ * Add new memory allocated to that on
+ * free list for this hash bucket.
+ */
+ nextf[bucket] = op;
+ while (--nblks > 0) {
+ op->ov_next = (union overhead *)((caddr_t)op + sz);
+ op = (union overhead *)((caddr_t)op + sz);
+ }
+}
+
+/* ==========================================================================
+ * malloc()
+ */
+void *malloc(size_t nbytes)
+{
+ pthread_mutex_t *mutex;
+ union overhead *op;
+ size_t amt;
+ size_t bucket, n;
+
+ mutex = &malloc_mutex;
+ pthread_mutex_lock(mutex);
+ /*
+ * First time malloc is called, setup page size and
+ * align break pointer so all data will be page aligned.
+ */
+ if (pagesz == 0) {
+ size_t x;
+ pagesz = n = getpagesize();
+ op = (union overhead *)sbrk(0);
+ x = sizeof (*op) - ((long)op & (n - 1));
+ if (n < x)
+ n = n + pagesz - x;
+ else
+ n = n - x;
+ if (n) {
+ if (sbrk(n) == (char *)-1) {
+ /* Unlock before returning (mevans) */
+ pthread_mutex_unlock(mutex);
+ return (NULL);
+ }
+ }
+ bucket = 0;
+ amt = 8;
+ while (pagesz > amt) {
+ amt <<= 1;
+ bucket++;
+ }
+ pagebucket = bucket;
+ }
+ /*
+ * Convert amount of memory requested into closest block size
+ * stored in hash buckets which satisfies request.
+ * Account for space used per block for accounting.
+ */
+ if (nbytes <= (n = pagesz - sizeof (*op) - RSLOP)) {
+#ifndef RCHECK
+ amt = 8; /* size of first bucket */
+ bucket = 0;
+#else
+ amt = 16; /* size of first bucket */
+ bucket = 1;
+#endif
+ n = -(sizeof (*op) + RSLOP);
+ } else {
+ amt = pagesz;
+ bucket = pagebucket;
+ }
+ while (nbytes > amt + n) {
+ amt <<= 1;
+ if (amt == 0) {
+ pthread_mutex_unlock(mutex);
+ return (NULL);
+ }
+ bucket++;
+ }
+ ASSERT (bucket < NBUCKETS);
+ /*
+ * If nothing in hash bucket right now,
+ * request more memory from the system.
+ */
+ if ((op = nextf[bucket]) == NULL) {
+ morecore(bucket);
+ if ((op = nextf[bucket]) == NULL) {
+ pthread_mutex_unlock(mutex);
+ return (NULL);
+ }
+ }
+ /* remove from linked list */
+ nextf[bucket] = op->ov_next;
+ op->ov_magic = MAGIC;
+ op->ov_index = bucket;
+#ifdef RCHECK
+ /*
+ * Record allocated size of block and
+ * bound space with magic numbers.
+ */
+ op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1);
+ op->ov_rmagic = RMAGIC;
+ *(u_short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC;
+#endif
+ pthread_mutex_unlock(mutex);
+ return ((char *)(op + 1));
+}
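+
+/* --------------------------------------------------------------------------
+ * Illustrative sketch (not part of the imported sources): the bucket that
+ * malloc() above picks for a small request is the smallest power of two
+ * 2^(bucket+3) that still fits the payload plus the overhead header.  The
+ * helper below is hypothetical and ignores the page-size fast path.
+ */
+#if 0
+static int bucket_for(size_t nbytes)
+{
+	size_t amt = 8;		/* size of bucket 0 */
+	int bucket = 0;
+
+	while (nbytes + sizeof(union overhead) + RSLOP > amt) {
+		amt <<= 1;
+		bucket++;
+	}
+	/* e.g. with an 8-byte header, 100 bytes -> bucket 4 (128-byte blocks) */
+	return (bucket);
+}
+#endif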
+
+/* ==========================================================================
+ * free()
+ */
+void free(void *cp)
+{
+ pthread_mutex_t *mutex;
+ union overhead *op;
+ int size;
+
+ mutex = &malloc_mutex;
+ pthread_mutex_lock(mutex);
+ if (cp == NULL) {
+ pthread_mutex_unlock(mutex);
+ return;
+ }
+ op = (union overhead *)((caddr_t)cp - sizeof (union overhead));
+#ifdef DEBUG
+ ASSERT(op->ov_magic == MAGIC); /* make sure it was in use */
+#else
+ if (op->ov_magic != MAGIC) {
+ pthread_mutex_unlock(mutex);
+ return; /* sanity */
+ }
+#endif
+#ifdef RCHECK
+ ASSERT(op->ov_rmagic == RMAGIC);
+ ASSERT(*(u_short *)((caddr_t)(op + 1) + op->ov_size) == RMAGIC);
+#endif
+ size = op->ov_index;
+ ASSERT(size < NBUCKETS);
+ op->ov_next = nextf[size]; /* also clobbers ov_magic */
+ nextf[size] = op;
+
+ pthread_mutex_unlock(mutex);
+}
+
+/* ==========================================================================
+ * realloc()
+ *
+ * Storage compaction is no longer supported, fix program and try again.
+ */
+void *realloc(void *cp, size_t nbytes)
+{
+ pthread_mutex_t *mutex;
+ size_t onb;
+ size_t i;
+ union overhead *op;
+ char *res;
+
+ if (cp == NULL)
+ return (malloc(nbytes));
+ op = (union overhead *)((caddr_t)cp - sizeof (union overhead));
+
+ if (op->ov_magic == MAGIC) {
+ i = op->ov_index;
+ } else {
+ /*
+	 * This will cause old programs using the storage compaction feature of
+	 * realloc to break in a pseudo-reasonable way that is easy to debug.
+	 * Returning a malloced buffer without the copy may cause
+	 * nondeterministic behavior.
+ */
+ return(NULL);
+ }
+
+ mutex = &malloc_mutex;
+ pthread_mutex_lock(mutex);
+ onb = 1L << (i + 3);
+ if (onb < pagesz)
+ onb -= sizeof (*op) + RSLOP;
+ else
+ onb += pagesz - sizeof (*op) - RSLOP;
+
+ /* avoid the copy if same size block */
+ if (i) {
+ i = 1L << (i + 2);
+ if (i < pagesz)
+ i -= sizeof (*op) + RSLOP;
+ else
+ i += pagesz - sizeof (*op) - RSLOP;
+ }
+
+ if (nbytes <= onb && nbytes > i) {
+#ifdef RCHECK
+ op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1);
+ *(u_short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC;
+#endif
+ pthread_mutex_unlock(mutex);
+ return(cp);
+ }
+ pthread_mutex_unlock(mutex);
+
+ if ((res = malloc(nbytes)) == NULL) {
+ free(cp);
+ return (NULL);
+ }
+
+ memcpy(res, cp, (nbytes < onb) ? nbytes : onb);
+ free(cp);
+
+ return (res);
+}
+
+/* ==========================================================================
+ * calloc()
+ *
+ * Added to ensure pthread's allocation is used (mevans).
+ */
+void *calloc(size_t nmemb, size_t size)
+{
+	void *p;
+
+	/* Guard against overflow in nmemb * size */
+	if (nmemb != 0 && size > ((size_t)-1) / nmemb)
+		return (NULL);
+	size *= nmemb;
+	p = malloc(size);
+	if (p) memset(p, 0, size);
+	return (p);
+}
diff --git a/mit-pthreads/pthreads/mutex.c b/mit-pthreads/pthreads/mutex.c
new file mode 100644
index 00000000000..1a2ca6fa1c1
--- /dev/null
+++ b/mit-pthreads/pthreads/mutex.c
@@ -0,0 +1,371 @@
+/* ==== mutex.c ==============================================================
+ * Copyright (c) 1993, 1994 by Chris Provenzano, proven@mit.edu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Chris Provenzano.
+ * 4. The name of Chris Provenzano may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CHRIS PROVENZANO ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL CHRIS PROVENZANO BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Description : Mutex functions.
+ *
+ * 1.00 93/07/19 proven
+ * -Started coding this file.
+ */
+
+#ifndef lint
+static const char rcsid[] = "$Id$";
+#endif
+
+#include <pthread.h>
+#include <stdlib.h>
+#include <errno.h>
+
+/* ==========================================================================
+ * pthread_mutex_is_debug()
+ *
+ * Check that mutex is a debug mutex and if so returns entry number into
+ * array of debug mutexes.
+ */
+static int pthread_mutex_debug_count = 0;
+static pthread_mutex_t ** pthread_mutex_debug_ptrs = NULL;
+
+static inline int pthread_mutex_is_debug(pthread_mutex_t * mutex)
+{
+ int i;
+
+ for (i = 0; i < pthread_mutex_debug_count; i++) {
+ if (pthread_mutex_debug_ptrs[i] == mutex) {
+ return(i);
+ }
+ }
+ return(NOTOK);
+}
+
+/* ==========================================================================
+ * pthread_mutex_init()
+ *
+ * In this implementation I don't need to allocate memory, so
+ * ENOMEM and EAGAIN should never be returned. Architectures that have
+ * weird constraints may need special coding.
+ */
+int pthread_mutex_init(pthread_mutex_t *mutex,
+ const pthread_mutexattr_t *mutex_attr)
+{
+ enum pthread_mutextype type;
+
+ /* Only check if attr specifies some mutex type other than fast */
+ if ((mutex_attr) && (mutex_attr->m_type != MUTEX_TYPE_FAST)) {
+ if (mutex_attr->m_type >= MUTEX_TYPE_MAX) {
+ return(EINVAL);
+ }
+ type = mutex_attr->m_type;
+ } else {
+ type = MUTEX_TYPE_FAST;
+ }
+ mutex->m_flags = 0;
+
+ pthread_sched_prevent();
+
+ switch(type) {
+ case MUTEX_TYPE_FAST:
+ break;
+ case MUTEX_TYPE_STATIC_FAST:
+ pthread_sched_resume();
+ return(EINVAL);
+ break;
+ case MUTEX_TYPE_COUNTING_FAST:
+ mutex->m_data.m_count = 0;
+ break;
+ case MUTEX_TYPE_DEBUG:
+ if (pthread_mutex_is_debug(mutex) == NOTOK) {
+ pthread_mutex_t ** new;
+
+ if ((new = (pthread_mutex_t **)realloc(pthread_mutex_debug_ptrs,
+ (pthread_mutex_debug_count + 1) * (sizeof(void *)))) == NULL) {
+ pthread_sched_resume();
+ return(ENOMEM);
+ }
+ pthread_mutex_debug_ptrs = new;
+ pthread_mutex_debug_ptrs[pthread_mutex_debug_count++] = mutex;
+ } else {
+ pthread_sched_resume();
+ return(EBUSY);
+ }
+ break;
+ default:
+ pthread_sched_resume();
+ return(EINVAL);
+ break;
+ }
+	/* Set all other parameters */
+ pthread_queue_init(&mutex->m_queue);
+ mutex->m_flags |= MUTEX_FLAGS_INITED;
+ mutex->m_owner = NULL;
+ mutex->m_type = type;
+
+ pthread_sched_resume();
+ return(OK);
+}
+
+/* ==========================================================================
+ * pthread_mutex_destroy()
+ */
+int pthread_mutex_destroy(pthread_mutex_t *mutex)
+{
+ int i;
+
+ pthread_sched_prevent();
+
+ /* Only check if mutex is of type other than fast */
+ switch(mutex->m_type) {
+ case MUTEX_TYPE_FAST:
+ break;
+ case MUTEX_TYPE_STATIC_FAST:
+ pthread_sched_resume();
+ return(EINVAL);
+ break;
+ case MUTEX_TYPE_COUNTING_FAST:
+ mutex->m_data.m_count = 0;
+ break;
+ case MUTEX_TYPE_DEBUG:
+ if ((i = pthread_mutex_is_debug(mutex)) == NOTOK) {
+ pthread_sched_resume();
+ return(EINVAL);
+ }
+ if (mutex->m_owner) {
+ pthread_sched_resume();
+ return(EBUSY);
+ }
+
+ /* Remove the mutex from the list of debug mutexes */
+ pthread_mutex_debug_ptrs[i] =
+ pthread_mutex_debug_ptrs[--pthread_mutex_debug_count];
+ pthread_mutex_debug_ptrs[pthread_mutex_debug_count] = NULL;
+ break;
+ default:
+ pthread_sched_resume();
+ return(EINVAL);
+ break;
+ }
+
+ /* Cleanup mutex, others might want to use it. */
+ pthread_queue_init(&mutex->m_queue);
+ mutex->m_owner = NULL;
+ mutex->m_flags = 0;
+
+ pthread_sched_resume();
+ return(OK);
+}
+
+/* ==========================================================================
+ * pthread_mutex_trylock()
+ */
+int pthread_mutex_trylock(pthread_mutex_t *mutex)
+{
+ int rval;
+
+ pthread_sched_prevent();
+ switch (mutex->m_type) {
+ /*
+ * Fast mutexes do not check for any error conditions.
+ */
+ case MUTEX_TYPE_FAST:
+ case MUTEX_TYPE_STATIC_FAST:
+ if (!mutex->m_owner) {
+ mutex->m_owner = pthread_run;
+ rval = OK;
+ } else {
+ rval = EBUSY;
+ }
+ break;
+ case MUTEX_TYPE_COUNTING_FAST:
+ if (mutex->m_owner) {
+ if (mutex->m_owner == pthread_run) {
+ mutex->m_data.m_count++;
+ rval = OK;
+ } else {
+ rval = EBUSY;
+ }
+ } else {
+ mutex->m_owner = pthread_run;
+ rval = OK;
+ }
+ break;
+ case MUTEX_TYPE_DEBUG:
+ if (pthread_mutex_is_debug(mutex) != NOTOK) {
+ if (!mutex->m_owner) {
+ mutex->m_owner = pthread_run;
+ rval = OK;
+ } else {
+ rval = EBUSY;
+ }
+ } else {
+ rval = EINVAL;
+ }
+ break;
+ default:
+ rval = EINVAL;
+ break;
+ }
+
+ pthread_sched_resume();
+ return(rval);
+}
+
+/* ==========================================================================
+ * pthread_mutex_lock()
+ */
+int pthread_mutex_lock(pthread_mutex_t *mutex)
+{
+ int rval;
+
+ pthread_sched_prevent();
+ switch (mutex->m_type) {
+ /*
+ * Fast mutexes do not check for any error conditions.
+ */
+ case MUTEX_TYPE_FAST:
+ case MUTEX_TYPE_STATIC_FAST:
+ if (mutex->m_owner) {
+ pthread_queue_enq(&mutex->m_queue, pthread_run);
+
+ /* Reschedule will unlock scheduler */
+ pthread_resched_resume(PS_MUTEX_WAIT);
+ return(OK);
+ }
+ mutex->m_owner = pthread_run;
+ rval = OK;
+ break;
+ case MUTEX_TYPE_COUNTING_FAST:
+ if (mutex->m_owner) {
+ if (mutex->m_owner != pthread_run) {
+ pthread_queue_enq(&mutex->m_queue, pthread_run);
+
+ /* Reschedule will unlock scheduler */
+ pthread_resched_resume(PS_MUTEX_WAIT);
+ return(OK);
+ } else {
+ mutex->m_data.m_count++;
+ }
+ } else {
+ mutex->m_owner = pthread_run;
+ }
+ rval = OK;
+ break;
+ case MUTEX_TYPE_DEBUG:
+ if (pthread_mutex_is_debug(mutex) != NOTOK) {
+ if (mutex->m_owner) {
+ if (mutex->m_owner != pthread_run) {
+ pthread_queue_enq(&mutex->m_queue, pthread_run);
+
+ /* Reschedule will unlock pthread_run */
+ pthread_resched_resume(PS_MUTEX_WAIT);
+
+ if (mutex->m_owner != pthread_run) {
+ PANIC();
+ }
+ return(OK);
+ }
+ rval = EDEADLK;
+ break;
+ }
+ mutex->m_owner = pthread_run;
+ rval = OK;
+ break;
+ }
+ rval = EINVAL;
+ break;
+ default:
+ rval = EINVAL;
+ break;
+ }
+
+ pthread_sched_resume();
+ return(rval);
+}
+
+/* ==========================================================================
+ * pthread_mutex_unlock()
+ */
+int pthread_mutex_unlock(pthread_mutex_t *mutex)
+{
+ struct pthread *pthread;
+ int rval;
+
+ pthread_sched_prevent();
+
+ switch (mutex->m_type) {
+ /*
+ * Fast mutexes do not check for any error conditions.
+ */
+ case MUTEX_TYPE_FAST:
+ case MUTEX_TYPE_STATIC_FAST:
+ if (mutex->m_owner = pthread_queue_deq(&mutex->m_queue)) {
+
+ /* Reschedule will unlock scheduler */
+ pthread_sched_other_resume(mutex->m_owner);
+ return(OK);
+ }
+ rval = OK;
+ break;
+ case MUTEX_TYPE_COUNTING_FAST:
+ if (mutex->m_data.m_count) {
+ mutex->m_data.m_count--;
+ rval = OK;
+ break;
+ }
+ if (mutex->m_owner = pthread_queue_deq(&mutex->m_queue)) {
+
+ /* Reschedule will unlock scheduler */
+ pthread_sched_other_resume(mutex->m_owner);
+ return(OK);
+ }
+ rval = OK;
+ break;
+ case MUTEX_TYPE_DEBUG:
+ if (pthread_mutex_is_debug(mutex) != NOTOK) {
+ if (mutex->m_owner == pthread_run) {
+ if (mutex->m_owner = pthread_queue_deq(&mutex->m_queue)) {
+
+ /* Reschedule will unlock scheduler */
+ pthread_sched_other_resume(mutex->m_owner);
+ return(OK);
+ }
+ rval = OK;
+ } else {
+ rval = EPERM;
+ }
+ } else {
+ rval = EINVAL;
+ }
+ break;
+ default:
+ rval = EINVAL;
+ break;
+ }
+ pthread_sched_resume();
+ return(rval);
+}
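A minimal usage sketch for the trylock path above, assuming the usual 0/EBUSY return convention shown in the switch cases (this fragment is illustrative and not part of mutex.c):

#include <pthread.h>
#include <errno.h>

/* Try to take the lock without blocking; fall back if it is busy. */
static int try_update(pthread_mutex_t *m, int *counter)
{
    int rc = pthread_mutex_trylock(m);
    if (rc == 0) {                  /* acquired */
        (*counter)++;
        pthread_mutex_unlock(m);
        return 0;
    }
    if (rc == EBUSY)                /* another thread owns the mutex */
        return -1;                  /* caller may retry later */
    return rc;                      /* EINVAL for an unusable mutex */
}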
diff --git a/mit-pthreads/pthreads/mutexattr.c b/mit-pthreads/pthreads/mutexattr.c
new file mode 100644
index 00000000000..d045b5041a0
--- /dev/null
+++ b/mit-pthreads/pthreads/mutexattr.c
@@ -0,0 +1,90 @@
+/* ==== mutexattr.c ===========================================================
+ * Copyright (c) 1993, 1994 by Chris Provenzano, proven@mit.edu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Chris Provenzano.
+ * 4. The name of Chris Provenzano may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CHRIS PROVENZANO ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL CHRIS PROVENZANO BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Description : Mutex attribute functions.
+ *
+ * 1.00 93/07/19 proven
+ * -Started coding this file.
+ */
+
+#ifndef lint
+static const char rcsid[] = "$Id$";
+#endif
+
+#include <pthread.h>
+#include <errno.h>
+
+/* ==========================================================================
+ * pthread_mutexattr_init()
+ */
+int pthread_mutexattr_init(pthread_mutexattr_t *attr)
+{
+ attr->m_type = MUTEX_TYPE_FAST;
+ return(OK);
+}
+
+/* ==========================================================================
+ * pthread_mutexattr_destroy()
+ */
+int pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
+{
+ return(OK);
+}
+
+/* ==========================================================================
+ * pthread_mutexattr_settype()
+ */
+int pthread_mutexattr_settype(pthread_mutexattr_t *attr, unsigned int type)
+{
+ switch(type) {
+ case PTHREAD_MUTEXTYPE_FAST:
+ attr->m_type = MUTEX_TYPE_FAST;
+ break;
+ case PTHREAD_MUTEXTYPE_RECURSIVE:
+ attr->m_type = MUTEX_TYPE_COUNTING_FAST;
+ break;
+ case PTHREAD_MUTEXTYPE_DEBUG:
+ attr->m_type = MUTEX_TYPE_DEBUG;
+ break;
+ default:
+ return(EINVAL);
+ }
+ return(OK);
+}
+
+/* ==========================================================================
+ * pthread_mutexattr_gettype()
+ */
+int pthread_mutexattr_gettype(pthread_mutexattr_t *attr, unsigned int * type)
+{
+ *type = (unsigned int)attr->m_type;
+ return(OK);
+}
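A short sketch of how these attribute calls combine with the mutex functions above to obtain a recursive (counting) mutex; illustrative only, not part of the file:

#include <pthread.h>

pthread_mutex_t m;
pthread_mutexattr_t ma;

void setup_recursive_mutex(void)
{
    pthread_mutexattr_init(&ma);                   /* defaults to FAST */
    pthread_mutexattr_settype(&ma, PTHREAD_MUTEXTYPE_RECURSIVE);
    pthread_mutex_init(&m, &ma);                   /* MUTEX_TYPE_COUNTING_FAST */

    pthread_mutex_lock(&m);        /* owner = calling thread          */
    pthread_mutex_lock(&m);        /* same owner: m_count becomes 1   */
    pthread_mutex_unlock(&m);      /* m_count back to 0               */
    pthread_mutex_unlock(&m);      /* mutex released                  */

    pthread_mutex_destroy(&m);
    pthread_mutexattr_destroy(&ma);
}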
diff --git a/mit-pthreads/pthreads/panic.c b/mit-pthreads/pthreads/panic.c
new file mode 100644
index 00000000000..6b963acd651
--- /dev/null
+++ b/mit-pthreads/pthreads/panic.c
@@ -0,0 +1,58 @@
+/* ==== panic.c =======================================================
+ * Copyright (c) 1996 by Larry V. Streepy, Jr.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Larry V. Streepy, Jr.
+ * 4. The name of Larry V. Streepy, Jr. may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Larry V. Streepy, Jr. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL Larry V. Streepy, Jr. BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Description : pthread kernel panic
+ *
+ * 02 Oct 1996 - Larry V. Streepy, Jr.
+ * - Initial coding
+ */
+
+#include <pthread.h>
+#include <stdio.h>
+/*----------------------------------------------------------------------
+ * Function: panic_kernel
+ * Purpose: print a message and panic the pthreads kernel
+ * Args: file name, line number, and function
+ * Returns: doesn't
+ * Notes:
+ *----------------------------------------------------------------------*/
+void panic_kernel( const char *file, unsigned int line, const char *func )
+{
+#ifdef __GNUC__
+ (void) fprintf( stderr, "%s:%u: %s%sPthreads kernel panic.\n",
+ file, line, func ? func : "", func ? ": " : "" );
+ (void) fflush (stderr);
+#else
+ (void) fprintf( stderr, "%s:%u: Pthreads kernel panic.\n", file, line );
+ (void) fflush (stderr);
+#endif
+ abort();
+}
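The PANIC() calls used throughout this changeset presumably resolve to panic_kernel(); the macro itself lives in a header that is not part of this diff, so the following is only a guessed sketch of its shape:

/* Hypothetical definition -- the real macro is in the pthread headers. */
#ifdef __GNUC__
#define PANIC()  panic_kernel(__FILE__, __LINE__, __FUNCTION__)
#else
#define PANIC()  panic_kernel(__FILE__, __LINE__, (const char *)0)
#endif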
diff --git a/mit-pthreads/pthreads/prio_queue.c b/mit-pthreads/pthreads/prio_queue.c
new file mode 100644
index 00000000000..d976f9cd68f
--- /dev/null
+++ b/mit-pthreads/pthreads/prio_queue.c
@@ -0,0 +1,176 @@
+/* ==== prio_queue.c ==========================================================
+ * Copyright (c) 1994 by Chris Provenzano, proven@mit.edu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Chris Provenzano.
+ * 4. The name of Chris Provenzano may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CHRIS PROVENZANO ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL CHRIS PROVENZANO BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Description : Priority Queue functions.
+ *
+ * 1.00 94/09/19 proven
+ * -Started coding this file.
+ */
+
+#ifndef lint
+static const char rcsid[] = "$Id$";
+#endif
+
+#include <pthread.h>
+
+/* When a thread becomes eligible to run it is placed on the run queue.
+   This requires locking the kernel lock.
+*/
+
+/* ==========================================================================
+ * pthread_prio_queue_init()
+ */
+void pthread_prio_queue_init(struct pthread_prio_queue * queue)
+{
+ int i;
+
+ for (i = 0; i <= PTHREAD_MAX_PRIORITY; i++) {
+ queue->level[i].first = NULL;
+ queue->level[i].last = NULL;
+ }
+ queue->next = NULL;
+ queue->data = NULL;
+}
+
+/* ==========================================================================
+ * pthread_prio_queue_enq()
+ */
+void pthread_prio_queue_enq(struct pthread_prio_queue * queue,
+ struct pthread * pthread)
+{
+ int priority = pthread->pthread_priority;
+
+ if (queue->next) {
+ if (queue->level[priority].first) {
+ pthread->next = (queue->level[priority].last)->next;
+ (queue->level[priority].last)->next = pthread;
+ queue->level[priority].last = pthread;
+ return;
+ }
+ if (priority != PTHREAD_MAX_PRIORITY) {
+ int prev_priority;
+ /* Find first higher priority thread queued on queue */
+ for (prev_priority = priority + 1; prev_priority <=
+ PTHREAD_MAX_PRIORITY; prev_priority++) {
+ if (queue->level[prev_priority].first) {
+ pthread->next = (queue->level[prev_priority].last)->next;
+ (queue->level[prev_priority].last)->next = pthread;
+ queue->level[priority].first = pthread;
+ queue->level[priority].last = pthread;
+ return;
+ }
+ }
+ }
+ }
+ queue->level[priority].first = pthread;
+ queue->level[priority].last = pthread;
+ pthread->next = queue->next;
+ queue->next = pthread;
+}
+
+/* ==========================================================================
+ * pthread_prio_queue_deq()
+ */
+struct pthread * pthread_prio_queue_deq(struct pthread_prio_queue * queue)
+{
+ struct pthread * pthread;
+ int priority;
+
+ if (pthread = queue->next) {
+ priority = queue->next->pthread_priority;
+ if (queue->level[priority].first == queue->level[priority].last) {
+ queue->level[priority].first = NULL;
+ queue->level[priority].last = NULL;
+ } else {
+ queue->level[priority].first = pthread->next;
+ }
+ queue->next = pthread->next;
+ pthread->next = NULL;
+ }
+ return(pthread);
+}
+
+/* ==========================================================================
+ * pthread_prio_queue_remove()
+ */
+int pthread_prio_queue_remove(struct pthread_prio_queue *queue,
+ struct pthread *thread)
+{
+ /* XXX This is slow, should start with thread priority */
+ int priority = thread->pthread_priority;
+ struct pthread **current = &(queue->level[priority].first);
+ struct pthread *prev = NULL;
+
+ if (thread==*current) {
+ int current_priority=priority+1;
+
+ if (*current == queue->next){
+ pthread_prio_queue_deq(queue);
+ thread->next = NULL;
+ return(OK);
+ }
+ for (current_priority; current_priority <= PTHREAD_MAX_PRIORITY;
+ current_priority++) {
+ if (queue->level[current_priority].last) {
+ queue->level[current_priority].last->next = (*current)->next;
+ if ((*current)->next &&
+ (*current)->next->pthread_priority == priority)
+ queue->level[priority].first = (*current)->next;
+ else {
+ queue->level[priority].first = NULL;
+ queue->level[priority].last = NULL;
+ }
+ thread->next = NULL;
+ return(OK);
+ }
+ }
+ }
+
+ if (*current == NULL) /* Mati Sauks */
+ {
+ return (NOTOK);
+ }
+ for (prev=*current,current=&((*current)->next);
+ *current && ((*current)->pthread_priority == priority);
+ prev=*current,current=&((*current)->next)) {
+ if (*current == thread) {
+ if (*current == queue->level[priority].last) {
+ queue->level[priority].last = prev;
+ }
+
+ *current = (*current)->next;
+ thread->next=NULL;
+ return(OK);
+ }
+ }
+ return(NOTOK);
+}
+
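To illustrate the invariant the code above maintains -- one FIFO list per priority level, with dequeue always taking from the highest non-empty level -- here is a small self-contained model. It is deliberately simplified (it does not reproduce the single 'next' chain the real queue threads through all levels) and is not part of prio_queue.c:

#include <stdio.h>
#include <stdlib.h>

#define MAX_PRIO 3

struct node  { int id; struct node *next; };
struct level { struct node *first, *last; };
static struct level q[MAX_PRIO + 1];

static void enq(int prio, int id)             /* FIFO within a level */
{
    struct node *n = malloc(sizeof *n);       /* error checking omitted */
    n->id = id; n->next = NULL;
    if (q[prio].last) q[prio].last->next = n; else q[prio].first = n;
    q[prio].last = n;
}

static int deq(void)                          /* highest priority first */
{
    int p, id;
    for (p = MAX_PRIO; p >= 0; p--) {
        struct node *n = q[p].first;
        if (!n) continue;
        q[p].first = n->next;
        if (!q[p].first) q[p].last = NULL;
        id = n->id; free(n);
        return id;
    }
    return -1;                                /* queue empty */
}

int main(void)
{
    enq(1, 10); enq(2, 20); enq(1, 11); enq(2, 21);
    /* Prints "20 21 10 11": priority order first, FIFO within a level. */
    for (;;) { int id = deq(); if (id < 0) break; printf("%d ", id); }
    printf("\n");
    return 0;
}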
diff --git a/mit-pthreads/pthreads/process.c b/mit-pthreads/pthreads/process.c
new file mode 100644
index 00000000000..9b3abb3384b
--- /dev/null
+++ b/mit-pthreads/pthreads/process.c
@@ -0,0 +1,208 @@
+/* ==== process.c ============================================================
+ * Copyright (c) 1994 by Chris Provenzano, proven@mit.edu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Chris Provenzano.
+ * 4. The name of Chris Provenzano may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CHRIS PROVENZANO ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL CHRIS PROVENZANO BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Description : Process functions (fork, exec, ...).
+ *
+ * 1.23 94/04/18 proven
+ * -Started coding this file.
+ */
+
+#include <pthread.h>
+#include <sys/types.h>
+#include <pthread.h>
+#include <stdarg.h>
+#include <unistd.h>
+#ifdef HAVE_ALLOC_H
+#include <alloc.h>
+#endif
+
+extern void *alloca();
+
+#ifndef lint
+static const char rcsid[] = "$Id$";
+#endif
+
+/* ==========================================================================
+ * fork()
+ *
+ * This function requires a sig_prevent()/sig_check_and_resume() for the
+ * parent. The child never unlocks.
+ */
+pid_t fork()
+{
+ pid_t ret;
+
+ pthread_sched_prevent();
+
+ fd_kern_fork();
+ if (ret = machdep_sys_fork()) { /* Parent or error */
+ pthread_sched_resume();
+ } else { /* Child */
+ machdep_unset_thread_timer(NULL);
+ machdep_stop_timer(NULL);
+ fork_lock++;
+ pthread_kernel_lock--;
+ }
+ return(ret);
+}
+
+#ifdef HAVE_VFORK
+/* The semantics of vfork probably won't mix well with the pthread
+ library code. Don't even try. */
+pid_t vfork ()
+{
+ return fork ();
+}
+#endif
+
+/* ==========================================================================
+ * execve()
+ *
+ * This function requires a sig_prevent()/sig_check_and_resume() if one
+ * hasn't been done in the fork routine. Normally machdep_sys_execve()
+ * should never return.
+ */
+int execve(const char *name, char * const *argv, char * const *envp)
+{
+ int ret;
+
+ if (!fork_lock) {
+ pthread_sched_prevent();
+ fd_kern_exec(0);
+ ret = machdep_sys_execve(name, argv, envp);
+ pthread_sched_resume();
+ } else {
+ fd_kern_exec(1);
+ ret = machdep_sys_execve(name, argv, envp);
+ }
+ return(ret);
+}
+
+/* Variants of execve. Define them here so that the system versions
+ don't get used and drag in the system version of execve. */
+#include <sys/stat.h>
+#include <string.h>
+#include <sys/param.h>
+extern char **environ;
+
+static const char *find (const char *name, char *buf)
+{
+ char *p1, *p2;
+ extern char *getenv ();
+ struct stat sb;
+
+ if (strchr (name, '/'))
+ return name;
+ p1 = getenv ("PATH");
+ if (p1 == 0)
+ p1 = "/bin:/usr/bin:";
+ while (*p1) {
+ memset (buf, 0, MAXPATHLEN);
+ p2 = strchr (p1, ':');
+ if (p2 == 0)
+ p2 = p1 + strlen (p1);
+ strncpy (buf, p1, p2 - p1);
+ buf[p2 - p1] = 0;
+ strcat (buf, "/");
+ strcat (buf, name);
+ if (lstat (buf, &sb) == 0)
+ return buf;
+
+ if (*p2 == ':')
+ p2++;
+ p1 = p2;
+ }
+ return name;
+}
+
+int execl (const char *path, const char *arg, ...)
+{
+#ifdef SCO_3_5
+ return execve (path, (char *const *) &arg, environ);
+#else
+ char ** argv;
+ va_list ap;
+ int i;
+
+ va_start(ap, arg);
+ for (i = 1; va_arg(ap, char *) != NULL; i++);
+ va_end(ap);
+
+ argv = alloca (i * sizeof (char *));
+
+ va_start(ap, arg);
+ argv[0] = (char *) arg;
+ for (i = 1; (argv[i] = (char *) va_arg(ap, char *)) != NULL; i++);
+ va_end(ap);
+
+ return execve (path, argv, environ);
+#endif
+}
+
+int execlp (const char *name, const char *arg, ...)
+{
+#ifdef SCO_3_5
+ char buf[MAXPATHLEN];
+ return execve (find (name, buf), (char *const *) &arg, environ);
+#else
+ char buf[MAXPATHLEN];
+ char ** argv;
+ va_list ap;
+ int i;
+
+ va_start(ap, arg);
+ for (i = 1; va_arg(ap, char *) != NULL; i++);
+ va_end(ap);
+
+ argv = alloca (i * sizeof (char *));
+
+ va_start(ap, arg);
+ argv[0] = (char *) arg;
+ for (i = 1; (argv[i] = (char *) va_arg(ap, char *)) != NULL; i++);
+ va_end(ap);
+
+ return execve (find (name, buf), argv, environ);
+#endif
+}
+
+int execle (const char *name, const char *arg, ... /* , char *const envp[] */);
+
+/* This one turns on ptrace-style tracing? */
+int exect (const char *path, char *const argv[], char *const envp[]);
+
+int execv (const char *path, char *const argv[]) {
+ return execve (path, argv, environ);
+}
+
+int execvp (const char *name, char *const argv[]) {
+ char buf[MAXPATHLEN];
+ return execve (find (name, buf), argv, environ);
+}
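The variadic wrappers above count arguments until they reach a NULL sentinel, so callers must terminate the list explicitly; a conventional illustrative call:

#include <unistd.h>
#include <stdio.h>

void run_ls(void)
{
    /* argv[0], the options, then the mandatory terminating NULL */
    execl("/bin/ls", "ls", "-l", "/tmp", (char *)0);
    perror("execl");               /* only reached if execl failed */
}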
diff --git a/mit-pthreads/pthreads/pthread.c b/mit-pthreads/pthreads/pthread.c
new file mode 100644
index 00000000000..6f7e2d53980
--- /dev/null
+++ b/mit-pthreads/pthreads/pthread.c
@@ -0,0 +1,293 @@
+/* ==== pthread.c ============================================================
+ * Copyright (c) 1993, 1994 by Chris Provenzano, proven@mit.edu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Chris Provenzano.
+ * 4. The name of Chris Provenzano may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CHRIS PROVENZANO ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL CHRIS PROVENZANO BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Description : Pthread functions.
+ *
+ * 1.00 93/07/26 proven
+ * -Started coding this file.
+ */
+
+#ifndef lint
+static const char rcsid[] = "$Id$";
+#endif
+
+#include <pthread.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <errno.h>
+#include <string.h>
+#include <sched.h>
+
+/* ==========================================================================
+ * sched_yield()
+ */
+int sched_yield()
+{
+ sig_handler_fake(SIGVTALRM);
+ return(OK);
+}
+
+/* ==========================================================================
+ * pthread_yield()
+ */
+void pthread_yield()
+{
+ sig_handler_fake(SIGVTALRM);
+}
+
+/* ==========================================================================
+ * pthread_self()
+ */
+pthread_t pthread_self()
+{
+ return(pthread_run);
+}
+
+/* ==========================================================================
+ * pthread_equal()
+ */
+int pthread_equal(pthread_t t1, pthread_t t2)
+{
+ return(t1 == t2);
+}
+
+/* ==========================================================================
+ * pthread_exit()
+ */
+extern void pthread_cleanupspecific(void);
+
+void pthread_exit(void *status)
+{
+ pthread_t pthread;
+
+ /* Save return value */
+ pthread_run->ret = status;
+
+ /* First execute all cleanup handlers */
+ while (pthread_run->cleanup) {
+ pthread_cleanup_pop(1);
+ }
+
+ /* Don't forget the cleanup attr */
+ if (pthread_run->attr.cleanup_attr) {
+ pthread_run->attr.cleanup_attr(pthread_run->attr.arg_attr);
+ }
+
+ /* Next, run thread-specific data destructors */
+ if (pthread_run->specific_data) {
+ pthread_cleanupspecific();
+ }
+
+ pthread_sched_prevent();
+
+ if (!(pthread_run->attr.flags & PTHREAD_DETACHED)) {
+ /*
+ * Are there any threads joined to this one,
+ * if so wake them and let them detach this thread.
+ */
+ while (pthread = pthread_queue_deq(&(pthread_run->join_queue))) {
+ pthread_prio_queue_enq(pthread_current_prio_queue, pthread);
+ pthread->state = PS_RUNNING;
+ }
+ pthread_queue_enq(&pthread_dead_queue, pthread_run);
+ pthread_resched_resume(PS_DEAD);
+ } else {
+ pthread_queue_enq(&pthread_alloc_queue, pthread_run);
+ pthread_resched_resume(PS_UNALLOCED);
+ }
+
+ /* This thread will never run again */
+ PANIC();
+
+}
+
+/*----------------------------------------------------------------------
+ * Function: __pthread_is_valid
+ * Purpose: Scan the list of threads to see if a specified thread exists
+ * Args:
+ * pthread = The thread to scan for
+ * Returns:
+ * int = 1 if found, 0 if not
+ * Notes:
+ * The kernel is assumed to be locked
+ *----------------------------------------------------------------------*/
+int
+__pthread_is_valid( pthread_t pthread )
+{
+ int rtn = 0; /* Assume not found */
+ pthread_t t;
+
+ for( t = pthread_link_list; t; t = t->pll ) {
+ if( t == pthread ) {
+ rtn = 1; /* Found it */
+ break;
+ }
+ }
+
+ return rtn;
+}
+
+/* ==========================================================================
+ * __pthread_free()
+ */
+static inline void __pthread_free(pthread_t new_thread)
+{
+ pthread_sched_prevent();
+ new_thread->state = PS_UNALLOCED;
+ new_thread->attr.stacksize_attr = 0;
+ new_thread->attr.stackaddr_attr = NULL;
+ pthread_queue_enq(&pthread_alloc_queue, new_thread);
+ pthread_sched_resume();
+}
+/* ==========================================================================
+ * __pthread_alloc()
+ */
+/* static inline pthread_t __pthread_alloc(const pthread_attr_t *attr) */
+static pthread_t __pthread_alloc(const pthread_attr_t *attr)
+{
+ pthread_t thread;
+ void * stack;
+ void * old;
+
+ pthread_sched_prevent();
+ thread = pthread_queue_deq(&pthread_alloc_queue);
+ pthread_sched_resume();
+
+ if (thread) {
+ if (stack = attr->stackaddr_attr) {
+ __machdep_stack_repl(&(thread->machdep_data), stack);
+ } else {
+ if ((__machdep_stack_get(&(thread->machdep_data)) == NULL)
+ || (attr->stacksize_attr > thread->attr.stacksize_attr)) {
+ if (stack = __machdep_stack_alloc(attr->stacksize_attr)) {
+ __machdep_stack_repl(&(thread->machdep_data), stack);
+ } else {
+ __pthread_free(thread);
+ thread = NULL;
+ }
+ }
+ }
+ } else {
+ /* We should probably allocate several for efficiency */
+ if (thread = (pthread_t)malloc(sizeof(struct pthread))) {
+ /* Link new thread into list of all threads */
+
+ pthread_sched_prevent();
+ thread->state = PS_UNALLOCED;
+ thread->pll = pthread_link_list;
+ pthread_link_list = thread;
+ pthread_sched_resume();
+
+ if ((stack = attr->stackaddr_attr) ||
+ (stack = __machdep_stack_alloc(attr->stacksize_attr))) {
+ __machdep_stack_set(&(thread->machdep_data), stack);
+ } else {
+ __machdep_stack_set(&(thread->machdep_data), NULL);
+ __pthread_free(thread);
+ thread = NULL;
+ }
+ }
+ }
+ return(thread);
+}
+
+/* ==========================================================================
+ * pthread_create()
+ *
+ * After the new thread structure is allocated and set up, it is added to
+ * pthread_run_next_queue, which requires a sig_prevent(),
+ * sig_check_and_resume()
+ */
+int pthread_create(pthread_t *thread, const pthread_attr_t *attr,
+ void * (*start_routine)(void *), void *arg)
+{
+ pthread_t new_thread;
+ int nsec = 100000000;
+ int retval = OK;
+
+ if (! attr)
+ attr = &pthread_attr_default;
+
+ if (new_thread = __pthread_alloc(attr)) {
+
+ __machdep_pthread_create(&(new_thread->machdep_data),
+ start_routine, arg, attr->stacksize_attr, nsec, 0);
+
+ memcpy(&new_thread->attr, attr, sizeof(pthread_attr_t));
+ if (new_thread->attr.flags & PTHREAD_INHERIT_SCHED) {
+ new_thread->pthread_priority = pthread_run->pthread_priority;
+ new_thread->attr.sched_priority = pthread_run->pthread_priority;
+ new_thread->attr.schedparam_policy =
+ pthread_run->attr.schedparam_policy;
+ } else {
+ new_thread->pthread_priority = new_thread->attr.sched_priority;
+ }
+
+ if (!(new_thread->attr.flags & PTHREAD_NOFLOAT)) {
+ machdep_save_float_state(new_thread);
+ }
+
+ /* Initialize signalmask */
+ new_thread->sigmask = pthread_run->sigmask;
+ sigemptyset(&(new_thread->sigpending));
+ new_thread->sigcount = 0;
+
+ pthread_queue_init(&(new_thread->join_queue));
+ new_thread->specific_data = NULL;
+ new_thread->specific_data_count = 0;
+ new_thread->cleanup = NULL;
+ new_thread->queue = NULL;
+ new_thread->next = NULL;
+ new_thread->flags = 0;
+
+ /* PTHREADS spec says we start with cancellability on and deferred */
+ SET_PF_CANCEL_STATE(new_thread, PTHREAD_CANCEL_ENABLE);
+ SET_PF_CANCEL_TYPE(new_thread, PTHREAD_CANCEL_DEFERRED);
+
+ new_thread->error_p = NULL;
+ new_thread->sll = NULL;
+
+ pthread_sched_prevent();
+
+
+ pthread_sched_other_resume(new_thread);
+ /*
+ * Assignment must be outside of the locked pthread kernel in case
+ * thread is a bogus address resulting in a seg-fault. We want the
+ * original thread to be capable of handling the resulting signal.
+ * --proven
+ */
+ (*thread) = new_thread;
+ } else {
+ retval = EAGAIN;
+ }
+ return(retval);
+}
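A conventional usage sketch tying pthread_create() and pthread_exit() above to pthread_join() (defined later in this changeset); illustrative only:

#include <pthread.h>
#include <stdio.h>

static void *worker(void *arg)
{
    int *in = arg;
    pthread_exit((void *)(long)(*in * 2));    /* equivalent to returning it */
    return NULL;                              /* not reached */
}

int main(void)
{
    pthread_t t;
    void *res;
    int input = 21;

    if (pthread_create(&t, NULL, worker, &input) != 0)
        return 1;                             /* EAGAIN: no thread available */
    pthread_join(t, &res);
    printf("result = %ld\n", (long)res);
    return 0;
}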
diff --git a/mit-pthreads/pthreads/pthread_attr.c b/mit-pthreads/pthreads/pthread_attr.c
new file mode 100644
index 00000000000..5e1c0302227
--- /dev/null
+++ b/mit-pthreads/pthreads/pthread_attr.c
@@ -0,0 +1,255 @@
+/* ==== pthread_attr.c =======================================================
+ * Copyright (c) 1993, 1994 by Chris Provenzano, proven@mit.edu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Chris Provenzano.
+ * 4. The name of Chris Provenzano may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CHRIS PROVENZANO ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL CHRIS PROVENZANO BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Description : Pthread attribute functions.
+ *
+ * 1.00 93/11/04 proven
+ * -Started coding this file.
+ */
+
+#ifndef lint
+static const char rcsid[] = "$Id$";
+#endif
+
+#include <pthread.h>
+#include <errno.h>
+#include <string.h>
+
+/* Currently we do no locking; should we, just to be safe? CAP */
+/* ==========================================================================
+ * pthread_attr_init()
+ */
+int pthread_attr_init(pthread_attr_t *attr)
+{
+ memcpy(attr, &pthread_attr_default, sizeof(pthread_attr_t));
+ return(OK);
+}
+
+/* ==========================================================================
+ * pthread_attr_destroy()
+ */
+int pthread_attr_destroy(pthread_attr_t *attr)
+{
+ return(OK);
+}
+
+/* ==========================================================================
+ * pthread_attr_getstacksize()
+ */
+int pthread_attr_getstacksize(pthread_attr_t *attr, size_t * stacksize)
+{
+ *stacksize = attr->stacksize_attr;
+ return(OK);
+}
+
+/* ==========================================================================
+ * pthread_attr_setstacksize()
+ */
+int pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
+{
+ if (stacksize >= PTHREAD_STACK_MIN) {
+ attr->stacksize_attr = stacksize;
+ return(OK);
+ }
+ return(EINVAL);
+}
+
+/* ==========================================================================
+ * pthread_attr_getstackaddr()
+ */
+int pthread_attr_getstackaddr(pthread_attr_t *attr, void ** stackaddr)
+{
+ *stackaddr = attr->stackaddr_attr;
+ return(OK);
+}
+
+/* ==========================================================================
+ * pthread_attr_setstackaddr()
+ */
+int pthread_attr_setstackaddr(pthread_attr_t *attr, void * stackaddr)
+{
+ attr->stackaddr_attr = stackaddr;
+ return(OK);
+}
+
+/* ==========================================================================
+ * pthread_attr_setcleanup()
+ */
+int pthread_attr_setcleanup(pthread_attr_t *attr, void (*routine)(void *),
+ void * arg)
+{
+ attr->cleanup_attr = routine;
+ attr->arg_attr = arg;
+ return(OK);
+}
+
+/* ==========================================================================
+ * pthread_attr_getdetachstate()
+ */
+int pthread_attr_getdetachstate(pthread_attr_t *attr, int * detachstate)
+{
+ *detachstate = attr->flags & PTHREAD_DETACHED;
+ return(OK);
+}
+
+/* ==========================================================================
+ * pthread_attr_setdetachstate()
+ */
+int pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate)
+{
+ attr->flags = (attr->flags & ~(PTHREAD_DETACHED)) |
+ (detachstate & PTHREAD_DETACHED);
+ return(OK);
+}
+
+/* ==========================================================================
+ * pthread_attr_getfloatstate()
+ */
+int pthread_attr_getfloatstate(pthread_attr_t *attr, int * floatstate)
+{
+ *floatstate = attr->flags & PTHREAD_NOFLOAT;
+ return(OK);
+}
+
+/* ==========================================================================
+ * pthread_attr_setfloatstate()
+ */
+int pthread_attr_setfloatstate(pthread_attr_t *attr, int floatstate)
+{
+ attr->flags = (attr->flags & ~(PTHREAD_NOFLOAT)) |
+ (floatstate & PTHREAD_NOFLOAT);
+ return(OK);
+}
+
+/* ==========================================================================
+ * pthread_attr_getscope()
+ */
+int pthread_attr_getscope(pthread_attr_t *attr, int * contentionscope)
+{
+ *contentionscope = attr->flags & PTHREAD_SCOPE_SYSTEM;
+ return(OK);
+}
+
+/* ==========================================================================
+ * pthread_attr_setscope()
+ */
+int pthread_attr_setscope(pthread_attr_t *attr, int contentionscope)
+{
+ int ret;
+
+ switch (contentionscope) {
+ case PTHREAD_SCOPE_PROCESS:
+ attr->flags = (attr->flags & ~(PTHREAD_SCOPE_PROCESS))
+ | PTHREAD_SCOPE_PROCESS;
+ ret = OK;
+ break;
+ case PTHREAD_SCOPE_SYSTEM:
+ ret = ENOSYS;
+ break;
+ default:
+ ret = EINVAL;
+ break;
+ }
+
+ return(ret);
+}
+
+/* ==========================================================================
+ * pthread_attr_getinheritsched()
+ */
+int pthread_attr_getinheritsched(pthread_attr_t *attr, int * inheritsched)
+{
+ *inheritsched = attr->flags & PTHREAD_INHERIT_SCHED;
+ return(OK);
+}
+
+/* ==========================================================================
+ * pthread_attr_setinheritsched()
+ */
+int pthread_attr_setinheritsched(pthread_attr_t *attr, int inheritsched)
+{
+ attr->flags = (attr->flags & ~(PTHREAD_INHERIT_SCHED)) |
+ (inheritsched & PTHREAD_INHERIT_SCHED);
+ return(OK);
+}
+
+/* ==========================================================================
+ * pthread_attr_getschedpolicy()
+ */
+int pthread_attr_getschedpolicy(pthread_attr_t *attr, int * schedpolicy)
+{
+ *schedpolicy = (int)attr->schedparam_policy;
+ return(OK);
+}
+
+/* ==========================================================================
+ * pthread_attr_setschedpolicy()
+ */
+int pthread_attr_setschedpolicy(pthread_attr_t *attr, int schedpolicy)
+{
+ int ret;
+
+ switch(schedpolicy) {
+ case SCHED_FIFO:
+ case SCHED_IO:
+ case SCHED_RR:
+ attr->schedparam_policy = schedpolicy;
+ ret = OK;
+ break;
+ default:
+ ret = EINVAL;
+ break;
+ }
+ return(ret);
+}
+
+/* ==========================================================================
+ * pthread_attr_getschedparam()
+ */
+int pthread_attr_getschedparam(pthread_attr_t *attr, struct sched_param * param)
+{
+ param->sched_priority = attr->sched_priority;
+ return(OK);
+}
+
+/* ==========================================================================
+ * pthread_attr_setschedparam()
+ */
+int pthread_attr_setschedparam(pthread_attr_t *attr, struct sched_param * param)
+{
+ if ((param->sched_priority >= PTHREAD_MIN_PRIORITY) &&
+ (param->sched_priority <= PTHREAD_MAX_PRIORITY)) {
+ attr->sched_priority = param->sched_priority;
+ return(OK);
+ }
+ return(EINVAL);
+}
+
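A short illustrative fragment feeding an attribute object into pthread_create(); service_loop is a hypothetical thread body, and the stack size is assumed to satisfy PTHREAD_STACK_MIN:

#include <pthread.h>

extern void *service_loop(void *);            /* hypothetical thread body */

int start_detached_service(pthread_t *t)
{
    pthread_attr_t a;
    int rc;

    pthread_attr_init(&a);                             /* copy of the defaults */
    pthread_attr_setstacksize(&a, 64 * 1024);          /* assumed >= PTHREAD_STACK_MIN */
    pthread_attr_setdetachstate(&a, PTHREAD_DETACHED); /* this library's flag value */
    rc = pthread_create(t, &a, service_loop, NULL);
    pthread_attr_destroy(&a);
    return rc;
}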
diff --git a/mit-pthreads/pthreads/pthread_cancel.c b/mit-pthreads/pthreads/pthread_cancel.c
new file mode 100644
index 00000000000..4191a269027
--- /dev/null
+++ b/mit-pthreads/pthreads/pthread_cancel.c
@@ -0,0 +1,258 @@
+/* ==== pthread_cancel.c ====================================================
+ * Copyright (c) 1996 by Larry V. Streepy, Jr.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Larry V. Streepy, Jr.
+ * 4. The name of Larry V. Streepy, Jr. may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Larry V. Streepy, Jr. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL Larry V. Streepy, Jr. BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Description : pthread_cancel operations
+ *
+ * 27 Sep 1996 - Larry V. Streepy, Jr.
+ * - Initial coding
+ */
+#ifndef lint
+static const char rcsid[] = "$Id:";
+#endif
+#include <pthread.h>
+#include <errno.h>
+static void possiblyMakeRunnable( pthread_t pthread );
+
+/*----------------------------------------------------------------------
+ * Function: pthread_cancel
+ * Purpose: Allows a thread to request that it or another thread
+ * terminate execution
+ * Args:
+ * thread = thread to mark as cancelled
+ * Returns:
+ * int 0 = ok, else an error number (e.g. ESRCH)
+ * Notes:
+ * The thread is simply marked as CANCELLED; it is up to the cancelled
+ * thread to decide how to handle it.
+ *----------------------------------------------------------------------*/
+int pthread_cancel( pthread_t pthread )
+{
+ int rtn = 0; /* Assume all ok */
+ pthread_sched_prevent();
+ /* Ensure they gave us a legal pthread pointer */
+ if( ! __pthread_is_valid( pthread ) ) {
+ rtn = ESRCH; /* No such thread */
+ } else if( pthread->state == PS_UNALLOCED || pthread->state == PS_DEAD ) {
+ /* The standard doesn't call these out as errors, so return 0 */
+ rtn = 0;
+ } else {
+ SET_PF_CANCELLED(pthread); /* Set the flag */
+ /* If the thread is in the right state, then stick it on the
+ * run queue so it will get a chance to process the cancel.
+ */
+ if( pthread != pthread_run ) {
+ possiblyMakeRunnable( pthread );
+ }
+ }
+ pthread_sched_resume();
+ if( rtn == 0 )
+ pthread_testcancel(); /* See if we cancelled ourself */
+ return rtn;
+}
+
+/*----------------------------------------------------------------------
+ * Function: pthread_setcancelstate
+ * Purpose: Set the current thread's cancellability state
+ * Args:
+ * state = PTHREAD_CANCEL_DISABLE or PTHREAD_CANCEL_ENABLE
+ * oldstate= pointer to holder for old state or NULL (*MODIFIED*)
+ * Returns:
+ * int 0 = ok
+ * EINVAL = state is neither of the legal states
+ * Notes:
+ * This has to be async-cancel safe, so we prevent scheduling in
+ * here
+ *----------------------------------------------------------------------*/
+
+int
+pthread_setcancelstate( int newstate, int *oldstate )
+{
+ int ostate = TEST_PF_CANCEL_STATE(pthread_run);
+ int rtn = 0;
+ pthread_sched_prevent();
+ if( newstate == PTHREAD_CANCEL_ENABLE ||
+ newstate == PTHREAD_CANCEL_DISABLE ) {
+ SET_PF_CANCEL_STATE(pthread_run, newstate);
+ if( oldstate != NULL )
+ *oldstate = ostate;
+ } else { /* Invalid new state */
+ rtn = EINVAL;
+ }
+ pthread_sched_resume();
+ if( rtn == 0 ) {
+ /* Test to see if we have a pending cancel to handle */
+ pthread_testcancel();
+ }
+ return rtn;
+}
+
+/*----------------------------------------------------------------------
+ * Function: pthread_setcanceltype
+ * Purpose: Set the current thread's cancellability type
+ * Args:
+ * type = PTHREAD_CANCEL_DEFERRED or PTHREAD_CANCEL_ASYNCHRONOUS
+ * oldtype = pointer to holder for old type or NULL (*MODIFIED*)
+ * Returns:
+ * int 0 = ok
+ * EINVAL = type is neither of the legal states
+ * Notes:
+ * This has to be async-cancel safe, so we prevent scheduling in
+ * here
+ *----------------------------------------------------------------------*/
+
+int
+pthread_setcanceltype( int newtype, int *oldtype )
+{
+ int otype = TEST_PF_CANCEL_TYPE(pthread_run);
+ int rtn = 0;
+ pthread_sched_prevent();
+ if( newtype == PTHREAD_CANCEL_DEFERRED ||
+ newtype == PTHREAD_CANCEL_ASYNCHRONOUS) {
+ SET_PF_CANCEL_TYPE(pthread_run, newtype);
+ if( oldtype != NULL )
+ *oldtype = otype;
+ } else { /* Invalid new type */
+ rtn = EINVAL;
+ }
+ pthread_sched_resume();
+ if( rtn == 0 ) {
+ /* Test to see if we have a pending cancel to handle */
+ pthread_testcancel();
+ }
+ return rtn;
+}
+
+/*----------------------------------------------------------------------
+ * Function: pthread_testcancel
+ * Purpose: Requests delivery of a pending cancel to the current thread
+ * Args: void
+ * Returns: void
+ * Notes:
+ * If the current thread has been cancelled, this function will not
+ * return and the thread's exit processing will be initiated.
+ *----------------------------------------------------------------------*/
+
+void
+pthread_testcancel( void )
+{
+ if( TEST_PF_CANCEL_STATE(pthread_run) == PTHREAD_CANCEL_DISABLE ) {
+ return; /* Can't be cancelled */
+ }
+ /* Ensure that we aren't in the process of exiting already */
+ if( TEST_PF_RUNNING_TO_CANCEL(pthread_run) )
+ return;
+
+ /* See if we have been cancelled */
+ if( TEST_PF_CANCELLED(pthread_run) ) {
+ /* Set this flag to avoid recursively calling pthread_exit */
+ SET_PF_RUNNING_TO_CANCEL(pthread_run);
+ pthread_exit( PTHREAD_CANCELLED ); /* Easy - just call pthread_exit */
+ }
+ return; /* Not cancelled */
+}
+
+/*----------------------------------------------------------------------
+ * Function: pthread_cancel_internal
+ * Purpose: An internal routine to begin the cancel processing
+ * Args: freelocks = do we need to free locks before exiting
+ * Returns: void
+ * Notes:
+ * This routine is called from pthread_resched_resume
+ * prior to a context switch, and after a thread has resumed.
+ *
+ * The kernel must *NOT* be locked on entry here
+ *----------------------------------------------------------------------*/
+
+void
+pthread_cancel_internal( int freelocks )
+{
+ pthread_sched_prevent(); /* gotta stay focused */
+ /* Since we can be called from pthread_resched_resume(), our
+ * state is currently not PS_RUNNING. Since we sidestepped
+ * the actual blocking, we need to be removed from the queue
+ * and marked as running.
+ */
+ if( pthread_run->state != PS_RUNNING ) {
+ if( pthread_run->queue == NULL ) {
+ PANIC(); /* Must be on a queue */
+ }
+ /* We MUST NOT put the thread on the prio_queue here. It
+ * is already running (although its state has changed) and if we
+ * put it on the run queue, it will get resumed after it is dead
+ * and we end up with a nice panic.
+ */
+ pthread_queue_remove(pthread_run->queue, pthread_run);
+ pthread_run->state = PS_RUNNING; /* we are running */
+ }
+ /* Set this flag to avoid recursively calling pthread_exit */
+ SET_PF_RUNNING_TO_CANCEL(pthread_run);
+ /* Free up any locks we hold if told to. */
+ if( freelocks ) {
+ fd_unlock_for_cancel();
+ }
+ pthread_sched_resume();
+ pthread_exit( PTHREAD_CANCELLED ); /* Easy - just call pthread_exit */
+}
+
+/*----------------------------------------------------------------------
+ * Function: possiblyMakeRunnable
+ * Purpose: Make a thread runnable so it can be cancelled if state allows
+ * Args:
+ * pthread = thread to process
+ * Returns:
+ * Notes:
+ *----------------------------------------------------------------------*/
+
+static void
+possiblyMakeRunnable( pthread_t pthread )
+{
+ if( ! TEST_PTHREAD_IS_CANCELLABLE(pthread) )
+ return; /* Not currently cancellable */
+ /* If the thread is currently runnable, then we just let things
+ * take their course when it is next resumed.
+ */
+ if( pthread->state == PS_RUNNING )
+ return; /* will happen at context switch */
+ /* If the thread is sleeping, then it isn't on a queue. */
+ if( pthread->state == PS_SLEEP_WAIT ) {
+ sleep_cancel( pthread ); /* Remove from sleep list */
+ } else {
+ /* Otherwise, we need to take it off the queue and make it runnable */
+ if( pthread->queue == NULL ) {
+ PANIC(); /* Must be on a queue */
+ }
+ pthread_queue_remove(pthread->queue, pthread);
+ }
+ /* And make it runnable */
+ pthread_prio_queue_enq(pthread_current_prio_queue, pthread);
+ pthread->old_state = pthread->state;
+ pthread->state = PS_RUNNING;
+}
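For orientation, a sketch of the deferred-cancellation pattern these functions implement: the target thread runs with cancellation enabled and deferred, and polls with pthread_testcancel() at safe points. do_one_unit_of_work is a hypothetical helper; the fragment is illustrative only:

#include <pthread.h>

extern void do_one_unit_of_work(void);        /* hypothetical helper */

static void *cancellable_worker(void *arg)
{
    int old;

    pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old);
    pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, &old);
    for (;;) {
        do_one_unit_of_work();
        pthread_testcancel();    /* exits via pthread_exit(PTHREAD_CANCELLED)
                                    if pthread_cancel() was called on us */
    }
    return arg;                  /* not reached */
}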
diff --git a/mit-pthreads/pthreads/pthread_detach.c b/mit-pthreads/pthreads/pthread_detach.c
new file mode 100644
index 00000000000..d3ae8c03bb3
--- /dev/null
+++ b/mit-pthreads/pthreads/pthread_detach.c
@@ -0,0 +1,92 @@
+/* ==== pthread_detach.c =======================================================
+ * Copyright (c) 1994 by Chris Provenzano, proven@mit.edu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Chris Provenzano.
+ * 4. The name of Chris Provenzano may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CHRIS PROVENZANO ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL CHRIS PROVENZANO BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Description : pthread_detach function.
+ *
+ * 1.00 94/01/15 proven
+ * -Started coding this file.
+ */
+
+#ifndef lint
+static const char rcsid[] = "$Id$";
+#endif
+
+#include <errno.h>
+#include <pthread.h>
+
+/* ==========================================================================
+ * pthread_detach()
+ */
+int pthread_detach(pthread_t pthread)
+{
+ struct pthread * next_thread, * high_thread, * low_thread;
+ int ret;
+
+ pthread_sched_prevent();
+
+ /* Check that thread isn't detached already */
+ if (!(pthread->attr.flags & PTHREAD_DETACHED)) {
+
+ pthread->attr.flags |= PTHREAD_DETACHED;
+
+ /* Wakeup all threads waiting on a join */
+ if (next_thread = pthread_queue_deq(&(pthread->join_queue))) {
+ high_thread = next_thread;
+
+ while (next_thread = pthread_queue_deq(&(pthread->join_queue))) {
+ if (high_thread->pthread_priority < next_thread->pthread_priority) {
+ low_thread = high_thread;
+ high_thread = next_thread;
+ } else {
+ low_thread = next_thread;
+ }
+ pthread_prio_queue_enq(pthread_current_prio_queue, low_thread);
+ low_thread->state = PS_RUNNING;
+ }
+ /* If the thread is dead then move it to the alloc queue */
+ if (pthread_queue_remove(&pthread_dead_queue, pthread) == OK) {
+ pthread_queue_enq(&pthread_alloc_queue, pthread);
+ }
+ pthread_sched_other_resume(high_thread);
+ return(OK);
+ }
+ /* If the thread is dead then move it to the alloc queue */
+ if (pthread_queue_remove(&pthread_dead_queue, pthread) == OK) {
+ pthread_queue_enq(&pthread_alloc_queue, pthread);
+ pthread->state = PS_UNALLOCED;
+ }
+ ret = OK;
+ } else {
+ ret = ESRCH;
+ }
+ pthread_sched_resume();
+ return(ret);
+}
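A minimal illustrative caller for the function above; once detached, the thread's resources are recycled through the alloc queue when it exits and it must not be joined. background_task is hypothetical:

#include <pthread.h>

extern void *background_task(void *);          /* hypothetical thread body */

int fire_and_forget(void)
{
    pthread_t t;
    int rc = pthread_create(&t, NULL, background_task, NULL);
    if (rc == 0)
        rc = pthread_detach(t);  /* OK, or ESRCH if already detached */
    return rc;
}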
diff --git a/mit-pthreads/pthreads/pthread_init.c b/mit-pthreads/pthreads/pthread_init.c
new file mode 100644
index 00000000000..83e19fe0229
--- /dev/null
+++ b/mit-pthreads/pthreads/pthread_init.c
@@ -0,0 +1,135 @@
+/* ==== pthread_init.c ========================================================
+ * Copyright (c) 1993, 1994 by Chris Provenzano, proven@mit.edu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Chris Provenzano.
+ * 4. The name of Chris Provenzano may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CHRIS PROVENZANO ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL CHRIS PROVENZANO BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Description : Pthread_init routine.
+ *
+ * 1.00 94/09/20 proven
+ * -Started coding this file.
+ */
+
+#ifndef lint
+static const char rcsid[] = "$Id$";
+#endif
+
+#include <pthread.h>
+#include <stdlib.h>
+#include <string.h>
+
+/*
+ * errno is declared here to prevent the linker from pulling in errno
+ * from the C library (and whatever else is in that file). I also use
+ * errno as the default location for error numbers for the initial thread,
+ * giving some backwards compatibility.
+ */
+#ifdef errno
+#undef errno
+#endif
+
+#if !defined(M_UNIX)
+int errno;
+#else
+extern int errno;
+#endif
+
+/* ==========================================================================
+ * pthread_init()
+ *
+ * We use features of the C++ linker to make sure this function is called
+ * before anything else is done in the program. See init.cc.
+ */
+void pthread_init(void)
+{
+ struct machdep_pthread machdep_data = MACHDEP_PTHREAD_INIT;
+
+ /* Only call this once */
+ if (pthread_initial) {
+ return;
+ }
+
+ pthread_pagesize = getpagesize();
+
+ /* Initialize the first thread */
+ if ((pthread_initial = (pthread_t)malloc(sizeof(struct pthread))) &&
+ (pthread_current_prio_queue = (struct pthread_prio_queue *)
+ malloc(sizeof(struct pthread_prio_queue)))) {
+ memcpy(&(pthread_initial->machdep_data), &machdep_data,
+ sizeof(machdep_data));
+ memcpy(&pthread_initial->attr, &pthread_attr_default,
+ sizeof(pthread_attr_t));
+
+ pthread_initial->pthread_priority = PTHREAD_DEFAULT_PRIORITY;
+ pthread_initial->state = PS_RUNNING;
+
+ pthread_queue_init(&(pthread_initial->join_queue));
+ pthread_initial->specific_data = NULL;
+ pthread_initial->specific_data_count = 0;
+ pthread_initial->cleanup = NULL;
+ pthread_initial->queue = NULL;
+ pthread_initial->next = NULL;
+ pthread_initial->flags = 0;
+ pthread_initial->pll = NULL;
+ pthread_initial->sll = NULL;
+
+ /* PTHREADS spec says we start with cancellability on and deferred */
+ SET_PF_CANCEL_STATE(pthread_initial, PTHREAD_CANCEL_ENABLE);
+ SET_PF_CANCEL_TYPE(pthread_initial, PTHREAD_CANCEL_DEFERRED);
+
+
+ /* Ugly errno hack */
+ pthread_initial->error_p = &errno;
+ pthread_initial->error = 0;
+
+ pthread_prio_queue_init(pthread_current_prio_queue);
+ pthread_link_list = pthread_initial;
+ pthread_run = pthread_initial;
+
+ uthread_sigmask = &(pthread_run->sigmask);
+
+	/* XXX Can I assume the mask and pending signal sets are empty? */
+ sigemptyset(&(pthread_initial->sigpending));
+ sigemptyset(&(pthread_initial->sigmask));
+ pthread_initial->sigcount = 0;
+
+ /* Initialize the signal handler. */
+ sig_init();
+
+ /* Initialize the fd table. */
+ fd_init();
+
+ /* Start the scheduler */
+ machdep_set_thread_timer(&(pthread_run->machdep_data));
+#ifdef M_UNIX
+ machdep_sys_init();
+#endif
+ return;
+ }
+ PANIC();
+}
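+
+/*
+ * Illustrative sketch only (not part of the original sources): init.cc,
+ * referenced above, gets pthread_init() run before main() by defining a
+ * C++ static object whose constructor calls it.  The fragment below shows
+ * the same "run before main()" effect using a GCC constructor attribute
+ * instead; it is a rough approximation, not the real init.cc.
+ */
+#if 0
+extern void pthread_init(void);
+
+__attribute__((constructor))
+static void pthread_init_sketch(void)
+{
+	/* Runs before main(), so the thread library is set up early. */
+	pthread_init();
+}
+#endif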
diff --git a/mit-pthreads/pthreads/pthread_join.c b/mit-pthreads/pthreads/pthread_join.c
new file mode 100644
index 00000000000..879250020a1
--- /dev/null
+++ b/mit-pthreads/pthreads/pthread_join.c
@@ -0,0 +1,139 @@
+/* ==== pthread_join.c =======================================================
+ * Copyright (c) 1994 by Chris Provenzano, proven@mit.edu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Chris Provenzano.
+ * 4. The name of Chris Provenzano may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CHRIS PROVENZANO ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL CHRIS PROVENZANO BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Description : pthread_join function.
+ *
+ * 1.00 94/01/15 proven
+ * -Started coding this file.
+ */
+
+#ifndef lint
+static const char rcsid[] = "$Id$";
+#endif
+
+#include <pthread.h>
+#include <errno.h>
+
+static int testDeadlock( struct pthread_queue *queue, pthread_t target );
+
+/* ==========================================================================
+ * pthread_join()
+ */
+int pthread_join(pthread_t pthread, void **thread_return)
+{
+ int ret;
+
+ pthread_sched_prevent();
+
+ /* Ensure they gave us a legal pthread pointer */
+ if( ! __pthread_is_valid( pthread ) ) {
+ pthread_sched_resume();
+ return(EINVAL);
+ }
+
+ /* Check that thread isn't detached already */
+ if (pthread->attr.flags & PTHREAD_DETACHED) {
+ pthread_sched_resume();
+ return(ESRCH);
+ }
+
+ /*
+ * Now check if other thread has exited
+ * Note: This must happen after checking detached state.
+ */
+ if (pthread_queue_remove(&pthread_dead_queue, pthread) != OK) {
+
+		/* Before we block on the join, ensure there is no deadlock */
+
+ if( testDeadlock( &pthread_run->join_queue, pthread ) == NOTOK ) {
+ ret = EDEADLK;
+ } else {
+ pthread_queue_enq(&(pthread->join_queue), pthread_run);
+ SET_PF_AT_CANCEL_POINT(pthread_run); /* This is a cancel point */
+ pthread_resched_resume(PS_JOIN);
+ CLEAR_PF_AT_CANCEL_POINT(pthread_run); /* No longer at cancel point */
+ pthread_sched_prevent();
+
+ if (pthread_queue_remove(&pthread_dead_queue, pthread) == OK) {
+ pthread_queue_enq(&pthread_alloc_queue, pthread);
+ pthread->attr.flags |= PTHREAD_DETACHED;
+ pthread->state = PS_UNALLOCED;
+ if (thread_return) {
+ *thread_return = pthread->ret;
+ }
+ ret = OK;
+ } else {
+ ret = ESRCH;
+ }
+ }
+ } else {
+ /* Just get the return value and detach the thread */
+ pthread_queue_enq(&pthread_alloc_queue, pthread);
+ pthread->attr.flags |= PTHREAD_DETACHED;
+ pthread->state = PS_UNALLOCED;
+ if (thread_return) {
+ *thread_return = pthread->ret;
+ }
+ ret = OK;
+ }
+ pthread_sched_resume();
+ return(ret);
+}
+
+/*----------------------------------------------------------------------
+ * Function: testDeadlock
+ * Purpose: recursive queue walk to check for deadlocks
+ * Args:
+ * queue = the queue to walk
+ * pthread = target to scan for
+ * Returns:
+ * OK = no deadlock, NOTOK = deadlock
+ * Notes:
+ *----------------------------------------------------------------------*/
+static int
+testDeadlock( struct pthread_queue *queue, pthread_t target )
+{
+ pthread_t t;
+
+ if( queue == NULL )
+ return OK; /* Empty queue, obviously ok */
+
+ for( t = queue->q_next; t; t = t->next ) {
+ if( t == target )
+			return NOTOK; /* bang, you're dead */
+
+ if( testDeadlock( &t->join_queue, target ) == NOTOK ) {
+ return NOTOK;
+ }
+ }
+
+ return OK; /* No deadlock */
+}
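+
+/*
+ * Usage sketch (not part of the original sources): the deadlock walk above
+ * is what turns a mutual join into EDEADLK instead of a hang.  A minimal,
+ * timing-dependent demonstration: if the child has already blocked joining
+ * main, main's own join fails with EDEADLK.
+ */
+#if 0
+#include <pthread.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <errno.h>
+
+static pthread_t main_thread;
+
+static void *child(void *arg)
+{
+	pthread_join(main_thread, NULL);	/* blocks waiting for main */
+	return NULL;
+}
+
+int main(void)
+{
+	pthread_t tid;
+	int err;
+
+	main_thread = pthread_self();
+	pthread_create(&tid, NULL, child, NULL);
+	sleep(1);				/* let the child block first */
+	err = pthread_join(tid, NULL);		/* expected: EDEADLK */
+	printf("pthread_join: %d (EDEADLK is %d)\n", err, EDEADLK);
+	return 0;
+}
+#endif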
diff --git a/mit-pthreads/pthreads/pthread_kill.c b/mit-pthreads/pthreads/pthread_kill.c
new file mode 100644
index 00000000000..9e3e61488a3
--- /dev/null
+++ b/mit-pthreads/pthreads/pthread_kill.c
@@ -0,0 +1,93 @@
+/* ==== pthread_kill.c =======================================================
+ * Copyright (c) 1994 by Chris Provenzano, proven@mit.edu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Chris Provenzano.
+ * 4. The name of Chris Provenzano may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CHRIS PROVENZANO ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL CHRIS PROVENZANO BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Description : pthread_kill function.
+ *
+ * 1.32 94/06/12 proven
+ * -Started coding this file.
+ */
+
+#ifndef lint
+static const char rcsid[] = "$Id$";
+#endif
+
+#include <pthread.h>
+
+/* Defined in sig.c, a linked list of threads currently
+ * blocked in sigwait(): */
+extern struct pthread * pthread_sigwait;
+
+
+/* ==========================================================================
+ * pthread_kill()
+ */
+int pthread_kill(struct pthread * pthread, int sig)
+{
+ struct pthread ** pthread_ptr;
+
+ pthread_sched_prevent();
+
+ /* Check who is the current owner of pthread */
+/* if (pthread->kthread != pthread_run->kthread) { */
+ if (0) {
+ } else {
+ if (pthread->state == PS_SIGWAIT) {
+ if(sigismember(pthread->data.sigwait, sig)) {
+ for (pthread_ptr = &pthread_sigwait;
+ (*pthread_ptr);
+ pthread_ptr = &((*pthread_ptr)->next)) {
+ if ((*pthread_ptr) == pthread) {
+
+ /* Take the thread out of the
+ * pthread_sigwait linked list: */
+ *pthread_ptr=(*pthread_ptr)->next;
+
+ *(int *)(pthread->ret) = sig;
+ pthread_sched_other_resume(pthread);
+ return(OK);
+ }
+ }
+ /* A thread should not be in the state PS_SIGWAIT
+ * without being in the pthread_sigwait linked
+ * list: */
+ PANIC();
+ }
+ }
+ if (!sigismember(&pthread->sigpending,sig)) /* Added by monty */
+ {
+ sigaddset(&(pthread->sigpending), sig);
+ pthread->sigcount++;
+ }
+ }
+
+ pthread_sched_resume();
+ return(OK);
+}
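+
+/*
+ * Usage sketch (not part of the original sources): pthread_kill() directs a
+ * signal at one particular thread rather than at the whole process.  A small
+ * example with an ordinary handler:
+ */
+#if 0
+#include <pthread.h>
+#include <signal.h>
+#include <unistd.h>
+
+static void on_usr1(int sig)
+{
+	write(1, "SIGUSR1\n", 8);	/* async-signal-safe output only */
+}
+
+static void *worker(void *arg)
+{
+	for (;;)
+		sleep(1);		/* the signal interrupts this sleep */
+	return NULL;
+}
+
+int main(void)
+{
+	pthread_t tid;
+
+	signal(SIGUSR1, on_usr1);
+	pthread_create(&tid, NULL, worker, NULL);
+	sleep(1);
+	pthread_kill(tid, SIGUSR1);	/* delivered to 'tid', not to main */
+	sleep(1);
+	return 0;
+}
+#endif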
diff --git a/mit-pthreads/pthreads/pthread_once.c b/mit-pthreads/pthreads/pthread_once.c
new file mode 100644
index 00000000000..0a3dcd23fae
--- /dev/null
+++ b/mit-pthreads/pthreads/pthread_once.c
@@ -0,0 +1,59 @@
+/* ==== pthread_once.c =======================================================
+ * Copyright (c) 1993, 1994 by Chris Provenzano, proven@mit.edu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Chris Provenzano.
+ * 4. The name of Chris Provenzano may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CHRIS PROVENZANO ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL CHRIS PROVENZANO BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Description : pthread_once function.
+ *
+ * 1.00 93/12/12 proven
+ * -Started coding this file.
+ */
+
+#ifndef lint
+static const char rcsid[] = "$Id$";
+#endif
+
+#include <pthread.h>
+
+/* ==========================================================================
+ * pthread_once()
+ */
+int pthread_once(pthread_once_t *once_control, void (*init_routine)(void))
+{
+ /* Check first for speed */
+ if (once_control->state == PTHREAD_NEEDS_INIT) {
+ pthread_mutex_lock(&(once_control->mutex));
+ if (once_control->state == PTHREAD_NEEDS_INIT) {
+ init_routine();
+ once_control->state = PTHREAD_DONE_INIT;
+ }
+ pthread_mutex_unlock(&(once_control->mutex));
+ }
+ return(OK);
+}
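+
+/*
+ * Usage sketch (not part of the original sources): the "check first for
+ * speed" pattern above means only the very first caller pays for the mutex;
+ * later callers see PTHREAD_DONE_INIT and return immediately.
+ */
+#if 0
+#include <pthread.h>
+#include <stdio.h>
+
+static pthread_once_t table_once = PTHREAD_ONCE_INIT;
+static int table_ready;
+
+static void init_table(void)
+{
+	table_ready = 1;		/* runs exactly once */
+}
+
+static void *worker(void *arg)
+{
+	pthread_once(&table_once, init_table);
+	printf("table_ready = %d\n", table_ready);
+	return NULL;
+}
+
+int main(void)
+{
+	pthread_t a, b;
+
+	pthread_create(&a, NULL, worker, NULL);
+	pthread_create(&b, NULL, worker, NULL);
+	pthread_join(a, NULL);
+	pthread_join(b, NULL);
+	return 0;
+}
+#endif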
diff --git a/mit-pthreads/pthreads/queue.c b/mit-pthreads/pthreads/queue.c
new file mode 100644
index 00000000000..c33774bf4dd
--- /dev/null
+++ b/mit-pthreads/pthreads/queue.c
@@ -0,0 +1,143 @@
+/* ==== queue.c ============================================================
+ * Copyright (c) 1993, 1994 by Chris Provenzano, proven@mit.edu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Chris Provenzano.
+ * 4. The name of Chris Provenzano may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CHRIS PROVENZANO ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL CHRIS PROVENZANO BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Description : Queue functions.
+ *
+ * 1.00 93/07/15 proven
+ * -Started coding this file.
+ */
+
+#ifndef lint
+static const char rcsid[] = "$Id$";
+#endif
+
+#include <pthread.h>
+
+/*
+ * All routines in this file assume that the queue has been appropriately
+ * locked.
+ */
+
+/* ==========================================================================
+ * pthread_queue_init()
+ */
+void pthread_queue_init(struct pthread_queue *queue)
+{
+ queue->q_next = NULL;
+ queue->q_last = NULL;
+ queue->q_data = NULL;
+}
+
+/* ==========================================================================
+ * pthread_queue_enq()
+ */
+void pthread_queue_enq(struct pthread_queue *queue, struct pthread *thread)
+{
+ if (queue->q_last) {
+ queue->q_last->next = thread;
+ } else {
+ queue->q_next = thread;
+ }
+ queue->q_last = thread;
+ thread->queue = queue;
+ thread->next = NULL;
+
+}
+
+/* ==========================================================================
+ * pthread_queue_get()
+ */
+struct pthread *pthread_queue_get(struct pthread_queue *queue)
+{
+ return(queue->q_next);
+}
+
+/* ==========================================================================
+ * pthread_queue_deq()
+ */
+struct pthread *pthread_queue_deq(struct pthread_queue *queue)
+{
+ struct pthread *thread = NULL;
+
+ if (queue->q_next) {
+ thread = queue->q_next;
+ if (!(queue->q_next = queue->q_next->next)) {
+ queue->q_last = NULL;
+ }
+ thread->queue = NULL;
+ thread->next = NULL;
+ }
+ return(thread);
+}
+
+/* ==========================================================================
+ * pthread_queue_remove()
+ */
+int pthread_queue_remove(struct pthread_queue *queue, struct pthread *thread)
+{
+ struct pthread **current = &(queue->q_next);
+ struct pthread *prev = NULL;
+ int ret = NOTOK;
+
+ while (*current) {
+ if (*current == thread) {
+ if ((*current)->next) {
+ *current = (*current)->next;
+ } else {
+ queue->q_last = prev;
+ *current = NULL;
+ }
+ thread->queue = NULL;
+ thread->next = NULL;
+ ret = OK;
+ break;
+ }
+ prev = *current;
+ current = &((*current)->next);
+ }
+ return(ret);
+}
+
+/* ==========================================================================
+ * pthread_llist_remove()
+ */
+int pthread_llist_remove(struct pthread **llist, struct pthread *thread)
+{
+ while (*llist) {
+ if (*llist == thread) {
+ *llist = thread->next;
+ return(OK);
+ }
+ llist = &(*llist)->next;
+ }
+ return(NOTOK);
+}
+
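+/*
+ * Illustrative sketch only (not part of the original sources, and it assumes
+ * the library-internal headers that define struct pthread): the queue is a
+ * plain FIFO, so pthread_queue_deq() returns threads in the order
+ * pthread_queue_enq() inserted them.
+ */
+#if 0
+#include <pthread.h>
+
+static void fifo_order_sketch(struct pthread *a, struct pthread *b)
+{
+	struct pthread_queue q;
+
+	pthread_queue_init(&q);
+	pthread_queue_enq(&q, a);
+	pthread_queue_enq(&q, b);
+
+	/* Dequeues in insertion order: a, then b, then NULL when empty. */
+	pthread_queue_deq(&q);		/* == a */
+	pthread_queue_deq(&q);		/* == b */
+	pthread_queue_deq(&q);		/* == NULL */
+}
+#endif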
diff --git a/mit-pthreads/pthreads/readv.c b/mit-pthreads/pthreads/readv.c
new file mode 100644
index 00000000000..fd63d31cf94
--- /dev/null
+++ b/mit-pthreads/pthreads/readv.c
@@ -0,0 +1,85 @@
+/* ==== readv.c ============================================================
+ * Copyright (c) 1995 by Chris Provenzano, proven@mit.edu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Chris Provenzano.
+ * 4. The name of Chris Provenzano may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CHRIS PROVENZANO ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL CHRIS PROVENZANO BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Description : Implementation of readv().
+ *
+ * 1.00 95/06/19 proven
+ * -Started coding this file.
+ */
+
+#ifndef lint
+static const char rcsid[] = "$Id$";
+#endif
+
+#include "config.h"
+
+#ifndef HAVE_SYSCALL_READV
+
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/uio.h>
+#include <sys/types.h>
+
+/* ==========================================================================
+ * machdep_sys_readv()
+ */
+int machdep_sys_readv(int fd, struct iovec * vector, int count)
+{
+ size_t bytes, i;
+ char *buffer;
+ int ret = 0;
+
+ /* Find the total number of bytes to be read. */
+ for (bytes = 0, i = 0; i < count; ++i)
+ bytes += vector[i].iov_len;
+
+ if (bytes) {
+ /*
+ * Allocate a temporary buffer to hold the data.
+ * Don't use alloca because threads tend to have smaller stacks.
+ */
+ if ((buffer = (char *)malloc(bytes)) == NULL) {
+ return(-ENOMEM);
+ }
+ ret = (int)machdep_sys_read(fd, buffer, bytes);
+
+	/* Scatter the data read into BUFFER out to the memory given by VECTOR */
+	if (ret > 0) {
+	    size_t left = (size_t)ret, copy;
+
+	    for (i = 0, bytes = 0; left; i++, bytes += copy, left -= copy) {
+		copy = left > vector[i].iov_len ? vector[i].iov_len : left;
+		memcpy(vector[i].iov_base, buffer + bytes, copy);
+	    }
+	}
+ free(buffer);
+ }
+ return(ret);
+}
+
+#endif
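+
+/*
+ * Usage sketch (not part of the original sources): readv() scatters one read
+ * across several buffers, which is what the fallback above emulates with a
+ * single temporary buffer.
+ */
+#if 0
+#include <sys/types.h>
+#include <sys/uio.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <unistd.h>
+
+int main(void)
+{
+	char header[16], body[64];
+	struct iovec iov[2];
+	ssize_t n;
+	int fd = open("/etc/hosts", O_RDONLY);
+
+	if (fd < 0)
+		return 1;
+	iov[0].iov_base = header;
+	iov[0].iov_len  = sizeof(header);
+	iov[1].iov_base = body;
+	iov[1].iov_len  = sizeof(body);
+
+	/* One call fills 'header' first, then spills into 'body'. */
+	n = readv(fd, iov, 2);
+	printf("read %ld bytes into two buffers\n", (long)n);
+	close(fd);
+	return 0;
+}
+#endif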
diff --git a/mit-pthreads/pthreads/schedparam.c b/mit-pthreads/pthreads/schedparam.c
new file mode 100644
index 00000000000..b4b28577022
--- /dev/null
+++ b/mit-pthreads/pthreads/schedparam.c
@@ -0,0 +1,170 @@
+/* ==== schedparam.c =======================================================
+ * Copyright (c) 1994 by Chris Provenzano, proven@mit.edu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Chris Provenzano.
+ * 4. The name of Chris Provenzano may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CHRIS PROVENZANO ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL CHRIS PROVENZANO BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Description : Pthread schedparam functions.
+ *
+ * 1.38 94/06/15 proven
+ * -Started coding this file.
+ */
+
+#ifndef lint
+static const char rcsid[] = "$Id$";
+#endif
+
+#include <pthread.h>
+#include <sched.h>
+#include <errno.h>
+
+/* ==========================================================================
+ * sched_get_priority_max
+ */
+int sched_get_priority_max(int policy)
+{
+ return PTHREAD_MAX_PRIORITY;
+}
+
+/* ==========================================================================
+ * sched_get_priority_min
+ */
+int sched_get_priority_min(int policy)
+{
+ return PTHREAD_MIN_PRIORITY;
+}
+
+/* Currently only policy is supported */
+/* ==========================================================================
+ * pthread_setschedparam()
+ */
+int pthread_setschedparam(pthread_t pthread, int policy,
+ struct sched_param * param)
+{
+ enum schedparam_policy new_policy, old_policy;
+ int ret = OK;
+ int prio;
+
+ new_policy = policy;
+ pthread_sched_prevent();
+ old_policy = pthread->attr.schedparam_policy;
+
+ if (param) {
+ if ((param->sched_priority < PTHREAD_MIN_PRIORITY) ||
+ (param->sched_priority > PTHREAD_MAX_PRIORITY)) {
+ pthread_sched_resume();
+ return(EINVAL);
+ }
+ prio = param->sched_priority;
+ } else {
+ prio = pthread->pthread_priority;
+ }
+
+ if (pthread == pthread_run) {
+ switch(new_policy) {
+ case SCHED_RR:
+ pthread->attr.schedparam_policy = new_policy;
+ switch (old_policy) {
+ case SCHED_FIFO:
+ machdep_unset_thread_timer(NULL);
+ default:
+ pthread->pthread_priority = prio;
+ break;
+ }
+ break;
+ case SCHED_FIFO:
+ pthread->attr.schedparam_policy = new_policy;
+ switch (old_policy) {
+ case SCHED_IO:
+ case SCHED_RR:
+ if (pthread->pthread_priority < prio) {
+ pthread->pthread_priority = prio;
+ pthread_sched_resume();
+ pthread_yield();
+ return(OK);
+ }
+ default:
+ pthread->pthread_priority = prio;
+ break;
+ }
+ break;
+ case SCHED_IO:
+ pthread->attr.schedparam_policy = new_policy;
+ switch (old_policy) {
+ case SCHED_FIFO:
+ machdep_unset_thread_timer(NULL);
+ default:
+ pthread->pthread_priority = prio;
+ break;
+ }
+ break;
+ default:
+ SET_ERRNO(EINVAL);
+ ret = EINVAL;
+ break;
+ }
+ } else {
+ switch(new_policy) {
+ case SCHED_FIFO:
+ case SCHED_IO:
+ case SCHED_RR:
+ if(pthread_prio_queue_remove(pthread_current_prio_queue,pthread) == OK) {
+ pthread->attr.schedparam_policy = new_policy;
+ pthread->pthread_priority = prio;
+ pthread_sched_other_resume(pthread);
+ } else {
+ pthread->attr.schedparam_policy = new_policy;
+ pthread->pthread_priority = prio;
+ pthread_sched_resume();
+ }
+ return(OK);
+ break;
+ default:
+ SET_ERRNO(EINVAL);
+ ret = EINVAL;
+ break;
+ }
+ }
+
+ pthread_sched_resume();
+ return(ret);
+}
+
+/* ==========================================================================
+ * pthread_getschedparam()
+ */
+int pthread_getschedparam(pthread_t pthread, int * policy,
+ struct sched_param * param)
+{
+ *policy = pthread->attr.schedparam_policy;
+ if (param) {
+ param->sched_priority = pthread->pthread_priority;
+ }
+ return(OK);
+}
+
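+/*
+ * Usage sketch (not part of the original sources): querying a thread's
+ * scheduling settings and raising its priority to the top of the SCHED_RR
+ * range reported by sched_get_priority_max() above.
+ */
+#if 0
+#include <pthread.h>
+#include <sched.h>
+#include <stdio.h>
+
+int main(void)
+{
+	struct sched_param param;
+	int policy;
+
+	pthread_getschedparam(pthread_self(), &policy, &param);
+	printf("policy %d, priority %d\n", policy, param.sched_priority);
+
+	param.sched_priority = sched_get_priority_max(SCHED_RR);
+	pthread_setschedparam(pthread_self(), SCHED_RR, &param);
+	return 0;
+}
+#endif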
diff --git a/mit-pthreads/pthreads/select.c b/mit-pthreads/pthreads/select.c
new file mode 100644
index 00000000000..eaafce31f19
--- /dev/null
+++ b/mit-pthreads/pthreads/select.c
@@ -0,0 +1,255 @@
+/* ==== select.c ============================================================
+ * Copyright (c) 1994 by Chris Provenzano, proven@mit.edu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Chris Provenzano.
+ * 4. The name of Chris Provenzano may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CHRIS PROVENZANO ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL CHRIS PROVENZANO BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * This code is based on code contributed by
+ * Peter Hofmann <peterh@prz.tu-berlin.d400.de>
+ *
+ * Description : Select.
+ *
+ * 1.23 94/04/26 proven
+ * -Started coding this file.
+ */
+
+#ifndef lint
+static const char rcsid[] = "$Id$";
+#endif
+
+#include <string.h>
+#include <errno.h>
+#include <pthread.h>
+#include <sys/types.h>
+#include <sys/time.h>
+
+extern struct pthread_queue fd_wait_select;
+static struct timeval zero_timeout = { 0, 0 }; /* Moved by monty */
+
+/* ==========================================================================
+ * select()
+ */
+int select(int numfds, fd_set *readfds, fd_set *writefds,
+ fd_set *exceptfds, struct timeval *timeout)
+{
+ fd_set real_exceptfds, real_readfds, real_writefds; /* mapped fd_sets */
+ fd_set * real_readfds_p, * real_writefds_p, * real_exceptfds_p;
+ fd_set read_locks, write_locks, rdwr_locks;
+ struct timespec timeout_time, current_time;
+ int i, j, ret = 0, got_all_locks = 1;
+ struct pthread_select_data data;
+
+ if (numfds > dtablesize) {
+ numfds = dtablesize;
+ }
+
+ data.nfds = 0;
+ FD_ZERO(&data.readfds);
+ FD_ZERO(&data.writefds);
+ FD_ZERO(&data.exceptfds);
+
+ /* Do this first */
+ if (timeout) {
+ machdep_gettimeofday(&current_time);
+ timeout_time.tv_sec = current_time.tv_sec + timeout->tv_sec;
+	if ((timeout_time.tv_nsec = current_time.tv_nsec +
+	     (timeout->tv_usec * 1000)) >= 1000000000) {
+ timeout_time.tv_nsec -= 1000000000;
+ timeout_time.tv_sec++;
+ }
+ }
+
+ FD_ZERO(&read_locks);
+ FD_ZERO(&write_locks);
+ FD_ZERO(&rdwr_locks);
+ FD_ZERO(&real_readfds);
+ FD_ZERO(&real_writefds);
+ FD_ZERO(&real_exceptfds);
+
+ /* lock readfds */
+ if (readfds || writefds || exceptfds) {
+ for (i = 0; i < numfds; i++) {
+ if ((readfds && (FD_ISSET(i, readfds))) ||
+ (exceptfds && FD_ISSET(i, exceptfds))) {
+				if (writefds && FD_ISSET(i, writefds)) {
+ if ((ret = fd_lock(i, FD_RDWR, NULL)) != OK) {
+ got_all_locks = 0;
+ break;
+ }
+ FD_SET(i, &rdwr_locks);
+ FD_SET(fd_table[i]->fd.i,&real_writefds);
+ } else {
+ if ((ret = fd_lock(i, FD_READ, NULL)) != OK) {
+ got_all_locks = 0;
+ break;
+ }
+ FD_SET(i, &read_locks);
+ }
+ if (readfds && FD_ISSET(i,readfds)) {
+ FD_SET(fd_table[i]->fd.i, &real_readfds);
+ }
+ if (exceptfds && FD_ISSET(i,exceptfds)) {
+ FD_SET(fd_table[i]->fd.i, &real_exceptfds);
+ }
+ if (fd_table[i]->fd.i >= data.nfds) {
+ data.nfds = fd_table[i]->fd.i + 1;
+ }
+ } else {
+ if (writefds && FD_ISSET(i, writefds)) {
+ if ((ret = fd_lock(i, FD_WRITE, NULL)) != OK) {
+ got_all_locks = 0;
+ break;
+ }
+ FD_SET(i, &write_locks);
+ FD_SET(fd_table[i]->fd.i,&real_writefds);
+ if (fd_table[i]->fd.i >= data.nfds) {
+ data.nfds = fd_table[i]->fd.i + 1;
+ }
+ }
+ }
+ }
+ }
+
+ if (got_all_locks)
+ {
+ memcpy(&data.readfds,&real_readfds,sizeof(fd_set));
+ memcpy(&data.writefds,&real_writefds,sizeof(fd_set));
+ memcpy(&data.exceptfds,&real_exceptfds,sizeof(fd_set));
+
+ real_readfds_p = (readfds == NULL) ? NULL : &real_readfds;
+ real_writefds_p = (writefds == NULL) ? NULL : &real_writefds;
+ real_exceptfds_p = (exceptfds == NULL) ? NULL : &real_exceptfds;
+
+ pthread_run->sighandled=0;
+ if ((ret = machdep_sys_select(data.nfds, real_readfds_p,
+ real_writefds_p, real_exceptfds_p,
+ &zero_timeout)) == OK) {
+ pthread_sched_prevent();
+
+ real_exceptfds_p = (exceptfds == NULL) ? NULL : &data.exceptfds;
+ real_writefds_p = (writefds == NULL) ? NULL : &data.writefds;
+ real_readfds_p = (readfds == NULL) ? NULL : &data.readfds;
+
+ pthread_queue_enq(&fd_wait_select, pthread_run);
+ pthread_run->data.select_data = &data;
+ SET_PF_WAIT_EVENT(pthread_run);
+
+ if (timeout) {
+ machdep_gettimeofday(&current_time);
+ sleep_schedule(&current_time, &timeout_time);
+
+ SET_PF_AT_CANCEL_POINT(pthread_run);
+ pthread_resched_resume(PS_SELECT_WAIT);
+ CLEAR_PF_AT_CANCEL_POINT(pthread_run);
+
+ /* We're awake */
+ if (sleep_cancel(pthread_run) == NOTOK) {
+ ret = OK;
+ }
+ else
+ {
+ int count = 0;
+ for (i = 0; i < numfds; i++)
+ {
+ if (real_readfds_p && (FD_ISSET(i, real_readfds_p)))
+ count++;
+ if (real_writefds_p && (FD_ISSET(i, real_writefds_p)))
+ count++;
+ if (real_exceptfds_p && (FD_ISSET(i, real_exceptfds_p)))
+ count++;
+ }
+ ret = count;
+ }
+ /* Moving this after the sleep_cancel() seemed
+ * to fix intermittent crashes during heavy
+ * socket use. (mevans)
+ */
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ } else {
+ int count = 0;
+ SET_PF_AT_CANCEL_POINT(pthread_run);
+ pthread_resched_resume(PS_SELECT_WAIT);
+ CLEAR_PF_AT_CANCEL_POINT(pthread_run);
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ for (i = 0; i < numfds; i++)
+ {
+ if (real_readfds_p && (FD_ISSET(i, real_readfds_p)))
+ count++;
+ if (real_writefds_p && (FD_ISSET(i, real_writefds_p)))
+ count++;
+ if (real_exceptfds_p && (FD_ISSET(i, real_exceptfds_p)))
+ count++;
+ }
+ ret = count;
+ }
+ if (pthread_run->sighandled) /* Added by monty */
+	    { /* We were aborted */
+ ret= NOTOK;
+ SET_ERRNO(EINTR);
+ }
+ } else if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = NOTOK;
+ }
+ }
+
+ /* clean up the locks */
+ for (i = 0; i < numfds; i++)
+ { /* Changed by monty */
+ if (FD_ISSET(i,&read_locks)) fd_unlock(i,FD_READ);
+ if (FD_ISSET(i,&rdwr_locks)) fd_unlock(i,FD_RDWR);
+ if (FD_ISSET(i,&write_locks)) fd_unlock(i,FD_WRITE);
+ }
+ if (ret > 0) {
+ if (readfds != NULL) {
+ for (i = 0; i < numfds; i++) {
+ if (! (FD_ISSET(i,readfds) &&
+ FD_ISSET(fd_table[i]->fd.i,real_readfds_p)))
+ FD_CLR(i,readfds);
+ }
+ }
+ if (writefds != NULL) {
+ for (i = 0; i < numfds; i++)
+ if (! (FD_ISSET(i,writefds) &&
+ FD_ISSET(fd_table[i]->fd.i,real_writefds_p)))
+ FD_CLR(i,writefds);
+ }
+ if (exceptfds != NULL) {
+ for (i = 0; i < numfds; i++)
+ if (! (FD_ISSET(i,exceptfds) &&
+ FD_ISSET(fd_table[i]->fd.i,real_exceptfds_p)))
+ FD_CLR(i,exceptfds);
+ }
+ } else {
+ if (exceptfds != NULL) FD_ZERO(exceptfds);
+ if (writefds != NULL) FD_ZERO(writefds);
+ if (readfds != NULL) FD_ZERO(readfds);
+ }
+
+ return(ret);
+}
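+
+/*
+ * Usage sketch (not part of the original sources): with the wrapper above, a
+ * select() that blocks parks only the calling thread; the other threads in
+ * the process keep running.  Ordinary select() usage with a one second
+ * timeout:
+ */
+#if 0
+#include <sys/types.h>
+#include <sys/time.h>
+#include <stdio.h>
+#include <unistd.h>
+
+int main(void)
+{
+	fd_set rfds;
+	struct timeval tv;
+	int ret;
+
+	FD_ZERO(&rfds);
+	FD_SET(0, &rfds);		/* watch stdin */
+	tv.tv_sec = 1;
+	tv.tv_usec = 0;
+
+	ret = select(1, &rfds, NULL, NULL, &tv);
+	if (ret > 0 && FD_ISSET(0, &rfds))
+		printf("stdin is readable\n");
+	else if (ret == 0)
+		printf("timed out\n");
+	return 0;
+}
+#endif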
diff --git a/mit-pthreads/pthreads/sig.c b/mit-pthreads/pthreads/sig.c
new file mode 100644
index 00000000000..85d4465bf1c
--- /dev/null
+++ b/mit-pthreads/pthreads/sig.c
@@ -0,0 +1,452 @@
+/* ==== sig.c =======================================================
+ * Copyright (c) 1994 by Chris Provenzano, proven@mit.edu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Chris Provenzano.
+ * 4. The name of Chris Provenzano may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CHRIS PROVENZANO ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL CHRIS PROVENZANO BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Description : All the thread signal functions.
+ *
+ * 1.32 94/06/12 proven
+ * -Started coding this file.
+ */
+
+#ifndef lint
+static const char rcsid[] = "$Id$";
+#endif
+
+#include <errno.h>
+#include <pthread.h>
+#include <signal.h>
+#include <string.h>
+
+#if defined(M_UNIX)
+#define signal(A,B) machdep_sys_signal((A),(B))
+#endif
+
+extern void sig_handler_real();
+
+struct pthread * pthread_sigwait;
+static sigset_t pending_signals;
+
+struct pthread_sigvec {
+ void (*vector)();
+ sigset_t mask;
+ int flags;
+} pthread_sigvec[SIGMAX];
+
+/* ==========================================================================
+ * pthread_sig_register()
+ *
+ * Assumes the kernel is locked.
+ */
+int pthread_sig_register(int sig)
+{
+ struct pthread ** pthread_ptr, * pthread;
+ int ret;
+
+ /*
+ * If we have a siginfo structure and the signal is synchronous then
+ * only deliver the signal to the current thread.
+ */
+
+ /* Check waiting threads for delivery */
+ for (pthread_ptr = &pthread_sigwait; (*pthread_ptr);
+ pthread_ptr = &((*pthread_ptr)->next)) {
+ if (sigismember((*pthread_ptr)->data.sigwait, sig)) {
+ pthread=*pthread_ptr;
+ *pthread_ptr=(*pthread_ptr)->next;
+
+ pthread_prio_queue_enq(pthread_current_prio_queue, pthread);
+ ret = pthread->pthread_priority;
+ *(int *)(pthread->ret) = sig;
+ pthread->state = PS_RUNNING;
+
+ return(ret);
+ }
+ }
+
+ /* Check current running thread */
+ if (pthread_run) {
+ if (!sigismember(&pthread_run->sigmask, sig)) {
+ sigaddset(&pthread_run->sigpending, sig);
+ pthread_run->sigcount++;
+ return(0);
+ }
+ }
+
+ /* Check any running thread */
+ for (pthread = pthread_current_prio_queue->next;
+ pthread; pthread = pthread->next) {
+ if (!sigismember(&pthread->sigmask, sig)) {
+ sigaddset(&pthread->sigpending, sig);
+ pthread->sigcount++;
+ return(0);
+ }
+ }
+
+ /* Check any thread */
+ for (pthread = pthread_link_list; pthread; pthread = pthread->pll) {
+ if (!sigismember(&pthread->sigmask, sig)) {
+ sigaddset(&pthread->sigpending, sig);
+ pthread->sigcount++;
+ return(0);
+ }
+ }
+
+ sigaddset(&pending_signals, sig);
+ return(0);
+}
+
+/* ==========================================================================
+ * pthread_sig_default()
+ */
+void pthread_sig_default(int sig)
+{
+ sigset_t mask, omask;
+
+ if (pthread_sigvec[sig].vector == SIG_DFL) {
+	/* Set the signal handler to default before issuing the kill */
+ signal(sig, SIG_DFL);
+ kill(getpid(), sig);
+ sigemptyset(&mask);
+ sigaddset(&mask, sig);
+ machdep_sys_sigprocmask(SIG_UNBLOCK, &mask, &omask);
+ signal(sig, sig_handler_real);
+ }
+}
+
+/* ==========================================================================
+ * pthread_sig_process()
+ *
+ * Assumes the kernel is locked.
+ */
+void pthread_sig_process()
+{
+ void (*vector)();
+ int i, j;
+
+ for (i = 1; i < SIGMAX; i++) {
+ if (sigismember(&(pthread_run->sigpending), i)) {
+ if (! sigismember(&(pthread_run->sigmask), i)) {
+ sigdelset(&(pthread_run->sigpending), i);
+ pthread_run->sigcount--;
+
+ if (pthread_sigvec[i].vector == SIG_IGN) {
+ continue;
+ }
+ if (pthread_sigvec[i].vector == SIG_DFL) {
+ pthread_sig_default(i);
+ continue;
+ }
+
+ {
+ sigset_t omask;
+
+ sigemptyset(&omask);
+ /* Save old mask */
+ for (j = 1; j < SIGMAX; j++) {
+ if (sigismember(&(pthread_run->sigmask), j)) {
+ if (sigismember(&(pthread_sigvec[i].mask), j))
+ sigaddset(&(pthread_run->sigmask), j);
+ sigaddset(&omask, j);
+ }
+ }
+ /* The signal is masked while handling the signal */
+ sigaddset(&(pthread_run->sigmask), i);
+
+ /*
+ * Allow interrupts during a signal,
+ * but not a change in the vector
+ */
+ vector = pthread_sigvec[i].vector;
+ if (--pthread_kernel_lock) {
+ PANIC();
+ }
+ vector(i);
+ pthread_run->sighandled=1; /* Mark for select; Monty */
+ pthread_kernel_lock++;
+
+ memcpy(&(pthread_run->sigmask), &omask, sizeof(omask));
+ }
+ }
+ }
+ }
+}
+
+/* ==========================================================================
+ * pthread_sigmask()
+ *
+ * It is unclear whether this call should be implemented as an atomic
+ * operation. The resulting mask could be wrong if, in a signal handler,
+ * the thread calls sigprocmask for any signal other than the one the
+ * handler is dealing with.
+ */
+int pthread_sigmask(int how, const sigset_t *set, sigset_t * oset)
+{
+ int i;
+
+ if (oset) {
+ sigemptyset(oset);
+ for (i = 1; i < SIGMAX; i++) {
+ if (sigismember(&(pthread_run->sigmask), i)) {
+ sigaddset(oset, i);
+ }
+ }
+ }
+
+ if (set) {
+ switch(how) {
+ case SIG_BLOCK:
+ for (i = 1; i < SIGMAX; i++) {
+ if (sigismember(set, i)) {
+ sigaddset(&(pthread_run->sigmask), i);
+ }
+ }
+ break;
+ case SIG_UNBLOCK:
+ pthread_sched_prevent();
+ for (i = 1; i < SIGMAX; i++) {
+ if (sigismember(set, i)) {
+ sigdelset(&(pthread_run->sigmask), i);
+ if (sigismember(&pending_signals, i)) {
+ sigaddset(&(pthread_run->sigpending), i);
+ sigdelset(&pending_signals, i);
+ pthread_run->sigcount++;
+ }
+ }
+ }
+ pthread_sched_resume();
+ break;
+ case SIG_SETMASK:
+ sigfillset(&(pthread_run->sigmask));
+ pthread_sched_prevent();
+ for (i = 1; i < SIGMAX; i++) {
+ if (! sigismember(set, i)) {
+ sigdelset(&(pthread_run->sigmask), i);
+ if (sigismember(&pending_signals, i)) {
+ sigaddset(&(pthread_run->sigpending), i);
+ sigdelset(&pending_signals, i);
+ pthread_run->sigcount++;
+ }
+ }
+ }
+ pthread_sched_resume();
+ break;
+ default:
+ SET_ERRNO(EINVAL);
+ return(NOTOK);
+ }
+ }
+ return(OK);
+}
+
+int sigprocmask(int how, const sigset_t *set, sigset_t * oset)
+{
+ return(pthread_sigmask(how, set, oset));
+}
+
+/* ==========================================================================
+ * sigwait()
+ */
+int sigwait(const sigset_t * set, int * sig)
+{
+ int i;
+
+ /* Check that sig is valid */
+ *sig = 0;
+
+ pthread_sched_prevent();
+ for (i = 1; i < SIGMAX; i++) {
+ if (sigismember(set, i)) {
+ /* Check personal signals */
+ if (sigismember(&(pthread_run->sigpending), i)) {
+ sigdelset(&(pthread_run->sigpending), i);
+ pthread_sched_resume();
+ *sig = i;
+ return(OK);
+ }
+ /* Check kernel signals */
+ if (sigismember(&pending_signals, i)) {
+ sigdelset(&pending_signals, i);
+ pthread_sched_resume();
+ *sig = i;
+ return(OK);
+ }
+ }
+ }
+
+ /* No pending signals, wait for one */
+ pthread_run->next = pthread_sigwait;
+ pthread_sigwait = pthread_run;
+ pthread_run->data.sigwait = set;
+ pthread_run->ret = sig;
+
+ SET_PF_AT_CANCEL_POINT(pthread_run); /* This is a cancel point */
+ pthread_resched_resume(PS_SIGWAIT);
+ CLEAR_PF_AT_CANCEL_POINT(pthread_run); /* No longer at cancel point */
+
+ return(OK);
+}
+
+/* ==========================================================================
+ * raise()
+ */
+int raise(int sig)
+{
+ return(pthread_kill(pthread_self(), sig));
+}
+
+/* ==========================================================================
+ * sigsuspend()
+ */
+int sigsuspend(const sigset_t * mask)
+{
+ int ret_sig, ret;
+ sigset_t nm, om;
+
+ sigfillset(&nm);
+ for(ret_sig = 1; ret_sig < SIGMAX; ret_sig++) {
+ if (sigismember(mask, ret_sig)) {
+ sigdelset(&nm, ret_sig);
+ }
+ }
+ pthread_sigmask(SIG_BLOCK, &nm, &om);
+ if ((ret = sigwait(&nm, &ret_sig)) == OK) {
+ sigemptyset(&nm);
+ sigaddset(&nm, ret_sig);
+ pthread_kill(pthread_self(), ret_sig);
+ pthread_sigmask(SIG_UNBLOCK, &nm, NULL);
+		/* There is a race condition here; it's not worth worrying about */
+ pthread_sigmask(SIG_BLOCK, &nm, NULL);
+ SET_ERRNO(EINTR);
+ ret = NOTOK;
+ }
+ pthread_sigmask(SIG_SETMASK, &om, NULL);
+ return(ret);
+}
+
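+/*
+ * Usage sketch (not part of the original sources): the classic sigsuspend()
+ * pattern that the emulation above supports; block the signal, test the
+ * flag, then atomically unblock and wait so the wakeup cannot slip in
+ * between the test and the sleep.
+ */
+#if 0
+#include <signal.h>
+#include <stdio.h>
+
+static volatile sig_atomic_t got_usr1;
+
+static void on_usr1(int sig) { got_usr1 = 1; }
+
+int main(void)
+{
+	sigset_t block, wait_mask;
+
+	signal(SIGUSR1, on_usr1);
+	sigemptyset(&block);
+	sigaddset(&block, SIGUSR1);
+	sigprocmask(SIG_BLOCK, &block, &wait_mask);
+	sigdelset(&wait_mask, SIGUSR1);	/* unblocked only while suspended */
+
+	raise(SIGUSR1);			/* stays pending: SIGUSR1 is blocked */
+
+	while (!got_usr1)
+		sigsuspend(&wait_mask);	/* atomically unblock and wait */
+
+	printf("woken by SIGUSR1\n");
+	return 0;
+}
+#endif
+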
+/* ==========================================================================
+ * pthread_signal()
+ */
+void (*pthread_signal(int sig, void (*dispatch)(int)))()
+{
+	void (*odispatch)(int) = SIG_ERR;
+
+	if ((sig > 0) && (sig < SIGMAX)) {
+		/* Only touch the vector table after validating sig */
+		odispatch = pthread_sigvec[sig].vector;
+		pthread_sigvec[sig].vector = dispatch;
+		sigemptyset(&(pthread_sigvec[sig].mask));
+		pthread_sigvec[sig].flags = 0;
+	}
+ return(odispatch);
+}
+
+/* ==========================================================================
+ * pthread_sigaction()
+ */
+int pthread_sigaction(int sig, const struct sigaction * act,
+ struct sigaction * oact)
+{
+ if ((sig > 0) && (sig < SIGMAX)) {
+ if (oact) {
+ memcpy(&(oact->sa_mask), &(pthread_sigvec[sig].mask),
+ sizeof(sigset_t));
+ oact->sa_handler = pthread_sigvec[sig].vector;
+ oact->sa_flags = pthread_sigvec[sig].flags;
+ }
+ if (act) {
+ memcpy(&(pthread_sigvec[sig].mask), &(act->sa_mask),
+ sizeof(sigset_t));
+ pthread_sigvec[sig].vector = act->sa_handler;
+ pthread_sigvec[sig].flags = act->sa_flags;
+ }
+ return(OK);
+ }
+ SET_ERRNO(EINVAL);
+ return(NOTOK);
+}
+
+/*
+ * The following are stolen from BSD because I get multiply defined
+ * symbols between sig.o and posix_sig.o in Sun's libc.a under SunOS 4.1.3.
+ * The problem is that sigprocmask() is defined in posix_sig.o, in the same
+ * module that a lot of other sigset-primitives are defined, and we have
+ * our definition of sigprocmask() here, but use those other primitives.
+ */
+
+#undef sigemptyset
+#undef sigfillset
+#undef sigaddset
+#undef sigdelset
+#undef sigismember
+
+static const sigset_t __sigemptyset = __SIGEMPTYSET;
+int sigemptyset(sigset_t *set)
+{
+ *set = __sigemptyset;
+ return (0);
+}
+
+static const sigset_t __sigfillset = __SIGFILLSET;
+int sigfillset(sigset_t * set)
+{
+ *set = __sigfillset;
+ return (0);
+}
+
+#define _MAXIMUM_SIG NSIG
+
+int sigaddset(sigset_t *set, int signo)
+{
+ if (signo <= 0 || signo >= _MAXIMUM_SIG) {
+ errno = EINVAL;
+ return -1;
+ }
+ __SIGADDSET(set, signo);
+ return (0);
+}
+
+int sigdelset(sigset_t *set, int signo)
+{
+ if (signo <= 0 || signo >= _MAXIMUM_SIG) {
+ errno = EINVAL;
+ return -1;
+ }
+ __SIGDELSET(set, signo);
+ return (0);
+}
+
+int sigismember(const sigset_t *set, int signo)
+{
+ if (signo <= 0 || signo >= _MAXIMUM_SIG) {
+ errno = EINVAL;
+ return -1;
+ }
+ return(__SIGISMEMBER(set, signo));
+}
+
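+/*
+ * Usage sketch (not part of the original sources): the common pattern the
+ * sigwait()/pthread_sigmask() pair above supports is a dedicated
+ * signal-handling thread; every thread blocks the signal and one thread
+ * waits for it synchronously.
+ */
+#if 0
+#include <pthread.h>
+#include <signal.h>
+#include <stdio.h>
+
+static void *sig_thread(void *arg)
+{
+	sigset_t *set = (sigset_t *)arg;
+	int sig;
+
+	sigwait(set, &sig);		/* sleeps until SIGUSR1 arrives */
+	printf("got signal %d\n", sig);
+	return NULL;
+}
+
+int main(void)
+{
+	sigset_t set;
+	pthread_t tid;
+
+	sigemptyset(&set);
+	sigaddset(&set, SIGUSR1);
+	pthread_sigmask(SIG_BLOCK, &set, NULL);	/* inherited by new threads */
+	pthread_create(&tid, NULL, sig_thread, &set);
+
+	pthread_kill(tid, SIGUSR1);	/* wakes the waiting thread */
+	pthread_join(tid, NULL);
+	return 0;
+}
+#endif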
diff --git a/mit-pthreads/pthreads/signal.c b/mit-pthreads/pthreads/signal.c
new file mode 100644
index 00000000000..7da4183c1cb
--- /dev/null
+++ b/mit-pthreads/pthreads/signal.c
@@ -0,0 +1,653 @@
+/* ==== signal.c ============================================================
+ * Copyright (c) 1993, 1994 by Chris Provenzano, proven@mit.edu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Chris Provenzano.
+ * 4. The name of Chris Provenzano may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CHRIS PROVENZANO ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL CHRIS PROVENZANO BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Description : Signal handling and scheduler (context switch) functions.
+ *
+ * 1.00 93/07/21 proven
+ * -Started coding this file.
+ */
+
+#ifndef lint
+static const char rcsid[] = "$Id$";
+#endif
+
+#include <config.h>
+#include <pthread.h>
+#include <signal.h>
+
+/* This will force init.o to get dragged in; if you've got support for
+ C++ initialization, that'll cause pthread_init to be called at
+ program startup automatically, so the application won't need to
+ call it explicitly. */
+
+extern char __pthread_init_hack;
+char *__pthread_init_hack_2 = &__pthread_init_hack;
+
+/*
+ * Time which select in fd_kern_wait() will sleep.
+ * If there are no threads to run we sleep for an hour or until
+ * we get an interrupt or an fd thats awakens. To make sure we
+ * don't miss an interrupt this variable gets reset too zero in
+ * sig_handler_real().
+ */
+struct timeval __fd_kern_wait_timeout = { 0, 0 };
+
+/*
+ * Global for user-kernel lock, and blocked signals
+ */
+
+static sig_atomic_t signum_to_process[SIGMAX + 1] = { 0, };
+volatile sig_atomic_t sig_to_process = 0;
+
+/* static volatile sigset_t sig_to_process; */
+static volatile int sig_count = 0;
+
+static void sig_handler(int signal);
+static void set_thread_timer();
+static void __cleanup_after_resume( void );
+void sig_prevent(void);
+void sig_resume(void);
+
+/* ==========================================================================
+ * context_switch()
+ *
+ * This routine saves the current state of the running thread, gets
+ * the next thread to run, and restores its state. To allow different
+ * processors to work with this routine, machdep_restore_state() may
+ * either return directly or return from machdep_save_state() with a
+ * value other than 0; the latter is for implementations which use
+ * setjmp/longjmp.
+ */
+static void context_switch()
+{
+ struct pthread **current, *next, *last, **dead;
+
+ if (pthread_run->state == PS_RUNNING) {
+ /* Put current thread back on the queue */
+ pthread_prio_queue_enq(pthread_current_prio_queue, pthread_run);
+ }
+
+ /* save floating point registers if necessary */
+ if (!(pthread_run->attr.flags & PTHREAD_NOFLOAT)) {
+ machdep_save_float_state(pthread_run);
+ }
+ /* save state of current thread */
+ if (machdep_save_state()) {
+ return;
+ }
+
+ last = pthread_run;
+
+ /* Poll all fds */
+ fd_kern_poll();
+
+context_switch_reschedule:;
+ /* Are there any threads to run */
+ if (pthread_run = pthread_prio_queue_deq(pthread_current_prio_queue)) {
+ /* restore floating point registers if necessary */
+ if (!(pthread_run->attr.flags & PTHREAD_NOFLOAT)) {
+ machdep_restore_float_state();
+ }
+ uthread_sigmask = &(pthread_run->sigmask);
+ /* restore state of new current thread */
+ machdep_restore_state();
+ return;
+ }
+
+ /* Are there any threads at all */
+ for (next = pthread_link_list; next; next = next->pll) {
+ if ((next->state != PS_UNALLOCED) && (next->state != PS_DEAD)) {
+ sigset_t sig_to_block, oset;
+
+ sigfillset(&sig_to_block);
+
+ /*
+ * Check sig_to_process before calling fd_kern_wait, to handle
+ * things like zero timeouts to select() which would register
+ * a signal with the sig_handler_fake() call.
+ *
+ * This case should ignore SIGVTALRM
+ */
+ machdep_sys_sigprocmask(SIG_BLOCK, &sig_to_block, &oset);
+ signum_to_process[SIGVTALRM] = 0;
+ if (sig_to_process) {
+ /* Process interrupts */
+ /*
+ * XXX pthread_run should not be set!
+ * Places where it dumps core should be fixed to
+			 * check for the existence of pthread_run --proven
+ */
+ sig_handler(0);
+ } else {
+ machdep_sys_sigprocmask(SIG_UNBLOCK, &sig_to_block, &oset);
+ /*
+			 * Do a wait; the timeout is set to an hour unless we get an
+			 * interrupt before the select, in which case it polls.
+ */
+ fd_kern_wait();
+ machdep_sys_sigprocmask(SIG_BLOCK, &sig_to_block, &oset);
+			/* Check for interrupts, but ignore SIGVTALRM */
+ signum_to_process[SIGVTALRM] = 0;
+ if (sig_to_process) {
+ /* Process interrupts */
+ sig_handler(0);
+ }
+ }
+ machdep_sys_sigprocmask(SIG_UNBLOCK, &sig_to_block, &oset);
+ goto context_switch_reschedule;
+ }
+ }
+
+ /* There are no threads alive. */
+ pthread_run = last;
+ exit(0);
+}
+
+#if !defined(HAVE_SYSCALL_SIGSUSPEND) && defined(HAVE_SYSCALL_SIGPAUSE)
+
+/* ==========================================================================
+ * machdep_sys_sigsuspend()
+ */
+int machdep_sys_sigsuspend(sigset_t * set)
+{
+ return(machdep_sys_sigpause(* set));
+}
+
+#endif
+
+/* ==========================================================================
+ * sig_handler_pause()
+ *
+ * Wait until a signal is sent to the process.
+ */
+void sig_handler_pause()
+{
+ sigset_t sig_to_block, sig_to_pause, oset;
+
+ sigfillset(&sig_to_block);
+ sigemptyset(&sig_to_pause);
+ machdep_sys_sigprocmask(SIG_BLOCK, &sig_to_block, &oset);
+/* if (!(SIG_ANY(sig_to_process))) { */
+ if (!sig_to_process) {
+ machdep_sys_sigsuspend(&sig_to_pause);
+ }
+ machdep_sys_sigprocmask(SIG_UNBLOCK, &sig_to_block, &oset);
+}
+
+/* ==========================================================================
+ * context_switch_done()
+ *
+ * This routine does all the things that are necessary after a context_switch()
+ * calls the machdep_restore_state(). DO NOT put this in the context_switch()
+ * routine because sometimes the machdep_restore_state() doesn't return
+ * to context_switch() but instead ends up in machdep_thread_start() or
+ * some such routine, which will need to call this routine and
+ * sig_check_and_resume().
+ */
+void context_switch_done()
+{
+ /* sigdelset((sigset_t *)&sig_to_process, SIGVTALRM); */
+ signum_to_process[SIGVTALRM] = 0;
+ set_thread_timer();
+}
+
+/* ==========================================================================
+ * set_thread_timer()
+ *
+ * Assumes the kernel is locked.
+ */
+static void set_thread_timer()
+{
+ static int last_sched_attr = SCHED_RR;
+
+ switch (pthread_run->attr.schedparam_policy) {
+ case SCHED_RR:
+ machdep_set_thread_timer(&(pthread_run->machdep_data));
+ break;
+ case SCHED_FIFO:
+ if (last_sched_attr != SCHED_FIFO) {
+ machdep_unset_thread_timer(NULL);
+ }
+ break;
+ case SCHED_IO:
+ if ((last_sched_attr != SCHED_IO) && (!sig_count)) {
+ machdep_set_thread_timer(&(pthread_run->machdep_data));
+ }
+ break;
+ default:
+ machdep_set_thread_timer(&(pthread_run->machdep_data));
+ break;
+ }
+ last_sched_attr = pthread_run->attr.schedparam_policy;
+}
+
+/* ==========================================================================
+ * sigvtalrm()
+ */
+static inline void sigvtalrm()
+{
+ if (sig_count) {
+ sigset_t sigall, oset;
+
+ sig_count = 0;
+
+ /* Unblock all signals */
+ sigemptyset(&sigall);
+ machdep_sys_sigprocmask(SIG_SETMASK, &sigall, &oset);
+ }
+ context_switch();
+ context_switch_done();
+}
+
+/* ==========================================================================
+ * sigdefault()
+ */
+static inline void sigdefault(int sig)
+{
+ int ret;
+
+ ret = pthread_sig_register(sig);
+ if (pthread_run && (ret > pthread_run->pthread_priority)) {
+ sigvtalrm();
+ }
+}
+
+/* ==========================================================================
+ * sig_handler_switch()
+ */
+static inline void sig_handler_switch(int sig)
+{
+ int ret;
+
+ switch(sig) {
+ case 0:
+ break;
+ case SIGVTALRM:
+ sigvtalrm();
+ break;
+ case SIGALRM:
+/* sigdelset((sigset_t *)&sig_to_process, SIGALRM); */
+ signum_to_process[SIGALRM] = 0;
+ switch (ret = sleep_wakeup()) {
+ default:
+ if (pthread_run && (ret > pthread_run->pthread_priority)) {
+ sigvtalrm();
+ }
+ case 0:
+ break;
+ case NOTOK:
+ /* Do the registered action, no threads were sleeping */
+ /* There is a timing window that gets
+ * here when no threads are on the
+ * sleep queue. This is a quick fix.
+ * The real problem is possibly related
+ * to heavy use of condition variables
+ * with time outs.
+ * (mevans)
+ *sigdefault(sig);
+ */
+ break;
+ }
+ break;
+ case SIGCHLD:
+/* sigdelset((sigset_t *)&sig_to_process, SIGCHLD); */
+ signum_to_process[SIGCHLD] = 0;
+ switch (ret = wait_wakeup()) {
+ default:
+ if (pthread_run && (ret > pthread_run->pthread_priority)) {
+ sigvtalrm();
+ }
+ case 0:
+ break;
+ case NOTOK:
+ /* Do the registered action, no threads were waiting */
+ sigdefault(sig);
+ break;
+ }
+ break;
+
+#ifdef SIGINFO
+ case SIGINFO:
+ pthread_dump_info ();
+ /* Then fall through, invoking the application's
+ signal handler after printing our info out.
+
+ I'm not convinced that this is right, but I'm not
+ 100% convinced that it is wrong, and this is how
+ Chris wants it done... */
+#endif
+
+ default:
+ /* Do the registered action */
+ if (!sigismember(uthread_sigmask, sig)) {
+ /*
+ * If the signal isn't masked by the last running thread and
+ * the signal behavior is default or ignore then we can
+			 * execute it immediately. --proven
+ */
+ pthread_sig_default(sig);
+ }
+ signum_to_process[sig] = 0;
+ sigdefault(sig);
+ break;
+ }
+
+}
+
+/* ==========================================================================
+ * sig_handler()
+ *
+ * Process signal that just came in, plus any pending on the signal mask.
+ * All of these must be resolved.
+ *
+ * Assumes the kernel is locked.
+ */
+static void sig_handler(int sig)
+{
+ if (pthread_kernel_lock != 1) {
+ PANIC();
+ }
+
+ if (sig) {
+ sig_handler_switch(sig);
+ }
+
+ while (sig_to_process) {
+ for (sig_to_process = 0, sig = 1; sig <= SIGMAX; sig++) {
+ if (signum_to_process[sig]) {
+ sig_handler_switch(sig);
+ }
+ }
+ }
+
+
+/*
+ if (SIG_ANY(sig_to_process)) {
+ for (sig = 1; sig <= SIGMAX; sig++) {
+ if (sigismember((sigset_t *)&sig_to_process, sig)) {
+ goto sig_handler_top;
+ }
+ }
+ }
+*/
+}
+
+/* ==========================================================================
+ * sig_handler_real()
+ *
+ * On a multi-processor this would need to use a test-and-set instruction;
+ * otherwise the following will work.
+ */
+void sig_handler_real(int sig)
+{
+ /*
+ * Get around systems with BROKEN signal handlers.
+ *
+	 * Some systems will reissue SIGCHLD unless the handler explicitly
+	 * clears the pending signal by either doing a wait() or
+	 * ignoring the signal.
+ */
+#if defined BROKEN_SIGNALS
+ if (sig == SIGCHLD) {
+ sigignore(SIGCHLD);
+ signal(SIGCHLD, sig_handler_real);
+ }
+#endif
+
+ if (pthread_kernel_lock) {
+ /* sigaddset((sigset_t *)&sig_to_process, sig); */
+ __fd_kern_wait_timeout.tv_sec = 0;
+ signum_to_process[sig] = 1;
+ sig_to_process = 1;
+ return;
+ }
+ pthread_kernel_lock++;
+
+ sig_count++;
+ sig_handler(sig);
+
+ /* Handle any signals the current thread might have just gotten */
+ if (pthread_run && pthread_run->sigcount) {
+ pthread_sig_process();
+ }
+ pthread_kernel_lock--;
+}
+
+/* ==========================================================================
+ * sig_handler_fake()
+ */
+void sig_handler_fake(int sig)
+{
+ if (pthread_kernel_lock) {
+ /* sigaddset((sigset_t *)&sig_to_process, sig); */
+ signum_to_process[sig] = 1;
+ sig_to_process = 1;
+ return;
+ }
+ pthread_kernel_lock++;
+ sig_handler(sig);
+ while (!(--pthread_kernel_lock)) {
+ if (sig_to_process) {
+ /* if (SIG_ANY(sig_to_process)) { */
+ pthread_kernel_lock++;
+ sig_handler(0);
+ } else {
+ break;
+ }
+ }
+}
+
+/* ==========================================================================
+ * __pthread_signal_delete(int sig)
+ *
+ * Assumes the kernel is locked.
+ */
+void __pthread_signal_delete(int sig)
+{
+ signum_to_process[sig] = 0;
+}
+
+/* ==========================================================================
+ * pthread_sched_other_resume()
+ *
+ * Check if thread to be resumed is of higher priority and if so
+ * stop current thread and start new thread.
+ */
+void pthread_sched_other_resume(struct pthread * pthread)
+{
+ pthread->state = PS_RUNNING;
+ pthread_prio_queue_enq(pthread_current_prio_queue, pthread);
+
+ if (pthread->pthread_priority > pthread_run->pthread_priority) {
+ if (pthread_kernel_lock == 1) {
+ sig_handler(SIGVTALRM);
+ }
+ }
+
+ __cleanup_after_resume();
+}
+
+/* ==========================================================================
+ * pthread_resched_resume()
+ *
+ * This routine assumes that the caller is the current pthread (pthread_run),
+ * that it has locked the kernel, and that it wants to reschedule itself.
+ */
+void pthread_resched_resume(enum pthread_state state)
+{
+ pthread_run->state = state;
+
+	/* Since we are about to block this thread, let's see if we are
+ * at a cancel point and if we've been cancelled.
+ * Avoid cancelling dead or unalloced threads.
+ */
+ if( ! TEST_PF_RUNNING_TO_CANCEL(pthread_run) &&
+ TEST_PTHREAD_IS_CANCELLABLE(pthread_run) &&
+ state != PS_DEAD && state != PS_UNALLOCED ) {
+
+ /* Set this flag to avoid recursively calling pthread_exit */
+ /* We have to set this flag here because we will unlock the
+ * kernel prior to calling pthread_cancel_internal.
+ */
+ SET_PF_RUNNING_TO_CANCEL(pthread_run);
+
+ pthread_run->old_state = state; /* unlock needs this data */
+ pthread_sched_resume(); /* Unlock kernel before cancel */
+ pthread_cancel_internal( 1 ); /* free locks and exit */
+ }
+
+ sig_handler(SIGVTALRM);
+
+ __cleanup_after_resume();
+}
+
+/* ==========================================================================
+ * pthread_sched_resume()
+ */
+void pthread_sched_resume()
+{
+ __cleanup_after_resume();
+}
+
+/*----------------------------------------------------------------------
+ * Function: __cleanup_after_resume
+ * Purpose: cleanup kernel locks after a resume
+ * Args: void
+ * Returns: void
+ * Notes:
+ *----------------------------------------------------------------------*/
+static void
+__cleanup_after_resume( void )
+{
+	/* Only bother if we are truly unlocking the kernel */
+ while (!(--pthread_kernel_lock)) {
+ /* if (SIG_ANY(sig_to_process)) { */
+ if (sig_to_process) {
+ pthread_kernel_lock++;
+ sig_handler(0);
+ continue;
+ }
+ if (pthread_run && pthread_run->sigcount) {
+ pthread_kernel_lock++;
+ pthread_sig_process();
+ continue;
+ }
+ break;
+ }
+
+ if( pthread_run == NULL )
+ return; /* Must be during init processing */
+
+ /* Test for cancel that should be handled now */
+
+ if( ! TEST_PF_RUNNING_TO_CANCEL(pthread_run) &&
+ TEST_PTHREAD_IS_CANCELLABLE(pthread_run) ) {
+ /* Kernel is already unlocked */
+ pthread_cancel_internal( 1 ); /* free locks and exit */
+ }
+}
+
+/* ==========================================================================
+ * pthread_sched_prevent()
+ */
+void pthread_sched_prevent(void)
+{
+ pthread_kernel_lock++;
+}
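+
+/*
+ * Editorial note (not part of the original source): pthread_sched_prevent()
+ * and pthread_sched_resume() nest by incrementing and decrementing
+ * pthread_kernel_lock, so the usual pattern in this library is roughly:
+ *
+ *	pthread_sched_prevent();	 kernel lock count goes up
+ *	... manipulate sleep/wait/run queues ...
+ *	pthread_sched_resume();		 lock count drops; once it reaches zero,
+ *					 pending signals are drained in
+ *					 __cleanup_after_resume()
+ */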
+
+/* ==========================================================================
+ * sig_init()
+ *
+ * SIGVTALRM (NOT POSIX) is needed for thread timeslice timeouts.
+ * Since it's not POSIX, I will replace it with a
+ * virtual timer for threads.
+ * SIGALRM (IS POSIX), so some special handling will be
+ * necessary to fake SIGALRM signals.
+ */
+#ifndef SIGINFO
+#define SIGINFO 0
+#endif
+void sig_init(void)
+{
+ static const int signum_to_initialize[] =
+ { SIGCHLD, SIGALRM, SIGVTALRM, SIGINFO, 0 };
+ static const int signum_to_ignore[] = { SIGKILL, SIGSTOP, 0 };
+ int i, j;
+
+#if defined(HAVE_SYSCALL_SIGACTION) || defined(HAVE_SYSCALL_KSIGACTION)
+ struct sigaction act;
+
+ act.sa_handler = sig_handler_real;
+ sigemptyset(&(act.sa_mask));
+ act.sa_flags = 0;
+#endif
+
+ /* Initialize the important signals */
+ for (i = 0; signum_to_initialize[i]; i++) {
+
+#if defined(HAVE_SYSCALL_SIGACTION) || defined(HAVE_SYSCALL_KSIGACTION)
+ if (sigaction(signum_to_initialize[i], &act, NULL)) {
+#else
+ if (signal(signum_to_initialize[i], sig_handler_real)) {
+#endif
+ PANIC();
+ }
+ }
+
+ /* Initialize the rest of the signals */
+ for (j = 1; j < SIGMAX; j++) {
+ for (i = 0; signum_to_initialize[i]; i++) {
+ if (signum_to_initialize[i] == j) {
+ goto sig_next;
+ }
+ }
+ /* Because Solaris 2.4 can't deal -- proven */
+ for (i = 0; signum_to_ignore[i]; i++) {
+ if (signum_to_ignore[i] == j) {
+ goto sig_next;
+ }
+ }
+ pthread_signal(j, SIG_DFL);
+
+#if defined(HAVE_SYSCALL_SIGACTION) || defined(HAVE_SYSCALL_KSIGACTION)
+ sigaction(j, &act, NULL);
+#else
+ signal(j, sig_handler_real);
+#endif
+
+ sig_next:;
+ }
+
+#if defined BROKEN_SIGNALS
+ signal(SIGCHLD, sig_handler_real);
+#endif
+
+}
+
diff --git a/mit-pthreads/pthreads/sleep.c b/mit-pthreads/pthreads/sleep.c
new file mode 100644
index 00000000000..1c13dd2eb1d
--- /dev/null
+++ b/mit-pthreads/pthreads/sleep.c
@@ -0,0 +1,367 @@
+/* ==== sleep.c ============================================================
+ * Copyright (c) 1993, 1994 by Chris Provenzano, proven@mit.edu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Chris Provenzano.
+ * 4. The name of Chris Provenzano may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CHRIS PROVENZANO ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL CHRIS PROVENZANO BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Description : All the appropriate sleep routines.
+ *
+ * 1.00 93/12/28 proven
+ * -Started coding this file.
+ *
+ * 1.36 94/06/04 proven
+ *	-Use new timer structure pthread_timer, that uses seconds and
+ *	 nanoseconds. Rewrite all routines completely.
+ *
+ * 1.38 94/06/13 proven
+ * -switch pthread_timer to timespec
+ */
+
+#ifndef lint
+static const char rcsid[] = "$Id$";
+#endif
+
+#include <pthread.h>
+#include <stdio.h>
+#include <errno.h>
+#include <sys/time.h>
+#include <signal.h>
+#include <unistd.h>
+#include <sys/compat.h>
+
+struct pthread * pthread_sleep = NULL;
+
+/* ==========================================================================
+ * sleep_compare_time()
+ */
+/* static inline int sleep_compare_time(struct timespec * time1,
+ struct timespec * time2) */
+static int sleep_compare_time(struct timespec * time1, struct timespec * time2)
+{
+ if ((time1->tv_sec < time2->tv_sec) ||
+ ((time1->tv_sec == time2->tv_sec) && (time1->tv_nsec < time2->tv_nsec))) {
+ return(-1);
+ }
+ if ((time1->tv_sec == time2->tv_sec) && (time1->tv_nsec == time2->tv_nsec)){
+ return(0);
+ }
+ return(1);
+}
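+
+/*
+ * Example (editorial): sleep_compare_time() behaves like a qsort()-style
+ * comparator, e.g. {2, 500000000} (2.5s) vs. {3, 0} returns -1, equal
+ * timespecs return 0, and a later first argument returns 1.
+ */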
+
+/* ==========================================================================
+ * machdep_stop_timer()
+ *
+ * Stops the timer and stores the time left on it in *current (if non-NULL).
+ */
+static struct itimerval timestop = { { 0, 0 }, { 0, 0 } };
+
+void machdep_stop_timer(struct timespec *current)
+{
+ struct itimerval timenow;
+
+ setitimer(ITIMER_REAL, & timestop, & timenow);
+ __pthread_signal_delete(SIGALRM);
+ if (current) {
+ current->tv_nsec = timenow.it_value.tv_usec * 1000;
+ current->tv_sec = timenow.it_value.tv_sec;
+ }
+}
+
+/* ==========================================================================
+ * machdep_start_timer()
+ */
+int machdep_start_timer(struct timespec *current, struct timespec *wakeup)
+{
+ struct itimerval timeout;
+
+ timeout.it_value.tv_usec = (wakeup->tv_nsec - current->tv_nsec) / 1000;
+ timeout.it_value.tv_sec = wakeup->tv_sec - current->tv_sec;
+ timeout.it_interval.tv_usec = 0;
+ timeout.it_interval.tv_sec = 0;
+ if (timeout.it_value.tv_usec < 0) {
+ timeout.it_value.tv_usec += 1000000;
+ timeout.it_value.tv_sec--;
+ }
+
+ if (((long) timeout.it_value.tv_sec >= 0) &&
+ ((timeout.it_value.tv_usec) || (timeout.it_value.tv_sec))) {
+ if (setitimer(ITIMER_REAL, & timeout, NULL) < 0)
+ {
+ fprintf(stderr,"Got error %d from setitimer with:\n\
+ wakeup: tv_sec: %ld tv_nsec: %ld\n\
+ current: tv_sec: %ld tv_nsec: %ld\n\
+ argument: tv_sec: %ld tv_usec: %ld\n",
+ errno,
+ wakeup->tv_sec, wakeup->tv_nsec,
+ current->tv_sec, current->tv_nsec,
+ timeout.it_value.tv_sec, timeout.it_value.tv_usec);
+ PANIC();
+ }
+ } else {
+ /*
+ * There is no time on the timer.
+ * This shouldn't happen,
+ * but isn't fatal.
+ */
+ sig_handler_fake(SIGALRM);
+ }
+ return(OK);
+}
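+
+/*
+ * Worked example (editorial, illustrative only): with current = {3, 800000000}
+ * (3.8s) and wakeup = {5, 200000000} (5.2s), the code above first gets
+ * tv_sec = 2 and tv_usec = -600000, then borrows one second to arm a
+ * one-shot ITIMER_REAL of {1s, 400000us}, i.e. the expected 1.4 seconds.
+ */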
+
+/* ==========================================================================
+ * sleep_schedule()
+ *
+ * Assumes that the current thread is the thread to be scheduled
+ * and that the kthread is already locked.
+ */
+void sleep_schedule(struct timespec *current_time, struct timespec *new_time)
+{
+ struct pthread * pthread_sleep_current, * pthread_sleep_prev;
+
+ /* Record the new time as the current thread's wakeup time. */
+ pthread_run->wakeup_time = *new_time;
+
+ /* any threads? */
+ if (pthread_sleep_current = pthread_sleep) {
+ if (sleep_compare_time(&(pthread_sleep_current->wakeup_time),
+ new_time) <= 0) {
+ /* Don't need to restart timer */
+ while (pthread_sleep_current->sll) {
+
+ pthread_sleep_prev = pthread_sleep_current;
+ pthread_sleep_current = pthread_sleep_current->sll;
+
+ if (sleep_compare_time(&(pthread_sleep_current->wakeup_time),
+ new_time) > 0) {
+ pthread_run->sll = pthread_sleep_current;
+ pthread_sleep_prev->sll = pthread_run;
+ return;
+ }
+ }
+
+ /* No more threads in queue, attach pthread_run to end of list */
+ pthread_sleep_current->sll = pthread_run;
+ pthread_run->sll = NULL;
+
+ } else {
+ /* Start timer and enqueue thread */
+ machdep_start_timer(current_time, new_time);
+ pthread_run->sll = pthread_sleep_current;
+ pthread_sleep = pthread_run;
+ }
+ } else {
+ /* Start timer and enqueue thread */
+ machdep_start_timer(current_time, new_time);
+ pthread_sleep = pthread_run;
+ pthread_run->sll = NULL;
+ }
+}
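+
+/*
+ * Editorial note: the sleep queue built above is a singly linked list
+ * threaded through pthread->sll and kept sorted by wakeup_time.  The timer
+ * is (re)armed only when the new thread becomes the head of the list (or
+ * the list was empty); otherwise the thread is just spliced in place and
+ * the timer already armed for the earlier wakeup is left alone.
+ */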
+
+/* ==========================================================================
+ * sleep_wakeup()
+ *
+ * This routine is called by the interrupt handler, which has already
+ * locked the current kthread. Since all threads on this list are owned
+ * by the current kthread, rescheduling won't be a problem.
+ */
+int sleep_spurious_wakeup = 0;
+int sleep_wakeup()
+{
+ struct pthread *pthread_sleep_next;
+ struct timespec current_time;
+ int ret = 0;
+
+ if (pthread_sleep == NULL) {
+ return(NOTOK);
+ }
+
+ machdep_gettimeofday(&current_time);
+ if (sleep_compare_time(&(pthread_sleep->wakeup_time), &current_time) > 0) {
+ machdep_start_timer(&current_time, &(pthread_sleep->wakeup_time));
+ sleep_spurious_wakeup++;
+ return(OK);
+ }
+
+ do {
+ if (pthread_sleep->pthread_priority > ret) {
+ ret = pthread_sleep->pthread_priority;
+ }
+
+ /*
+ * Clean up removed thread and start it running again.
+ *
+	 * Note: It is VERY important to remove the thread from the
+	 * current queue before putting it on the run queue.
+	 * Both queues use pthread_sleep->next, so the thread that points
+	 * to pthread_sleep must be made to point to pthread_sleep->next
+	 * before pthread_sleep is put on the run queue.
+ */
+ if ((SET_PF_DONE_EVENT(pthread_sleep)) == OK) {
+ if (pthread_sleep->queue)
+ pthread_queue_remove(pthread_sleep->queue, pthread_sleep);
+ pthread_prio_queue_enq(pthread_current_prio_queue, pthread_sleep);
+ pthread_sleep->state = PS_RUNNING;
+ }
+
+ pthread_sleep_next = pthread_sleep->sll;
+ pthread_sleep->sll = NULL;
+
+ if ((pthread_sleep = pthread_sleep_next) == NULL) {
+ /* No more threads on sleep queue */
+ return(ret);
+ }
+ } while (sleep_compare_time(&(pthread_sleep->wakeup_time), &(current_time)) <= 0);
+
+ /* Start timer for next time interval */
+ machdep_start_timer(&current_time, &(pthread_sleep->wakeup_time));
+ return(ret);
+}
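+
+/*
+ * Editorial note: sleep_wakeup() returns the highest priority among the
+ * threads it moved to the run queue, or NOTOK if the sleep queue was empty.
+ * The SIGALRM case in sig_handler_switch() uses that value to decide
+ * whether to force an immediate reschedule of the current thread.
+ */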
+
+
+/* ==========================================================================
+ * __sleep()
+ */
+void __sleep(struct timespec * time_to_sleep)
+{
+ struct pthread *pthread_sleep_prev;
+ struct timespec current_time, wakeup_time;
+
+ pthread_sched_prevent();
+
+ /* Get real time */
+ machdep_gettimeofday(&current_time);
+ wakeup_time.tv_sec = current_time.tv_sec + time_to_sleep->tv_sec;
+ wakeup_time.tv_nsec = current_time.tv_nsec + time_to_sleep->tv_nsec;
+
+ sleep_schedule(&current_time, &wakeup_time);
+
+ /* Reschedule thread */
+ SET_PF_WAIT_EVENT(pthread_run);
+ SET_PF_AT_CANCEL_POINT(pthread_run); /* This is a cancel point */
+ pthread_resched_resume(PS_SLEEP_WAIT);
+ CLEAR_PF_AT_CANCEL_POINT(pthread_run); /* No longer at cancel point */
+ CLEAR_PF_DONE_EVENT(pthread_run);
+
+ /* Return actual time slept */
+ time_to_sleep->tv_sec = pthread_run->wakeup_time.tv_sec;
+ time_to_sleep->tv_nsec = pthread_run->wakeup_time.tv_nsec;
+}
+
+/* ==========================================================================
+ * pthread_nanosleep()
+ */
+unsigned int pthread_nanosleep(unsigned int nseconds)
+{
+ struct timespec time_to_sleep;
+
+ if (nseconds) {
+ time_to_sleep.tv_nsec = nseconds;
+ time_to_sleep.tv_sec = 0;
+ __sleep(&time_to_sleep);
+ nseconds = time_to_sleep.tv_nsec;
+ }
+ return(nseconds);
+}
+
+/* ==========================================================================
+ * usleep()
+ */
+void usleep(unsigned int useconds)
+{
+ struct timespec time_to_sleep;
+
+ if (useconds) {
+ time_to_sleep.tv_nsec = (useconds % 1000000) * 1000;
+ time_to_sleep.tv_sec = useconds / 1000000;
+ __sleep(&time_to_sleep);
+ }
+}
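+
+/*
+ * Example (editorial): usleep(2500000) is split above into
+ * tv_sec = 2500000 / 1000000 = 2 and tv_nsec = (2500000 % 1000000) * 1000 =
+ * 500000000, i.e. a 2.5 second sleep request handed to __sleep().
+ */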
+
+/* ==========================================================================
+ * sleep()
+ */
+unsigned int sleep(unsigned int seconds)
+{
+ struct timespec time_to_sleep;
+
+ if (seconds) {
+ time_to_sleep.tv_sec = seconds;
+ time_to_sleep.tv_nsec = 0;
+ __sleep(&time_to_sleep);
+ seconds = time_to_sleep.tv_sec;
+ }
+ return(seconds);
+}
+
+/* ==========================================================================
+ * sleep_cancel()
+ *
+ * Cannot be called while kernel is locked.
+ * Does not wake the sleeping thread up; it just removes it from the sleep queue.
+ */
+int sleep_cancel(struct pthread * pthread)
+{
+ struct timespec current_time, delta_time;
+ struct pthread * pthread_last;
+ int rval = NOTOK;
+
+ /* Lock sleep queue, Note this may be on a different kthread queue */
+ pthread_sched_prevent();
+
+ if (pthread_sleep) {
+ if (pthread == pthread_sleep) {
+ rval = OK;
+ machdep_stop_timer(&delta_time);
+ if (pthread_sleep = pthread_sleep->sll) {
+ current_time.tv_sec = delta_time.tv_sec;
+ current_time.tv_nsec = delta_time.tv_nsec;
+ current_time.tv_sec += pthread_sleep->wakeup_time.tv_sec;
+ current_time.tv_nsec += pthread_sleep->wakeup_time.tv_nsec;
+ while (current_time.tv_nsec > 1000000000) {
+ current_time.tv_nsec -= 1000000000;
+ current_time.tv_sec++;
+ }
+ machdep_start_timer(&(current_time),
+ &(pthread_sleep->wakeup_time));
+ }
+ } else {
+ for (pthread_last = pthread_sleep; pthread_last;
+ pthread_last = pthread_last->sll) {
+ if (pthread_last->sll == pthread) {
+ pthread_last->sll = pthread->sll;
+ rval = OK;
+ break;
+ }
+ }
+ }
+ }
+
+ pthread_sched_resume();
+ pthread->sll = NULL;
+ return(rval);
+}
diff --git a/mit-pthreads/pthreads/specific.c b/mit-pthreads/pthreads/specific.c
new file mode 100644
index 00000000000..898f9b0cd1b
--- /dev/null
+++ b/mit-pthreads/pthreads/specific.c
@@ -0,0 +1,198 @@
+/* ==== specific.c =======================================================
+ * Copyright (c) 1994 by Chris Provenzano, proven@mit.edu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Chris Provenzano.
+ * 4. The name of Chris Provenzano may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CHRIS PROVENZANO ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL CHRIS PROVENZANO BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Description : Pthread thread specific data management.
+ *
+ * 1.20 94/03/30 proven
+ * -Started coding this file.
+ */
+
+#ifndef lint
+static const char rcsid[] = "$Id$";
+#endif
+
+#include <errno.h>
+#include <pthread.h>
+#include <stdlib.h>
+#include <string.h>
+
+static struct pthread_key key_table[PTHREAD_DATAKEYS_MAX];
+static pthread_mutex_t key_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+/* ==========================================================================
+ * pthread_key_create()
+ */
+int pthread_key_create(pthread_key_t *key, void (*destructor)(void *))
+{
+ pthread_mutex_lock(&key_mutex);
+ for ((*key) = 0; (*key) < PTHREAD_DATAKEYS_MAX; (*key)++) {
+ if (key_table[(*key)].count == 0) {
+ key_table[(*key)].count++;
+ key_table[(*key)].destructor = destructor;
+ pthread_mutex_init(&(key_table[(*key)].mutex), NULL);
+ pthread_mutex_unlock(&key_mutex);
+ return(OK);
+ }
+ }
+ pthread_mutex_unlock(&key_mutex);
+ return(EAGAIN);
+}
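+
+/*
+ * Typical usage (editorial sketch, not part of the original source):
+ *
+ *	static pthread_key_t buf_key;
+ *
+ *	pthread_key_create(&buf_key, free);	 once, e.g. via pthread_once()
+ *	pthread_setspecific(buf_key, malloc(BUFSIZ));
+ *	buf = (char *)pthread_getspecific(buf_key);
+ *
+ * Each thread sees its own value for buf_key; non-NULL values are passed to
+ * the destructor (free here) in pthread_cleanupspecific().
+ */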
+
+/* ==========================================================================
+ * pthread_key_delete()
+ */
+int pthread_key_delete(pthread_key_t key)
+{
+ int ret;
+
+ if (key < PTHREAD_DATAKEYS_MAX) {
+ pthread_mutex_lock(&(key_table[key].mutex));
+ switch (key_table[key].count) {
+ case 1:
+ pthread_mutex_destroy(&(key_table[key].mutex));
+ key_table[key].destructor = NULL;
+ key_table[key].count = 0;
+ case 0:
+ ret = OK;
+ break;
+ default:
+ ret = EBUSY;
+ }
+ pthread_mutex_unlock(&(key_table[key].mutex));
+ } else {
+ ret = EINVAL;
+ }
+ return(ret);
+}
+
+/* ==========================================================================
+ * pthread_cleanupspecific()
+ */
+void pthread_cleanupspecific(void)
+{
+ void * data;
+ int key;
+ int itr;
+
+ pthread_mutex_lock(&key_mutex);
+ for (itr = 0; itr < _POSIX_THREAD_DESTRUTOR_ITERATIONS; itr++) {
+ for (key = 0; key < PTHREAD_DATAKEYS_MAX; key++) {
+ if (pthread_run->specific_data_count) {
+ if (pthread_run->specific_data[key]) {
+ data = (void *)pthread_run->specific_data[key];
+ pthread_run->specific_data[key] = NULL;
+ pthread_run->specific_data_count--;
+ if (key_table[key].destructor) {
+ pthread_mutex_unlock(&key_mutex);
+ key_table[key].destructor(data);
+ pthread_mutex_lock(&key_mutex);
+ }
+ key_table[key].count--;
+ }
+ } else {
+ free(pthread_run->specific_data);
+ pthread_mutex_unlock(&key_mutex);
+ return;
+ }
+ }
+ }
+ free(pthread_run->specific_data);
+ pthread_mutex_unlock(&key_mutex);
+}
+
+static inline const void ** pthread_key_allocate_data(void)
+{
+ const void ** new_data;
+ if(new_data = (const void**)malloc(sizeof(void *) * PTHREAD_DATAKEYS_MAX)) {
+ memset((void *)new_data, 0, sizeof(void *) * PTHREAD_DATAKEYS_MAX);
+ }
+ return(new_data);
+}
+
+/* ==========================================================================
+ * pthread_setspecific()
+ */
+int pthread_setspecific(pthread_key_t key, const void * value)
+{
+ int ret;
+
+ if ((pthread_run->specific_data) ||
+ (pthread_run->specific_data = pthread_key_allocate_data())) {
+ if ((key < PTHREAD_DATAKEYS_MAX) && (key_table)) {
+ pthread_mutex_lock(&(key_table[key].mutex));
+ if (key_table[key].count) {
+ if (pthread_run->specific_data[key] == NULL) {
+ if (value != NULL) {
+ pthread_run->specific_data_count++;
+ key_table[key].count++;
+ }
+ } else {
+ if (value == NULL) {
+ pthread_run->specific_data_count--;
+ key_table[key].count--;
+ }
+ }
+ pthread_run->specific_data[key] = value;
+ ret = OK;
+ } else {
+ ret = EINVAL;
+ }
+ pthread_mutex_unlock(&(key_table[key].mutex));
+ } else {
+ ret = EINVAL;
+ }
+ } else {
+ ret = ENOMEM;
+ }
+ return(ret);
+}
+
+/* ==========================================================================
+ * pthread_getspecific()
+ */
+void * pthread_getspecific(pthread_key_t key)
+{
+ void *ret;
+
+ if ((pthread_run->specific_data) && (key < PTHREAD_DATAKEYS_MAX)
+ && (key_table)) {
+ pthread_mutex_lock(&(key_table[key].mutex));
+ if (key_table[key].count) {
+ ret = (void *)pthread_run->specific_data[key];
+ } else {
+ ret = NULL;
+ }
+ pthread_mutex_unlock(&(key_table[key].mutex));
+ } else {
+ ret = NULL;
+ }
+ return(ret);
+}
diff --git a/mit-pthreads/pthreads/stat.c b/mit-pthreads/pthreads/stat.c
new file mode 100644
index 00000000000..f18b7c6bd24
--- /dev/null
+++ b/mit-pthreads/pthreads/stat.c
@@ -0,0 +1,116 @@
+/* ==== stat.c ============================================================
+ * Copyright (c) 1995 by Chris Provenzano, proven@mit.edu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Chris Provenzano.
+ * 4. The name of Chris Provenzano may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CHRIS PROVENZANO ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL CHRIS PROVENZANO BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Description : The stat() family of syscalls.
+ *
+ * 1.00 93/05/27 proven
+ * -Started coding this file.
+ */
+
+#ifndef lint
+static const char rcsid[] = "$Id$";
+#endif
+
+#include <pthread.h>
+#include <errno.h>
+
+struct stat;
+struct statfs;
+
+/* ==========================================================================
+ * fstat()
+ *
+ * Might want to indirect this.
+ */
+int fstat(int fd, struct stat *buf)
+{
+ int ret;
+
+ if ((ret = fd_lock(fd, FD_READ, NULL)) == OK) {
+ if ((ret = machdep_sys_fstat(fd_table[fd]->fd.i, buf)) < OK) {
+ SET_ERRNO(-ret);
+ ret = NOTOK;
+ }
+ fd_unlock(fd, FD_READ);
+ }
+ return(ret);
+}
+
+/* ==========================================================================
+ * stat()
+ */
+int stat(const char * path, struct stat * buf)
+{
+ int ret;
+
+ if ((ret = machdep_sys_stat(path, buf)) < OK) {
+ SET_ERRNO(-ret);
+ ret = NOTOK;
+ }
+ return(ret);
+
+}
+
+/* ==========================================================================
+ * lstat()
+ */
+int lstat(const char * path, struct stat * buf)
+{
+ int ret;
+
+ if ((ret = machdep_sys_lstat(path, buf)) < OK) {
+ SET_ERRNO(-ret);
+ ret = NOTOK;
+ }
+ return(ret);
+
+}
+
+#ifdef HAVE_SYSCALL_FSTATFS
+/* ==========================================================================
+ * fstatfs()
+ *
+ * Might want to indirect this.
+ */
+int fstatfs(int fd, struct statfs *buf)
+{
+ int ret;
+
+ if ((ret = fd_lock(fd, FD_READ, NULL)) == OK) {
+ if ((ret = machdep_sys_fstatfs(fd_table[fd]->fd.i, buf)) < OK) {
+ SET_ERRNO(-ret);
+ ret = NOTOK;
+ }
+ fd_unlock(fd, FD_READ);
+ }
+ return(ret);
+}
+#endif
diff --git a/mit-pthreads/pthreads/wait.c b/mit-pthreads/pthreads/wait.c
new file mode 100644
index 00000000000..9f0418ca8a1
--- /dev/null
+++ b/mit-pthreads/pthreads/wait.c
@@ -0,0 +1,159 @@
+/* ==== wait.c ============================================================
+ * Copyright (c) 1994 by Chris Provenzano, proven@mit.edu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Chris Provenzano.
+ * 4. The name of Chris Provenzano may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CHRIS PROVENZANO ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL CHRIS PROVENZANO BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Description : All the appropriate wait routines.
+ *
+ * 1.38 94/06/13 proven
+ * -Started coding this file.
+ *
+ */
+
+#ifndef lint
+static const char rcsid[] = "$Id$";
+#endif
+
+#include <pthread.h>
+#include <signal.h>
+#include <sys/types.h>
+#include <sys/time.h>
+#include <pthread/posix.h>
+#include <sys/compat.h>
+#include <sys/wait.h>
+
+/* This is an UGLY hack to get wait to compile, something better is needed. */
+/* #define _POSIX_SOURCE
+#undef _POSIX_SOURCE
+*/
+
+struct pthread_queue wait_queue = { NULL, NULL, NULL };
+extern void sig_handler_real();
+
+/* ==========================================================================
+ * wait_wakeup()
+ *
+ * This routine is called by the interrupt handler which has locked
+ * the current kthread semaphore. Since only threads owned by the
+ * current kthread can be queued here, no additional locks are necessary.
+ */
+int wait_wakeup()
+{
+ struct pthread *pthread;
+ int ret = 0;
+
+ if (pthread = pthread_queue_deq(& wait_queue)) {
+ /* Wakeup all threads, and enqueue them on the run queue */
+ do {
+ pthread->state = PS_RUNNING;
+ if (pthread->pthread_priority > ret) {
+ ret = pthread->pthread_priority;
+ }
+ pthread_prio_queue_enq(pthread_current_prio_queue, pthread);
+ } while (pthread = pthread_queue_deq(&wait_queue));
+ return(ret);
+ }
+ return(NOTOK);
+}
+
+/* ==========================================================================
+ * For the wait calls, it is important that the current kthread is locked
+ * before the appropriate wait syscall is performed. This way we ensure
+ * that there is never a case where a thread is waiting for a child but
+ * missed the interrupt for that child.
+ * Patched by William S. Lear 1997-02-02
+ */
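+
+/*
+ * In outline (editorial): waitpid() and wait3() below both poll the kernel
+ * with WNOHANG while the scheduler is locked; if no child has exited yet,
+ * the thread is parked on wait_queue and rescheduled, and wait_wakeup()
+ * (driven by the SIGCHLD case in signal.c) puts it back on the run queue
+ * so the poll can be retried.
+ */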
+
+/* ==========================================================================
+ * waitpid()
+ */
+pid_t waitpid(pid_t pid, int *status, int options)
+{
+ pid_t ret;
+
+ pthread_sched_prevent();
+ ret = machdep_sys_waitpid(pid, status, options | WNOHANG);
+ /* If we are not doing nohang, try again, else return immediately */
+ if (!(options & WNOHANG)) {
+ while (ret == OK) {
+ /* Enqueue thread on wait queue */
+ pthread_queue_enq(&wait_queue, pthread_run);
+
+ /* reschedule unlocks scheduler */
+ SET_PF_AT_CANCEL_POINT(pthread_run); /* This is a cancel point */
+ pthread_resched_resume(PS_WAIT_WAIT);
+ CLEAR_PF_AT_CANCEL_POINT(pthread_run); /* No longer at cancel point */
+
+ pthread_sched_prevent();
+
+ ret = machdep_sys_waitpid(pid, status, options | WNOHANG);
+ }
+ }
+ pthread_sched_resume();
+ return(ret);
+}
+
+/* ==========================================================================
+ * wait3()
+ * Patched by Monty 1997-02-02
+ */
+pid_t wait3(__WAIT_STATUS status, int options, void * rusage)
+{
+ pid_t ret;
+
+ pthread_sched_prevent();
+ ret = machdep_sys_wait3(status, options | WNOHANG, rusage);
+ /* If we are not doing nohang, try again, else return immediately */
+ if (!(options & WNOHANG)) {
+ while (ret == OK) {
+ /* Enqueue thread on wait queue */
+ pthread_queue_enq(&wait_queue, pthread_run);
+
+ /* reschedule unlocks scheduler */
+ SET_PF_AT_CANCEL_POINT(pthread_run); /* This is a cancel point */
+ pthread_resched_resume(PS_WAIT_WAIT);
+ CLEAR_PF_AT_CANCEL_POINT(pthread_run); /* No longer at cancel point */
+
+ pthread_sched_prevent();
+
+			ret = machdep_sys_wait3(status, options | WNOHANG, rusage);
+ }
+ }
+ pthread_sched_resume();
+ return(ret);
+}
+
+/* ==========================================================================
+ * wait()
+ */
+pid_t wait(__WAIT_STATUS status)
+{
+ return(waitpid((pid_t)-1, (int *)status, 0));
+}
diff --git a/mit-pthreads/pthreads/wrapper.c b/mit-pthreads/pthreads/wrapper.c
new file mode 100644
index 00000000000..6e3f4478fcf
--- /dev/null
+++ b/mit-pthreads/pthreads/wrapper.c
@@ -0,0 +1,149 @@
+/* ==== wrapper.c ============================================================
+ * Copyright (c) 1994 by Chris Provenzano, proven@mit.edu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Chris Provenzano.
+ * 4. The name of Chris Provenzano may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CHRIS PROVENZANO ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL CHRIS PROVENZANO BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Description : Wrapper functions for syscalls that only need errno redirected
+ *
+ * 1.4x 94/07/23 proven
+ * -Started coding this file.
+ */
+
+#ifndef lint
+static const char rcsid[] = "$Id$";
+#endif
+
+#include "config.h"
+#include <pthread.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <pthread/posix.h>
+
+/* ==========================================================================
+ * link()
+ */
+int link(const char * name1, const char * name2)
+{
+ int ret;
+
+ if ((ret = machdep_sys_link(name1, name2)) < OK) {
+ SET_ERRNO(-ret);
+ }
+ return(ret);
+
+}
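+
+/*
+ * Editorial note: the machdep_sys_*() calls in this file return a
+ * non-negative result on success or a negative errno value on failure, so
+ * each wrapper converts e.g. a -ENOENT result into SET_ERRNO(ENOENT) and
+ * returns the (still negative) value to the caller.
+ */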
+
+/* ==========================================================================
+ * unlink()
+ */
+int unlink(const char * path)
+{
+ int ret;
+
+ if ((ret = machdep_sys_unlink(path)) < OK) {
+ SET_ERRNO(-ret);
+ }
+ return(ret);
+
+}
+
+/* ==========================================================================
+ * chdir()
+ */
+int chdir(const char * path)
+{
+ int ret;
+
+ if ((ret = machdep_sys_chdir(path)) < OK) {
+ SET_ERRNO(-ret);
+ }
+ return(ret);
+
+}
+
+/* ==========================================================================
+ * chmod()
+ */
+int chmod(const char * path, mode_t mode)
+{
+ int ret;
+
+ if ((ret = machdep_sys_chmod(path, mode)) < OK) {
+ SET_ERRNO(-ret);
+ }
+ return(ret);
+
+}
+
+/* ==========================================================================
+ * chown()
+ */
+int chown(const char * path, uid_t owner, gid_t group)
+{
+ int ret;
+
+ if ((ret = machdep_sys_chown(path, owner, group)) < OK) {
+ SET_ERRNO(-ret);
+ }
+ return(ret);
+
+}
+
+/* ==========================================================================
+ * rename()
+ */
+int rename(const char * name1, const char * name2)
+{
+ int ret;
+
+ if ((ret = machdep_sys_rename(name1, name2)) < OK) {
+ SET_ERRNO(-ret);
+ }
+ return(ret);
+
+}
+
+
+/* ==========================================================================
+ * chroot()
+ */
+
+#ifdef HAVE_SYSCALL_CHROOT
+int chroot(const char * name)
+{
+ int ret;
+
+ if ((ret = machdep_sys_chroot(name)) < OK) {
+ SET_ERRNO(-ret);
+ }
+ return(ret);
+
+}
+#endif
diff --git a/mit-pthreads/pthreads/writev.c b/mit-pthreads/pthreads/writev.c
new file mode 100644
index 00000000000..9823d5ad201
--- /dev/null
+++ b/mit-pthreads/pthreads/writev.c
@@ -0,0 +1,89 @@
+/* ==== writev.c ============================================================
+ * Copyright (c) 1995 by Chris Provenzano, proven@mit.edu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Chris Provenzano.
+ * 4. The name of Chris Provenzano may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CHRIS PROVENZANO ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL CHRIS PROVENZANO BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Description : Implementation of writev().
+ *
+ * 1.00 95/06/19 proven
+ * -Started coding this file.
+ */
+
+#ifndef lint
+static const char rcsid[] = "$Id$";
+#endif
+
+#include "config.h"
+
+#ifndef HAVE_SYSCALL_WRITEV
+
+#include <errno.h>
+#include <unistd.h>
+#include <stdlib.h>	/* for malloc()/free() used below */
+#include <string.h>	/* for memcpy() used below */
+#include <sys/uio.h>
+#include <sys/types.h>
+
+/* ==========================================================================
+ * machdep_sys_writev()
+ *
+ * modified from the GNU C Library posix/writev.c
+ */
+int machdep_sys_writev(int fd, struct iovec * vector, int count)
+{
+ size_t bytes, i;
+ char *buffer;
+ int ret;
+
+ /* Find the total number of bytes to be written. */
+ for (bytes = 0, i = 0; i < count; ++i)
+ bytes += vector[i].iov_len;
+
+ if (bytes) {
+ /*
+ * Allocate a temporary buffer to hold the data.
+ * Don't use alloca because threads tend to have smaller stacks.
+ */
+ if ((buffer = (char *)malloc(bytes)) == NULL) {
+ return(-ENOMEM);
+ }
+ /* Copy the data from memory specified by VECTOR to BUFFER */
+ for (ret = 0, i = 0; i < count; ++i) {
+ memcpy(buffer + ret, vector[i].iov_base, vector[i].iov_len);
+ ret += vector[i].iov_len;
+ }
+ } else {
+ buffer = NULL;
+ }
+
+ ret = (int)machdep_sys_write(fd, buffer, bytes);
+ if (buffer)
+ free(buffer);
+ return(ret);
+}
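+
+/*
+ * Example (editorial): for two iovecs {"Hello, ", 7} and {"world\n", 6} the
+ * emulation above copies both segments into one 13-byte buffer and issues a
+ * single machdep_sys_write() call for the combined data.
+ */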
+
+#endif