summary refs log tree commit diff
path: root/libjava/include/posix-threads.h
diff options
context:
space:
mode:
Diffstat (limited to 'libjava/include/posix-threads.h')
-rw-r--r-- libjava/include/posix-threads.h | 163
1 files changed, 159 insertions, 4 deletions
diff --git a/libjava/include/posix-threads.h b/libjava/include/posix-threads.h
index ad09bedd45a..e033209d088 100644
--- a/libjava/include/posix-threads.h
+++ b/libjava/include/posix-threads.h
@@ -1,7 +1,7 @@
// -*- c++ -*-
// posix-threads.h - Defines for using POSIX threads.
-/* Copyright (C) 1998, 1999 Free Software Foundation
+/* Copyright (C) 1998, 1999, 2001 Free Software Foundation
This file is part of libgcj.
@@ -106,10 +106,21 @@ _Jv_CondInit (_Jv_ConditionVariable_t *cv)
// Mutexes.
//
+#ifdef LOCK_DEBUG
+# include <stdio.h>
+#endif
+
inline void
_Jv_MutexInit (_Jv_Mutex_t *mu)
{
+# ifdef LOCK_DEBUG /* Assumes Linuxthreads */
+ pthread_mutexattr_t attr;
+ pthread_mutexattr_init(&attr);
+ pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
+ pthread_mutex_init (&mu->mutex, &attr);
+# else
pthread_mutex_init (&mu->mutex, 0);
+# endif
mu->count = 0;
mu->owner = 0;
@@ -125,7 +136,16 @@ _Jv_MutexLock (_Jv_Mutex_t *mu)
}
else
{
- pthread_mutex_lock (&mu->mutex);
+# ifdef LOCK_DEBUG
+ int result = pthread_mutex_lock (&mu->mutex);
+ if (0 != result)
+ {
+ fprintf(stderr, "Pthread_mutex_lock returned %d\n", result);
+ for (;;) {}
+ }
+# else
+ pthread_mutex_lock (&mu->mutex);
+# endif
mu->count = 1;
mu->owner = self;
}
@@ -136,14 +156,29 @@ inline int
_Jv_MutexUnlock (_Jv_Mutex_t *mu)
{
if (_Jv_PthreadCheckMonitor (mu))
- return 1;
+ {
+# ifdef LOCK_DEBUG
+ fprintf(stderr, "_Jv_MutexUnlock: Not owner\n");
+ for (;;) {}
+# endif
+ return 1;
+ }
mu->count--;
if (mu->count == 0)
{
mu->owner = 0;
- pthread_mutex_unlock (&mu->mutex);
+# ifdef LOCK_DEBUG
+ int result = pthread_mutex_unlock (&mu->mutex);
+ if (0 != result)
+ {
+ fprintf(stderr, "Pthread_mutex_unlock returned %d\n", result);
+ for (;;) {}
+ }
+# else
+ pthread_mutex_unlock (&mu->mutex);
+# endif
}
return 0;
}
@@ -179,6 +214,126 @@ _Jv_ThreadCurrent (void)
return (java::lang::Thread *) pthread_getspecific (_Jv_ThreadKey);
}
+#ifdef JV_HASH_SYNCHRONIZATION
+// Should be specialized to just load the "current thread" register
+// on platforms that support it. Speed is of the essence. The value
+// of the descriptor is not, so long as there is a one-to-one correspondence
+// to threads.
+
+
+#ifdef __i386__
+
+#define SLOW_PTHREAD_SELF
+ // Add a cache for pthread_self() if we don't have the thread
+ // pointer in a register.
+
+#endif /* __i386__ */
+
+#ifdef __ia64__
+
+typedef size_t _Jv_ThreadId_t;
+
+register size_t _Jv_self __asm__("r13");
+ // For linux_threads this is really a pointer to its thread data
+ // structure. We treat it as opaque. That should also work
+ // on other operating systems that follow the ABI standard.
+
+// This should become the prototype for machines that maintain a thread
+// pointer in a register.
+inline _Jv_ThreadId_t
+_Jv_ThreadSelf (void)
+{
+ return _Jv_self;
+}
+
+#define JV_SELF_DEFINED
+
+#endif /* __ia64__ */
+
+#if defined(SLOW_PTHREAD_SELF)
+
+typedef pthread_t _Jv_ThreadId_t;
+
+// E.g. on X86 Linux, pthread_self() is too slow for our purpose.
+// Instead we maintain a cache based on the current sp value.
+// This is similar to what's done for thread local allocation in the
+// GC, only far simpler.
+// This code should probably go away when Linux/X86 starts using a
+// segment register to hold the thread id.
+# define LOG_THREAD_SPACING 12
+ // If two thread pointer values are closer than
+ // 1 << LOG_THREAD_SPACING, we assume they belong
+ // to the same thread.
+# define SELF_CACHE_SIZE 1024
+# define SC_INDEX(sp) (((unsigned long)(sp) >> 19) & (SELF_CACHE_SIZE-1))
+ // Mapping from sp value to cache index.
+ // Note that this is not in any real sense a hash
+ // function, since we need to be able to clear
+ // all possibly matching slots on thread startup.
+ // Thus all entries that might correspond to
+ // a given thread are intentionally contiguous.
+ // Works well with anything that allocates at least
+ // 512KB stacks.
+# define SC_CLEAR_MIN (-16) // When starting a new thread, we clear
+# define SC_CLEAR_MAX 0 // all self cache entries between
+ // SC_INDEX(sp)+SC_CLEAR_MIN and
+ // SC_INDEX(sp)+SC_CLEAR_MAX to ensure
+ // we never see stale values. The
+ // current values assume a downward
+ // growing stack of size <= 7.5 MB.
+# define BAD_HIGH_SP_VALUE ((size_t)(-1))
+
+extern volatile
+struct self_cache_entry {
+ size_t high_sp_bits; // sp value >> LOG_THREAD_SPACING
+ pthread_t self; // Corresponding thread
+} _Jv_self_cache[];
+
+void _Jv_Self_Cache_Init();
+
+_Jv_ThreadId_t
+_Jv_ThreadSelf_out_of_line(volatile self_cache_entry *sce,
+ size_t high_sp_bits);
+
+inline _Jv_ThreadId_t
+_Jv_ThreadSelf (void)
+{
+ int dummy;
+ size_t sp = (size_t)(&dummy);
+ unsigned h = SC_INDEX(sp);
+ volatile self_cache_entry *sce = _Jv_self_cache + h;
+ pthread_t candidate_self = sce -> self; // Read must precede following one.
+ // Read barrier goes here, if needed.
+ if (sce -> high_sp_bits == sp >> LOG_THREAD_SPACING)
+ {
+ // The sce -> self value we read must be valid. An intervening
+ // cache replacement by another thread would have first replaced
+ // high_sp_bits by something else, and it can't possibly change
+ // back without our intervention.
+ return candidate_self;
+ }
+ else
+ return _Jv_ThreadSelf_out_of_line(sce, sp >> LOG_THREAD_SPACING);
+}
+
+#define JV_SELF_DEFINED
+
+#endif /* SLOW_PTHREAD_SELF */
+
+#ifndef JV_SELF_DEFINED /* If all else fails, call pthread_self directly */
+
+typedef pthread_t _Jv_ThreadId_t;
+
+inline _Jv_ThreadId_t
+_Jv_ThreadSelf (void)
+{
+ return pthread_self();
+}
+
+#endif /* !JV_SELF_DEFINED */
+
+#endif /* JV_HASH_SYNCHRONIZATION */
+
inline _Jv_Thread_t *
_Jv_ThreadCurrentData (void)
{