summaryrefslogtreecommitdiff
path: root/evthread.c
diff options
context:
space:
mode:
authorNick Mathewson <nickm@torproject.org>2011-07-04 12:22:54 -0400
committerNick Mathewson <nickm@torproject.org>2011-07-04 12:22:54 -0400
commit78fb99ceb265d5969e06f55b9b9f78d1593ebb7b (patch)
treef3b11e58882747c92bde43852b27e64e80f33a8f /evthread.c
parentc3d362858caf2a2162fa8ca8360f9cc39ae49628 (diff)
parente7fe92709e7f83e5189cbcf485ec3a040f0710b5 (diff)
downloadlibevent-78fb99ceb265d5969e06f55b9b9f78d1593ebb7b.tar.gz
Merge remote-tracking branch 'origin/patches-2.0'
Diffstat (limited to 'evthread.c')
-rw-r--r--evthread.c92
1 file changed, 90 insertions, 2 deletions
diff --git a/evthread.c b/evthread.c
index 37621dd8..22d09ae4 100644
--- a/evthread.c
+++ b/evthread.c
@@ -77,12 +77,25 @@ evthread_set_lock_callbacks(const struct evthread_lock_callbacks *cbs)
? &_original_lock_fns : &_evthread_lock_fns;
if (!cbs) {
+ if (target->alloc)
+ event_warnx("Trying to disable lock functions after "
+ "they have been set up will probably not work.");
memset(target, 0, sizeof(_evthread_lock_fns));
return 0;
}
+ if (target->alloc) {
+ /* Uh oh; we already had locking callbacks set up.*/
+ if (!memcmp(target, cbs, sizeof(_evthread_lock_fns))) {
+ /* no change -- allow this. */
+ return 0;
+ }
+ event_warnx("Can't change lock callbacks once they have been "
+ "initialized.");
+ return -1;
+ }
if (cbs->alloc && cbs->free && cbs->lock && cbs->unlock) {
memcpy(target, cbs, sizeof(_evthread_lock_fns));
- return 0;
+ return event_global_setup_locks_(1);
} else {
return -1;
}
@@ -96,8 +109,24 @@ evthread_set_condition_callbacks(const struct evthread_condition_callbacks *cbs)
? &_original_cond_fns : &_evthread_cond_fns;
if (!cbs) {
+ if (target->alloc_condition)
+ event_warnx("Trying to disable condition functions "
+ "after they have been set up will probably not "
+ "work.");
memset(target, 0, sizeof(_evthread_cond_fns));
- } else if (cbs->alloc_condition && cbs->free_condition &&
+ return 0;
+ }
+ if (target->alloc_condition) {
+ /* Uh oh; we already had condition callbacks set up.*/
+ if (!memcmp(target, cbs, sizeof(_evthread_cond_fns))) {
+ /* no change -- allow this. */
+ return 0;
+ }
+ event_warnx("Can't change condition callbacks once they "
+ "have been initialized.");
+ return -1;
+ }
+ if (cbs->alloc_condition && cbs->free_condition &&
cbs->signal_condition && cbs->wait_condition) {
memcpy(target, cbs, sizeof(_evthread_cond_fns));
}
@@ -259,6 +288,9 @@ evthread_enable_lock_debuging(void)
sizeof(struct evthread_condition_callbacks));
_evthread_cond_fns.wait_condition = debug_cond_wait;
_evthread_lock_debugging_enabled = 1;
+
+ /* XXX return value should get checked. */
+ event_global_setup_locks_(0);
}
int
@@ -282,6 +314,62 @@ _evthread_debug_get_real_lock(void *lock_)
return lock->lock;
}
+void *
+evthread_setup_global_lock_(void *lock_, unsigned locktype, int enable_locks)
+{
+ /* there are four cases here:
+ 1) we're turning on debugging; locking is not on.
+ 2) we're turning on debugging; locking is on.
+ 3) we're turning on locking; debugging is not on.
+ 4) we're turning on locking; debugging is on. */
+
+ if (!enable_locks && _original_lock_fns.alloc == NULL) {
+ /* Case 1: allocate a debug lock. */
+ EVUTIL_ASSERT(lock_ == NULL);
+ return debug_lock_alloc(locktype);
+ } else if (!enable_locks && _original_lock_fns.alloc != NULL) {
+ /* Case 2: wrap the lock in a debug lock. */
+ struct debug_lock *lock;
+ EVUTIL_ASSERT(lock_ != NULL);
+
+ if (!(locktype & EVTHREAD_LOCKTYPE_RECURSIVE)) {
+ /* We can't wrap it: We need a recursive lock */
+ _original_lock_fns.free(lock_, locktype);
+ return debug_lock_alloc(locktype);
+ }
+ lock = mm_malloc(sizeof(struct debug_lock));
+ if (!lock) {
+ _original_lock_fns.free(lock_, locktype);
+ return NULL;
+ }
+ lock->lock = lock_;
+ lock->locktype = locktype;
+ lock->count = 0;
+ lock->held_by = 0;
+ return lock;
+ } else if (enable_locks && ! _evthread_lock_debugging_enabled) {
+ /* Case 3: allocate a regular lock */
+ EVUTIL_ASSERT(lock_ == NULL);
+ return _evthread_lock_fns.alloc(locktype);
+ } else {
+ /* Case 4: Fill in a debug lock with a real lock */
+ struct debug_lock *lock = lock_;
+ EVUTIL_ASSERT(enable_locks &&
+ _evthread_lock_debugging_enabled);
+ EVUTIL_ASSERT(lock->locktype == locktype);
+ EVUTIL_ASSERT(lock->lock == NULL);
+ lock->lock = _original_lock_fns.alloc(
+ locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
+ if (!lock->lock) {
+ lock->count = -200;
+ mm_free(lock);
+ return NULL;
+ }
+ return lock;
+ }
+}
+
+
#ifndef EVTHREAD_EXPOSE_STRUCTS
unsigned long
_evthreadimpl_get_id()