From ea773779cdf8621c4d1ca13fed2fff4c239d3608 Mon Sep 17 00:00:00 2001
From: dormando
Date: Mon, 9 Jan 2017 01:03:09 -0800
Subject: stop using atomics for item refcount management

when I first split the locks up further I had a trick where
"item_remove()" did not require holding the associated item lock. If an
item were to be freed, it would then do the necessary work.

Since then, all calls to refcount_incr and refcount_decr only happen
while the item is locked. This was mostly due to the slab mover being
very tricky with locks.

The atomic is no longer needed as the refcount is only ever checked
after a lock to the item. Calling atomics is pretty expensive,
especially in multicore/multisocket scenarios. This yields a notable
performance increase.
---
 thread.c | 30 ------------------------------
 1 file changed, 30 deletions(-)

diff --git a/thread.c b/thread.c
index 2908d57..baae274 100644
--- a/thread.c
+++ b/thread.c
@@ -79,36 +79,6 @@ static pthread_cond_t init_cond;
 
 static void thread_libevent_process(int fd, short which, void *arg);
 
-unsigned short refcount_incr(unsigned short *refcount) {
-#ifdef HAVE_GCC_ATOMICS
-    return __sync_add_and_fetch(refcount, 1);
-#elif defined(__sun)
-    return atomic_inc_ushort_nv(refcount);
-#else
-    unsigned short res;
-    mutex_lock(&atomics_mutex);
-    (*refcount)++;
-    res = *refcount;
-    mutex_unlock(&atomics_mutex);
-    return res;
-#endif
-}
-
-unsigned short refcount_decr(unsigned short *refcount) {
-#ifdef HAVE_GCC_ATOMICS
-    return __sync_sub_and_fetch(refcount, 1);
-#elif defined(__sun)
-    return atomic_dec_ushort_nv(refcount);
-#else
-    unsigned short res;
-    mutex_lock(&atomics_mutex);
-    (*refcount)--;
-    res = *refcount;
-    mutex_unlock(&atomics_mutex);
-    return res;
-#endif
-}
-
 /* item_lock() must be held for an item before any modifications to either its
  * associated hash bucket, or the structure itself.
  * LRU modifications must hold the item lock, and the LRU lock.
-- 
cgit v1.2.1
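
For context, a minimal sketch of what the non-atomic replacements could
look like once every caller is guaranteed to hold the item lock; the
signatures mirror the removed functions, but the names and placement
here are illustrative and not taken from the actual replacement code in
this commit:

/* Sketch only: the caller is assumed to already hold the item lock
 * that guards this refcount, so a plain, non-atomic read-modify-write
 * is sufficient and avoids the cross-core cache-line contention that
 * atomic instructions incur. */
static inline unsigned short refcount_incr(unsigned short *refcount) {
    return ++*refcount;
}

static inline unsigned short refcount_decr(unsigned short *refcount) {
    return --*refcount;
}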