summaryrefslogtreecommitdiff
path: root/thread.c
diff options
context:
space:
mode:
authordormando <dormando@rydia.net>2017-01-09 01:03:09 -0800
committerdormando <dormando@rydia.net>2017-01-22 19:47:50 -0800
commitea773779cdf8621c4d1ca13fed2fff4c239d3608 (patch)
tree1f1e915a686b2ffd4f522453623e0a703e5ee393 /thread.c
parentdcdcd8a551e4af2210b750bc848ec7daf967d938 (diff)
downloadmemcached-ea773779cdf8621c4d1ca13fed2fff4c239d3608.tar.gz
stop using atomics for item refcount management
when I first split the locks up further I had a trick where "item_remove()" did not require holding the associated item lock. If an item were to be freed, it would then do the necessary work. Since then, all calls to refcount_incr and refcount_decr only happen while the item is locked. This was mostly due to the slab mover being very tricky with locks. The atomic is no longer needed as the refcount is only ever checked after a lock to the item. Calling atomics is pretty expensive, especially in multicore/multisocket scenarios. This yields a notable performance increase.
Diffstat (limited to 'thread.c')
-rw-r--r--thread.c30
1 files changed, 0 insertions, 30 deletions
diff --git a/thread.c b/thread.c
index 2908d57..baae274 100644
--- a/thread.c
+++ b/thread.c
@@ -79,36 +79,6 @@ static pthread_cond_t init_cond;
static void thread_libevent_process(int fd, short which, void *arg);
-unsigned short refcount_incr(unsigned short *refcount) {
-#ifdef HAVE_GCC_ATOMICS
- return __sync_add_and_fetch(refcount, 1);
-#elif defined(__sun)
- return atomic_inc_ushort_nv(refcount);
-#else
- unsigned short res;
- mutex_lock(&atomics_mutex);
- (*refcount)++;
- res = *refcount;
- mutex_unlock(&atomics_mutex);
- return res;
-#endif
-}
-
-unsigned short refcount_decr(unsigned short *refcount) {
-#ifdef HAVE_GCC_ATOMICS
- return __sync_sub_and_fetch(refcount, 1);
-#elif defined(__sun)
- return atomic_dec_ushort_nv(refcount);
-#else
- unsigned short res;
- mutex_lock(&atomics_mutex);
- (*refcount)--;
- res = *refcount;
- mutex_unlock(&atomics_mutex);
- return res;
-#endif
-}
-
/* item_lock() must be held for an item before any modifications to either its
* associated hash bucket, or the structure itself.
* LRU modifications must hold the item lock, and the LRU lock.