author    dormando <dormando@rydia.net>  2012-01-10 15:21:08 -0800
committer dormando <dormando@rydia.net>  2012-01-10 15:57:10 -0800
commit    3b961388d14f9fa8516d388e96f95c1b798c41d3 (patch)
tree      e9d57f649ec876981206672ef69bbb4df455f043
parent    f4983b2068d13e5dc71fc075c35a085e904999cf (diff)
download  memcached-3b961388d14f9fa8516d388e96f95c1b798c41d3.tar.gz
more portable refcount atomics
Most credit to Dustin and Trond for showing me the way, though I have no way of testing this myself. These should probably just be defines...
-rw-r--r--  items.c      12
-rw-r--r--  memcached.h   2
-rw-r--r--  slabs.c       4
-rw-r--r--  thread.c     38
4 files changed, 48 insertions(+), 8 deletions(-)
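For reference, a minimal standalone sketch (illustrative only, not part of the patch) of the mutex-guarded fallback path and the wrappers' return-value semantics: both helpers return the value *after* the update, matching GCC's __sync_add_and_fetch/__sync_sub_and_fetch, which is what the `== 2` check in do_item_alloc relies on. Names mirror the patch; the small main() is an assumed test harness.

/* sketch.c: mutex fallback for refcount_incr/refcount_decr,
 * for platforms with neither GCC builtins nor Solaris <atomic.h>. */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t atomics_mutex = PTHREAD_MUTEX_INITIALIZER;

unsigned short refcount_incr(unsigned short *refcount) {
    unsigned short res;
    pthread_mutex_lock(&atomics_mutex);
    (*refcount)++;   /* parentheses: bump the value, not the pointer */
    res = *refcount; /* return the post-increment value */
    pthread_mutex_unlock(&atomics_mutex);
    return res;
}

unsigned short refcount_decr(unsigned short *refcount) {
    unsigned short res;
    pthread_mutex_lock(&atomics_mutex);
    (*refcount)--;
    res = *refcount; /* return the post-decrement value */
    pthread_mutex_unlock(&atomics_mutex);
    return res;
}

int main(void) {
    unsigned short rc = 1;            /* e.g. an item sitting on the LRU tail */
    assert(refcount_incr(&rc) == 2);  /* the "== 2" test in do_item_alloc */
    assert(refcount_decr(&rc) == 1);
    printf("refcount back to %hu\n", rc);
    return 0;
}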
diff --git a/items.c b/items.c
index faf43b5..43f77d9 100644
--- a/items.c
+++ b/items.c
@@ -104,7 +104,7 @@ item *do_item_alloc(char *key, const size_t nkey, const int flags, const rel_tim
rel_time_t oldest_live = settings.oldest_live;
search = tails[id];
- if (search != NULL && (__sync_add_and_fetch(&search->refcount, 1) == 2)) {
+ if (search != NULL && (refcount_incr(&search->refcount) == 2)) {
if ((search->exptime != 0 && search->exptime < current_time)
|| (search->time < oldest_live)) { // dead by flush
STATS_LOCK();
@@ -147,13 +147,13 @@ item *do_item_alloc(char *key, const size_t nkey, const int flags, const rel_tim
/* Initialize the item block: */
it->slabs_clsid = 0;
} else {
- __sync_sub_and_fetch(&search->refcount, 1);
+ refcount_decr(&search->refcount);
}
} else {
/* If the LRU is empty or locked, attempt to allocate memory */
it = slabs_alloc(ntotal, id);
if (search != NULL)
- __sync_sub_and_fetch(&search->refcount, 1);
+ refcount_decr(&search->refcount);
}
if (it == NULL) {
@@ -287,7 +287,7 @@ int do_item_link(item *it, const uint32_t hv) {
ITEM_set_cas(it, (settings.use_cas) ? get_cas_id() : 0);
assoc_insert(it, hv);
item_link_q(it);
- __sync_add_and_fetch(&it->refcount, 1);
+ refcount_incr(&it->refcount);
pthread_mutex_unlock(&cache_lock);
return 1;
@@ -328,7 +328,7 @@ void do_item_remove(item *it) {
MEMCACHED_ITEM_REMOVE(ITEM_key(it), it->nkey, it->nbytes);
assert((it->it_flags & ITEM_SLABBED) == 0);
- if (__sync_sub_and_fetch(&it->refcount, 1) == 0) {
+ if (refcount_decr(&it->refcount) == 0) {
item_free(it);
}
}
@@ -484,7 +484,7 @@ item *do_item_get(const char *key, const size_t nkey, const uint32_t hv) {
mutex_lock(&cache_lock);
item *it = assoc_find(key, nkey, hv);
if (it != NULL) {
- __sync_add_and_fetch(&it->refcount, 1);
+ refcount_incr(&it->refcount);
/* Optimization for slab reassignment. prevents popular items from
* jamming in busy wait. Can only do this here to satisfy lock order
* of item_lock, cache_lock, slabs_lock. */
diff --git a/memcached.h b/memcached.h
index b4dfb3b..97a79d0 100644
--- a/memcached.h
+++ b/memcached.h
@@ -534,6 +534,8 @@ void item_update(item *it);
void item_lock(uint32_t hv);
void item_unlock(uint32_t hv);
+unsigned short refcount_incr(unsigned short *refcount);
+unsigned short refcount_decr(unsigned short *refcount);
void STATS_LOCK(void);
void STATS_UNLOCK(void);
void threadlocal_stats_reset(void);
diff --git a/slabs.c b/slabs.c
index a2757d0..2483fbf 100644
--- a/slabs.c
+++ b/slabs.c
@@ -547,7 +547,7 @@ static int slab_rebalance_move(void) {
item *it = slab_rebal.slab_pos;
status = MOVE_PASS;
if (it->slabs_clsid != 255) {
- refcount = __sync_add_and_fetch(&it->refcount, 1);
+ refcount = refcount_incr(&it->refcount);
if (refcount == 1) { /* item is unlinked, unused */
if (it->it_flags & ITEM_SLABBED) {
/* remove from slab freelist */
@@ -589,7 +589,7 @@ static int slab_rebalance_move(void) {
case MOVE_BUSY:
slab_rebal.busy_items++;
was_busy++;
- __sync_sub_and_fetch(&it->refcount, 1);
+ refcount_decr(&it->refcount);
break;
case MOVE_PASS:
break;
diff --git a/thread.c b/thread.c
index 59063e6..5735376 100644
--- a/thread.c
+++ b/thread.c
@@ -11,6 +11,10 @@
#include <string.h>
#include <pthread.h>
+#ifdef __sun
+#include <atomic.h>
+#endif
+
#define ITEMS_PER_ALLOC 64
/* An item in the connection queue. */
@@ -39,6 +43,10 @@ pthread_mutex_t cache_lock;
/* Connection lock around accepting new connections */
pthread_mutex_t conn_lock = PTHREAD_MUTEX_INITIALIZER;
+#if !defined(__GNUC__) && !defined(__sun)
+pthread_mutex_t atomics_mutex = PTHREAD_MUTEX_INITIALIZER;
+#endif
+
/* Lock for global stats */
static pthread_mutex_t stats_lock;
@@ -70,6 +78,36 @@ static pthread_cond_t init_cond;
static void thread_libevent_process(int fd, short which, void *arg);
+inline unsigned short refcount_incr(unsigned short *refcount) {
+#ifdef __GNUC__
+ return __sync_add_and_fetch(refcount, 1);
+#elif defined(__sun)
+ return atomic_inc_ushort_nv(refcount);
+#else
+ unsigned short res;
+ mutex_lock(&atomics_mutex);
+ (*refcount)++;
+ res = *refcount;
+ pthread_mutex_unlock(&atomics_mutex);
+ return res;
+#endif
+}
+
+inline unsigned short refcount_decr(unsigned short *refcount) {
+#ifdef __GNUC__
+ return __sync_sub_and_fetch(refcount, 1);
+#elif defined(__sun)
+ return atomic_dec_ushort_nv(refcount);
+#else
+ unsigned short res;
+ mutex_lock(&atomics_mutex);
+ (*refcount)--;
+ res = *refcount;
+ pthread_mutex_unlock(&atomics_mutex);
+ return res;
+#endif
+}
+
void item_lock(uint32_t hv) {
mutex_lock(&item_locks[hv & item_lock_mask]);
}