summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
author    dormando <dormando@rydia.net>    2015-02-06 17:03:05 -0800
committer dormando <dormando@rydia.net>    2015-02-06 17:03:05 -0800
commit5d7dc88da3e7864c202cf01ff7873a2550c134d4 (patch)
treea3643d2f6306ef4eb3c3d38bfc982f7d4c616de9
parentc1f0262ec5bbc7ce7f9c6751caa2a6dfd7f8f259 (diff)
downloadmemcached-5d7dc88da3e7864c202cf01ff7873a2550c134d4.tar.gz
basic lock around hash_items counter
could/should be an atomic. Previously all write mutations were wrapped with cache_lock, but that's not the case anymore. Just enforce consistency around the hash_items counter, which is used for hash table expansion.
-rw-r--r--assoc.c5
1 file changed, 5 insertions, 0 deletions
diff --git a/assoc.c b/assoc.c
index 066e00c..e6cf09b 100644
--- a/assoc.c
+++ b/assoc.c
@@ -27,6 +27,7 @@
static pthread_cond_t maintenance_cond = PTHREAD_COND_INITIALIZER;
static pthread_mutex_t maintenance_lock = PTHREAD_MUTEX_INITIALIZER;
+static pthread_mutex_t hash_items_counter_lock = PTHREAD_MUTEX_INITIALIZER;
typedef unsigned long int ub4; /* unsigned 4-byte quantities */
typedef unsigned char ub1; /* unsigned 1-byte quantities */
@@ -167,10 +168,12 @@ int assoc_insert(item *it, const uint32_t hv) {
primary_hashtable[hv & hashmask(hashpower)] = it;
}
+ pthread_mutex_lock(&hash_items_counter_lock);
hash_items++;
if (! expanding && hash_items > (hashsize(hashpower) * 3) / 2) {
assoc_start_expand();
}
+ pthread_mutex_unlock(&hash_items_counter_lock);
MEMCACHED_ASSOC_INSERT(ITEM_key(it), it->nkey, hash_items);
return 1;
@@ -181,7 +184,9 @@ void assoc_delete(const char *key, const size_t nkey, const uint32_t hv) {
if (*before) {
item *nxt;
+ pthread_mutex_lock(&hash_items_counter_lock);
hash_items--;
+ pthread_mutex_unlock(&hash_items_counter_lock);
/* The DTrace probe cannot be triggered as the last instruction
* due to possible tail-optimization by the compiler
*/