path: root/items.c
author    dormando <dormando@rydia.net>  2017-01-09 01:03:09 -0800
committer dormando <dormando@rydia.net>  2017-01-22 19:47:50 -0800
commit    ea773779cdf8621c4d1ca13fed2fff4c239d3608 (patch)
tree      1f1e915a686b2ffd4f522453623e0a703e5ee393 /items.c
parent    dcdcd8a551e4af2210b750bc848ec7daf967d938 (diff)
download  memcached-ea773779cdf8621c4d1ca13fed2fff4c239d3608.tar.gz
stop using atomics for item refcount management
When I first split the locks up further, I had a trick where "item_remove()" did not require holding the associated item lock. If an item were to be freed, it would then do the necessary work. Since then, all calls to refcount_incr and refcount_decr only happen while the item is locked. This was mostly due to the slab mover being very tricky with locks. The atomic is no longer needed, as the refcount is only ever checked after the item is locked. Calling atomics is pretty expensive, especially in multicore/multisocket scenarios. This yields a notable performance increase.
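The diff below only touches the call sites in items.c; the helpers themselves live outside this file, so the following is a minimal sketch of the idea rather than the tree's actual definitions. It assumes the old helper wrapped a GCC atomic builtin and the new ones reduce to plain increments; names like item_sketch and refcount_incr_atomic are illustrative only.

/* Minimal, illustrative sketch -- not copied from memcached.h. */
#include <stdio.h>

typedef struct { unsigned short refcount; } item_sketch; /* stand-in for memcached's item */

/* Old style: a full atomic read-modify-write, required while
 * do_item_remove() could run without holding the item lock. */
static inline unsigned short refcount_incr_atomic(unsigned short *refcount) {
    return __sync_add_and_fetch(refcount, 1);
}

/* New style: every caller already holds the item lock for the item's hash
 * bucket, so a plain increment/decrement is race-free and avoids the
 * cross-core cache-line synchronization an atomic would force. */
#define refcount_incr(it) (++(it)->refcount)
#define refcount_decr(it) (--(it)->refcount)

int main(void) {
    item_sketch it = { .refcount = 1 };   /* 1 = linked into the hash table */
    refcount_incr(&it);                   /* take a temporary reference: now 2 */
    if (refcount_decr(&it) == 0)          /* mirrors the check in do_item_remove() */
        printf("last reference dropped, freeing item\n");
    return 0;
}

With the lock invariant in place, the check in do_item_remove() stays correct: no other thread can observe or modify it->refcount while this thread holds the item lock, so a non-atomic decrement cannot lose an update.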
Diffstat (limited to 'items.c')
-rw-r--r--  items.c | 8
1 file changed, 4 insertions, 4 deletions
diff --git a/items.c b/items.c
index 23cbe6a..8955c31 100644
--- a/items.c
+++ b/items.c
@@ -371,7 +371,7 @@ int do_item_link(item *it, const uint32_t hv) {
ITEM_set_cas(it, (settings.use_cas) ? get_cas_id() : 0);
assoc_insert(it, hv);
item_link_q(it);
- refcount_incr(&it->refcount);
+ refcount_incr(it);
item_stats_sizes_add(it);
return 1;
@@ -413,7 +413,7 @@ void do_item_remove(item *it) {
assert((it->it_flags & ITEM_SLABBED) == 0);
assert(it->refcount > 0);
- if (refcount_decr(&it->refcount) == 0) {
+ if (refcount_decr(it) == 0) {
item_free(it);
}
}
@@ -763,7 +763,7 @@ void item_stats_sizes(ADD_STAT add_stats, void *c) {
item *do_item_get(const char *key, const size_t nkey, const uint32_t hv, conn *c, const bool do_update) {
item *it = assoc_find(key, nkey, hv);
if (it != NULL) {
- refcount_incr(&it->refcount);
+ refcount_incr(it);
/* Optimization for slab reassignment. prevents popular items from
* jamming in busy wait. Can only do this here to satisfy lock order
* of item_lock, slabs_lock. */
@@ -891,7 +891,7 @@ static int lru_pull_tail(const int orig_id, const int cur_lru,
if ((hold_lock = item_trylock(hv)) == NULL)
continue;
/* Now see if the item is refcount locked */
- if (refcount_incr(&search->refcount) != 2) {
+ if (refcount_incr(search) != 2) {
/* Note pathological case with ref'ed items in tail.
* Can still unlink the item, but it won't be reusable yet */
itemstats[id].lrutail_reflocked++;