summaryrefslogtreecommitdiff
path: root/assoc.c
diff options
context:
space:
mode:
authordormando <dormando@rydia.net>2020-11-09 15:47:39 -0800
committerdormando <dormando@rydia.net>2020-11-20 12:19:56 -0800
commitb2cee81823ba32adf09a6666d92aee56a8d98056 (patch)
treec7e7fb4763106f2d8860eda60f34742e4f184f2f /assoc.c
parent2896cc6c49c2bcd064e00da8bdf0dcaf1a481e73 (diff)
downloadmemcached-b2cee81823ba32adf09a6666d92aee56a8d98056.tar.gz
item crawler hash table walk mode
specifying 'hash' instead of 'all' will make the LRU crawler iterate over every bucket in the hash table once, instead of over what's in the LRU. This also doesn't suffer from missing items because of LRU reordering or high lock contention.
Diffstat (limited to 'assoc.c')
-rw-r--r--assoc.c69
1 files changed, 69 insertions, 0 deletions
diff --git a/assoc.c b/assoc.c
index 1a6b845..64405a6 100644
--- a/assoc.c
+++ b/assoc.c
@@ -292,3 +292,72 @@ void stop_assoc_maintenance_thread() {
pthread_join(maintenance_tid, NULL);
}
/* Cursor state for a single walk over the primary hash table.
 * Created by assoc_get_iterator(), advanced by assoc_iterate(),
 * released by assoc_iterate_final(). */
struct assoc_iterator {
    unsigned int bucket;  // index of the hash bucket currently being walked
    bool bucket_locked;   // true while the item lock for 'bucket' is held
    item *it;             // item most recently handed to the caller
    item *next;           // next item in the current bucket's chain
};
+
+void *assoc_get_iterator(void) {
+ struct assoc_iterator *iter = calloc(1, sizeof(struct assoc_iterator));
+ if (iter == NULL) {
+ return NULL;
+ }
+ // this will hang the caller while a hash table expansion is running.
+ mutex_lock(&maintenance_lock);
+ return iter;
+}
+
+bool assoc_iterate(void *iterp, item **it) {
+ struct assoc_iterator *iter = (struct assoc_iterator *) iterp;
+ *it = NULL;
+ // - if locked bucket and next, update next and return
+ if (iter->bucket_locked) {
+ if (iter->next != NULL) {
+ iter->it = iter->next;
+ iter->next = iter->it->h_next;
+ *it = iter->it;
+ } else {
+ // unlock previous bucket, if any
+ item_unlock(iter->bucket);
+ // iterate the bucket post since it starts at 0.
+ iter->bucket++;
+ iter->bucket_locked = false;
+ *it = NULL;
+ }
+ return true;
+ }
+
+ // - loop until we hit the end or find something.
+ if (iter->bucket != hashsize(hashpower)) {
+ // - lock next bucket
+ item_lock(iter->bucket);
+ iter->bucket_locked = true;
+ // - only check the primary hash table since expand is blocked.
+ iter->it = primary_hashtable[iter->bucket];
+ if (iter->it != NULL) {
+ // - set it, next and return
+ iter->next = iter->it->h_next;
+ *it = iter->it;
+ } else {
+ // - nothing found in this bucket, try next.
+ item_unlock(iter->bucket);
+ iter->bucket_locked = false;
+ iter->bucket++;
+ }
+ } else {
+ return false;
+ }
+
+ return true;
+}
+
+void assoc_iterate_final(void *iterp) {
+ struct assoc_iterator *iter = (struct assoc_iterator *) iterp;
+ if (iter->bucket_locked) {
+ item_unlock(iter->bucket);
+ }
+ mutex_unlock(&maintenance_lock);
+ free(iter);
+}