/* Hash Tables Implementation. * * This file implements in memory hash tables with insert/del/replace/find/ * get-random-element operations. Hash tables will auto resize if needed * tables of power of two in size are used, collisions are handled by * chaining. See the source code for more information... :) * * Copyright (c) 2006-2012, Salvatore Sanfilippo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include "fmacros.h" #include #include #include #include #include #include #include #include "dict.h" #include "zmalloc.h" #ifndef DICT_BENCHMARK_MAIN #include "redisassert.h" #else #include #endif /* Using dictEnableResize() / dictDisableResize() we make possible to * enable/disable resizing of the hash table as needed. This is very important * for Redis, as we use copy-on-write and don't want to move too much memory * around when there is a child performing saving operations. * * Note that even when dict_can_resize is set to 0, not all resizes are * prevented: a hash table is still allowed to grow if the ratio between * the number of elements and the buckets > dict_force_resize_ratio. */ static int dict_can_resize = 1; static unsigned int dict_resize_ratio = 10; static unsigned int dict_force_resize_ratio = 60; /* -------------------------- private prototypes ---------------------------- */ static int _dictExpandIfNeeded(dict *ht); static unsigned long _dictNextPower(unsigned long size); static int _dictKeyIndex(dict *ht, const void *key, unsigned int *hash); static int _dictInit(dict *ht, dictType *type, void *privDataPtr); /* -------------------------- hash functions -------------------------------- */ static uint32_t dict_hash_function_seed = 5381; void dictSetHashFunctionSeed(uint32_t seed) { dict_hash_function_seed = seed; } uint32_t dictGetHashFunctionSeed(void) { return dict_hash_function_seed; } /* MurmurHash2, by Austin Appleby * Note - This code makes a few assumptions about how your machine behaves - * 1. We can read a 4-byte value from any address without crashing * 2. sizeof(int) == 4 * * And it has a few limitations - * * 1. It will not work incrementally. * 2. It will not produce the same results on little-endian and big-endian * machines. */ unsigned int dictGenHashFunction(const void *key, int len) { /* 'm' and 'r' are mixing constants generated offline. They're not really 'magic', they just happen to work well. 
*/ uint32_t seed = dict_hash_function_seed; const uint32_t m = 0x5bd1e995; const int r = 24; /* Initialize the hash to a 'random' value */ uint32_t h = seed ^ len; /* Mix 4 bytes at a time into the hash */ const unsigned char *data = (const unsigned char *)key; while(len >= 4) { uint32_t k = *(uint32_t*)data; k *= m; k ^= k >> r; k *= m; h *= m; h ^= k; data += 4; len -= 4; } /* Handle the last few bytes of the input array */ switch(len) { case 3: h ^= data[2] << 16; case 2: h ^= data[1] << 8; case 1: h ^= data[0]; h *= m; }; /* Do a few final mixes of the hash to ensure the last few * bytes are well-incorporated. */ h ^= h >> 13; h *= m; h ^= h >> 15; return (unsigned int)h; } /* And a case insensitive hash function (based on djb hash) */ unsigned int dictGenCaseHashFunction(const unsigned char *buf, int len) { unsigned int hash = (unsigned int)dict_hash_function_seed; while (len--) hash = ((hash << 5) + hash) + (tolower(*buf++)); /* hash * 33 + c */ return hash; } /* ----------------------------- API implementation ------------------------- */ /* Push a new entry in a vector of entries, taking care of allocating or * reallocating the vector if needed. If 'orig_entries' is non zero it * means that we are moving entries in the course of rehashing to a bigger * table, from a vector that had 'orig_entires' entries. The function can * use this hint to make better reallocation choices. */ dictEntry *dictPushEntry(dictEntryVector **table, unsigned int idx, const void *key, const void *val, unsigned int hash, uint32_t orig_entries) { dictEntryVector *dv = table[idx]; dictEntry *he; if (dv == NULL) { int initlen; if (orig_entries) { /* If we are rehashing to a larger table, in the average this * slot will take orig_entries/2 slots, assuming the new table * is double in size. 
*/ initlen = orig_entries/2; } else { initlen = dict_resize_ratio/4; } if (initlen == 0) initlen = 1; dv = table[idx] = zcalloc(sizeof(dictEntryVector)+sizeof(dictEntry)*initlen); dv->used = 1; dv->free = initlen-1; he = dv->entry; } else if (dv->free == 0) { int newlen = dv->used*2; dv = table[idx] = zrealloc(table[idx],sizeof(dictEntryVector)+sizeof(dictEntry)*newlen); he = dv->entry+dv->used; dv->used++; dv->free = newlen - dv->used; memset(dv->entry+dv->used,0,sizeof(dictEntry)*dv->free); } else { uint32_t entries = dv->used+dv->free; /* Start iterating with j set to the number of buckets already * used: it is likely there are empty buckets at the end after * a reallocation of the entries vector. */ uint32_t j = dv->used; uint32_t i = entries; while (i--) { if (dv->entry[j].key == NULL) break; j = (j+1)%entries; } he = dv->entry+j; dv->used++; dv->free--; } he->key = (void*) key; he->v.val = (void*) val; he->hash = hash; return he; } /* Reset a hash table already initialized with ht_init(). * NOTE: This function should only be called by ht_destroy(). 
*/ static void _dictReset(dictht *ht) { ht->table = NULL; ht->size = 0; ht->sizemask = 0; ht->used = 0; } /* Create a new hash table */ dict *dictCreate(dictType *type, void *privDataPtr) { dict *d = zmalloc(sizeof(*d)); _dictInit(d,type,privDataPtr); return d; } /* Initialize the hash table */ int _dictInit(dict *d, dictType *type, void *privDataPtr) { _dictReset(&d->ht[0]); _dictReset(&d->ht[1]); d->type = type; d->privdata = privDataPtr; d->rehashidx = -1; d->iterators = 0; return DICT_OK; } /* Resize the table to the minimal size that contains all the elements, * but with the invariant of a USED/BUCKETS ratio near to <= 1 */ int dictResize(dict *d) { int minimal; if (!dict_can_resize || dictIsRehashing(d)) return DICT_ERR; minimal = d->ht[0].used; if (minimal < DICT_HT_INITIAL_SIZE) minimal = DICT_HT_INITIAL_SIZE; return dictExpand(d, minimal); } /* Expand or create the hash table */ int dictExpand(dict *d, unsigned long size) { dictht n; /* the new hash table */ unsigned long realsize = _dictNextPower(size); /* the size is invalid if it is smaller than the number of * elements already inside the hash table */ if (dictIsRehashing(d)) return DICT_ERR; /* Rehashing to the same table size is not useful. */ if (realsize == d->ht[0].size) return DICT_ERR; /* Allocate the new hash table and initialize all pointers to NULL */ n.size = realsize; n.sizemask = realsize-1; n.table = zcalloc(realsize*sizeof(dictEntryVector*)); n.used = 0; /* Is this the first initialization? If so it's not really a rehashing * we just set the first hash table so that it can accept keys. */ if (d->ht[0].table == NULL) { d->ht[0] = n; return DICT_OK; } /* Prepare a second hash table for incremental rehashing */ d->ht[1] = n; d->rehashidx = 0; return DICT_OK; } /* Expand the dictionary to the optimal number of elements needed to * hold 'entries' keys. 
*/ int dictExpandToOptimalSize(dict *d, unsigned long entries) { return dictExpand(d,entries/(dict_resize_ratio/2)); } /* Performs N steps of incremental rehashing. Returns 1 if there are still * keys to move from the old to the new hash table, otherwise 0 is returned. * * Note that a rehashing step consists in moving a bucket (that may have more * than one key as we use chaining) from the old to the new hash table, however * since part of the hash table may be composed of empty spaces, it is not * guaranteed that this function will rehash even a single bucket, since it * will visit at max N*10 empty buckets in total, otherwise the amount of * work it does would be unbound and the function may block for a long time. */ int dictRehash(dict *d, int n) { int empty_visits = n*10; /* Max number of empty buckets to visit. */ if (!dictIsRehashing(d)) return 0; int to_smaller = d->ht[0].size > d->ht[1].size; /* Moving to smaller tab? */ while(n-- && d->ht[0].used != 0) { dictEntryVector *dv; /* Note that rehashidx can't overflow as we are sure there are more * elements because ht[0].used != 0 */ assert(d->ht[0].size > (unsigned long)d->rehashidx); while(d->ht[0].table[d->rehashidx] == NULL) { d->rehashidx++; if (--empty_visits == 0) return 1; } dv = d->ht[0].table[d->rehashidx]; /* Move all the keys in this bucket from the old to the new hash HT. * TODO: if the table we are rehasing to is smaller than the current * table, we can just move the whole entries vector from one table * to the next, since all the entries of this table will hash into * the same slot of the target table. */ uint32_t entries = dv ? (dv->used + dv->free) : 0; uint32_t entries_hint = to_smaller ? 
0 : entries; uint32_t j; for (j = 0; j < entries; j++) { unsigned int h; /* Get the index in the new hash table */ if (dv->entry[j].key == NULL) continue; h = dv->entry[j].hash & d->ht[1].sizemask; dictPushEntry(d->ht[1].table,h,dv->entry[j].key,dv->entry[j].v.val, dv->entry[j].hash, entries_hint); d->ht[1].used++; d->ht[0].used--; } zfree(dv); d->ht[0].table[d->rehashidx] = NULL; d->rehashidx++; } /* Check if we already rehashed the whole table... */ if (d->ht[0].used == 0) { zfree(d->ht[0].table); d->ht[0] = d->ht[1]; _dictReset(&d->ht[1]); d->rehashidx = -1; return 0; } /* More to rehash... */ return 1; } long long timeInMilliseconds(void) { struct timeval tv; gettimeofday(&tv,NULL); return (((long long)tv.tv_sec)*1000)+(tv.tv_usec/1000); } /* Rehash for an amount of time between ms milliseconds and ms+1 milliseconds */ int dictRehashMilliseconds(dict *d, int ms) { long long start = timeInMilliseconds(); int rehashes = 0; while(dictRehash(d,100)) { rehashes += 100; if (timeInMilliseconds()-start > ms) break; } return rehashes; } /* This function performs just a step of rehashing, and only if there are * no safe iterators bound to our hash table. When we have iterators in the * middle of a rehashing we can't mess with the two hash tables otherwise * some element can be missed or duplicated. * * This function is called by common lookup or update operations in the * dictionary so that the hash table automatically migrates from H1 to H2 * while it is actively used. */ static void _dictRehashStep(dict *d) { if (d->iterators == 0) dictRehash(d,1); } /* Add an element to the target hash table */ int dictAdd(dict *d, void *key, void *val) { dictEntry *entry = dictAddRaw(d,key); if (!entry) return DICT_ERR; dictSetVal(d, entry, val); return DICT_OK; } /* Low level add. This function adds the entry but instead of setting * a value returns the dictEntry structure to the user, that will make * sure to fill the value field as he wishes. 
 *
 * This function is also directly exposed to the user API to be called
 * mainly in order to store non-pointers inside the hash value, example:
 *
 *     entry = dictAddRaw(dict,mykey);
 *     if (entry != NULL) dictSetSignedIntegerVal(entry,1000);
 *
 * Return values:
 *
 * If key already exists NULL is returned.
 * If key was added, the hash entry is returned to be manipulated by the caller.
 */
dictEntry *dictAddRaw(dict *d, void *key)
{
    int index;
    unsigned int hash;
    dictEntry *entry;
    dictht *ht;

    /* Writers contribute a single step of incremental rehashing so the
     * table migration makes progress under load. */
    if (dictIsRehashing(d)) _dictRehashStep(d);

    /* Get the index of the new element, or -1 if
     * the element already exists. */
    if ((index = _dictKeyIndex(d, key, &hash)) == -1)
        return NULL;

    /* Store the new entry: if we are rehashing all new entries go to
     * the new table (otherwise they would be rehashed again later). */
    ht = dictIsRehashing(d) ? &d->ht[1] : &d->ht[0];
    entry = dictPushEntry(ht->table,index,key,NULL,hash,0);
    ht->used++;
    return entry;
}

/* Add an element, discarding the old if the key already exists.
 * Return 1 if the key was added from scratch, 0 if there was already an
 * element with such key and dictReplace() just performed a value update
 * operation. */
int dictReplace(dict *d, void *key, void *val)
{
    dictEntry *entry, auxentry;

    /* Try to add the element. If the key
     * does not exist dictAdd will succeed. */
    if (dictAdd(d, key, val) == DICT_OK)
        return 1;
    /* It already exists, get the entry */
    entry = dictFind(d, key);
    /* Set the new value and free the old one. Note that it is important
     * to do that in this order, as the value may just be exactly the same
     * as the previous one. In this context, think to reference counting,
     * you want to increment (set), and then decrement (free), and not the
     * reverse. */
    auxentry = *entry;
    dictSetVal(d, entry, val);
    dictFreeVal(d, &auxentry);
    return 0;
}

/* dictReplaceRaw() is simply a version of dictAddRaw() that always
 * returns the hash entry of the specified key, even if the key already
 * exists and can't be added (in that case the entry of the already
 * existing key is returned.)
* * See dictAddRaw() for more information. */ dictEntry *dictReplaceRaw(dict *d, void *key) { dictEntry *entry = dictFind(d,key); return entry ? entry : dictAddRaw(d,key); } /* Search and remove an element */ static int dictGenericDelete(dict *d, const void *key, int nofree) { unsigned int h, idx; dictEntryVector *dv; int table; if (d->ht[0].size == 0) return DICT_ERR; /* d->ht[0].table is NULL */ if (dictIsRehashing(d)) _dictRehashStep(d); h = dictHashKey(d, key); for (table = 0; table <= 1; table++) { idx = h & d->ht[table].sizemask; dv = d->ht[table].table[idx]; if (dv != NULL) { uint32_t entries = dv->used + dv->free; uint32_t j; for (j = 0; j < entries; j++) { dictEntry *he = dv->entry+j; if (he->key == NULL || he->hash != h) continue; if (key==he->key || dictCompareKeys(d, key, he->key)) { if (!nofree) { dictFreeKey(d, he); dictFreeVal(d, he); } he->key = NULL; /* No need to clear val / hash fields. The key set * to NULL is enough to mark the slot as empty. */ d->ht[table].used--; dv->free++; dv->used--; if (dv->used == 0) { zfree(dv); d->ht[table].table[idx] = NULL; } /* TODO: Compact the entries vector if there are not * safe iterators active? Alternatively we could do it * in a single place when scanning entries for read * accesses. 
*/ return DICT_OK; } } } if (!dictIsRehashing(d)) break; } return DICT_ERR; /* not found */ } int dictDelete(dict *ht, const void *key) { return dictGenericDelete(ht,key,0); } int dictDeleteNoFree(dict *ht, const void *key) { return dictGenericDelete(ht,key,1); } /* Destroy an entire dictionary */ int _dictClear(dict *d, dictht *ht, void(callback)(void *)) { unsigned long i; /* Free all the elements */ for (i = 0; i < ht->size && ht->used > 0; i++) { dictEntryVector *dv; if (callback && (i & 65535) == 0) callback(d->privdata); if ((dv = ht->table[i]) == NULL) continue; uint32_t entries = dv->used + dv->free; uint32_t j; for (j = 0; j < entries; j++) { dictEntry *he = dv->entry+j; if (he->key == NULL) continue; dictFreeKey(d, he); dictFreeVal(d, he); ht->used--; } zfree(dv); ht->table[i] = NULL; } /* Free the table itself. */ zfree(ht->table); /* Re-initialize the table */ _dictReset(ht); return DICT_OK; /* never fails */ } /* Clear & Release the hash table */ void dictRelease(dict *d) { _dictClear(d,&d->ht[0],NULL); _dictClear(d,&d->ht[1],NULL); zfree(d); } void debugDv(dictEntryVector *dv) { uint32_t entries = dv->used + dv->free; uint32_t j; for (j = 0; j < entries; j++) { unsigned int h2 = (dv->entry[j].hash<<5) + (dv->entry[j].hash>>16); int target = h2 % entries; if (dv->entry[j].key == NULL) target = -1; printf("%d [target=%u]: %s\n", j, target, dv->entry[j].key); } } dictEntry *dictFind(dict *d, const void *key) { dictEntryVector *dv; unsigned int h, idx, table; if (d->ht[0].used + d->ht[1].used == 0) return NULL; /* dict is empty */ if (dictIsRehashing(d)) _dictRehashStep(d); h = dictHashKey(d, key); for (table = 0; table <= 1; table++) { idx = h & d->ht[table].sizemask; dv = d->ht[table].table[idx]; if (dv != NULL) { // printf("LOOKUP %s\n", key); // debugDv(dv); uint32_t entries = dv->used + dv->free; /* In order to scan less entries, we use the entries vector * as a sub hash table. 
The entry is not really guaranteed to * be there, however we try to access it using this pattern and * if possible we also swap the entries so that the next time * we'll succeed in accessing the entry at the first try. */ unsigned int h2 = (h<<5)+(h>>16); uint32_t j = h2 % entries, i = entries; // printf("SCAN: "); while(i--) { // printf("%d ",j); dictEntry *he = dv->entry+j; if (he->key == NULL || he->hash != h) { j = (j+1) % entries; continue; } if (key==he->key || dictCompareKeys(d, key, he->key)) { #if 1 if (d->iterators == 0 && /* No interators with indexes. */ dict_can_resize && /* No copy on write concerns. */ i != entries-1 /* Not already in right place. */) { /* Try to find a suitable position... */ uint32_t destpos, k; for (k = 0; k < 3; k++) { destpos = (h2+k) % entries; uint32_t target_h2 = (dv->entry[destpos].hash << 5) + (dv->entry[destpos].hash >> 16); if (dv->entry[destpos].key == NULL || (target_h2 % entries) != destpos) break; /* Valid position found. */ } if (k != 3) { dictEntry copy = *he; dv->entry[j] = dv->entry[destpos];; dv->entry[destpos] = copy; he = dv->entry+destpos; } } #endif // printf("\n\n"); return he; } j = (j+1) % entries; } } if (!dictIsRehashing(d)) return NULL; } return NULL; } void *dictFetchValue(dict *d, const void *key) { dictEntry *he; he = dictFind(d,key); return he ? dictGetVal(he) : NULL; } /* A fingerprint is a 64 bit number that represents the state of the dictionary * at a given time, it's just a few dict properties xored together. * When an unsafe iterator is initialized, we get the dict fingerprint, and * check the fingerprint again when the iterator is released. * If the two fingerprints are different it means that the user of the iterator * performed forbidden operations against the dictionary while iterating. 
*/ long long dictFingerprint(dict *d) { long long integers[6], hash = 0; int j; integers[0] = (long) d->ht[0].table; integers[1] = d->ht[0].size; integers[2] = d->ht[0].used; integers[3] = (long) d->ht[1].table; integers[4] = d->ht[1].size; integers[5] = d->ht[1].used; /* We hash N integers by summing every successive integer with the integer * hashing of the previous sum. Basically: * * Result = hash(hash(hash(int1)+int2)+int3) ... * * This way the same set of integers in a different order will (likely) * hash to a different number. */ for (j = 0; j < 6; j++) { hash += integers[j]; /* For the hashing step we use Tomas Wang's 64 bit integer hash. */ hash = (~hash) + (hash << 21); // hash = (hash << 21) - hash - 1; hash = hash ^ (hash >> 24); hash = (hash + (hash << 3)) + (hash << 8); // hash * 265 hash = hash ^ (hash >> 14); hash = (hash + (hash << 2)) + (hash << 4); // hash * 21 hash = hash ^ (hash >> 28); hash = hash + (hash << 31); } return hash; } dictIterator *dictGetIterator(dict *d) { dictIterator *iter = zmalloc(sizeof(*iter)); iter->d = d; iter->table = 0; iter->index = -1; iter->safe = 0; iter->entry = -1; return iter; } dictIterator *dictGetSafeIterator(dict *d) { dictIterator *i = dictGetIterator(d); i->safe = 1; return i; } dictEntry *dictNext(dictIterator *iter) { while (1) { if (iter->entry == -1) { dictht *ht = &iter->d->ht[iter->table]; if (iter->index == -1 && iter->table == 0) { if (iter->safe) iter->d->iterators++; else iter->fingerprint = dictFingerprint(iter->d); } iter->index++; if (iter->index >= (long) ht->size) { if (dictIsRehashing(iter->d) && iter->table == 0) { iter->table++; iter->index = 0; ht = &iter->d->ht[1]; } else { break; } } iter->entry = 0; } else { iter->entry++; } dictEntryVector *dv = iter->d->ht[iter->table].table[iter->index]; if (dv == NULL || (unsigned)iter->entry >= (dv->used + dv->free)) iter->entry = -1; if (iter->entry >= 0 && dv->entry[iter->entry].key != NULL) { return dv->entry+iter->entry; } } return NULL; } 
void dictReleaseIterator(dictIterator *iter)
{
    /* Only undo the registration/fingerprint if dictNext() was ever
     * called (index == -1 && table == 0 means the iterator is pristine). */
    if (!(iter->index == -1 && iter->table == 0)) {
        if (iter->safe)
            iter->d->iterators--;
        else
            /* Unsafe iterator: abort if the dict was modified while
             * iterating (fingerprint mismatch). */
            assert(iter->fingerprint == dictFingerprint(iter->d));
    }
    zfree(iter);
}

/* Return a random entry from the hash table. Useful to
 * implement randomized algorithms.
 *
 * Two-level sampling: first pick a random non-empty bucket (vector),
 * then pick a random occupied slot inside it. Note that buckets holding
 * more entries are not proportionally more likely to be picked, so the
 * distribution over elements is only approximately uniform. */
dictEntry *dictGetRandomKey(dict *d)
{
    dictEntryVector *dv;
    unsigned int h;

    if (dictSize(d) == 0) return NULL;
    if (dictIsRehashing(d)) _dictRehashStep(d);
    if (dictIsRehashing(d)) {
        do {
            /* We are sure there are no elements in indexes from 0
             * to rehashidx-1, so sample the remaining range of ht[0]
             * plus the whole ht[1]. */
            h = d->rehashidx + (random() % (d->ht[0].size +
                                            d->ht[1].size -
                                            d->rehashidx));
            dv = (h >= d->ht[0].size) ? d->ht[1].table[h - d->ht[0].size] :
                                        d->ht[0].table[h];
        } while(dv == NULL);
    } else {
        do {
            h = random() & d->ht[0].sizemask;
            dv = d->ht[0].table[h];
        } while(dv == NULL);
    }

    /* Now we found a non empty vector. We need to get a random element from
     * the vector now. Terminates because a non-NULL vector always has
     * used >= 1 (empty vectors are freed on delete), though it may retry
     * several times when the vector has many free slots. */
    dictEntry *he = NULL;
    while(he == NULL) {
        uint32_t r = random() % (dv->used + dv->free);
        if (dv->entry[r].key != NULL) {
            he = dv->entry+r;
            break;
        }
    }
    return he;
}

/* This function samples the dictionary to return a few keys from random
 * locations.
 *
 * It does not guarantee to return all the keys specified in 'count', nor
 * it does guarantee to return non-duplicated elements, however it will make
 * some effort to do both things.
 *
 * Returned pointers to hash table entries are stored into 'des' that
 * points to an array of dictEntry pointers. The array must have room for
 * at least 'count' elements, that is the argument we pass to the function
 * to tell how many random elements we need.
 *
 * The function returns the number of items stored into 'des', that may
 * be less than 'count' if the hash table has less than 'count' elements
 * inside, or if not enough elements were found in a reasonable amount of
 * steps.
* * Note that this function is not suitable when you need a good distribution * of the returned items, but only when you need to "sample" a given number * of continuous elements to run some kind of algorithm or to produce * statistics. However the function is much faster than dictGetRandomKey() * at producing N elements. */ unsigned int dictGetSomeKeys(dict *d, dictEntry **des, unsigned int count) { unsigned long j; /* internal hash table id, 0 or 1. */ unsigned long tables; /* 1 or 2 tables? */ unsigned long stored = 0, maxsizemask; unsigned long maxsteps; if (dictSize(d) < count) count = dictSize(d); maxsteps = count*10; /* Try to do a rehashing work proportional to 'count'. */ for (j = 0; j < count; j++) { if (dictIsRehashing(d)) _dictRehashStep(d); else break; } tables = dictIsRehashing(d) ? 2 : 1; maxsizemask = d->ht[0].sizemask; if (tables > 1 && maxsizemask < d->ht[1].sizemask) maxsizemask = d->ht[1].sizemask; /* Pick a random point inside the larger table. */ unsigned long i = random() & maxsizemask; unsigned long emptylen = 0; /* Continuous empty entries so far. */ while(stored < count && maxsteps--) { for (j = 0; j < tables; j++) { /* Invariant of the dict.c rehashing: up to the indexes already * visited in ht[0] during the rehashing, there are no populated * buckets, so we can skip ht[0] for indexes between 0 and idx-1. */ if (tables == 2 && j == 0 && i < (unsigned long) d->rehashidx) { /* Moreover, if we are currently out of range in the second * table, there will be no elements in both tables up to * the current rehashing index, so we jump if possible. * (this happens when going from big to small table). */ if (i >= d->ht[1].size) i = d->rehashidx; continue; } if (i >= d->ht[j].size) continue; /* Out of range for this table. */ dictEntryVector *dv = d->ht[j].table[i]; /* Count contiguous empty buckets, and jump to other * locations if they reach 'count' (with a minimum of 5). 
*/ if (dv == NULL) { emptylen++; if (emptylen >= 5 && emptylen > count) { i = random() & maxsizemask; emptylen = 0; } } else { emptylen = 0; uint32_t entries = dv->used + dv->free; uint32_t j; for (j = 0; j < entries; j++) { if (dv->entry[j].key != NULL) { /* Collect all the elements of the buckets found non * empty while iterating. */ *des = dv->entry+j; des++; stored++; if (stored == count) return stored; } } } } i = (i+1) & maxsizemask; } return stored; } /* Function to reverse bits. Algorithm from: * http://graphics.stanford.edu/~seander/bithacks.html#ReverseParallel */ static unsigned long rev(unsigned long v) { unsigned long s = 8 * sizeof(v); // bit size; must be power of 2 unsigned long mask = ~0; while ((s >>= 1) > 0) { mask ^= (mask << s); v = ((v >> s) & mask) | ((v << s) & ~mask); } return v; } /* dictScan() is used to iterate over the elements of a dictionary. * * Iterating works the following way: * * 1) Initially you call the function using a cursor (v) value of 0. * 2) The function performs one step of the iteration, and returns the * new cursor value you must use in the next call. * 3) When the returned cursor is 0, the iteration is complete. * * The function guarantees all elements present in the * dictionary get returned between the start and end of the iteration. * However it is possible some elements get returned multiple times. * * For every element returned, the callback argument 'fn' is * called with 'privdata' as first argument and the dictionary entry * 'de' as second argument. * * HOW IT WORKS. * * The iteration algorithm was designed by Pieter Noordhuis. * The main idea is to increment a cursor starting from the higher order * bits. That is, instead of incrementing the cursor normally, the bits * of the cursor are reversed, then the cursor is incremented, and finally * the bits are reversed again. * * This strategy is needed because the hash table may be resized between * iteration calls. 
* * dict.c hash tables are always power of two in size, and they * use chaining, so the position of an element in a given table is given * by computing the bitwise AND between Hash(key) and SIZE-1 * (where SIZE-1 is always the mask that is equivalent to taking the rest * of the division between the Hash of the key and SIZE). * * For example if the current hash table size is 16, the mask is * (in binary) 1111. The position of a key in the hash table will always be * the last four bits of the hash output, and so forth. * * WHAT HAPPENS IF THE TABLE CHANGES IN SIZE? * * If the hash table grows, elements can go anywhere in one multiple of * the old bucket: for example let's say we already iterated with * a 4 bit cursor 1100 (the mask is 1111 because hash table size = 16). * * If the hash table will be resized to 64 elements, then the new mask will * be 111111. The new buckets you obtain by substituting in ??1100 * with either 0 or 1 can be targeted only by keys we already visited * when scanning the bucket 1100 in the smaller hash table. * * By iterating the higher bits first, because of the inverted counter, the * cursor does not need to restart if the table size gets bigger. It will * continue iterating using cursors without '1100' at the end, and also * without any other combination of the final 4 bits already explored. * * Similarly when the table size shrinks over time, for example going from * 16 to 8, if a combination of the lower three bits (the mask for size 8 * is 111) were already completely explored, it would not be visited again * because we are sure we tried, for example, both 0111 and 1111 (all the * variations of the higher bit) so we don't need to test it again. * * WAIT... YOU HAVE *TWO* TABLES DURING REHASHING! * * Yes, this is true, but we always iterate the smaller table first, then * we test all the expansions of the current cursor into the larger * table. 
For example if the current cursor is 101 and we also have a * larger table of size 16, we also test (0)101 and (1)101 inside the larger * table. This reduces the problem back to having only one table, where * the larger one, if it exists, is just an expansion of the smaller one. * * LIMITATIONS * * This iterator is completely stateless, and this is a huge advantage, * including no additional memory used. * * The disadvantages resulting from this design are: * * 1) It is possible we return elements more than once. However this is usually * easy to deal with in the application level. * 2) The iterator must return multiple elements per call, as it needs to always * return all the keys chained in a given bucket, and all the expansions, so * we are sure we don't miss keys moving during rehashing. * 3) The reverse cursor is somewhat hard to understand at first, but this * comment is supposed to help. */ unsigned long dictScan(dict *d, unsigned long v, dictScanFunction *fn, void *privdata) { dictht *t0, *t1; const dictEntryVector *dv; uint32_t j; unsigned long m0, m1; if (dictSize(d) == 0) return 0; if (!dictIsRehashing(d)) { t0 = &(d->ht[0]); m0 = t0->sizemask; /* Emit entries at cursor */ dv = t0->table[v & m0]; for (j = 0; dv && j < (dv->used+dv->free); j++) { if (dv->entry[j].key == NULL) continue; fn(privdata, dv->entry+j); } } else { t0 = &d->ht[0]; t1 = &d->ht[1]; /* Make sure t0 is the smaller and t1 is the bigger table */ if (t0->size > t1->size) { t0 = &d->ht[1]; t1 = &d->ht[0]; } m0 = t0->sizemask; m1 = t1->sizemask; /* Emit entries at cursor */ dv = t0->table[v & m0]; for (j = 0; dv && j < (dv->used+dv->free); j++) { if (dv->entry[j].key == NULL) continue; fn(privdata, dv->entry+j); } /* Iterate over indices in larger table that are the expansion * of the index pointed to by the cursor in the smaller table */ do { /* Emit entries at cursor */ dv = t1->table[v & m1]; for (j = 0; dv && j < (dv->used+dv->free); j++) { if (dv->entry[j].key == NULL) continue; 
fn(privdata, dv->entry+j); } /* Increment bits not covered by the smaller mask */ v = (((v | m0) + 1) & ~m0) | (v & m0); /* Continue while bits covered by mask difference is non-zero */ } while (v & (m0 ^ m1)); } /* Set unmasked bits so incrementing the reversed cursor * operates on the masked bits of the smaller table */ v |= ~m0; /* Increment the reverse cursor */ v = rev(v); v++; v = rev(v); return v; } /* ------------------------- private functions ------------------------------ */ /* Expand the hash table if needed */ static int _dictExpandIfNeeded(dict *d) { /* Incremental rehashing already in progress. Return. */ if (dictIsRehashing(d)) return DICT_OK; /* If the hash table is empty expand it to the initial size. */ if (d->ht[0].size == 0) return dictExpand(d, DICT_HT_INITIAL_SIZE); /* If we reached the 1:1 ratio, and we are allowed to resize the hash * table (global setting) or we should avoid it but the ratio between * elements/buckets is over the "safe" threshold, we resize doubling * the number of buckets. */ if (d->ht[0].used >= (d->ht[0].size * dict_resize_ratio) && (dict_can_resize || d->ht[0].used/d->ht[0].size > dict_force_resize_ratio)) { return dictExpand(d, d->ht[0].used / (dict_resize_ratio/2)); } return DICT_OK; } /* Our hash table capability is a power of two */ static unsigned long _dictNextPower(unsigned long size) { unsigned long i = DICT_HT_INITIAL_SIZE; if (size >= LONG_MAX) return LONG_MAX; while(1) { if (i >= size) return i; i *= 2; } } /* Returns the index of a free slot that can be populated with * a hash entry for the given 'key'. * If the key already exists, -1 is returned. * * Note that if we are in the process of rehashing the hash table, the * index is always returned in the context of the second (new) hash table. 
*/ static int _dictKeyIndex(dict *d, const void *key, unsigned int *hash) { unsigned int h, idx, table; dictEntryVector *dv; /* Expand the hash table if needed */ if (_dictExpandIfNeeded(d) == DICT_ERR) return -1; /* Compute the key hash value */ h = dictHashKey(d, key); *hash = h; for (table = 0; table <= 1; table++) { idx = h & d->ht[table].sizemask; /* Search if this slot does not already contain the given key */ dv = d->ht[table].table[idx]; if (dv != NULL) { uint32_t entries = dv->used + dv->free; uint32_t j; for (j = 0; j < entries; j++) { dictEntry *he = dv->entry+j; if (he->key == NULL || he->hash != h) continue; if (key==he->key || dictCompareKeys(d, key, he->key)) return -1; } } if (!dictIsRehashing(d)) break; } return idx; } void dictEmpty(dict *d, void(callback)(void*)) { _dictClear(d,&d->ht[0],callback); _dictClear(d,&d->ht[1],callback); d->rehashidx = -1; d->iterators = 0; } void dictEnableResize(void) { dict_can_resize = 1; } void dictDisableResize(void) { dict_can_resize = 0; } /* ------------------------------- Debugging ---------------------------------*/ #define DICT_STATS_VECTLEN 50 size_t _dictGetStatsHt(char *buf, size_t bufsize, dictht *ht, int tableid) { unsigned long i, slots = 0, chainlen, maxchainlen = 0; unsigned long totchainlen = 0; unsigned long clvector[DICT_STATS_VECTLEN]; size_t l = 0; if (ht->used == 0) { return snprintf(buf,bufsize, "No stats available for empty dictionaries\n"); } /* Compute stats. */ for (i = 0; i < DICT_STATS_VECTLEN; i++) clvector[i] = 0; for (i = 0; i < ht->size; i++) { if (ht->table[i] == NULL) { clvector[0]++; continue; } slots++; /* For each hash entry on this slot... */ dictEntryVector *dv = ht->table[i]; chainlen = dv->used; clvector[(chainlen < DICT_STATS_VECTLEN) ? chainlen : (DICT_STATS_VECTLEN-1)]++; if (chainlen > maxchainlen) maxchainlen = chainlen; totchainlen += chainlen; } /* Generate human readable stats. 
*/ l += snprintf(buf+l,bufsize-l, "Hash table %d stats (%s):\n" " table size: %ld\n" " number of elements: %ld\n" " different slots: %ld\n" " max chain length: %ld\n" " avg chain length (counted): %.02f\n" " avg chain length (computed): %.02f\n" " Chain length distribution:\n", tableid, (tableid == 0) ? "main hash table" : "rehashing target", ht->size, ht->used, slots, maxchainlen, (float)totchainlen/slots, (float)ht->used/slots); for (i = 0; i < DICT_STATS_VECTLEN-1; i++) { if (clvector[i] == 0) continue; if (l >= bufsize) break; l += snprintf(buf+l,bufsize-l, " %s%ld: %ld (%.02f%%)\n", (i == DICT_STATS_VECTLEN-1)?">= ":"", i, clvector[i], ((float)clvector[i]/ht->size)*100); } /* Unlike snprintf(), teturn the number of characters actually written. */ if (bufsize) buf[bufsize-1] = '\0'; return strlen(buf); } void dictGetStats(char *buf, size_t bufsize, dict *d) { size_t l; char *orig_buf = buf; size_t orig_bufsize = bufsize; l = _dictGetStatsHt(buf,bufsize,&d->ht[0],0); buf += l; bufsize -= l; if (dictIsRehashing(d) && bufsize > 0) { _dictGetStatsHt(buf,bufsize,&d->ht[1],1); } /* Make sure there is a NULL term at the end. 
*/ if (orig_bufsize) orig_buf[orig_bufsize-1] = '\0'; } /* ------------------------------- Benchmark ---------------------------------*/ #ifdef DICT_BENCHMARK_MAIN #include "sds.h" unsigned int hashCallback(const void *key) { return dictGenHashFunction((unsigned char*)key, sdslen((char*)key)); } int compareCallback(void *privdata, const void *key1, const void *key2) { int l1,l2; DICT_NOTUSED(privdata); l1 = sdslen((sds)key1); l2 = sdslen((sds)key2); if (l1 != l2) return 0; return memcmp(key1, key2, l1) == 0; } void freeCallback(void *privdata, void *val) { DICT_NOTUSED(privdata); sdsfree(val); } dictType BenchmarkDictType = { hashCallback, NULL, NULL, compareCallback, freeCallback, NULL }; #define start_benchmark() start = timeInMilliseconds() #define end_benchmark(msg) do { \ elapsed = timeInMilliseconds()-start; \ printf(msg ": %ld items in %lld ms\n", count, elapsed); \ } while(0); /* dict-benchmark [count] */ int main(int argc, char **argv) { long j; long long start, elapsed; dict *dict = dictCreate(&BenchmarkDictType,NULL); long count = 0; if (argc == 2) { count = strtol(argv[1],NULL,10); } else { count = 5000000; } start_benchmark(); for (j = 0; j < count; j++) { int retval = dictAdd(dict,sdsfromlonglong(j),(void*)j); assert(retval == DICT_OK); } end_benchmark("Inserting"); assert((long)dictSize(dict) == count); /* --- */ if (0) { sds key = sdsfromlonglong(1); dictFind(dict,key); dictFind(dict,key); dictFind(dict,key); sdsfree(key); key = sdsfromlonglong(82); dictFind(dict,key); dictFind(dict,key); dictFind(dict,key); exit(1); } /* --- */ /* Wait for rehashing. 
*/ while (dictIsRehashing(dict)) { dictRehashMilliseconds(dict,100); } start_benchmark(); for (j = 0; j < count; j++) { sds key = sdsfromlonglong(j); dictEntry *de = dictFind(dict,key); assert(de != NULL); sdsfree(key); } end_benchmark("Linear access of existing elements"); start_benchmark(); for (j = 0; j < count; j++) { sds key = sdsfromlonglong(j); dictEntry *de = dictFind(dict,key); assert(de != NULL); sdsfree(key); } end_benchmark("Linear access of existing elements (2nd round)"); start_benchmark(); for (j = 0; j < count; j++) { sds key = sdsfromlonglong(rand() % count); dictEntry *de = dictFind(dict,key); assert(de != NULL); sdsfree(key); } end_benchmark("Random access of existing elements"); start_benchmark(); for (j = 0; j < count; j++) { sds key = sdsfromlonglong(rand() % count); key[0] = 'X'; dictEntry *de = dictFind(dict,key); assert(de == NULL); sdsfree(key); } end_benchmark("Accessing missing"); start_benchmark(); for (j = 0; j < count; j++) { sds key = sdsfromlonglong(j); int retval = dictDelete(dict,key); assert(retval == DICT_OK); key[0] += 17; /* Change first number to letter. */ retval = dictAdd(dict,key,(void*)j); assert(retval == DICT_OK); } end_benchmark("Removing and adding"); } #endif