summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authoryoav-steinberg <yoav@monfort.co.il>2021-08-05 08:25:58 +0300
committerGitHub <noreply@github.com>2021-08-05 08:25:58 +0300
commit5e908a290ccbe9c4a7bea9356faf3b837df62793 (patch)
treeee8c2008ef1c18653203245dd40d3befb2cd364f
parent1c59567a7fe207997eef6197eefa7d508d7fbf9f (diff)
downloadredis-5e908a290ccbe9c4a7bea9356faf3b837df62793.tar.gz
dict struct memory optimizations (#9228)
Reduce dict struct memory overhead on 64bit dict size goes down from jemalloc's 96 byte bin to its 56 byte bin. summary of changes: - Remove `privdata` from callbacks and dict creation. (this affects many files, see "Interface change" below). - Meld `dictht` struct into the `dict` struct to eliminate struct padding. (this affects just dict.c and defrag.c) - Eliminate the `sizemask` field, can be calculated from size when needed. - Convert the `size` field into `size_exp` (exponent), utilizes one byte instead of 8. Interface change: pass dict pointer to dict type call back functions. This is instead of passing the removed privdata field. In the future if we'd like to have private data in the callbacks we can extract it from the dict type. We can extend dictType to include a custom dict struct allocator and use it to allocate more data at the end of the dict struct. This data can then be used to store private data later accessed by the callbacks.
-rw-r--r--src/cluster.c4
-rw-r--r--src/config.c6
-rw-r--r--src/db.c10
-rw-r--r--src/defrag.c16
-rw-r--r--src/dict.c281
-rw-r--r--src/dict.h61
-rw-r--r--src/expire.c6
-rw-r--r--src/latency.c8
-rw-r--r--src/lazyfree.c4
-rw-r--r--src/module.c10
-rw-r--r--src/networking.c4
-rw-r--r--src/object.c4
-rw-r--r--src/rdb.c4
-rw-r--r--src/redis-benchmark.c10
-rw-r--r--src/redis-cli.c35
-rw-r--r--src/replication.c6
-rw-r--r--src/scripting.c2
-rw-r--r--src/sentinel.c22
-rw-r--r--src/server.c56
-rw-r--r--src/server.h12
-rw-r--r--src/t_hash.c10
-rw-r--r--src/t_set.c4
-rw-r--r--src/t_zset.c10
23 files changed, 290 insertions, 295 deletions
diff --git a/src/cluster.c b/src/cluster.c
index 630a9a3f0..8cd931975 100644
--- a/src/cluster.c
+++ b/src/cluster.c
@@ -511,9 +511,9 @@ void clusterInit(void) {
server.cluster->state = CLUSTER_FAIL;
server.cluster->size = 1;
server.cluster->todo_before_sleep = 0;
- server.cluster->nodes = dictCreate(&clusterNodesDictType,NULL);
+ server.cluster->nodes = dictCreate(&clusterNodesDictType);
server.cluster->nodes_black_list =
- dictCreate(&clusterNodesBlackListDictType,NULL);
+ dictCreate(&clusterNodesBlackListDictType);
server.cluster->failover_auth_time = 0;
server.cluster->failover_auth_count = 0;
server.cluster->failover_auth_rank = 0;
diff --git a/src/config.c b/src/config.c
index d9b431d0b..e8d185397 100644
--- a/src/config.c
+++ b/src/config.c
@@ -1104,7 +1104,7 @@ void configGetCommand(client *c) {
/* We use the following dictionary type to store where a configuration
* option is mentioned in the old configuration file, so it's
* like "maxmemory" -> list of line numbers (first line is zero). */
-void dictListDestructor(void *privdata, void *val);
+void dictListDestructor(dict *d, void *val);
/* Sentinel config rewriting is implemented inside sentinel.c by
* rewriteConfigSentinelOption(). */
@@ -1181,8 +1181,8 @@ struct rewriteConfigState *rewriteConfigReadOldFile(char *path) {
char buf[CONFIG_MAX_LINE+1];
int linenum = -1;
struct rewriteConfigState *state = zmalloc(sizeof(*state));
- state->option_to_line = dictCreate(&optionToLineDictType,NULL);
- state->rewritten = dictCreate(&optionSetDictType,NULL);
+ state->option_to_line = dictCreate(&optionToLineDictType);
+ state->rewritten = dictCreate(&optionSetDictType);
state->numlines = 0;
state->lines = NULL;
state->has_tail = 0;
diff --git a/src/db.c b/src/db.c
index 79d482ab9..3926bd33f 100644
--- a/src/db.c
+++ b/src/db.c
@@ -380,7 +380,7 @@ robj *dbUnshareStringValue(redisDb *db, robj *key, robj *o) {
* DB index if we want to empty only a single database.
* The function returns the number of keys removed from the database(s). */
long long emptyDbStructure(redisDb *dbarray, int dbnum, int async,
- void(callback)(void*))
+ void(callback)(dict*))
{
long long removed = 0;
int startdb, enddb;
@@ -422,7 +422,7 @@ long long emptyDbStructure(redisDb *dbarray, int dbnum, int async,
* On success the function returns the number of keys removed from the
* database(s). Otherwise -1 is returned in the specific case the
* DB number is out of range, and errno is set to EINVAL. */
-long long emptyDb(int dbnum, int flags, void(callback)(void*)) {
+long long emptyDb(int dbnum, int flags, void(callback)(dict*)) {
int async = (flags & EMPTYDB_ASYNC);
RedisModuleFlushInfoV1 fi = {REDISMODULE_FLUSHINFO_VERSION,!async,dbnum};
long long removed = 0;
@@ -470,8 +470,8 @@ dbBackup *backupDb(void) {
backup->dbarray = zmalloc(sizeof(redisDb)*server.dbnum);
for (int i=0; i<server.dbnum; i++) {
backup->dbarray[i] = server.db[i];
- server.db[i].dict = dictCreate(&dbDictType,NULL);
- server.db[i].expires = dictCreate(&dbExpiresDictType,NULL);
+ server.db[i].dict = dictCreate(&dbDictType);
+ server.db[i].expires = dictCreate(&dbExpiresDictType);
}
/* Backup cluster slots to keys map if enable cluster. */
@@ -493,7 +493,7 @@ dbBackup *backupDb(void) {
/* Discard a previously created backup, this can be slow (similar to FLUSHALL)
* Arguments are similar to the ones of emptyDb, see EMPTYDB_ flags. */
-void discardDbBackup(dbBackup *backup, int flags, void(callback)(void*)) {
+void discardDbBackup(dbBackup *backup, int flags, void(callback)(dict*)) {
int async = (flags & EMPTYDB_ASYNC);
/* Release main DBs backup . */
diff --git a/src/defrag.c b/src/defrag.c
index 15230b5d2..5d18c9079 100644
--- a/src/defrag.c
+++ b/src/defrag.c
@@ -134,7 +134,6 @@ long dictIterDefragEntry(dictIterator *iter) {
* of the dict and it's iterator, but the benefit is that it is very easy
* to use, and require no other changes in the dict. */
long defragged = 0;
- dictht *ht;
/* Handle the next entry (if there is one), and update the pointer in the
* current entry. */
if (iter->nextEntry) {
@@ -146,12 +145,11 @@ long dictIterDefragEntry(dictIterator *iter) {
}
}
/* handle the case of the first entry in the hash bucket. */
- ht = &iter->d->ht[iter->table];
- if (ht->table[iter->index] == iter->entry) {
+ if (iter->d->ht_table[iter->table][iter->index] == iter->entry) {
dictEntry *newde = activeDefragAlloc(iter->entry);
if (newde) {
iter->entry = newde;
- ht->table[iter->index] = newde;
+ iter->d->ht_table[iter->table][iter->index] = newde;
defragged++;
}
}
@@ -165,14 +163,14 @@ long dictDefragTables(dict* d) {
dictEntry **newtable;
long defragged = 0;
/* handle the first hash table */
- newtable = activeDefragAlloc(d->ht[0].table);
+ newtable = activeDefragAlloc(d->ht_table[0]);
if (newtable)
- defragged++, d->ht[0].table = newtable;
+ defragged++, d->ht_table[0] = newtable;
/* handle the second hash table */
- if (d->ht[1].table) {
- newtable = activeDefragAlloc(d->ht[1].table);
+ if (d->ht_table[1]) {
+ newtable = activeDefragAlloc(d->ht_table[1]);
if (newtable)
- defragged++, d->ht[1].table = newtable;
+ defragged++, d->ht_table[1] = newtable;
}
return defragged;
}
diff --git a/src/dict.c b/src/dict.c
index f10d0ca36..3ae975049 100644
--- a/src/dict.c
+++ b/src/dict.c
@@ -60,10 +60,10 @@ static unsigned int dict_force_resize_ratio = 5;
/* -------------------------- private prototypes ---------------------------- */
-static int _dictExpandIfNeeded(dict *ht);
-static unsigned long _dictNextPower(unsigned long size);
-static long _dictKeyIndex(dict *ht, const void *key, uint64_t hash, dictEntry **existing);
-static int _dictInit(dict *ht, dictType *type, void *privDataPtr);
+static int _dictExpandIfNeeded(dict *d);
+static char _dictNextExp(unsigned long size);
+static long _dictKeyIndex(dict *d, const void *key, uint64_t hash, dictEntry **existing);
+static int _dictInit(dict *d, dictType *type);
/* -------------------------- hash functions -------------------------------- */
@@ -95,32 +95,28 @@ uint64_t dictGenCaseHashFunction(const unsigned char *buf, int len) {
/* Reset a hash table already initialized with ht_init().
* NOTE: This function should only be called by ht_destroy(). */
-static void _dictReset(dictht *ht)
+static void _dictReset(dict *d, int htidx)
{
- ht->table = NULL;
- ht->size = 0;
- ht->sizemask = 0;
- ht->used = 0;
+ d->ht_table[htidx] = NULL;
+ d->ht_size_exp[htidx] = -1;
+ d->ht_used[htidx] = 0;
}
/* Create a new hash table */
-dict *dictCreate(dictType *type,
- void *privDataPtr)
+dict *dictCreate(dictType *type)
{
dict *d = zmalloc(sizeof(*d));
- _dictInit(d,type,privDataPtr);
+ _dictInit(d,type);
return d;
}
/* Initialize the hash table */
-int _dictInit(dict *d, dictType *type,
- void *privDataPtr)
+int _dictInit(dict *d, dictType *type)
{
- _dictReset(&d->ht[0]);
- _dictReset(&d->ht[1]);
+ _dictReset(d, 0);
+ _dictReset(d, 1);
d->type = type;
- d->privdata = privDataPtr;
d->rehashidx = -1;
d->pauserehash = 0;
return DICT_OK;
@@ -133,7 +129,7 @@ int dictResize(dict *d)
unsigned long minimal;
if (!dict_can_resize || dictIsRehashing(d)) return DICT_ERR;
- minimal = d->ht[0].used;
+ minimal = d->ht_used[0];
if (minimal < DICT_HT_INITIAL_SIZE)
minimal = DICT_HT_INITIAL_SIZE;
return dictExpand(d, minimal);
@@ -148,37 +144,41 @@ int _dictExpand(dict *d, unsigned long size, int* malloc_failed)
/* the size is invalid if it is smaller than the number of
* elements already inside the hash table */
- if (dictIsRehashing(d) || d->ht[0].used > size)
+ if (dictIsRehashing(d) || d->ht_used[0] > size)
return DICT_ERR;
- dictht n; /* the new hash table */
- unsigned long realsize = _dictNextPower(size);
+ /* the new hash table */
+ dictEntry **new_ht_table;
+ unsigned long new_ht_used;
+ char new_ht_size_exp = _dictNextExp(size);
/* Rehashing to the same table size is not useful. */
- if (realsize == d->ht[0].size) return DICT_ERR;
+ if (new_ht_size_exp == d->ht_size_exp[0]) return DICT_ERR;
/* Allocate the new hash table and initialize all pointers to NULL */
- n.size = realsize;
- n.sizemask = realsize-1;
if (malloc_failed) {
- n.table = ztrycalloc(realsize*sizeof(dictEntry*));
- *malloc_failed = n.table == NULL;
+ new_ht_table = ztrycalloc(((unsigned long)1<<new_ht_size_exp)*sizeof(dictEntry*));
+ *malloc_failed = new_ht_table == NULL;
if (*malloc_failed)
return DICT_ERR;
} else
- n.table = zcalloc(realsize*sizeof(dictEntry*));
+ new_ht_table = zcalloc(((unsigned long)1<<new_ht_size_exp)*sizeof(dictEntry*));
- n.used = 0;
+ new_ht_used = 0;
/* Is this the first initialization? If so it's not really a rehashing
* we just set the first hash table so that it can accept keys. */
- if (d->ht[0].table == NULL) {
- d->ht[0] = n;
+ if (d->ht_table[0] == NULL) {
+ d->ht_size_exp[0] = new_ht_size_exp;
+ d->ht_used[0] = new_ht_used;
+ d->ht_table[0] = new_ht_table;
return DICT_OK;
}
/* Prepare a second hash table for incremental rehashing */
- d->ht[1] = n;
+ d->ht_size_exp[1] = new_ht_size_exp;
+ d->ht_used[1] = new_ht_used;
+ d->ht_table[1] = new_ht_table;
d->rehashidx = 0;
return DICT_OK;
}
@@ -208,39 +208,42 @@ int dictRehash(dict *d, int n) {
int empty_visits = n*10; /* Max number of empty buckets to visit. */
if (!dictIsRehashing(d)) return 0;
- while(n-- && d->ht[0].used != 0) {
+ while(n-- && d->ht_used[0] != 0) {
dictEntry *de, *nextde;
/* Note that rehashidx can't overflow as we are sure there are more
* elements because ht[0].used != 0 */
- assert(d->ht[0].size > (unsigned long)d->rehashidx);
- while(d->ht[0].table[d->rehashidx] == NULL) {
+ assert(DICTHT_SIZE(d->ht_size_exp[0]) > (unsigned long)d->rehashidx);
+ while(d->ht_table[0][d->rehashidx] == NULL) {
d->rehashidx++;
if (--empty_visits == 0) return 1;
}
- de = d->ht[0].table[d->rehashidx];
+ de = d->ht_table[0][d->rehashidx];
/* Move all the keys in this bucket from the old to the new hash HT */
while(de) {
uint64_t h;
nextde = de->next;
/* Get the index in the new hash table */
- h = dictHashKey(d, de->key) & d->ht[1].sizemask;
- de->next = d->ht[1].table[h];
- d->ht[1].table[h] = de;
- d->ht[0].used--;
- d->ht[1].used++;
+ h = dictHashKey(d, de->key) & DICTHT_SIZE_MASK(d->ht_size_exp[1]);
+ de->next = d->ht_table[1][h];
+ d->ht_table[1][h] = de;
+ d->ht_used[0]--;
+ d->ht_used[1]++;
de = nextde;
}
- d->ht[0].table[d->rehashidx] = NULL;
+ d->ht_table[0][d->rehashidx] = NULL;
d->rehashidx++;
}
/* Check if we already rehashed the whole table... */
- if (d->ht[0].used == 0) {
- zfree(d->ht[0].table);
- d->ht[0] = d->ht[1];
- _dictReset(&d->ht[1]);
+ if (d->ht_used[0] == 0) {
+ zfree(d->ht_table[0]);
+ /* Copy the new ht onto the old one */
+ d->ht_table[0] = d->ht_table[1];
+ d->ht_used[0] = d->ht_used[1];
+ d->ht_size_exp[0] = d->ht_size_exp[1];
+ _dictReset(d, 1);
d->rehashidx = -1;
return 0;
}
@@ -316,7 +319,7 @@ dictEntry *dictAddRaw(dict *d, void *key, dictEntry **existing)
{
long index;
dictEntry *entry;
- dictht *ht;
+ int htidx;
if (dictIsRehashing(d)) _dictRehashStep(d);
@@ -329,11 +332,11 @@ dictEntry *dictAddRaw(dict *d, void *key, dictEntry **existing)
* Insert the element in top, with the assumption that in a database
* system it is more likely that recently added entries are accessed
* more frequently. */
- ht = dictIsRehashing(d) ? &d->ht[1] : &d->ht[0];
+ htidx = dictIsRehashing(d) ? 1 : 0;
entry = zmalloc(sizeof(*entry));
- entry->next = ht->table[index];
- ht->table[index] = entry;
- ht->used++;
+ entry->next = d->ht_table[htidx][index];
+ d->ht_table[htidx][index] = entry;
+ d->ht_used[htidx]++;
/* Set the hash entry fields. */
dictSetKey(d, entry, key);
@@ -396,8 +399,8 @@ static dictEntry *dictGenericDelete(dict *d, const void *key, int nofree) {
h = dictHashKey(d, key);
for (table = 0; table <= 1; table++) {
- idx = h & d->ht[table].sizemask;
- he = d->ht[table].table[idx];
+ idx = h & DICTHT_SIZE_MASK(d->ht_size_exp[table]);
+ he = d->ht_table[table][idx];
prevHe = NULL;
while(he) {
if (key==he->key || dictCompareKeys(d, key, he->key)) {
@@ -405,11 +408,11 @@ static dictEntry *dictGenericDelete(dict *d, const void *key, int nofree) {
if (prevHe)
prevHe->next = he->next;
else
- d->ht[table].table[idx] = he->next;
+ d->ht_table[table][idx] = he->next;
if (!nofree) {
dictFreeUnlinkedEntry(d, he);
}
- d->ht[table].used--;
+ d->ht_used[table]--;
return he;
}
prevHe = he;
@@ -447,8 +450,8 @@ int dictDelete(dict *ht, const void *key) {
* // Do something with entry
* dictFreeUnlinkedEntry(entry); // <- This does not need to lookup again.
*/
-dictEntry *dictUnlink(dict *ht, const void *key) {
- return dictGenericDelete(ht,key,1);
+dictEntry *dictUnlink(dict *d, const void *key) {
+ return dictGenericDelete(d,key,1);
}
/* You need to call this function to really free the entry after a call
@@ -461,37 +464,37 @@ void dictFreeUnlinkedEntry(dict *d, dictEntry *he) {
}
/* Destroy an entire dictionary */
-int _dictClear(dict *d, dictht *ht, void(callback)(void *)) {
+int _dictClear(dict *d, int htidx, void(callback)(dict*)) {
unsigned long i;
/* Free all the elements */
- for (i = 0; i < ht->size && ht->used > 0; i++) {
+ for (i = 0; i < DICTHT_SIZE(d->ht_size_exp[htidx]) && d->ht_used[htidx] > 0; i++) {
dictEntry *he, *nextHe;
- if (callback && (i & 65535) == 0) callback(d->privdata);
+ if (callback && (i & 65535) == 0) callback(d);
- if ((he = ht->table[i]) == NULL) continue;
+ if ((he = d->ht_table[htidx][i]) == NULL) continue;
while(he) {
nextHe = he->next;
dictFreeKey(d, he);
dictFreeVal(d, he);
zfree(he);
- ht->used--;
+ d->ht_used[htidx]--;
he = nextHe;
}
}
/* Free the table and the allocated cache structure */
- zfree(ht->table);
+ zfree(d->ht_table[htidx]);
/* Re-initialize the table */
- _dictReset(ht);
+ _dictReset(d, htidx);
return DICT_OK; /* never fails */
}
/* Clear & Release the hash table */
void dictRelease(dict *d)
{
- _dictClear(d,&d->ht[0],NULL);
- _dictClear(d,&d->ht[1],NULL);
+ _dictClear(d,0,NULL);
+ _dictClear(d,1,NULL);
zfree(d);
}
@@ -504,8 +507,8 @@ dictEntry *dictFind(dict *d, const void *key)
if (dictIsRehashing(d)) _dictRehashStep(d);
h = dictHashKey(d, key);
for (table = 0; table <= 1; table++) {
- idx = h & d->ht[table].sizemask;
- he = d->ht[table].table[idx];
+ idx = h & DICTHT_SIZE_MASK(d->ht_size_exp[table]);
+ he = d->ht_table[table][idx];
while(he) {
if (key==he->key || dictCompareKeys(d, key, he->key))
return he;
@@ -533,12 +536,12 @@ long long dictFingerprint(dict *d) {
long long integers[6], hash = 0;
int j;
- integers[0] = (long) d->ht[0].table;
- integers[1] = d->ht[0].size;
- integers[2] = d->ht[0].used;
- integers[3] = (long) d->ht[1].table;
- integers[4] = d->ht[1].size;
- integers[5] = d->ht[1].used;
+ integers[0] = (long) d->ht_table[0];
+ integers[1] = d->ht_size_exp[0];
+ integers[2] = d->ht_used[0];
+ integers[3] = (long) d->ht_table[1];
+ integers[4] = d->ht_size_exp[1];
+ integers[5] = d->ht_used[1];
/* We hash N integers by summing every successive integer with the integer
* hashing of the previous sum. Basically:
@@ -585,7 +588,6 @@ dictEntry *dictNext(dictIterator *iter)
{
while (1) {
if (iter->entry == NULL) {
- dictht *ht = &iter->d->ht[iter->table];
if (iter->index == -1 && iter->table == 0) {
if (iter->safe)
dictPauseRehashing(iter->d);
@@ -593,16 +595,15 @@ dictEntry *dictNext(dictIterator *iter)
iter->fingerprint = dictFingerprint(iter->d);
}
iter->index++;
- if (iter->index >= (long) ht->size) {
+ if (iter->index >= (long) DICTHT_SIZE(iter->d->ht_size_exp[iter->table])) {
if (dictIsRehashing(iter->d) && iter->table == 0) {
iter->table++;
iter->index = 0;
- ht = &iter->d->ht[1];
} else {
break;
}
}
- iter->entry = ht->table[iter->index];
+ iter->entry = iter->d->ht_table[iter->table][iter->index];
} else {
iter->entry = iter->nextEntry;
}
@@ -638,17 +639,18 @@ dictEntry *dictGetRandomKey(dict *d)
if (dictSize(d) == 0) return NULL;
if (dictIsRehashing(d)) _dictRehashStep(d);
if (dictIsRehashing(d)) {
+ unsigned long s0 = DICTHT_SIZE(d->ht_size_exp[0]);
do {
/* We are sure there are no elements in indexes from 0
* to rehashidx-1 */
h = d->rehashidx + (randomULong() % (dictSlots(d) - d->rehashidx));
- he = (h >= d->ht[0].size) ? d->ht[1].table[h - d->ht[0].size] :
- d->ht[0].table[h];
+ he = (h >= s0) ? d->ht_table[1][h - s0] : d->ht_table[0][h];
} while(he == NULL);
} else {
+ unsigned long m = DICTHT_SIZE_MASK(d->ht_size_exp[0]);
do {
- h = randomULong() & d->ht[0].sizemask;
- he = d->ht[0].table[h];
+ h = randomULong() & m;
+ he = d->ht_table[0][h];
} while(he == NULL);
}
@@ -708,9 +710,9 @@ unsigned int dictGetSomeKeys(dict *d, dictEntry **des, unsigned int count) {
}
tables = dictIsRehashing(d) ? 2 : 1;
- maxsizemask = d->ht[0].sizemask;
- if (tables > 1 && maxsizemask < d->ht[1].sizemask)
- maxsizemask = d->ht[1].sizemask;
+ maxsizemask = DICTHT_SIZE_MASK(d->ht_size_exp[0]);
+ if (tables > 1 && maxsizemask < DICTHT_SIZE_MASK(d->ht_size_exp[1]))
+ maxsizemask = DICTHT_SIZE_MASK(d->ht_size_exp[1]);
/* Pick a random point inside the larger table. */
unsigned long i = randomULong() & maxsizemask;
@@ -725,13 +727,13 @@ unsigned int dictGetSomeKeys(dict *d, dictEntry **des, unsigned int count) {
* table, there will be no elements in both tables up to
* the current rehashing index, so we jump if possible.
* (this happens when going from big to small table). */
- if (i >= d->ht[1].size)
+ if (i >= DICTHT_SIZE(d->ht_size_exp[1]))
i = d->rehashidx;
else
continue;
}
- if (i >= d->ht[j].size) continue; /* Out of range for this table. */
- dictEntry *he = d->ht[j].table[i];
+ if (i >= DICTHT_SIZE(d->ht_size_exp[j])) continue; /* Out of range for this table. */
+ dictEntry *he = d->ht_table[j][i];
/* Count contiguous empty buckets, and jump to other
* locations if they reach 'count' (with a minimum of 5). */
@@ -885,7 +887,7 @@ unsigned long dictScan(dict *d,
dictScanBucketFunction* bucketfn,
void *privdata)
{
- dictht *t0, *t1;
+ int htidx0, htidx1;
const dictEntry *de, *next;
unsigned long m0, m1;
@@ -895,12 +897,12 @@ unsigned long dictScan(dict *d,
dictPauseRehashing(d);
if (!dictIsRehashing(d)) {
- t0 = &(d->ht[0]);
- m0 = t0->sizemask;
+ htidx0 = 0;
+ m0 = DICTHT_SIZE_MASK(d->ht_size_exp[htidx0]);
/* Emit entries at cursor */
- if (bucketfn) bucketfn(privdata, &t0->table[v & m0]);
- de = t0->table[v & m0];
+ if (bucketfn) bucketfn(privdata, &d->ht_table[htidx0][v & m0]);
+ de = d->ht_table[htidx0][v & m0];
while (de) {
next = de->next;
fn(privdata, de);
@@ -917,21 +919,21 @@ unsigned long dictScan(dict *d,
v = rev(v);
} else {
- t0 = &d->ht[0];
- t1 = &d->ht[1];
+ htidx0 = 0;
+ htidx1 = 1;
/* Make sure t0 is the smaller and t1 is the bigger table */
- if (t0->size > t1->size) {
- t0 = &d->ht[1];
- t1 = &d->ht[0];
+ if (DICTHT_SIZE(d->ht_size_exp[htidx0]) > DICTHT_SIZE(d->ht_size_exp[htidx1])) {
+ htidx0 = 1;
+ htidx1 = 0;
}
- m0 = t0->sizemask;
- m1 = t1->sizemask;
+ m0 = DICTHT_SIZE_MASK(d->ht_size_exp[htidx0]);
+ m1 = DICTHT_SIZE_MASK(d->ht_size_exp[htidx1]);
/* Emit entries at cursor */
- if (bucketfn) bucketfn(privdata, &t0->table[v & m0]);
- de = t0->table[v & m0];
+ if (bucketfn) bucketfn(privdata, &d->ht_table[htidx0][v & m0]);
+ de = d->ht_table[htidx0][v & m0];
while (de) {
next = de->next;
fn(privdata, de);
@@ -942,8 +944,8 @@ unsigned long dictScan(dict *d,
* of the index pointed to by the cursor in the smaller table */
do {
/* Emit entries at cursor */
- if (bucketfn) bucketfn(privdata, &t1->table[v & m1]);
- de = t1->table[v & m1];
+ if (bucketfn) bucketfn(privdata, &d->ht_table[htidx1][v & m1]);
+ de = d->ht_table[htidx1][v & m1];
while (de) {
next = de->next;
fn(privdata, de);
@@ -973,8 +975,8 @@ unsigned long dictScan(dict *d,
static int dictTypeExpandAllowed(dict *d) {
if (d->type->expandAllowed == NULL) return 1;
return d->type->expandAllowed(
- _dictNextPower(d->ht[0].used + 1) * sizeof(dictEntry*),
- (double)d->ht[0].used / d->ht[0].size);
+ DICTHT_SIZE(_dictNextExp(d->ht_used[0] + 1)) * sizeof(dictEntry*),
+ (double)d->ht_used[0] / DICTHT_SIZE(d->ht_size_exp[0]));
}
/* Expand the hash table if needed */
@@ -984,32 +986,33 @@ static int _dictExpandIfNeeded(dict *d)
if (dictIsRehashing(d)) return DICT_OK;
/* If the hash table is empty expand it to the initial size. */
- if (d->ht[0].size == 0) return dictExpand(d, DICT_HT_INITIAL_SIZE);
+ if (DICTHT_SIZE(d->ht_size_exp[0]) == 0) return dictExpand(d, DICT_HT_INITIAL_SIZE);
/* If we reached the 1:1 ratio, and we are allowed to resize the hash
* table (global setting) or we should avoid it but the ratio between
* elements/buckets is over the "safe" threshold, we resize doubling
* the number of buckets. */
- if (d->ht[0].used >= d->ht[0].size &&
+ if (d->ht_used[0] >= DICTHT_SIZE(d->ht_size_exp[0]) &&
(dict_can_resize ||
- d->ht[0].used/d->ht[0].size > dict_force_resize_ratio) &&
+ d->ht_used[0]/ DICTHT_SIZE(d->ht_size_exp[0]) > dict_force_resize_ratio) &&
dictTypeExpandAllowed(d))
{
- return dictExpand(d, d->ht[0].used + 1);
+ return dictExpand(d, d->ht_used[0] + 1);
}
return DICT_OK;
}
+/* TODO: clz optimization */
/* Our hash table capability is a power of two */
-static unsigned long _dictNextPower(unsigned long size)
+static char _dictNextExp(unsigned long size)
{
- unsigned long i = DICT_HT_INITIAL_SIZE;
+ unsigned char e = DICT_HT_INITIAL_EXP;
- if (size >= LONG_MAX) return LONG_MAX + 1LU;
+ if (size >= LONG_MAX) return (8*sizeof(long)-1);
while(1) {
- if (i >= size)
- return i;
- i *= 2;
+ if (((unsigned long)1<<e) >= size)
+ return e;
+ e++;
}
}
@@ -1030,9 +1033,9 @@ static long _dictKeyIndex(dict *d, const void *key, uint64_t hash, dictEntry **e
if (_dictExpandIfNeeded(d) == DICT_ERR)
return -1;
for (table = 0; table <= 1; table++) {
- idx = hash & d->ht[table].sizemask;
+ idx = hash & DICTHT_SIZE_MASK(d->ht_size_exp[table]);
/* Search if this slot does not already contain the given key */
- he = d->ht[table].table[idx];
+ he = d->ht_table[table][idx];
while(he) {
if (key==he->key || dictCompareKeys(d, key, he->key)) {
if (existing) *existing = he;
@@ -1045,9 +1048,9 @@ static long _dictKeyIndex(dict *d, const void *key, uint64_t hash, dictEntry **e
return idx;
}
-void dictEmpty(dict *d, void(callback)(void*)) {
- _dictClear(d,&d->ht[0],callback);
- _dictClear(d,&d->ht[1],callback);
+void dictEmpty(dict *d, void(callback)(dict*)) {
+ _dictClear(d,0,callback);
+ _dictClear(d,1,callback);
d->rehashidx = -1;
d->pauserehash = 0;
}
@@ -1075,8 +1078,8 @@ dictEntry **dictFindEntryRefByPtrAndHash(dict *d, const void *oldptr, uint64_t h
if (dictSize(d) == 0) return NULL; /* dict is empty */
for (table = 0; table <= 1; table++) {
- idx = hash & d->ht[table].sizemask;
- heref = &d->ht[table].table[idx];
+ idx = hash & DICTHT_SIZE_MASK(d->ht_size_exp[table]);
+ heref = &d->ht_table[table][idx];
he = *heref;
while(he) {
if (oldptr==he->key)
@@ -1092,30 +1095,30 @@ dictEntry **dictFindEntryRefByPtrAndHash(dict *d, const void *oldptr, uint64_t h
/* ------------------------------- Debugging ---------------------------------*/
#define DICT_STATS_VECTLEN 50
-size_t _dictGetStatsHt(char *buf, size_t bufsize, dictht *ht, int tableid) {
+size_t _dictGetStatsHt(char *buf, size_t bufsize, dict *d, int htidx) {
unsigned long i, slots = 0, chainlen, maxchainlen = 0;
unsigned long totchainlen = 0;
unsigned long clvector[DICT_STATS_VECTLEN];
size_t l = 0;
- if (ht->used == 0) {
+ if (d->ht_used[htidx] == 0) {
return snprintf(buf,bufsize,
"No stats available for empty dictionaries\n");
}
/* Compute stats. */
for (i = 0; i < DICT_STATS_VECTLEN; i++) clvector[i] = 0;
- for (i = 0; i < ht->size; i++) {
+ for (i = 0; i < DICTHT_SIZE(d->ht_size_exp[htidx]); i++) {
dictEntry *he;
- if (ht->table[i] == NULL) {
+ if (d->ht_table[htidx][i] == NULL) {
clvector[0]++;
continue;
}
slots++;
/* For each hash entry on this slot... */
chainlen = 0;
- he = ht->table[i];
+ he = d->ht_table[htidx][i];
while(he) {
chainlen++;
he = he->next;
@@ -1135,9 +1138,9 @@ size_t _dictGetStatsHt(char *buf, size_t bufsize, dictht *ht, int tableid) {
" avg chain length (counted): %.02f\n"
" avg chain length (computed): %.02f\n"
" Chain length distribution:\n",
- tableid, (tableid == 0) ? "main hash table" : "rehashing target",
- ht->size, ht->used, slots, maxchainlen,
- (float)totchainlen/slots, (float)ht->used/slots);
+ htidx, (htidx == 0) ? "main hash table" : "rehashing target",
+ DICTHT_SIZE(d->ht_size_exp[htidx]), d->ht_used[htidx], slots, maxchainlen,
+ (float)totchainlen/slots, (float)d->ht_used[htidx]/slots);
for (i = 0; i < DICT_STATS_VECTLEN-1; i++) {
if (clvector[i] == 0) continue;
@@ -1145,7 +1148,7 @@ size_t _dictGetStatsHt(char *buf, size_t bufsize, dictht *ht, int tableid) {
l += snprintf(buf+l,bufsize-l,
" %s%ld: %ld (%.02f%%)\n",
(i == DICT_STATS_VECTLEN-1)?">= ":"",
- i, clvector[i], ((float)clvector[i]/ht->size)*100);
+ i, clvector[i], ((float)clvector[i]/DICTHT_SIZE(d->ht_size_exp[htidx]))*100);
}
/* Unlike snprintf(), return the number of characters actually written. */
@@ -1158,11 +1161,11 @@ void dictGetStats(char *buf, size_t bufsize, dict *d) {
char *orig_buf = buf;
size_t orig_bufsize = bufsize;
- l = _dictGetStatsHt(buf,bufsize,&d->ht[0],0);
+ l = _dictGetStatsHt(buf,bufsize,d,0);
buf += l;
bufsize -= l;
if (dictIsRehashing(d) && bufsize > 0) {
- _dictGetStatsHt(buf,bufsize,&d->ht[1],1);
+ _dictGetStatsHt(buf,bufsize,d,1);
}
/* Make sure there is a NULL term at the end. */
if (orig_bufsize) orig_buf[orig_bufsize-1] = '\0';
@@ -1172,13 +1175,15 @@ void dictGetStats(char *buf, size_t bufsize, dict *d) {
#ifdef REDIS_TEST
+#define UNUSED(V) ((void) V)
+
uint64_t hashCallback(const void *key) {
return dictGenHashFunction((unsigned char*)key, strlen((char*)key));
}
-int compareCallback(void *privdata, const void *key1, const void *key2) {
+int compareCallback(dict *d, const void *key1, const void *key2) {
int l1,l2;
- DICT_NOTUSED(privdata);
+ UNUSED(d);
l1 = strlen((char*)key1);
l2 = strlen((char*)key2);
@@ -1186,8 +1191,8 @@ int compareCallback(void *privdata, const void *key1, const void *key2) {
return memcmp(key1, key2, l1) == 0;
}
-void freeCallback(void *privdata, void *val) {
- DICT_NOTUSED(privdata);
+void freeCallback(dict *d, void *val) {
+ UNUSED(d);
zfree(val);
}
@@ -1224,7 +1229,7 @@ dictType BenchmarkDictType = {
int dictTest(int argc, char **argv, int accurate) {
long j;
long long start, elapsed;
- dict *dict = dictCreate(&BenchmarkDictType,NULL);
+ dict *dict = dictCreate(&BenchmarkDictType);
long count = 0;
if (argc == 4) {
diff --git a/src/dict.h b/src/dict.h
index 02af65cf8..999053923 100644
--- a/src/dict.h
+++ b/src/dict.h
@@ -44,9 +44,6 @@
#define DICT_OK 0
#define DICT_ERR 1
-/* Unused arguments generate annoying warnings... */
-#define DICT_NOTUSED(V) ((void) V)
-
typedef struct dictEntry {
void *key;
union {
@@ -58,32 +55,33 @@ typedef struct dictEntry {
struct dictEntry *next;
} dictEntry;
+typedef struct dict dict;
+
typedef struct dictType {
uint64_t (*hashFunction)(const void *key);
- void *(*keyDup)(void *privdata, const void *key);
- void *(*valDup)(void *privdata, const void *obj);
- int (*keyCompare)(void *privdata, const void *key1, const void *key2);
- void (*keyDestructor)(void *privdata, void *key);
- void (*valDestructor)(void *privdata, void *obj);
+ void *(*keyDup)(dict *d, const void *key);
+ void *(*valDup)(dict *d, const void *obj);
+ int (*keyCompare)(dict *d, const void *key1, const void *key2);
+ void (*keyDestructor)(dict *d, void *key);
+ void (*valDestructor)(dict *d, void *obj);
int (*expandAllowed)(size_t moreMem, double usedRatio);
} dictType;
-/* This is our hash table structure. Every dictionary has two of this as we
- * implement incremental rehashing, for the old to the new table. */
-typedef struct dictht {
- dictEntry **table;
- unsigned long size;
- unsigned long sizemask;
- unsigned long used;
-} dictht;
+#define DICTHT_SIZE(exp) ((exp) == -1 ? 0 : (unsigned long)1<<(exp))
+#define DICTHT_SIZE_MASK(exp) ((exp) == -1 ? 0 : (DICTHT_SIZE(exp))-1)
-typedef struct dict {
+struct dict {
dictType *type;
- void *privdata;
- dictht ht[2];
+
+ dictEntry **ht_table[2];
+ unsigned long ht_used[2];
+
long rehashidx; /* rehashing not in progress if rehashidx == -1 */
+
+ /* Keep small vars at end for optimal (minimal) struct padding */
int16_t pauserehash; /* If >0 rehashing is paused (<0 indicates coding error) */
-} dict;
+ char ht_size_exp[2]; /* exponent of size. (size = 1<<exp) */
+};
/* If safe is set to 1 this is a safe iterator, that means, you can call
* dictAdd, dictFind, and other functions against the dictionary even while
@@ -102,16 +100,17 @@ typedef void (dictScanFunction)(void *privdata, const dictEntry *de);
typedef void (dictScanBucketFunction)(void *privdata, dictEntry **bucketref);
/* This is the initial size of every hash table */
-#define DICT_HT_INITIAL_SIZE 4
+#define DICT_HT_INITIAL_EXP 2
+#define DICT_HT_INITIAL_SIZE (1<<(DICT_HT_INITIAL_EXP))
/* ------------------------------- Macros ------------------------------------*/
#define dictFreeVal(d, entry) \
if ((d)->type->valDestructor) \
- (d)->type->valDestructor((d)->privdata, (entry)->v.val)
+ (d)->type->valDestructor((d), (entry)->v.val)
#define dictSetVal(d, entry, _val_) do { \
if ((d)->type->valDup) \
- (entry)->v.val = (d)->type->valDup((d)->privdata, _val_); \
+ (entry)->v.val = (d)->type->valDup((d), _val_); \
else \
(entry)->v.val = (_val_); \
} while(0)
@@ -127,18 +126,18 @@ typedef void (dictScanBucketFunction)(void *privdata, dictEntry **bucketref);
#define dictFreeKey(d, entry) \
if ((d)->type->keyDestructor) \
- (d)->type->keyDestructor((d)->privdata, (entry)->key)
+ (d)->type->keyDestructor((d), (entry)->key)
#define dictSetKey(d, entry, _key_) do { \
if ((d)->type->keyDup) \
- (entry)->key = (d)->type->keyDup((d)->privdata, _key_); \
+ (entry)->key = (d)->type->keyDup((d), _key_); \
else \
(entry)->key = (_key_); \
} while(0)
#define dictCompareKeys(d, key1, key2) \
(((d)->type->keyCompare) ? \
- (d)->type->keyCompare((d)->privdata, key1, key2) : \
+ (d)->type->keyCompare((d), key1, key2) : \
(key1) == (key2))
#define dictHashKey(d, key) (d)->type->hashFunction(key)
@@ -147,8 +146,8 @@ typedef void (dictScanBucketFunction)(void *privdata, dictEntry **bucketref);
#define dictGetSignedIntegerVal(he) ((he)->v.s64)
#define dictGetUnsignedIntegerVal(he) ((he)->v.u64)
#define dictGetDoubleVal(he) ((he)->v.d)
-#define dictSlots(d) ((d)->ht[0].size+(d)->ht[1].size)
-#define dictSize(d) ((d)->ht[0].used+(d)->ht[1].used)
+#define dictSlots(d) (DICTHT_SIZE((d)->ht_size_exp[0])+DICTHT_SIZE((d)->ht_size_exp[1]))
+#define dictSize(d) ((d)->ht_used[0]+(d)->ht_used[1])
#define dictIsRehashing(d) ((d)->rehashidx != -1)
#define dictPauseRehashing(d) (d)->pauserehash++
#define dictResumeRehashing(d) (d)->pauserehash--
@@ -161,7 +160,7 @@ typedef void (dictScanBucketFunction)(void *privdata, dictEntry **bucketref);
#endif
/* API */
-dict *dictCreate(dictType *type, void *privDataPtr);
+dict *dictCreate(dictType *type);
int dictExpand(dict *d, unsigned long size);
int dictTryExpand(dict *d, unsigned long size);
int dictAdd(dict *d, void *key, void *val);
@@ -169,7 +168,7 @@ dictEntry *dictAddRaw(dict *d, void *key, dictEntry **existing);
dictEntry *dictAddOrFind(dict *d, void *key);
int dictReplace(dict *d, void *key, void *val);
int dictDelete(dict *d, const void *key);
-dictEntry *dictUnlink(dict *ht, const void *key);
+dictEntry *dictUnlink(dict *d, const void *key);
void dictFreeUnlinkedEntry(dict *d, dictEntry *he);
void dictRelease(dict *d);
dictEntry * dictFind(dict *d, const void *key);
@@ -185,7 +184,7 @@ unsigned int dictGetSomeKeys(dict *d, dictEntry **des, unsigned int count);
void dictGetStats(char *buf, size_t bufsize, dict *d);
uint64_t dictGenHashFunction(const void *key, int len);
uint64_t dictGenCaseHashFunction(const unsigned char *buf, int len);
-void dictEmpty(dict *d, void(callback)(void*));
+void dictEmpty(dict *d, void(callback)(dict*));
void dictEnableResize(void);
void dictDisableResize(void);
int dictRehash(dict *d, int n);
diff --git a/src/expire.c b/src/expire.c
index 2dcdddaf6..858c13549 100644
--- a/src/expire.c
+++ b/src/expire.c
@@ -258,8 +258,8 @@ void activeExpireCycle(int type) {
if (table == 1 && !dictIsRehashing(db->expires)) break;
unsigned long idx = db->expires_cursor;
- idx &= db->expires->ht[table].sizemask;
- dictEntry *de = db->expires->ht[table].table[idx];
+ idx &= DICTHT_SIZE_MASK(db->expires->ht_size_exp[table]);
+ dictEntry *de = db->expires->ht_table[table][idx];
long long ttl;
/* Scan the current bucket of the current table. */
@@ -439,7 +439,7 @@ void rememberSlaveKeyWithExpire(redisDb *db, robj *key) {
NULL, /* val destructor */
NULL /* allow to expand */
};
- slaveKeysWithExpire = dictCreate(&dt,NULL);
+ slaveKeysWithExpire = dictCreate(&dt);
}
if (db->id > 63) return;
diff --git a/src/latency.c b/src/latency.c
index 06b94abd8..d24c7922a 100644
--- a/src/latency.c
+++ b/src/latency.c
@@ -36,8 +36,8 @@
#include "server.h"
/* Dictionary type for latency events. */
-int dictStringKeyCompare(void *privdata, const void *key1, const void *key2) {
- UNUSED(privdata);
+int dictStringKeyCompare(dict *d, const void *key1, const void *key2) {
+ UNUSED(d);
return strcmp(key1,key2) == 0;
}
@@ -45,7 +45,7 @@ uint64_t dictStringHash(const void *key) {
return dictGenHashFunction(key, strlen(key));
}
-void dictVanillaFree(void *privdata, void *val);
+void dictVanillaFree(dict *d, void *val);
dictType latencyTimeSeriesDictType = {
dictStringHash, /* hash function */
@@ -105,7 +105,7 @@ int THPGetAnonHugePagesSize(void) {
* of time series, each time series is created on demand in order to avoid
* having a fixed list to maintain. */
void latencyMonitorInit(void) {
- server.latency_events = dictCreate(&latencyTimeSeriesDictType,NULL);
+ server.latency_events = dictCreate(&latencyTimeSeriesDictType);
}
/* Add the specified sample to the specified time series "event".
diff --git a/src/lazyfree.c b/src/lazyfree.c
index 398ebd194..34370cdb3 100644
--- a/src/lazyfree.c
+++ b/src/lazyfree.c
@@ -201,8 +201,8 @@ void freeObjAsync(robj *key, robj *obj, int dbid) {
* lazy freeing. */
void emptyDbAsync(redisDb *db) {
dict *oldht1 = db->dict, *oldht2 = db->expires;
- db->dict = dictCreate(&dbDictType,NULL);
- db->expires = dictCreate(&dbExpiresDictType,NULL);
+ db->dict = dictCreate(&dbDictType);
+ db->expires = dictCreate(&dbExpiresDictType);
atomicIncr(lazyfree_objects,dictSize(oldht1));
bioCreateLazyFreeJob(lazyfreeFreeDatabase,2,oldht1,oldht2);
}
diff --git a/src/module.c b/src/module.c
index f8d5e21d4..0027692c4 100644
--- a/src/module.c
+++ b/src/module.c
@@ -8618,8 +8618,8 @@ uint64_t dictCStringKeyHash(const void *key) {
return dictGenHashFunction((unsigned char*)key, strlen((char*)key));
}
-int dictCStringKeyCompare(void *privdata, const void *key1, const void *key2) {
- UNUSED(privdata);
+int dictCStringKeyCompare(dict *d, const void *key1, const void *key2) {
+ UNUSED(d);
return strcmp(key1,key2) == 0;
}
@@ -8656,7 +8656,7 @@ void moduleInitModulesSystemLast(void) {
void moduleInitModulesSystem(void) {
moduleUnblockedClients = listCreate();
server.loadmodule_queue = listCreate();
- modules = dictCreate(&modulesDictType,NULL);
+ modules = dictCreate(&modulesDictType);
/* Set up the keyspace notification subscriber list and static client */
moduleKeyspaceSubscribers = listCreate();
@@ -9503,8 +9503,8 @@ int RM_GetDbIdFromDefragCtx(RedisModuleDefragCtx *ctx) {
/* Register all the APIs we export. Keep this function at the end of the
* file so that's easy to seek it to add new entries. */
void moduleRegisterCoreAPI(void) {
- server.moduleapi = dictCreate(&moduleAPIDictType,NULL);
- server.sharedapi = dictCreate(&moduleAPIDictType,NULL);
+ server.moduleapi = dictCreate(&moduleAPIDictType);
+ server.sharedapi = dictCreate(&moduleAPIDictType);
REGISTER_API(Alloc);
REGISTER_API(Calloc);
REGISTER_API(Realloc);
diff --git a/src/networking.c b/src/networking.c
index 4d513f468..b4c63b47e 100644
--- a/src/networking.c
+++ b/src/networking.c
@@ -165,7 +165,7 @@ client *createClient(connection *conn) {
listSetDupMethod(c->reply,dupClientReplyValue);
c->btype = BLOCKED_NONE;
c->bpop.timeout = 0;
- c->bpop.keys = dictCreate(&objectKeyHeapPointerValueDictType,NULL);
+ c->bpop.keys = dictCreate(&objectKeyHeapPointerValueDictType);
c->bpop.target = NULL;
c->bpop.xread_group = NULL;
c->bpop.xread_consumer = NULL;
@@ -174,7 +174,7 @@ client *createClient(connection *conn) {
c->bpop.reploffset = 0;
c->woff = 0;
c->watched_keys = listCreate();
- c->pubsub_channels = dictCreate(&objectKeyPointerValueDictType,NULL);
+ c->pubsub_channels = dictCreate(&objectKeyPointerValueDictType);
c->pubsub_patterns = listCreate();
c->peerid = NULL;
c->sockname = NULL;
diff --git a/src/object.c b/src/object.c
index e8c45e72a..8d79645d3 100644
--- a/src/object.c
+++ b/src/object.c
@@ -226,7 +226,7 @@ robj *createZiplistObject(void) {
}
robj *createSetObject(void) {
- dict *d = dictCreate(&setDictType,NULL);
+ dict *d = dictCreate(&setDictType);
robj *o = createObject(OBJ_SET,d);
o->encoding = OBJ_ENCODING_HT;
return o;
@@ -250,7 +250,7 @@ robj *createZsetObject(void) {
zset *zs = zmalloc(sizeof(*zs));
robj *o;
- zs->dict = dictCreate(&zsetDictType,NULL);
+ zs->dict = dictCreate(&zsetDictType);
zs->zsl = zslCreate();
o = createObject(OBJ_ZSET,zs);
o->encoding = OBJ_ENCODING_SKIPLIST;
diff --git a/src/rdb.c b/src/rdb.c
index c17fdde42..329a86fff 100644
--- a/src/rdb.c
+++ b/src/rdb.c
@@ -1696,7 +1696,7 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, int dbid) {
* later when the ziplist is converted to a dict.
* Create a set (dict with no values) to for a dup search.
* We can dismiss it as soon as we convert the ziplist to a hash. */
- dupSearchDict = dictCreate(&hashDictType, NULL);
+ dupSearchDict = dictCreate(&hashDictType);
}
@@ -1851,7 +1851,7 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, int dbid) {
unsigned char *fstr, *vstr;
unsigned int flen, vlen;
unsigned int maxlen = 0;
- dict *dupSearchDict = dictCreate(&hashDictType, NULL);
+ dict *dupSearchDict = dictCreate(&hashDictType);
while ((zi = zipmapNext(zi, &fstr, &flen, &vstr, &vlen)) != NULL) {
if (flen > maxlen) maxlen = flen;
diff --git a/src/redis-benchmark.c b/src/redis-benchmark.c
index 273c8373a..c6ecf757c 100644
--- a/src/redis-benchmark.c
+++ b/src/redis-benchmark.c
@@ -223,8 +223,7 @@ static sds benchmarkVersion(void) {
/* Dict callbacks */
static uint64_t dictSdsHash(const void *key);
-static int dictSdsKeyCompare(void *privdata, const void *key1,
- const void *key2);
+static int dictSdsKeyCompare(dict *d, const void *key1, const void *key2);
/* Implementation */
static long long ustime(void) {
@@ -251,11 +250,10 @@ static uint64_t dictSdsHash(const void *key) {
return dictGenHashFunction((unsigned char*)key, sdslen((char*)key));
}
-static int dictSdsKeyCompare(void *privdata, const void *key1,
- const void *key2)
+static int dictSdsKeyCompare(dict *d, const void *key1, const void *key2)
{
int l1,l2;
- DICT_NOTUSED(privdata);
+ UNUSED(d);
l1 = sdslen((sds)key1);
l2 = sdslen((sds)key2);
@@ -1302,7 +1300,7 @@ static int fetchClusterSlotsConfiguration(client c) {
NULL /* allow to expand */
};
/* printf("[%d] fetchClusterSlotsConfiguration\n", c->thread_id); */
- dict *masters = dictCreate(&dtype, NULL);
+ dict *masters = dictCreate(&dtype);
redisContext *ctx = NULL;
for (i = 0; i < (size_t) config.cluster_node_count; i++) {
clusterNode *node = config.cluster_nodes[i];
diff --git a/src/redis-cli.c b/src/redis-cli.c
index fbd392d4b..8a06f22f0 100644
--- a/src/redis-cli.c
+++ b/src/redis-cli.c
@@ -168,10 +168,10 @@ int spectrum_palette_size;
/* Dict Helpers */
static uint64_t dictSdsHash(const void *key);
-static int dictSdsKeyCompare(void *privdata, const void *key1,
+static int dictSdsKeyCompare(dict *d, const void *key1,
const void *key2);
-static void dictSdsDestructor(void *privdata, void *val);
-static void dictListDestructor(void *privdata, void *val);
+static void dictSdsDestructor(dict *d, void *val);
+static void dictListDestructor(dict *d, void *val);
/* Cluster Manager Command Info */
typedef struct clusterManagerCommand {
@@ -454,11 +454,10 @@ static uint64_t dictSdsHash(const void *key) {
return dictGenHashFunction((unsigned char*)key, sdslen((char*)key));
}
-static int dictSdsKeyCompare(void *privdata, const void *key1,
- const void *key2)
+static int dictSdsKeyCompare(dict *d, const void *key1, const void *key2)
{
int l1,l2;
- DICT_NOTUSED(privdata);
+ UNUSED(d);
l1 = sdslen((sds)key1);
l2 = sdslen((sds)key2);
@@ -466,15 +465,15 @@ static int dictSdsKeyCompare(void *privdata, const void *key1,
return memcmp(key1, key2, l1) == 0;
}
-static void dictSdsDestructor(void *privdata, void *val)
+static void dictSdsDestructor(dict *d, void *val)
{
- DICT_NOTUSED(privdata);
+ UNUSED(d);
sdsfree(val);
}
-void dictListDestructor(void *privdata, void *val)
+void dictListDestructor(dict *d, void *val)
{
- DICT_NOTUSED(privdata);
+ UNUSED(d);
listRelease((list*)val);
}
@@ -3012,7 +3011,7 @@ static int clusterManagerGetAntiAffinityScore(clusterManagerNodeArray *ipnodes,
* replication of each other) */
for (i = 0; i < ip_count; i++) {
clusterManagerNodeArray *node_array = &(ipnodes[i]);
- dict *related = dictCreate(&clusterManagerDictType, NULL);
+ dict *related = dictCreate(&clusterManagerDictType);
char *ip = NULL;
for (j = 0; j < node_array->len; j++) {
clusterManagerNode *node = node_array->nodes[j];
@@ -4519,7 +4518,7 @@ cleanup:
* node addresses that cannot reach the unreachable node. */
static dict *clusterManagerGetLinkStatus(void) {
if (cluster_manager.nodes == NULL) return NULL;
- dict *status = dictCreate(&clusterManagerLinkDictType, NULL);
+ dict *status = dictCreate(&clusterManagerLinkDictType);
listIter li;
listNode *ln;
listRewind(cluster_manager.nodes, &li);
@@ -5282,7 +5281,7 @@ static int clusterManagerCheckCluster(int quiet) {
clusterManagerNode *n = ln->value;
if (n->migrating != NULL) {
if (open_slots == NULL)
- open_slots = dictCreate(&clusterManagerDictType, NULL);
+ open_slots = dictCreate(&clusterManagerDictType);
sds errstr = sdsempty();
errstr = sdscatprintf(errstr,
"[WARNING] Node %s:%d has slots in "
@@ -5300,7 +5299,7 @@ static int clusterManagerCheckCluster(int quiet) {
}
if (n->importing != NULL) {
if (open_slots == NULL)
- open_slots = dictCreate(&clusterManagerDictType, NULL);
+ open_slots = dictCreate(&clusterManagerDictType);
sds errstr = sdsempty();
errstr = sdscatprintf(errstr,
"[WARNING] Node %s:%d has slots in "
@@ -5361,7 +5360,7 @@ static int clusterManagerCheckCluster(int quiet) {
dictType dtype = clusterManagerDictType;
dtype.keyDestructor = dictSdsDestructor;
dtype.valDestructor = dictListDestructor;
- clusterManagerUncoveredSlots = dictCreate(&dtype, NULL);
+ clusterManagerUncoveredSlots = dictCreate(&dtype);
int fixed = clusterManagerFixSlotsCoverage(slots);
if (fixed > 0) result = 1;
}
@@ -7528,9 +7527,9 @@ static typeinfo* typeinfo_add(dict *types, char* name, typeinfo* type_template)
return info;
}
-void type_free(void* priv_data, void* val) {
+void type_free(dict *d, void* val) {
typeinfo *info = val;
- UNUSED(priv_data);
+ UNUSED(d);
if (info->biggest_key)
sdsfree(info->biggest_key);
sdsfree(info->name);
@@ -7656,7 +7655,7 @@ static void findBigKeys(int memkeys, unsigned memkeys_samples) {
typeinfo **types = NULL;
double pct;
- dict *types_dict = dictCreate(&typeinfoDictType, NULL);
+ dict *types_dict = dictCreate(&typeinfoDictType);
typeinfo_add(types_dict, "string", &type_string);
typeinfo_add(types_dict, "list", &type_list);
typeinfo_add(types_dict, "set", &type_set);
diff --git a/src/replication.c b/src/replication.c
index a0bfdf05f..99e73285d 100644
--- a/src/replication.c
+++ b/src/replication.c
@@ -1444,8 +1444,8 @@ void replicationSendNewlineToMaster(void) {
/* Callback used by emptyDb() while flushing away old data to load
* the new dataset received by the master. */
-void replicationEmptyDbCallback(void *privdata) {
- UNUSED(privdata);
+void replicationEmptyDbCallback(dict *d) {
+ UNUSED(d);
if (server.repl_state == REPL_STATE_TRANSFER)
replicationSendNewlineToMaster();
}
@@ -3078,7 +3078,7 @@ void refreshGoodSlavesCount(void) {
/* Initialize the script cache, only called at startup. */
void replicationScriptCacheInit(void) {
server.repl_scriptcache_size = 10000;
- server.repl_scriptcache_dict = dictCreate(&replScriptCacheDictType,NULL);
+ server.repl_scriptcache_dict = dictCreate(&replScriptCacheDictType);
server.repl_scriptcache_fifo = listCreate();
}
diff --git a/src/scripting.c b/src/scripting.c
index 87b3b036b..3b807912a 100644
--- a/src/scripting.c
+++ b/src/scripting.c
@@ -1269,7 +1269,7 @@ void scriptingInit(int setup) {
/* Initialize a dictionary we use to map SHAs to scripts.
* This is useful for replication, as we need to replicate EVALSHA
* as EVAL, so we need to remember the associated script. */
- server.lua_scripts = dictCreate(&shaScriptObjectDictType,NULL);
+ server.lua_scripts = dictCreate(&shaScriptObjectDictType);
server.lua_scripts_mem = 0;
/* Register the redis commands table and fields */
diff --git a/src/sentinel.c b/src/sentinel.c
index 40f9a2ac4..f8e1e607d 100644
--- a/src/sentinel.c
+++ b/src/sentinel.c
@@ -406,8 +406,8 @@ void sentinelSimFailureCrash(void);
void releaseSentinelRedisInstance(sentinelRedisInstance *ri);
-void dictInstancesValDestructor (void *privdata, void *obj) {
- UNUSED(privdata);
+void dictInstancesValDestructor (dict *d, void *obj) {
+ UNUSED(d);
releaseSentinelRedisInstance(obj);
}
@@ -527,7 +527,7 @@ void initSentinel(void) {
/* Initialize various data structures. */
sentinel.current_epoch = 0;
- sentinel.masters = dictCreate(&instancesDictType,NULL);
+ sentinel.masters = dictCreate(&instancesDictType);
sentinel.tilt = 0;
sentinel.tilt_start_time = 0;
sentinel.previous_time = mstime();
@@ -1346,13 +1346,13 @@ sentinelRedisInstance *createSentinelRedisInstance(char *name, int flags, char *
ri->slave_master_port = 0;
ri->slave_master_link_status = SENTINEL_MASTER_LINK_STATUS_DOWN;
ri->slave_repl_offset = 0;
- ri->sentinels = dictCreate(&instancesDictType,NULL);
+ ri->sentinels = dictCreate(&instancesDictType);
ri->quorum = quorum;
ri->parallel_syncs = SENTINEL_DEFAULT_PARALLEL_SYNCS;
ri->master = master;
- ri->slaves = dictCreate(&instancesDictType,NULL);
+ ri->slaves = dictCreate(&instancesDictType);
ri->info_refresh = 0;
- ri->renamed_commands = dictCreate(&renamedCommandsDictType,NULL);
+ ri->renamed_commands = dictCreate(&renamedCommandsDictType);
/* Failover state. */
ri->leader = NULL;
@@ -1563,10 +1563,10 @@ void sentinelDelFlagsToDictOfRedisInstances(dict *instances, int flags) {
void sentinelResetMaster(sentinelRedisInstance *ri, int flags) {
serverAssert(ri->flags & SRI_MASTER);
dictRelease(ri->slaves);
- ri->slaves = dictCreate(&instancesDictType,NULL);
+ ri->slaves = dictCreate(&instancesDictType);
if (!(flags & SENTINEL_RESET_NO_SENTINELS)) {
dictRelease(ri->sentinels);
- ri->sentinels = dictCreate(&instancesDictType,NULL);
+ ri->sentinels = dictCreate(&instancesDictType);
}
instanceLinkCloseConnection(ri->link,ri->link->cc);
instanceLinkCloseConnection(ri->link,ri->link->pc);
@@ -3789,7 +3789,7 @@ NULL
copy_keeper.valDestructor = NULL;
dict *masters_local = sentinel.masters;
if (c->argc > 2) {
- masters_local = dictCreate(&copy_keeper, NULL);
+ masters_local = dictCreate(&copy_keeper);
for (int i = 2; i < c->argc; i++) {
sentinelRedisInstance *ri;
@@ -4094,7 +4094,7 @@ void sentinelSetCommand(client *c) {
/* If the target name is the same as the source name there
* is no need to add an entry mapping to itself. */
- if (!dictSdsKeyCaseCompare(NULL,oldname,newname)) {
+ if (!dictSdsKeyCaseCompare(ri->renamed_commands,oldname,newname)) {
oldname = sdsdup(oldname);
newname = sdsdup(newname);
dictAdd(ri->renamed_commands,oldname,newname);
@@ -4431,7 +4431,7 @@ char *sentinelGetLeader(sentinelRedisInstance *master, uint64_t epoch) {
uint64_t max_votes = 0;
serverAssert(master->flags & (SRI_O_DOWN|SRI_FAILOVER_IN_PROGRESS));
- counters = dictCreate(&leaderVotesDictType,NULL);
+ counters = dictCreate(&leaderVotesDictType);
voters = dictSize(master->sentinels)+1; /* All the other sentinels and me.*/
diff --git a/src/server.c b/src/server.c
index dab92dee8..8d232b53b 100644
--- a/src/server.c
+++ b/src/server.c
@@ -1263,23 +1263,23 @@ void exitFromChild(int retcode) {
* keys and redis objects as values (objects can hold SDS strings,
* lists, sets). */
-void dictVanillaFree(void *privdata, void *val)
+void dictVanillaFree(dict *d, void *val)
{
- DICT_NOTUSED(privdata);
+ UNUSED(d);
zfree(val);
}
-void dictListDestructor(void *privdata, void *val)
+void dictListDestructor(dict *d, void *val)
{
- DICT_NOTUSED(privdata);
+ UNUSED(d);
listRelease((list*)val);
}
-int dictSdsKeyCompare(void *privdata, const void *key1,
+int dictSdsKeyCompare(dict *d, const void *key1,
const void *key2)
{
int l1,l2;
- DICT_NOTUSED(privdata);
+ UNUSED(d);
l1 = sdslen((sds)key1);
l2 = sdslen((sds)key2);
@@ -1289,34 +1289,31 @@ int dictSdsKeyCompare(void *privdata, const void *key1,
/* A case insensitive version used for the command lookup table and other
* places where case insensitive non binary-safe comparison is needed. */
-int dictSdsKeyCaseCompare(void *privdata, const void *key1,
+int dictSdsKeyCaseCompare(dict *d, const void *key1,
const void *key2)
{
- DICT_NOTUSED(privdata);
-
+ UNUSED(d);
return strcasecmp(key1, key2) == 0;
}
-void dictObjectDestructor(void *privdata, void *val)
+void dictObjectDestructor(dict *d, void *val)
{
- DICT_NOTUSED(privdata);
-
+ UNUSED(d);
if (val == NULL) return; /* Lazy freeing will set value to NULL. */
decrRefCount(val);
}
-void dictSdsDestructor(void *privdata, void *val)
+void dictSdsDestructor(dict *d, void *val)
{
- DICT_NOTUSED(privdata);
-
+ UNUSED(d);
sdsfree(val);
}
-int dictObjKeyCompare(void *privdata, const void *key1,
+int dictObjKeyCompare(dict *d, const void *key1,
const void *key2)
{
const robj *o1 = key1, *o2 = key2;
- return dictSdsKeyCompare(privdata,o1->ptr,o2->ptr);
+ return dictSdsKeyCompare(d, o1->ptr,o2->ptr);
}
uint64_t dictObjHash(const void *key) {
@@ -1332,8 +1329,7 @@ uint64_t dictSdsCaseHash(const void *key) {
return dictGenCaseHashFunction((unsigned char*)key, sdslen((char*)key));
}
-int dictEncObjKeyCompare(void *privdata, const void *key1,
- const void *key2)
+int dictEncObjKeyCompare(dict *d, const void *key1, const void *key2)
{
robj *o1 = (robj*) key1, *o2 = (robj*) key2;
int cmp;
@@ -1348,7 +1344,7 @@ int dictEncObjKeyCompare(void *privdata, const void *key1,
* objects as well. */
if (o1->refcount != OBJ_STATIC_REFCOUNT) o1 = getDecodedObject(o1);
if (o2->refcount != OBJ_STATIC_REFCOUNT) o2 = getDecodedObject(o2);
- cmp = dictSdsKeyCompare(privdata,o1->ptr,o2->ptr);
+ cmp = dictSdsKeyCompare(d,o1->ptr,o2->ptr);
if (o1->refcount != OBJ_STATIC_REFCOUNT) decrRefCount(o1);
if (o2->refcount != OBJ_STATIC_REFCOUNT) decrRefCount(o2);
return cmp;
@@ -2691,7 +2687,7 @@ void initServerConfig(void) {
server.shutdown_asap = 0;
server.cluster_configfile = zstrdup(CONFIG_DEFAULT_CLUSTER_CONFIG_FILE);
server.cluster_module_flags = CLUSTER_MODULE_FLAG_NONE;
- server.migrate_cached_sockets = dictCreate(&migrateCacheDictType,NULL);
+ server.migrate_cached_sockets = dictCreate(&migrateCacheDictType);
server.next_client_id = 1; /* Client IDs, start from 1 .*/
server.loading_process_events_interval_bytes = (1024*1024*2);
server.page_size = sysconf(_SC_PAGESIZE);
@@ -2750,8 +2746,8 @@ void initServerConfig(void) {
/* Command table -- we initialize it here as it is part of the
* initial configuration, since command names may be changed via
* redis.conf using the rename-command directive. */
- server.commands = dictCreate(&commandTableDictType,NULL);
- server.orig_commands = dictCreate(&commandTableDictType,NULL);
+ server.commands = dictCreate(&commandTableDictType);
+ server.orig_commands = dictCreate(&commandTableDictType);
populateCommandTable();
server.delCommand = lookupCommandByCString("del");
server.multiCommand = lookupCommandByCString("multi");
@@ -3242,20 +3238,20 @@ void initServer(void) {
/* Create the Redis databases, and initialize other internal state. */
for (j = 0; j < server.dbnum; j++) {
- server.db[j].dict = dictCreate(&dbDictType,NULL);
- server.db[j].expires = dictCreate(&dbExpiresDictType,NULL);
+ server.db[j].dict = dictCreate(&dbDictType);
+ server.db[j].expires = dictCreate(&dbExpiresDictType);
server.db[j].expires_cursor = 0;
- server.db[j].blocking_keys = dictCreate(&keylistDictType,NULL);
- server.db[j].ready_keys = dictCreate(&objectKeyPointerValueDictType,NULL);
- server.db[j].watched_keys = dictCreate(&keylistDictType,NULL);
+ server.db[j].blocking_keys = dictCreate(&keylistDictType);
+ server.db[j].ready_keys = dictCreate(&objectKeyPointerValueDictType);
+ server.db[j].watched_keys = dictCreate(&keylistDictType);
server.db[j].id = j;
server.db[j].avg_ttl = 0;
server.db[j].defrag_later = listCreate();
listSetFreeMethod(server.db[j].defrag_later,(void (*)(void*))sdsfree);
}
evictionPoolAlloc(); /* Initialize the LRU keys pool. */
- server.pubsub_channels = dictCreate(&keylistDictType,NULL);
- server.pubsub_patterns = dictCreate(&keylistDictType,NULL);
+ server.pubsub_channels = dictCreate(&keylistDictType);
+ server.pubsub_patterns = dictCreate(&keylistDictType);
server.cronloops = 0;
server.in_eval = 0;
server.in_exec = 0;
diff --git a/src/server.h b/src/server.h
index 11a1c6339..5516dab29 100644
--- a/src/server.h
+++ b/src/server.h
@@ -2409,13 +2409,13 @@ robj *dbUnshareStringValue(redisDb *db, robj *key, robj *o);
#define EMPTYDB_NO_FLAGS 0 /* No flags. */
#define EMPTYDB_ASYNC (1<<0) /* Reclaim memory in another thread. */
-long long emptyDb(int dbnum, int flags, void(callback)(void*));
-long long emptyDbStructure(redisDb *dbarray, int dbnum, int async, void(callback)(void*));
+long long emptyDb(int dbnum, int flags, void(callback)(dict*));
+long long emptyDbStructure(redisDb *dbarray, int dbnum, int async, void(callback)(dict*));
void flushAllDataAndResetRDB(int flags);
long long dbTotalServerKeyCount();
dbBackup *backupDb(void);
void restoreDbBackup(dbBackup *backup);
-void discardDbBackup(dbBackup *backup, int flags, void(callback)(void*));
+void discardDbBackup(dbBackup *backup, int flags, void(callback)(dict*));
int selectDb(client *c, int id);
@@ -2519,9 +2519,9 @@ int performEvictions(void);
/* Keys hashing / comparison functions for dict.c hash tables. */
uint64_t dictSdsHash(const void *key);
uint64_t dictSdsCaseHash(const void *key);
-int dictSdsKeyCompare(void *privdata, const void *key1, const void *key2);
-int dictSdsKeyCaseCompare(void *privdata, const void *key1, const void *key2);
-void dictSdsDestructor(void *privdata, void *val);
+int dictSdsKeyCompare(dict *d, const void *key1, const void *key2);
+int dictSdsKeyCaseCompare(dict *d, const void *key1, const void *key2);
+void dictSdsDestructor(dict *d, void *val);
/* Git SHA1 */
char *redisGitSHA1(void);
diff --git a/src/t_hash.c b/src/t_hash.c
index 7a1215b13..68d304c50 100644
--- a/src/t_hash.c
+++ b/src/t_hash.c
@@ -468,7 +468,7 @@ void hashTypeConvertZiplist(robj *o, int enc) {
int ret;
hi = hashTypeInitIterator(o);
- dict = dictCreate(&hashDictType, NULL);
+ dict = dictCreate(&hashDictType);
/* Presize the dict to avoid rehashing */
dictExpand(dict,hashTypeLength(o));
@@ -523,7 +523,7 @@ robj *hashTypeDup(robj *o) {
hobj = createObject(OBJ_HASH, new_zl);
hobj->encoding = OBJ_ENCODING_ZIPLIST;
} else if(o->encoding == OBJ_ENCODING_HT){
- dict *d = dictCreate(&hashDictType, NULL);
+ dict *d = dictCreate(&hashDictType);
dictExpand(d, dictSize((const dict*)o->ptr));
hi = hashTypeInitIterator(o);
@@ -586,7 +586,7 @@ int hashZiplistValidateIntegrity(unsigned char *zl, size_t size, int deep) {
struct {
long count;
dict *fields;
- } data = {0, dictCreate(&hashDictType, NULL)};
+ } data = {0, dictCreate(&hashDictType)};
int ret = ziplistValidateIntegrity(zl, size, 1, _hashZiplistEntryValidation, &data);
@@ -1080,7 +1080,7 @@ void hrandfieldWithCountCommand(client *c, long l, int withvalues) {
* a bit less than the number of elements in the hash, the natural approach
* used into CASE 4 is highly inefficient. */
if (count*HRANDFIELD_SUB_STRATEGY_MUL > size) {
- dict *d = dictCreate(&sdsReplyDictType, NULL);
+ dict *d = dictCreate(&sdsReplyDictType);
dictExpand(d, size);
hashTypeIterator *hi = hashTypeInitIterator(hash);
@@ -1150,7 +1150,7 @@ void hrandfieldWithCountCommand(client *c, long l, int withvalues) {
/* Hashtable encoding (generic implementation) */
unsigned long added = 0;
ziplistEntry key, value;
- dict *d = dictCreate(&hashDictType, NULL);
+ dict *d = dictCreate(&hashDictType);
dictExpand(d, count);
while(added < count) {
hashTypeRandomElement(hash, size, &key, withvalues? &value : NULL);
diff --git a/src/t_set.c b/src/t_set.c
index 57a29a98f..9db05d6fc 100644
--- a/src/t_set.c
+++ b/src/t_set.c
@@ -239,7 +239,7 @@ void setTypeConvert(robj *setobj, int enc) {
if (enc == OBJ_ENCODING_HT) {
int64_t intele;
- dict *d = dictCreate(&setDictType,NULL);
+ dict *d = dictCreate(&setDictType);
sds element;
/* Presize the dict to avoid rehashing */
@@ -723,7 +723,7 @@ void srandmemberWithCountCommand(client *c) {
}
/* For CASE 3 and CASE 4 we need an auxiliary dictionary. */
- d = dictCreate(&sdsReplyDictType,NULL);
+ d = dictCreate(&sdsReplyDictType);
/* CASE 3:
* The number of elements inside the set is not greater than
diff --git a/src/t_zset.c b/src/t_zset.c
index 71344f135..c7681adc7 100644
--- a/src/t_zset.c
+++ b/src/t_zset.c
@@ -1188,7 +1188,7 @@ void zsetConvert(robj *zobj, int encoding) {
serverPanic("Unknown target encoding");
zs = zmalloc(sizeof(*zs));
- zs->dict = dictCreate(&zsetDictType,NULL);
+ zs->dict = dictCreate(&zsetDictType);
zs->zsl = zslCreate();
eptr = ziplistIndex(zl,0);
@@ -1647,7 +1647,7 @@ int zsetZiplistValidateIntegrity(unsigned char *zl, size_t size, int deep) {
struct {
long count;
dict *fields;
- } data = {0, dictCreate(&hashDictType, NULL)};
+ } data = {0, dictCreate(&hashDictType)};
int ret = ziplistValidateIntegrity(zl, size, 1, _zsetZiplistValidateIntegrity, &data);
@@ -2710,7 +2710,7 @@ void zunionInterDiffGenericCommand(client *c, robj *dstkey, int numkeysIndex, in
zuiClearIterator(&src[0]);
}
} else if (op == SET_OP_UNION) {
- dict *accumulator = dictCreate(&setAccumulatorDictType,NULL);
+ dict *accumulator = dictCreate(&setAccumulatorDictType);
dictIterator *di;
dictEntry *de, *existing;
double score;
@@ -4109,7 +4109,7 @@ void zrandmemberWithCountCommand(client *c, long l, int withscores) {
* a bit less than the number of elements in the set, the natural approach
* used into CASE 4 is highly inefficient. */
if (count*ZRANDMEMBER_SUB_STRATEGY_MUL > size) {
- dict *d = dictCreate(&sdsReplyDictType, NULL);
+ dict *d = dictCreate(&sdsReplyDictType);
dictExpand(d, size);
/* Add all the elements into the temporary dictionary. */
while (zuiNext(&src, &zval)) {
@@ -4169,7 +4169,7 @@ void zrandmemberWithCountCommand(client *c, long l, int withscores) {
/* Hashtable encoding (generic implementation) */
unsigned long added = 0;
- dict *d = dictCreate(&hashDictType, NULL);
+ dict *d = dictCreate(&hashDictType);
dictExpand(d, count);
while (added < count) {