author    Wang Yuan <wangyuancode@163.com>    2020-12-02 19:56:11 +0800
committer GitHub <noreply@github.com>    2020-12-02 13:56:11 +0200
commit    b55a827ea2e19bd6cd48f216e8e6caa34299f9b9 (patch)
tree      d5698bb9421fe70cf76f406989a816ad979a3935
parent    84e0489562a0f61b95755dc863bfca79c23b6ef0 (diff)
download  redis-b55a827ea2e19bd6cd48f216e8e6caa34299f9b9.tar.gz
Back up the keys-to-slots map and restore it when sync fails, if the diskless-load type is swapdb in cluster mode (#8108)
When the replica's diskless-load type is swapdb in cluster mode, we did not back up the keys-to-slots map, so the map was lost if the sync failed. Now we back up the keys-to-slots map first and restore it properly on failure. This commit also includes a refactoring/cleanup of the backup mechanism (moving it to db.c and restructuring it a bit). Co-authored-by: Oran Agra <oran@redislabs.com>
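For context, here is a minimal sketch of how the reworked backup API is used in the replica's diskless-load path, condensed from the replication.c hunks below. The surrounding control flow and the rdbLoadRio() call are assumed from the existing readSyncBulkPayload() code and are not part of this patch:

    dbBackup *backup = NULL;
    int empty_db_flags = server.repl_slave_lazy_flush ? EMPTYDB_ASYNC
                                                      : EMPTYDB_NO_FLAGS;

    if (use_diskless_load &&
        server.repl_diskless_load == REPL_DISKLESS_LOAD_SWAPDB)
    {
        /* backupDb() now saves the main dictionaries and, in cluster mode,
         * the slots->keys radix tree plus the per-slot key counters. */
        backup = disklessLoadMakeBackup();
    }

    /* emptyDb() is still called so the module FLUSHDB events fire. */
    emptyDb(-1, empty_db_flags, replicationEmptyDbCallback);

    if (rdbLoadRio(&rdb, RDBFLAGS_REPLICATION, &rsi) != C_OK) {
        /* Loading failed: drop the half-loaded data, then restore the backup,
         * which also puts back the keys-to-slots map on a cluster replica. */
        emptyDb(-1, empty_db_flags, replicationEmptyDbCallback);
        if (backup) disklessLoadRestoreBackup(backup);
    } else if (backup) {
        /* Loading succeeded: the old data is useless, discard it (possibly async). */
        disklessLoadDiscardBackup(backup, empty_db_flags);
    }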
-rw-r--r--  src/bio.c                                        |   2
-rw-r--r--  src/db.c                                         | 204
-rw-r--r--  src/lazyfree.c                                   |  20
-rw-r--r--  src/replication.c                                |  69
-rw-r--r--  src/server.h                                     |  18
-rw-r--r--  tests/cluster/tests/17-diskless-load-swapdb.tcl  |  77
6 files changed, 272 insertions, 118 deletions
diff --git a/src/bio.c b/src/bio.c
index a512def0a..a11bcb18b 100644
--- a/src/bio.c
+++ b/src/bio.c
@@ -203,7 +203,7 @@ void *bioProcessBackgroundJobs(void *arg) {
/* What we free changes depending on what arguments are set:
* arg1 -> free the object at pointer.
* arg2 & arg3 -> free two dictionaries (a Redis DB).
- * only arg3 -> free the skiplist. */
+ * only arg3 -> free the radix tree. */
if (job->arg1)
lazyfreeFreeObjectFromBioThread(job->arg1);
else if (job->arg2 && job->arg3)
diff --git a/src/db.c b/src/db.c
index 32baf0974..b52c6f3f2 100644
--- a/src/db.c
+++ b/src/db.c
@@ -34,6 +34,13 @@
#include <signal.h>
#include <ctype.h>
+/* Database backup. */
+struct dbBackup {
+ redisDb *dbarray;
+ rax *slots_to_keys;
+ uint64_t slots_keys_count[CLUSTER_SLOTS];
+};
+
/*-----------------------------------------------------------------------------
* C-level DB API
*----------------------------------------------------------------------------*/
@@ -359,6 +366,41 @@ robj *dbUnshareStringValue(redisDb *db, robj *key, robj *o) {
return o;
}
+/* Remove all keys from the database(s) structure. The dbarray argument
+ * may not be the server main DBs (could be a backup).
+ *
+ * The dbnum can be -1 if all the DBs should be emptied, or the specified
+ * DB index if we want to empty only a single database.
+ * The function returns the number of keys removed from the database(s). */
+long long emptyDbStructure(redisDb *dbarray, int dbnum, int async,
+ void(callback)(void*))
+{
+ long long removed = 0;
+ int startdb, enddb;
+
+ if (dbnum == -1) {
+ startdb = 0;
+ enddb = server.dbnum-1;
+ } else {
+ startdb = enddb = dbnum;
+ }
+
+ for (int j = startdb; j <= enddb; j++) {
+ removed += dictSize(dbarray[j].dict);
+ if (async) {
+ emptyDbAsync(&dbarray[j]);
+ } else {
+ dictEmpty(dbarray[j].dict,callback);
+ dictEmpty(dbarray[j].expires,callback);
+ }
+ /* Because all keys of database are removed, reset average ttl. */
+ dbarray[j].avg_ttl = 0;
+ dbarray[j].expires_cursor = 0;
+ }
+
+ return removed;
+}
+
/* Remove all keys from all the databases in a Redis server.
* If callback is given the function is called from time to time to
* signal that work is in progress.
@@ -367,18 +409,14 @@ robj *dbUnshareStringValue(redisDb *db, robj *key, robj *o) {
* DB number if we want to flush only a single Redis database number.
*
* Flags are be EMPTYDB_NO_FLAGS if no special flags are specified or
- * 1. EMPTYDB_ASYNC if we want the memory to be freed in a different thread.
- * 2. EMPTYDB_BACKUP if we want to empty the backup dictionaries created by
- * disklessLoadMakeBackups. In that case we only free memory and avoid
- * firing module events.
+ * EMPTYDB_ASYNC if we want the memory to be freed in a different thread
* and the function to return ASAP.
*
* On success the function returns the number of keys removed from the
* database(s). Otherwise -1 is returned in the specific case the
* DB number is out of range, and errno is set to EINVAL. */
-long long emptyDbGeneric(redisDb *dbarray, int dbnum, int flags, void(callback)(void*)) {
+long long emptyDb(int dbnum, int flags, void(callback)(void*)) {
int async = (flags & EMPTYDB_ASYNC);
- int backup = (flags & EMPTYDB_BACKUP); /* Just free the memory, nothing else */
RedisModuleFlushInfoV1 fi = {REDISMODULE_FLUSHINFO_VERSION,!async,dbnum};
long long removed = 0;
@@ -387,63 +425,107 @@ long long emptyDbGeneric(redisDb *dbarray, int dbnum, int flags, void(callback)(
return -1;
}
- /* Pre-flush actions */
- if (!backup) {
- /* Fire the flushdb modules event. */
- moduleFireServerEvent(REDISMODULE_EVENT_FLUSHDB,
- REDISMODULE_SUBEVENT_FLUSHDB_START,
- &fi);
+ /* Fire the flushdb modules event. */
+ moduleFireServerEvent(REDISMODULE_EVENT_FLUSHDB,
+ REDISMODULE_SUBEVENT_FLUSHDB_START,
+ &fi);
- /* Make sure the WATCHed keys are affected by the FLUSH* commands.
- * Note that we need to call the function while the keys are still
- * there. */
- signalFlushedDb(dbnum);
- }
+ /* Make sure the WATCHed keys are affected by the FLUSH* commands.
+ * Note that we need to call the function while the keys are still
+ * there. */
+ signalFlushedDb(dbnum);
- int startdb, enddb;
- if (dbnum == -1) {
- startdb = 0;
- enddb = server.dbnum-1;
- } else {
- startdb = enddb = dbnum;
+ /* Empty redis database structure. */
+ removed = emptyDbStructure(server.db, dbnum, async, callback);
+
+ /* Flush slots to keys map if enable cluster, we can flush entire
+ * slots to keys map whatever dbnum because only support one DB
+ * in cluster mode. */
+ if (server.cluster_enabled) slotToKeyFlush(async);
+
+ if (dbnum == -1) flushSlaveKeysWithExpireList();
+
+ /* Also fire the end event. Note that this event will fire almost
+ * immediately after the start event if the flush is asynchronous. */
+ moduleFireServerEvent(REDISMODULE_EVENT_FLUSHDB,
+ REDISMODULE_SUBEVENT_FLUSHDB_END,
+ &fi);
+
+ return removed;
+}
+
+/* Store a backup of the database for later use, and put an empty one
+ * instead of it. */
+dbBackup *backupDb(void) {
+ dbBackup *backup = zmalloc(sizeof(dbBackup));
+
+ /* Backup main DBs. */
+ backup->dbarray = zmalloc(sizeof(redisDb)*server.dbnum);
+ for (int i=0; i<server.dbnum; i++) {
+ backup->dbarray[i] = server.db[i];
+ server.db[i].dict = dictCreate(&dbDictType,NULL);
+ server.db[i].expires = dictCreate(&keyptrDictType,NULL);
}
- for (int j = startdb; j <= enddb; j++) {
- removed += dictSize(dbarray[j].dict);
- if (async) {
- emptyDbAsync(&dbarray[j]);
- } else {
- dictEmpty(dbarray[j].dict,callback);
- dictEmpty(dbarray[j].expires,callback);
- }
- /* Because we will start a new database, reset average ttl. */
- dbarray[j].avg_ttl = 0;
- dbarray[j].expires_cursor = 0;
+ /* Backup cluster slots to keys map if enable cluster. */
+ if (server.cluster_enabled) {
+ backup->slots_to_keys = server.cluster->slots_to_keys;
+ memcpy(backup->slots_keys_count, server.cluster->slots_keys_count,
+ sizeof(server.cluster->slots_keys_count));
+ server.cluster->slots_to_keys = raxNew();
+ memset(server.cluster->slots_keys_count, 0,
+ sizeof(server.cluster->slots_keys_count));
}
- /* Post-flush actions */
- if (!backup) {
- if (server.cluster_enabled) {
- if (async) {
- slotToKeyFlushAsync();
- } else {
- slotToKeyFlush();
- }
- }
- if (dbnum == -1) flushSlaveKeysWithExpireList();
+ return backup;
+}
+
+/* Discard a previously created backup, this can be slow (similar to FLUSHALL)
+ * Arguments are similar to the ones of emptyDb, see EMPTYDB_ flags. */
+void discardDbBackup(dbBackup *buckup, int flags, void(callback)(void*)) {
+ int async = (flags & EMPTYDB_ASYNC);
- /* Also fire the end event. Note that this event will fire almost
- * immediately after the start event if the flush is asynchronous. */
- moduleFireServerEvent(REDISMODULE_EVENT_FLUSHDB,
- REDISMODULE_SUBEVENT_FLUSHDB_END,
- &fi);
+ /* Release main DBs backup . */
+ emptyDbStructure(buckup->dbarray, -1, async, callback);
+ for (int i=0; i<server.dbnum; i++) {
+ dictRelease(buckup->dbarray[i].dict);
+ dictRelease(buckup->dbarray[i].expires);
}
- return removed;
+ /* Release slots to keys map backup if enable cluster. */
+ if (server.cluster_enabled) freeSlotsToKeysMap(buckup->slots_to_keys, async);
+
+ /* Release buckup. */
+ zfree(buckup->dbarray);
+ zfree(buckup);
}
-long long emptyDb(int dbnum, int flags, void(callback)(void*)) {
- return emptyDbGeneric(server.db, dbnum, flags, callback);
+/* Restore the previously created backup (discarding what currently resides
+ * in the db).
+ * This function should be called after the current contents of the database
+ * was emptied with a previous call to emptyDb (possibly using the async mode). */
+void restoreDbBackup(dbBackup *buckup) {
+ /* Restore main DBs. */
+ for (int i=0; i<server.dbnum; i++) {
+ serverAssert(dictSize(server.db[i].dict) == 0);
+ serverAssert(dictSize(server.db[i].expires) == 0);
+ dictRelease(server.db[i].dict);
+ dictRelease(server.db[i].expires);
+ server.db[i] = buckup->dbarray[i];
+ }
+
+ /* Restore slots to keys map backup if enable cluster. */
+ if (server.cluster_enabled) {
+ serverAssert(server.cluster->slots_to_keys->numele == 0);
+ raxFree(server.cluster->slots_to_keys);
+ server.cluster->slots_to_keys = buckup->slots_to_keys;
+ memcpy(server.cluster->slots_keys_count, buckup->slots_keys_count,
+ sizeof(server.cluster->slots_keys_count));
+ }
+
+ /* Release buckup. */
+ zfree(buckup->dbarray);
+ zfree(buckup);
}
int selectDb(client *c, int id) {
@@ -1796,11 +1878,25 @@ void slotToKeyDel(sds key) {
slotToKeyUpdateKey(key,0);
}
-void slotToKeyFlush(void) {
- raxFree(server.cluster->slots_to_keys);
+/* Release the radix tree mapping Redis Cluster keys to slots. If 'async'
+ * is true, we release it asynchronously. */
+void freeSlotsToKeysMap(rax *rt, int async) {
+ if (async) {
+ freeSlotsToKeysMapAsync(rt);
+ } else {
+ raxFree(rt);
+ }
+}
+
+/* Empty the slots-keys map of Redis CLuster by creating a new empty one and
+ * freeing the old one. */
+void slotToKeyFlush(int async) {
+ rax *old = server.cluster->slots_to_keys;
+
server.cluster->slots_to_keys = raxNew();
memset(server.cluster->slots_keys_count,0,
sizeof(server.cluster->slots_keys_count));
+ freeSlotsToKeysMap(old, async);
}
/* Populate the specified array of objects with keys in the specified slot.
diff --git a/src/lazyfree.c b/src/lazyfree.c
index 5a78d5a55..641ab4e64 100644
--- a/src/lazyfree.c
+++ b/src/lazyfree.c
@@ -158,16 +158,10 @@ void emptyDbAsync(redisDb *db) {
bioCreateBackgroundJob(BIO_LAZY_FREE,NULL,oldht1,oldht2);
}
-/* Empty the slots-keys map of Redis CLuster by creating a new empty one
- * and scheduling the old for lazy freeing. */
-void slotToKeyFlushAsync(void) {
- rax *old = server.cluster->slots_to_keys;
-
- server.cluster->slots_to_keys = raxNew();
- memset(server.cluster->slots_keys_count,0,
- sizeof(server.cluster->slots_keys_count));
- atomicIncr(lazyfree_objects,old->numele);
- bioCreateBackgroundJob(BIO_LAZY_FREE,NULL,NULL,old);
+/* Release the radix tree mapping Redis Cluster keys to slots asynchronously. */
+void freeSlotsToKeysMapAsync(rax *rt) {
+ atomicIncr(lazyfree_objects,rt->numele);
+ bioCreateBackgroundJob(BIO_LAZY_FREE,NULL,NULL,rt);
}
/* Release objects from the lazyfree thread. It's just decrRefCount()
@@ -180,9 +174,7 @@ void lazyfreeFreeObjectFromBioThread(robj *o) {
/* Release a database from the lazyfree thread. The 'db' pointer is the
* database which was substituted with a fresh one in the main thread
- * when the database was logically deleted. 'sl' is a skiplist used by
- * Redis Cluster in order to take the hash slots -> keys mapping. This
- * may be NULL if Redis Cluster is disabled. */
+ * when the database was logically deleted. */
void lazyfreeFreeDatabaseFromBioThread(dict *ht1, dict *ht2) {
size_t numkeys = dictSize(ht1);
dictRelease(ht1);
@@ -191,7 +183,7 @@ void lazyfreeFreeDatabaseFromBioThread(dict *ht1, dict *ht2) {
atomicIncr(lazyfreed_objects,numkeys);
}
-/* Release the skiplist mapping Redis Cluster keys to slots in the
+/* Release the radix tree mapping Redis Cluster keys to slots in the
* lazyfree thread. */
void lazyfreeFreeSlotsMapFromBioThread(rax *rt) {
size_t len = rt->numele;
diff --git a/src/replication.c b/src/replication.c
index a288478ff..64aa41390 100644
--- a/src/replication.c
+++ b/src/replication.c
@@ -1432,16 +1432,10 @@ static int useDisklessLoad() {
}
/* Helper function for readSyncBulkPayload() to make backups of the current
- * DBs before socket-loading the new ones. The backups may be restored later
- * or freed by disklessLoadRestoreBackups(). */
-redisDb *disklessLoadMakeBackups(void) {
- redisDb *backups = zmalloc(sizeof(redisDb)*server.dbnum);
- for (int i=0; i<server.dbnum; i++) {
- backups[i] = server.db[i];
- server.db[i].dict = dictCreate(&dbDictType,NULL);
- server.db[i].expires = dictCreate(&keyptrDictType,NULL);
- }
- return backups;
+ * databases before socket-loading the new ones. The backups may be restored
+ * by disklessLoadRestoreBackup or freed by disklessLoadDiscardBackup later. */
+dbBackup *disklessLoadMakeBackup(void) {
+ return backupDb();
}
/* Helper function for readSyncBulkPayload(): when replica-side diskless
@@ -1449,30 +1443,15 @@ redisDb *disklessLoadMakeBackups(void) {
* before loading the new ones from the socket.
*
* If the socket loading went wrong, we want to restore the old backups
- * into the server databases. This function does just that in the case
- * the 'restore' argument (the number of DBs to replace) is non-zero.
- *
- * When instead the loading succeeded we want just to free our old backups,
- * in that case the function will do just that when 'restore' is 0. */
-void disklessLoadRestoreBackups(redisDb *backup, int restore, int empty_db_flags)
-{
- if (restore) {
- /* Restore. */
- emptyDbGeneric(server.db,-1,empty_db_flags,replicationEmptyDbCallback);
- for (int i=0; i<server.dbnum; i++) {
- dictRelease(server.db[i].dict);
- dictRelease(server.db[i].expires);
- server.db[i] = backup[i];
- }
- } else {
- /* Delete (Pass EMPTYDB_BACKUP in order to avoid firing module events) . */
- emptyDbGeneric(backup,-1,empty_db_flags|EMPTYDB_BACKUP,replicationEmptyDbCallback);
- for (int i=0; i<server.dbnum; i++) {
- dictRelease(backup[i].dict);
- dictRelease(backup[i].expires);
- }
- }
- zfree(backup);
+ * into the server databases. */
+void disklessLoadRestoreBackup(dbBackup *buckup) {
+ restoreDbBackup(buckup);
+}
+
+/* Helper function for readSyncBulkPayload() to discard our old backups
+ * when the loading succeeded. */
+void disklessLoadDiscardBackup(dbBackup *buckup, int flag) {
+ discardDbBackup(buckup, flag, replicationEmptyDbCallback);
}
/* Asynchronously read the SYNC payload we receive from a master */
@@ -1481,7 +1460,7 @@ void readSyncBulkPayload(connection *conn) {
char buf[PROTO_IOBUF_LEN];
ssize_t nread, readlen, nwritten;
int use_diskless_load = useDisklessLoad();
- redisDb *diskless_load_backup = NULL;
+ dbBackup *diskless_load_backup = NULL;
int empty_db_flags = server.repl_slave_lazy_flush ? EMPTYDB_ASYNC :
EMPTYDB_NO_FLAGS;
off_t left;
@@ -1662,11 +1641,11 @@ void readSyncBulkPayload(connection *conn) {
server.repl_diskless_load == REPL_DISKLESS_LOAD_SWAPDB)
{
/* Create a backup of server.db[] and initialize to empty
- * dictionaries */
- diskless_load_backup = disklessLoadMakeBackups();
+ * dictionaries. */
+ diskless_load_backup = disklessLoadMakeBackup();
}
/* We call to emptyDb even in case of REPL_DISKLESS_LOAD_SWAPDB
- * (Where disklessLoadMakeBackups left server.db empty) because we
+ * (Where disklessLoadMakeBackup left server.db empty) because we
* want to execute all the auxiliary logic of emptyDb (Namely,
* fire module events) */
emptyDb(-1,empty_db_flags,replicationEmptyDbCallback);
@@ -1696,14 +1675,14 @@ void readSyncBulkPayload(connection *conn) {
"from socket");
cancelReplicationHandshake(1);
rioFreeConn(&rdb, NULL);
+
+ /* Remove the half-loaded data in case we started with
+ * an empty replica. */
+ emptyDb(-1,empty_db_flags,replicationEmptyDbCallback);
+
if (server.repl_diskless_load == REPL_DISKLESS_LOAD_SWAPDB) {
/* Restore the backed up databases. */
- disklessLoadRestoreBackups(diskless_load_backup,1,
- empty_db_flags);
- } else {
- /* Remove the half-loaded data in case we started with
- * an empty replica. */
- emptyDb(-1,empty_db_flags,replicationEmptyDbCallback);
+ disklessLoadRestoreBackup(diskless_load_backup);
}
/* Note that there's no point in restarting the AOF on SYNC
@@ -1718,7 +1697,7 @@ void readSyncBulkPayload(connection *conn) {
/* Delete the backup databases we created before starting to load
* the new RDB. Now the RDB was loaded with success so the old
* data is useless. */
- disklessLoadRestoreBackups(diskless_load_backup,0,empty_db_flags);
+ disklessLoadDiscardBackup(diskless_load_backup, empty_db_flags);
}
/* Verify the end mark is correct. */
diff --git a/src/server.h b/src/server.h
index 0bcbeabfb..a858d5833 100644
--- a/src/server.h
+++ b/src/server.h
@@ -684,6 +684,11 @@ typedef struct redisDb {
list *defrag_later; /* List of key names to attempt to defrag one by one, gradually. */
} redisDb;
+/* Declare database backup that include redis main DBs and slots to keys map.
+ * Definition is in db.c. We can't define it here since we define CLUSTER_SLOTS
+ * in cluster.h. */
+typedef struct dbBackup dbBackup;
+
/* Client MULTI/EXEC state */
typedef struct multiCmd {
robj **argv;
@@ -2208,11 +2213,14 @@ robj *dbUnshareStringValue(redisDb *db, robj *key, robj *o);
#define EMPTYDB_NO_FLAGS 0 /* No flags. */
#define EMPTYDB_ASYNC (1<<0) /* Reclaim memory in another thread. */
-#define EMPTYDB_BACKUP (1<<2) /* DB array is a backup for REPL_DISKLESS_LOAD_SWAPDB. */
long long emptyDb(int dbnum, int flags, void(callback)(void*));
-long long emptyDbGeneric(redisDb *dbarray, int dbnum, int flags, void(callback)(void*));
+long long emptyDbStructure(redisDb *dbarray, int dbnum, int async, void(callback)(void*));
void flushAllDataAndResetRDB(int flags);
long long dbTotalServerKeyCount();
+dbBackup *backupDb(void);
+void restoreDbBackup(dbBackup *buckup);
+void discardDbBackup(dbBackup *buckup, int flags, void(callback)(void*));
+
int selectDb(client *c, int id);
void signalModifiedKey(client *c, redisDb *db, robj *key);
@@ -2225,13 +2233,15 @@ void scanGenericCommand(client *c, robj *o, unsigned long cursor);
int parseScanCursorOrReply(client *c, robj *o, unsigned long *cursor);
void slotToKeyAdd(sds key);
void slotToKeyDel(sds key);
-void slotToKeyFlush(void);
int dbAsyncDelete(redisDb *db, robj *key);
void emptyDbAsync(redisDb *db);
-void slotToKeyFlushAsync(void);
+void slotToKeyFlush(int async);
size_t lazyfreeGetPendingObjectsCount(void);
size_t lazyfreeGetFreedObjectsCount(void);
void freeObjAsync(robj *key, robj *obj);
+void freeSlotsToKeysMapAsync(rax *rt);
+void freeSlotsToKeysMap(rax *rt, int async);
+
/* API to get key arguments from commands */
int *getKeysPrepareResult(getKeysResult *result, int numkeys);
diff --git a/tests/cluster/tests/17-diskless-load-swapdb.tcl b/tests/cluster/tests/17-diskless-load-swapdb.tcl
new file mode 100644
index 000000000..a035be7be
--- /dev/null
+++ b/tests/cluster/tests/17-diskless-load-swapdb.tcl
@@ -0,0 +1,77 @@
+# Check replica can restore database buckup correctly if fail to diskless load.
+
+source "../tests/includes/init-tests.tcl"
+
+test "Create a primary with a replica" {
+ create_cluster 1 1
+}
+
+test "Cluster should start ok" {
+ assert_cluster_state ok
+}
+
+test "Cluster is writable" {
+ cluster_write_test 0
+}
+
+test "Right to restore backups when fail to diskless load " {
+ set master [Rn 0]
+ set replica [Rn 1]
+ set master_id 0
+ set replica_id 1
+
+ $replica READONLY
+ $replica config set repl-diskless-load swapdb
+ $replica config rewrite
+ $master config set repl-backlog-size 1024
+ $master config set repl-diskless-sync yes
+ $master config set repl-diskless-sync-delay 0
+ $master config set rdb-key-save-delay 10000
+ $master config set rdbcompression no
+ $master config set appendonly no
+ $master config set save ""
+
+ # Write a key that belongs to slot 0
+ set slot0_key "06S"
+ $master set $slot0_key 1
+ after 100
+ assert_equal {1} [$replica get $slot0_key]
+ assert_equal $slot0_key [$replica CLUSTER GETKEYSINSLOT 0 1]
+
+ # Kill the replica
+ kill_instance redis $replica_id
+
+ # Delete the key from master
+ $master del $slot0_key
+
+ # Replica must full sync with master when start because replication
+ # backlog size is very small, and dumping rdb will cost several seconds.
+ set num 10000
+ set value [string repeat A 1024]
+ set rd [redis_deferring_client redis $master_id]
+ for {set j 0} {$j < $num} {incr j} {
+ $rd set $j $value
+ }
+ for {set j 0} {$j < $num} {incr j} {
+ $rd read
+ }
+
+ # Start the replica again
+ restart_instance redis $replica_id
+ $replica READONLY
+
+ # Start full sync
+ wait_for_condition 500 10 {
+ [string match "*sync*" [$replica role]]
+ } else {
+ fail "Fail to full sync"
+ }
+ after 100
+
+ # Kill master, abort full sync
+ kill_instance redis $master_id
+
+ # Replica keys and keys to slots map still both are right
+ assert_equal {1} [$replica get $slot0_key]
+ assert_equal $slot0_key [$replica CLUSTER GETKEYSINSLOT 0 1]
+}
\ No newline at end of file