author    Madelyn Olson <34459052+madolson@users.noreply.github.com>  2022-07-10 22:00:44 -0700
committer GitHub <noreply@github.com>  2022-07-10 22:00:44 -0700
commit    e6a1b2ea9534053213cb92b08f610f07c381695d (patch)
tree      be28e47581950398cd9abc53ccd4cf4a915f0826
parent    1209dc2277342491b100f298d311510c86b0a2f2 (diff)
Fix crash during handshake and cluster shards call (#10942)
* Fix an engine crash when there are nodes in the handshake state and a user calls CLUSTER SHARDS
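
A node that is still in the handshake phase has neither role flag set and no primary recorded, so when CLUSTER SHARDS walked the node table it eventually dereferenced a NULL primary pointer. Below is a minimal, self-contained sketch of that failure mode and of the filter the fix applies; the struct and function names are simplified stand-ins for illustration, not the real definitions from src/cluster.h.

    /* Minimal sketch, not the real Redis code: simplified structs that mirror
     * the fields used by src/cluster.c, to show why CLUSTER SHARDS could
     * crash while a peer was still in handshake. */
    #include <stdio.h>
    #include <stddef.h>

    typedef struct sketchNode {
        int is_master;              /* stands in for the CLUSTER_NODE_MASTER flag */
        int is_slave;               /* stands in for the CLUSTER_NODE_SLAVE flag  */
        struct sketchNode *slaveof; /* NULL while the node is still handshaking   */
        int numslaves;
    } sketchNode;

    /* Mirrors the shape of clusterGetNodesServingMySlots(): resolve the
     * primary, then walk its replicas. For a handshaking node my_primary is
     * NULL and the loop below dereferences it. */
    static void walk_shard(sketchNode *node) {
        sketchNode *my_primary = node->is_master ? node : node->slaveof;
        for (int i = 0; i < my_primary->numslaves; i++) { /* crashes if NULL */
            /* ... collect replica i ... */
        }
        printf("shard walked, %d replica(s)\n", my_primary->numslaves);
    }

    int main(void) {
        sketchNode master = { .is_master = 1 };
        sketchNode handshaking = { 0 };      /* neither flag set, no primary yet */
        sketchNode *all[] = { &master, &handshaking };

        for (size_t i = 0; i < sizeof(all) / sizeof(all[0]); i++) {
            sketchNode *n = all[i];
            /* The fix: the old filter skipped only flagged replicas; a
             * handshaking node is not flagged as a replica, so it slipped
             * through. Skipping everything that is not a master also
             * excludes handshaking nodes. */
            if (!n->is_master) continue;
            walk_shard(n);
        }
        return 0;
    }

The patch below does both things shown in this sketch: clusterReplyShards() now skips every node that is not a master, and clusterGetNodesServingMySlots() gains a serverAssert() so a future caller passing a half-connected node fails loudly instead of crashing on a NULL dereference.
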
-rw-r--r--  src/cluster.c                               8
-rw-r--r--  tests/cluster/tests/28-cluster-shards.tcl  15
2 files changed, 20 insertions(+), 3 deletions(-)
diff --git a/src/cluster.c b/src/cluster.c
index b434bcc02..ae353e1dd 100644
--- a/src/cluster.c
+++ b/src/cluster.c
@@ -1220,10 +1220,14 @@ clusterNode *clusterLookupNode(const char *name, int length) {
return dictGetVal(de);
}
-/* Get all the nodes serving the same slots as myself. */
+/* Get all the nodes serving the same slots as the given node. */
list *clusterGetNodesServingMySlots(clusterNode *node) {
list *nodes_for_slot = listCreate();
clusterNode *my_primary = nodeIsMaster(node) ? node : node->slaveof;
+
+ /* This function is only valid for fully connected nodes, so
+ * they should have a known primary. */
+ serverAssert(my_primary);
listAddNodeTail(nodes_for_slot, my_primary);
for (int i=0; i < my_primary->numslaves; i++) {
listAddNodeTail(nodes_for_slot, my_primary->slaves[i]);
@@ -5103,7 +5107,7 @@ void clusterReplyShards(client *c) {
* information and an empty slots array. */
while((de = dictNext(di)) != NULL) {
clusterNode *n = dictGetVal(de);
- if (nodeIsSlave(n)) {
+ if (!nodeIsMaster(n)) {
/* You can force a replica to own slots, even though it'll get reverted,
* so freeing the slot pair here just in case. */
clusterFreeNodesSlotsInfo(n);
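
Note that nodeIsSlave(n) and !nodeIsMaster(n) are not equivalent here: a node still in handshake carries neither role flag, so it passed the old test but is excluded by the new one. A hedged sketch of that flag logic, using illustrative flag values rather than the real constants from src/cluster.h:

    /* Illustrative flag values; the real constants live in src/cluster.h. */
    #define SKETCH_NODE_MASTER (1 << 0)
    #define SKETCH_NODE_SLAVE  (1 << 1)

    #define sketchIsMaster(flags) ((flags) & SKETCH_NODE_MASTER)
    #define sketchIsSlave(flags)  ((flags) & SKETCH_NODE_SLAVE)

    /* For a handshaking node, flags has neither role bit set:
     *   sketchIsSlave(0)   -> false  (old check kept the node, later crash)
     *   !sketchIsMaster(0) -> true   (new check skips the node)            */
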
diff --git a/tests/cluster/tests/28-cluster-shards.tcl b/tests/cluster/tests/28-cluster-shards.tcl
index fe794f2b7..9894d4b33 100644
--- a/tests/cluster/tests/28-cluster-shards.tcl
+++ b/tests/cluster/tests/28-cluster-shards.tcl
@@ -182,4 +182,17 @@ test "Test the replica reports a loading state while it's loading" {
# Final sanity, the replica agrees it is online.
assert_equal "online" [dict get [get_node_info_from_shard $replica_cluster_id $replica_id "node"] health]
-}
\ No newline at end of file
+}
+
+test "Regression test for a crash when calling SHARDS during handshake" {
+ # Reset and forget a node, so we can use it to establish handshaking connections
+ set id [R 19 CLUSTER MYID]
+ R 19 CLUSTER RESET HARD
+ for {set i 0} {$i < 19} {incr i} {
+ R $i CLUSTER FORGET $id
+ }
+ R 19 cluster meet 127.0.0.1 [get_instance_attrib redis 0 port]
+ # This line would previously crash, since all the outbound
+ # connections were in handshake state.
+ R 19 CLUSTER SHARDS
+}