summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorBar Shaul <88437685+barshaul@users.noreply.github.com>2022-01-10 13:03:48 +0200
committerGitHub <noreply@github.com>2022-01-10 13:03:48 +0200
commit80f1d2f3f056f5e4e81796158dd29b0799f54dc7 (patch)
treee1a487095372bbbec35b2ef822188096c3ce44c3
parenta5b4dcbb414bdc51ad50439bf82ec39055084a09 (diff)
downloadredis-py-80f1d2f3f056f5e4e81796158dd29b0799f54dc7.tar.gz
Clusters should optionally require full slot coverage (#1845)
-rw-r--r--redis/cluster.py75
-rw-r--r--tests/test_cluster.py86
2 files changed, 44 insertions, 117 deletions
diff --git a/redis/cluster.py b/redis/cluster.py
index ec75274..5464723 100644
--- a/redis/cluster.py
+++ b/redis/cluster.py
@@ -387,8 +387,7 @@ class RedisCluster(RedisClusterCommands):
port=6379,
startup_nodes=None,
cluster_error_retry_attempts=3,
- require_full_coverage=True,
- skip_full_coverage_check=False,
+ require_full_coverage=False,
reinitialize_steps=10,
read_from_replicas=False,
url=None,
@@ -404,16 +403,15 @@ class RedisCluster(RedisClusterCommands):
:port: 'int'
Can be used to point to a startup node
:require_full_coverage: 'bool'
- If set to True, as it is by default, all slots must be covered.
- If set to False and not all slots are covered, the instance
- creation will succeed only if 'cluster-require-full-coverage'
- configuration is set to 'no' in all of the cluster's nodes.
- Otherwise, RedisClusterException will be thrown.
- :skip_full_coverage_check: 'bool'
- If require_full_coverage is set to False, a check of
- cluster-require-full-coverage config will be executed against all
- nodes. Set skip_full_coverage_check to True to skip this check.
- Useful for clusters without the CONFIG command (like ElastiCache)
+ When set to False (default value): the client will not require a
+ full coverage of the slots. However, if not all slots are covered,
+ and at least one node has 'cluster-require-full-coverage' set to
+ 'yes', the server will throw a ClusterDownError for some key-based
+ commands. See -
+ https://redis.io/topics/cluster-tutorial#redis-cluster-configuration-parameters
+ When set to True: all slots must be covered to construct the
+ cluster client. If not all slots are covered, RedisClusterException
+ will be thrown.
:read_from_replicas: 'bool'
Enable read from replicas in READONLY mode. You can read possibly
stale data.
@@ -510,7 +508,6 @@ class RedisCluster(RedisClusterCommands):
startup_nodes=startup_nodes,
from_url=from_url,
require_full_coverage=require_full_coverage,
- skip_full_coverage_check=skip_full_coverage_check,
**kwargs,
)
@@ -1111,8 +1108,7 @@ class NodesManager:
self,
startup_nodes,
from_url=False,
- require_full_coverage=True,
- skip_full_coverage_check=False,
+ require_full_coverage=False,
lock=None,
**kwargs,
):
@@ -1123,7 +1119,6 @@ class NodesManager:
self.populate_startup_nodes(startup_nodes)
self.from_url = from_url
self._require_full_coverage = require_full_coverage
- self._skip_full_coverage_check = skip_full_coverage_check
self._moved_exception = None
self.connection_kwargs = kwargs
self.read_load_balancer = LoadBalancer()
@@ -1249,32 +1244,6 @@ class NodesManager:
for n in nodes:
self.startup_nodes[n.name] = n
- def cluster_require_full_coverage(self, cluster_nodes):
- """
- if exists 'cluster-require-full-coverage no' config on redis servers,
- then even all slots are not covered, cluster still will be able to
- respond
- """
-
- def node_require_full_coverage(node):
- try:
- return (
- "yes"
- in node.redis_connection.config_get(
- "cluster-require-full-coverage"
- ).values()
- )
- except ConnectionError:
- return False
- except Exception as e:
- raise RedisClusterException(
- 'ERROR sending "config get cluster-require-full-coverage"'
- f" command to redis server: {node.name}, {e}"
- )
-
- # at least one node should have cluster-require-full-coverage yes
- return any(node_require_full_coverage(node) for node in cluster_nodes.values())
-
def check_slots_coverage(self, slots_cache):
# Validate if all slots are covered or if we should try next
# startup node
@@ -1450,29 +1419,9 @@ class NodesManager:
# isn't a full coverage
raise RedisClusterException(
f"All slots are not covered after query all startup_nodes. "
- f"{len(self.slots_cache)} of {REDIS_CLUSTER_HASH_SLOTS} "
+ f"{len(tmp_slots)} of {REDIS_CLUSTER_HASH_SLOTS} "
f"covered..."
)
- elif not fully_covered and not self._require_full_coverage:
- # The user set require_full_coverage to False.
- # In case of full coverage requirement in the cluster's Redis
- # configurations, we will raise an exception. Otherwise, we may
- # continue with partial coverage.
- # see Redis Cluster configuration parameters in
- # https://redis.io/topics/cluster-tutorial
- if (
- not self._skip_full_coverage_check
- and self.cluster_require_full_coverage(tmp_nodes_cache)
- ):
- raise RedisClusterException(
- "Not all slots are covered but the cluster's "
- "configuration requires full coverage. Set "
- "cluster-require-full-coverage configuration to no on "
- "all of the cluster nodes if you wish the cluster to "
- "be able to serve without being fully covered."
- f"{len(self.slots_cache)} of {REDIS_CLUSTER_HASH_SLOTS} "
- f"covered..."
- )
# Set the tmp variables to the real variables
self.nodes_cache = tmp_nodes_cache
diff --git a/tests/test_cluster.py b/tests/test_cluster.py
index 496ed98..90f52d4 100644
--- a/tests/test_cluster.py
+++ b/tests/test_cluster.py
@@ -28,6 +28,7 @@ from redis.exceptions import (
NoPermissionError,
RedisClusterException,
RedisError,
+ ResponseError,
)
from redis.utils import str_if_bytes
from tests.test_pubsub import wait_for_message
@@ -628,6 +629,32 @@ class TestRedisClusterObj:
assert replica.server_type == REPLICA
assert replica in slot_nodes
+ def test_not_require_full_coverage_cluster_down_error(self, r):
+ """
+ When require_full_coverage is set to False (default client config) and not
+ all slots are covered, if one of the nodes has 'cluster-require-full-coverage'
+ config set to 'yes', some key-based commands should throw ClusterDownError
+ """
+ node = r.get_node_from_key("foo")
+ missing_slot = r.keyslot("foo")
+ assert r.set("foo", "bar") is True
+ try:
+ assert all(r.cluster_delslots(missing_slot))
+ with pytest.raises(ClusterDownError):
+ r.exists("foo")
+ finally:
+ try:
+ # Add back the missing slot
+ assert r.cluster_addslots(node, missing_slot) is True
+ # Make sure we are not getting ClusterDownError anymore
+ assert r.exists("foo") == 1
+ except ResponseError as e:
+ if f"Slot {missing_slot} is already busy" in str(e):
+ # It can happen if the test failed to delete this slot
+ pass
+ else:
+ raise e
+
@pytest.mark.onlycluster
class TestClusterRedisCommands:
@@ -1849,39 +1876,19 @@ class TestNodesManager:
]
with pytest.raises(RedisClusterException) as ex:
get_mocked_redis_client(
- host=default_host, port=default_port, cluster_slots=cluster_slots
- )
- assert str(ex.value).startswith(
- "All slots are not covered after query all startup_nodes."
- )
-
- def test_init_slots_cache_not_require_full_coverage_error(self):
- """
- When require_full_coverage is set to False and not all slots are
- covered, if one of the nodes has 'cluster-require-full-coverage'
- config set to 'yes' the cluster initialization should fail
- """
- # Missing slot 5460
- cluster_slots = [
- [0, 5459, ["127.0.0.1", 7000], ["127.0.0.1", 7003]],
- [5461, 10922, ["127.0.0.1", 7001], ["127.0.0.1", 7004]],
- [10923, 16383, ["127.0.0.1", 7002], ["127.0.0.1", 7005]],
- ]
-
- with pytest.raises(RedisClusterException):
- get_mocked_redis_client(
host=default_host,
port=default_port,
cluster_slots=cluster_slots,
- require_full_coverage=False,
- coverage_result="yes",
+ require_full_coverage=True,
)
+ assert str(ex.value).startswith(
+ "All slots are not covered after query all startup_nodes."
+ )
def test_init_slots_cache_not_require_full_coverage_success(self):
"""
When require_full_coverage is set to False and not all slots are
- covered, if all of the nodes has 'cluster-require_full_coverage'
- config set to 'no' the cluster initialization should succeed
+ covered the cluster client initialization should succeed
"""
# Missing slot 5460
cluster_slots = [
@@ -1895,39 +1902,10 @@ class TestNodesManager:
port=default_port,
cluster_slots=cluster_slots,
require_full_coverage=False,
- coverage_result="no",
)
assert 5460 not in rc.nodes_manager.slots_cache
- def test_init_slots_cache_not_require_full_coverage_skips_check(self):
- """
- Test that when require_full_coverage is set to False and
- skip_full_coverage_check is set to true, the cluster initialization
- succeed without checking the nodes' Redis configurations
- """
- # Missing slot 5460
- cluster_slots = [
- [0, 5459, ["127.0.0.1", 7000], ["127.0.0.1", 7003]],
- [5461, 10922, ["127.0.0.1", 7001], ["127.0.0.1", 7004]],
- [10923, 16383, ["127.0.0.1", 7002], ["127.0.0.1", 7005]],
- ]
-
- with patch.object(
- NodesManager, "cluster_require_full_coverage"
- ) as conf_check_mock:
- rc = get_mocked_redis_client(
- host=default_host,
- port=default_port,
- cluster_slots=cluster_slots,
- require_full_coverage=False,
- skip_full_coverage_check=True,
- coverage_result="no",
- )
-
- assert conf_check_mock.called is False
- assert 5460 not in rc.nodes_manager.slots_cache
-
def test_init_slots_cache(self):
"""
Test that slots cache can in initialized and all slots are covered