Diffstat (limited to 'tests')
-rw-r--r--  tests/conftest.py             101
-rw-r--r--  tests/test_cluster.py        2482
-rw-r--r--  tests/test_command_parser.py   62
-rw-r--r--  tests/test_commands.py        148
-rw-r--r--  tests/test_connection.py        1
-rw-r--r--  tests/test_connection_pool.py   5
-rw-r--r--  tests/test_helpers.py          26
-rw-r--r--  tests/test_json.py             54
-rw-r--r--  tests/test_lock.py              2
-rw-r--r--  tests/test_monitor.py           2
-rw-r--r--  tests/test_pipeline.py         14
-rw-r--r--  tests/test_pubsub.py           13
-rw-r--r--  tests/test_scripting.py         1
-rw-r--r--  tests/test_search.py          365
-rw-r--r--  tests/test_sentinel.py         15
-rw-r--r--  tests/test_timeseries.py        1
16 files changed, 3186 insertions, 106 deletions
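With this change the test session records cluster_enabled from INFO and, when it is set, waits for the configured number of nodes before any test runs. Pointing the suite at a cluster therefore looks roughly like pytest --redis-url=redis://localhost:16379/0 --redis-cluster-nodes=6 (the URL and node count are only examples; both options are defined in conftest.py below).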
diff --git a/tests/conftest.py b/tests/conftest.py
index 31d3fbd..ddc0834 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -3,8 +3,10 @@ from redis.retry import Retry
import pytest
import random
import redis
+import time
from distutils.version import LooseVersion
from redis.connection import parse_url
+from redis.exceptions import RedisClusterException
from unittest.mock import Mock
from urllib.parse import urlparse
@@ -13,6 +15,7 @@ REDIS_INFO = {}
default_redis_url = "redis://localhost:6379/9"
default_redismod_url = "redis://localhost:36379"
+default_cluster_nodes = 6
def pytest_addoption(parser):
@@ -27,11 +30,18 @@ def pytest_addoption(parser):
" with loaded modules,"
" defaults to `%(default)s`")
+ parser.addoption('--redis-cluster-nodes', default=default_cluster_nodes,
+ action="store",
+ help="The number of cluster nodes that need to be "
+ "available before the test can start,"
+ " defaults to `%(default)s`")
+
def _get_info(redis_url):
client = redis.Redis.from_url(redis_url)
info = client.info()
- if 'dping' in client.__commands__:
+ cmds = [command.upper() for command in client.command().keys()]
+ if 'DPING' in cmds:
info["enterprise"] = True
else:
info["enterprise"] = False
@@ -44,8 +54,10 @@ def pytest_sessionstart(session):
info = _get_info(redis_url)
version = info["redis_version"]
arch_bits = info["arch_bits"]
+ cluster_enabled = info["cluster_enabled"]
REDIS_INFO["version"] = version
REDIS_INFO["arch_bits"] = arch_bits
+ REDIS_INFO["cluster_enabled"] = cluster_enabled
REDIS_INFO["enterprise"] = info["enterprise"]
# module info, if the second redis is running
@@ -58,6 +70,42 @@ def pytest_sessionstart(session):
except KeyError:
pass
+ if cluster_enabled:
+ cluster_nodes = session.config.getoption("--redis-cluster-nodes")
+ wait_for_cluster_creation(redis_url, cluster_nodes)
+
+
+def wait_for_cluster_creation(redis_url, cluster_nodes, timeout=20):
+ """
+ Waits for the cluster creation to complete.
+ As soon as all :cluster_nodes: nodes become available, the cluster will be
+ considered ready.
+ :param redis_url: the cluster's url, e.g. redis://localhost:16379/0
+ :param cluster_nodes: The number of nodes in the cluster
+ :param timeout: the amount of time to wait (in seconds)
+ """
+ now = time.time()
+ end_time = now + timeout
+ client = None
+ print("Waiting for {0} cluster nodes to become available".
+ format(cluster_nodes))
+ while now < end_time:
+ try:
+ client = redis.RedisCluster.from_url(redis_url)
+ if len(client.get_nodes()) == int(cluster_nodes):
+ print("All nodes are available!")
+ break
+ except RedisClusterException:
+ pass
+ time.sleep(1)
+ now = time.time()
+ if now >= end_time:
+ available_nodes = 0 if client is None else len(client.get_nodes())
+ raise RedisClusterException(
+ "The cluster did not become available after {0} seconds. "
+ "Only {1} nodes out of {2} are available".format(
+ timeout, available_nodes, cluster_nodes))
+
def skip_if_server_version_lt(min_version):
redis_version = REDIS_INFO["version"]
@@ -101,13 +149,12 @@ def skip_ifmodversion_lt(min_version: str, module_name: str):
def skip_if_redis_enterprise(func):
check = REDIS_INFO["enterprise"] is True
- return pytest.mark.skipif(check, reason="Redis enterprise"
- )
+ return pytest.mark.skipif(check, reason="Redis enterprise")
def skip_ifnot_redis_enterprise(func):
check = REDIS_INFO["enterprise"] is False
- return pytest.mark.skipif(check, reason="Redis enterprise")
+ return pytest.mark.skipif(check, reason="Not running in redis enterprise")
def _get_client(cls, request, single_connection_client=True, flushdb=True,
@@ -124,27 +171,47 @@ def _get_client(cls, request, single_connection_client=True, flushdb=True,
redis_url = request.config.getoption("--redis-url")
else:
redis_url = from_url
- url_options = parse_url(redis_url)
- url_options.update(kwargs)
- pool = redis.ConnectionPool(**url_options)
- client = cls(connection_pool=pool)
+ cluster_mode = REDIS_INFO["cluster_enabled"]
+ if not cluster_mode:
+ url_options = parse_url(redis_url)
+ url_options.update(kwargs)
+ pool = redis.ConnectionPool(**url_options)
+ client = cls(connection_pool=pool)
+ else:
+ client = redis.RedisCluster.from_url(redis_url, **kwargs)
+ single_connection_client = False
if single_connection_client:
client = client.client()
if request:
def teardown():
- if flushdb:
- try:
- client.flushdb()
- except redis.ConnectionError:
- # handle cases where a test disconnected a client
- # just manually retry the flushdb
- client.flushdb()
- client.close()
- client.connection_pool.disconnect()
+ if not cluster_mode:
+ if flushdb:
+ try:
+ client.flushdb()
+ except redis.ConnectionError:
+ # handle cases where a test disconnected a client
+ # just manually retry the flushdb
+ client.flushdb()
+ client.close()
+ client.connection_pool.disconnect()
+ else:
+ cluster_teardown(client, flushdb)
request.addfinalizer(teardown)
return client
+def cluster_teardown(client, flushdb):
+ if flushdb:
+ try:
+ client.flushdb(target_nodes='primaries')
+ except redis.ConnectionError:
+ # handle cases where a test disconnected a client
+ # just manually retry the flushdb
+ client.flushdb(target_nodes='primaries')
+ client.close()
+ client.disconnect_connection_pools()
+
+
# specifically set to the zero database, because creating
# an index on db != 0 raises a ResponseError in redis
@pytest.fixture()
diff --git a/tests/test_cluster.py b/tests/test_cluster.py
new file mode 100644
index 0000000..071cb7d
--- /dev/null
+++ b/tests/test_cluster.py
@@ -0,0 +1,2482 @@
+import binascii
+import datetime
+import pytest
+import warnings
+
+from time import sleep
+from tests.test_pubsub import wait_for_message
+from unittest.mock import call, patch, DEFAULT, Mock
+from redis import Redis
+from redis.cluster import get_node_name, ClusterNode, \
+ RedisCluster, NodesManager, PRIMARY, REDIS_CLUSTER_HASH_SLOTS, REPLICA
+from redis.commands import CommandsParser
+from redis.connection import Connection
+from redis.utils import str_if_bytes
+from redis.exceptions import (
+ AskError,
+ ClusterDownError,
+ DataError,
+ MovedError,
+ RedisClusterException,
+ RedisError
+)
+
+from redis.crc import key_slot
+from .conftest import (
+ _get_client,
+ skip_if_server_version_lt,
+ skip_unless_arch_bits
+)
+
+default_host = "127.0.0.1"
+default_port = 7000
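+# default_cluster_slots mimics the shape of a CLUSTER SLOTS reply:
+# [start_slot, end_slot, [primary ip, port, node-id], [replica ip, port, node-id], ...]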
+default_cluster_slots = [
+ [
+ 0, 8191,
+ ['127.0.0.1', 7000, 'node_0'],
+ ['127.0.0.1', 7003, 'node_3'],
+ ],
+ [
+ 8192, 16383,
+ ['127.0.0.1', 7001, 'node_1'],
+ ['127.0.0.1', 7002, 'node_2']
+ ]
+]
+
+
+@pytest.fixture()
+def slowlog(request, r):
+ """
+ Set the slowlog threshold to 0, and the
+ max length to 128. This will force every
+ command into the slowlog and allow us
+ to test it
+ """
+ # Save old values
+ current_config = r.config_get(
+ target_nodes=r.get_primaries()[0])
+ old_slower_than_value = current_config['slowlog-log-slower-than']
+ old_max_length_value = current_config['slowlog-max-len']
+
+ # Function to restore the old values
+ def cleanup():
+ r.config_set('slowlog-log-slower-than', old_slower_than_value)
+ r.config_set('slowlog-max-len', old_max_length_value)
+
+ request.addfinalizer(cleanup)
+
+ # Set the new values
+ r.config_set('slowlog-log-slower-than', 0)
+ r.config_set('slowlog-max-len', 128)
+
+
+def get_mocked_redis_client(func=None, *args, **kwargs):
+ """
+ Return a stable RedisCluster object with a deterministic nodes-and-slots
+ setup, to avoid the problem of different IP addresses on different
+ installations and machines.
+ """
+ cluster_slots = kwargs.pop('cluster_slots', default_cluster_slots)
+ coverage_res = kwargs.pop('coverage_result', 'yes')
+ with patch.object(Redis, 'execute_command') as execute_command_mock:
+ def execute_command(*_args, **_kwargs):
+ if _args[0] == 'CLUSTER SLOTS':
+ mock_cluster_slots = cluster_slots
+ return mock_cluster_slots
+ elif _args[0] == 'COMMAND':
+ return {'get': [], 'set': []}
+ elif _args[1] == 'cluster-require-full-coverage':
+ return {'cluster-require-full-coverage': coverage_res}
+ elif func is not None:
+ return func(*args, **kwargs)
+ else:
+ return execute_command_mock(*_args, **_kwargs)
+
+ execute_command_mock.side_effect = execute_command
+
+ with patch.object(CommandsParser, 'initialize',
+ autospec=True) as cmd_parser_initialize:
+
+ def cmd_init_mock(self, r):
+ self.commands = {'get': {'name': 'get', 'arity': 2,
+ 'flags': ['readonly',
+ 'fast'],
+ 'first_key_pos': 1,
+ 'last_key_pos': 1,
+ 'step_count': 1}}
+
+ cmd_parser_initialize.side_effect = cmd_init_mock
+
+ return RedisCluster(*args, **kwargs)
+
+
+def mock_node_resp(node, response):
+ connection = Mock()
+ connection.read_response.return_value = response
+ node.redis_connection.connection = connection
+ return node
+
+
+def mock_node_resp_func(node, func):
+ connection = Mock()
+ connection.read_response.side_effect = func
+ node.redis_connection.connection = connection
+ return node
+
+
+def mock_all_nodes_resp(rc, response):
+ for node in rc.get_nodes():
+ mock_node_resp(node, response)
+ return rc
+
+
+def find_node_ip_based_on_port(cluster_client, port):
+ for node in cluster_client.get_nodes():
+ if node.port == port:
+ return node.host
+
+
+def moved_redirection_helper(request, failover=False):
+ """
+ Test that the client handles a MOVED response.
+ Redirection after a failover means that the redirection address is of a
+ replica that was promoted to a primary.
+
+ At first call it should return a MOVED ResponseError that will point
+ the client to the next server it should talk to.
+
+ Verify that:
+ 1. it tries to talk to the redirected node
+ 2. it updates the slot's primary to the redirected node
+
+ For a failover, also verify:
+ 3. the redirected node's server type updated to 'primary'
+ 4. the server type of the previous slot owner updated to 'replica'
+ """
+ rc = _get_client(RedisCluster, request, flushdb=False)
+ slot = 12182
+ redirect_node = None
+ # Get the current primary that holds this slot
+ prev_primary = rc.nodes_manager.get_node_from_slot(slot)
+ if failover:
+ if len(rc.nodes_manager.slots_cache[slot]) < 2:
+ warnings.warn("Skipping this test since it requires to have a "
+ "replica")
+ return
+ redirect_node = rc.nodes_manager.slots_cache[slot][1]
+ else:
+ # Use one of the primaries to be the redirected node
+ redirect_node = rc.get_primaries()[0]
+ r_host = redirect_node.host
+ r_port = redirect_node.port
+ with patch.object(Redis, 'parse_response') as parse_response:
+ def moved_redirect_effect(connection, *args, **options):
+ def ok_response(connection, *args, **options):
+ assert connection.host == r_host
+ assert connection.port == r_port
+
+ return "MOCK_OK"
+
+ parse_response.side_effect = ok_response
+ raise MovedError("{0} {1}:{2}".format(slot, r_host, r_port))
+
+ parse_response.side_effect = moved_redirect_effect
+ assert rc.execute_command("SET", "foo", "bar") == "MOCK_OK"
+ slot_primary = rc.nodes_manager.slots_cache[slot][0]
+ assert slot_primary == redirect_node
+ if failover:
+ assert rc.get_node(host=r_host, port=r_port).server_type == PRIMARY
+ assert prev_primary.server_type == REPLICA
+
+
+@pytest.mark.onlycluster
+class TestRedisClusterObj:
+ """
+ Tests for the RedisCluster class
+ """
+
+ def test_host_port_startup_node(self):
+ """
+ Test that it is possible to use host & port arguments as startup node
+ args
+ """
+ cluster = get_mocked_redis_client(host=default_host, port=default_port)
+ assert cluster.get_node(host=default_host,
+ port=default_port) is not None
+
+ def test_startup_nodes(self):
+ """
+ Test that it is possible to use startup_nodes
+ argument to init the cluster
+ """
+ port_1 = 7000
+ port_2 = 7001
+ startup_nodes = [ClusterNode(default_host, port_1),
+ ClusterNode(default_host, port_2)]
+ cluster = get_mocked_redis_client(startup_nodes=startup_nodes)
+ assert cluster.get_node(host=default_host, port=port_1) is not None \
+ and cluster.get_node(host=default_host, port=port_2) is not None
+
+ def test_empty_startup_nodes(self):
+ """
+ Test that an exception is raised when providing an empty startup_nodes list
+ """
+ with pytest.raises(RedisClusterException) as ex:
+ RedisCluster(startup_nodes=[])
+
+ assert str(ex.value).startswith(
+ "RedisCluster requires at least one node to discover the "
+ "cluster"), str_if_bytes(ex.value)
+
+ def test_from_url(self, r):
+ redis_url = "redis://{0}:{1}/0".format(default_host, default_port)
+ with patch.object(RedisCluster, 'from_url') as from_url:
+ def from_url_mocked(_url, **_kwargs):
+ return get_mocked_redis_client(url=_url, **_kwargs)
+
+ from_url.side_effect = from_url_mocked
+ cluster = RedisCluster.from_url(redis_url)
+ assert cluster.get_node(host=default_host,
+ port=default_port) is not None
+
+ def test_execute_command_errors(self, r):
+ """
+ Test that an exception is raised if no key is provided.
+ """
+ with pytest.raises(RedisClusterException) as ex:
+ r.execute_command("GET")
+ assert str(ex.value).startswith("No way to dispatch this command to "
+ "Redis Cluster. Missing key.")
+
+ def test_execute_command_node_flag_primaries(self, r):
+ """
+ Test command execution with nodes flag PRIMARIES
+ """
+ primaries = r.get_primaries()
+ replicas = r.get_replicas()
+ mock_all_nodes_resp(r, 'PONG')
+ assert r.ping(RedisCluster.PRIMARIES) is True
+ for primary in primaries:
+ conn = primary.redis_connection.connection
+ assert conn.read_response.called is True
+ for replica in replicas:
+ conn = replica.redis_connection.connection
+ assert conn.read_response.called is not True
+
+ def test_execute_command_node_flag_replicas(self, r):
+ """
+ Test command execution with nodes flag REPLICAS
+ """
+ replicas = r.get_replicas()
+ if not replicas:
+ r = get_mocked_redis_client(host=default_host, port=default_port)
+ primaries = r.get_primaries()
+ mock_all_nodes_resp(r, 'PONG')
+ assert r.ping(RedisCluster.REPLICAS) is True
+ for replica in replicas:
+ conn = replica.redis_connection.connection
+ assert conn.read_response.called is True
+ for primary in primaries:
+ conn = primary.redis_connection.connection
+ assert conn.read_response.called is not True
+
+ def test_execute_command_node_flag_all_nodes(self, r):
+ """
+ Test command execution with nodes flag ALL_NODES
+ """
+ mock_all_nodes_resp(r, 'PONG')
+ assert r.ping(RedisCluster.ALL_NODES) is True
+ for node in r.get_nodes():
+ conn = node.redis_connection.connection
+ assert conn.read_response.called is True
+
+ def test_execute_command_node_flag_random(self, r):
+ """
+ Test command execution with nodes flag RANDOM
+ """
+ mock_all_nodes_resp(r, 'PONG')
+ assert r.ping(RedisCluster.RANDOM) is True
+ called_count = 0
+ for node in r.get_nodes():
+ conn = node.redis_connection.connection
+ if conn.read_response.called is True:
+ called_count += 1
+ assert called_count == 1
+
+ def test_execute_command_default_node(self, r):
+ """
+ Test that a command executed without a node flag runs on the
+ default node
+ """
+ def_node = r.get_default_node()
+ mock_node_resp(def_node, 'PONG')
+ assert r.ping() is True
+ conn = def_node.redis_connection.connection
+ assert conn.read_response.called
+
+ def test_ask_redirection(self, r):
+ """
+ Test that the client handles an ASK response.
+
+ At first call it should return a ASK ResponseError that will point
+ the client to the next server it should talk to.
+
+ Important thing to verify is that it tries to talk to the second node.
+ """
+ redirect_node = r.get_nodes()[0]
+ with patch.object(Redis, 'parse_response') as parse_response:
+ def ask_redirect_effect(connection, *args, **options):
+ def ok_response(connection, *args, **options):
+ assert connection.host == redirect_node.host
+ assert connection.port == redirect_node.port
+
+ return "MOCK_OK"
+
+ parse_response.side_effect = ok_response
+ raise AskError("12182 {0}:{1}".format(redirect_node.host,
+ redirect_node.port))
+
+ parse_response.side_effect = ask_redirect_effect
+
+ assert r.execute_command("SET", "foo", "bar") == "MOCK_OK"
+
+ def test_moved_redirection(self, request):
+ """
+ Test that the client handles MOVED response.
+ """
+ moved_redirection_helper(request, failover=False)
+
+ def test_moved_redirection_after_failover(self, request):
+ """
+ Test that the client handles MOVED response after a failover.
+ """
+ moved_redirection_helper(request, failover=True)
+
+ def test_refresh_using_specific_nodes(self, request):
+ """
+ Test making calls on specific nodes when the cluster has failed over to
+ another node
+ """
+ node_7006 = ClusterNode(host=default_host, port=7006,
+ server_type=PRIMARY)
+ node_7007 = ClusterNode(host=default_host, port=7007,
+ server_type=PRIMARY)
+ with patch.object(Redis, 'parse_response') as parse_response:
+ with patch.object(NodesManager, 'initialize', autospec=True) as \
+ initialize:
+ with patch.multiple(Connection,
+ send_command=DEFAULT,
+ connect=DEFAULT,
+ can_read=DEFAULT) as mocks:
+ # simulate 7006 as a failed node
+ def parse_response_mock(connection, command_name,
+ **options):
+ if connection.port == 7006:
+ parse_response.failed_calls += 1
+ raise ClusterDownError(
+ 'CLUSTERDOWN The cluster is '
+ 'down. Use CLUSTER INFO for '
+ 'more information')
+ elif connection.port == 7007:
+ parse_response.successful_calls += 1
+
+ def initialize_mock(self):
+ # start with all slots mapped to 7006
+ self.nodes_cache = {node_7006.name: node_7006}
+ self.default_node = node_7006
+ self.slots_cache = {}
+
+ for i in range(0, 16383):
+ self.slots_cache[i] = [node_7006]
+
+ # After the first connection fails, a reinitialize
+ # should follow the cluster to 7007
+ def map_7007(self):
+ self.nodes_cache = {
+ node_7007.name: node_7007}
+ self.default_node = node_7007
+ self.slots_cache = {}
+
+ for i in range(0, 16383):
+ self.slots_cache[i] = [node_7007]
+
+ # Change initialize side effect for the second call
+ initialize.side_effect = map_7007
+
+ parse_response.side_effect = parse_response_mock
+ parse_response.successful_calls = 0
+ parse_response.failed_calls = 0
+ initialize.side_effect = initialize_mock
+ mocks['can_read'].return_value = False
+ mocks['send_command'].return_value = "MOCK_OK"
+ mocks['connect'].return_value = None
+ with patch.object(CommandsParser, 'initialize',
+ autospec=True) as cmd_parser_initialize:
+
+ def cmd_init_mock(self, r):
+ self.commands = {'get': {'name': 'get', 'arity': 2,
+ 'flags': ['readonly',
+ 'fast'],
+ 'first_key_pos': 1,
+ 'last_key_pos': 1,
+ 'step_count': 1}}
+
+ cmd_parser_initialize.side_effect = cmd_init_mock
+
+ rc = _get_client(
+ RedisCluster, request, flushdb=False)
+ assert len(rc.get_nodes()) == 1
+ assert rc.get_node(node_name=node_7006.name) is not \
+ None
+
+ rc.get('foo')
+
+ # Cluster should now point to 7007, and there should be
+ # one failed and one successful call
+ assert len(rc.get_nodes()) == 1
+ assert rc.get_node(node_name=node_7007.name) is not \
+ None
+ assert rc.get_node(node_name=node_7006.name) is None
+ assert parse_response.failed_calls == 1
+ assert parse_response.successful_calls == 1
+
+ def test_reading_from_replicas_in_round_robin(self):
+ with patch.multiple(Connection, send_command=DEFAULT,
+ read_response=DEFAULT, _connect=DEFAULT,
+ can_read=DEFAULT, on_connect=DEFAULT) as mocks:
+ with patch.object(Redis, 'parse_response') as parse_response:
+ def parse_response_mock_first(connection, *args, **options):
+ # Primary
+ assert connection.port == 7001
+ parse_response.side_effect = parse_response_mock_second
+ return "MOCK_OK"
+
+ def parse_response_mock_second(connection, *args, **options):
+ # Replica
+ assert connection.port == 7002
+ parse_response.side_effect = parse_response_mock_third
+ return "MOCK_OK"
+
+ def parse_response_mock_third(connection, *args, **options):
+ # Primary
+ assert connection.port == 7001
+ return "MOCK_OK"
+
+ # We don't need to create a real cluster connection but we
+ # do want RedisCluster.on_connect function to get called,
+ # so we'll mock some of the Connection's functions to allow it
+ parse_response.side_effect = parse_response_mock_first
+ mocks['send_command'].return_value = True
+ mocks['read_response'].return_value = "OK"
+ mocks['_connect'].return_value = True
+ mocks['can_read'].return_value = False
+ mocks['on_connect'].return_value = True
+
+ # Create a cluster that reads from replicas
+ read_cluster = get_mocked_redis_client(host=default_host,
+ port=default_port,
+ read_from_replicas=True)
+ assert read_cluster.read_from_replicas is True
+ # Check that we read from the slot's nodes in a round robin
+ # manner.
+ # 'foo' belongs to slot 12182 and the slot's nodes are:
+ # [(127.0.0.1,7001,primary), (127.0.0.1,7002,replica)]
+ read_cluster.get("foo")
+ read_cluster.get("foo")
+ read_cluster.get("foo")
+ mocks['send_command'].assert_has_calls([call('READONLY')])
+
+ def test_keyslot(self, r):
+ """
+ Test that the method computes the correct slot in all supported cases
+ """
+ assert r.keyslot("foo") == 12182
+ assert r.keyslot("{foo}bar") == 12182
+ assert r.keyslot("{foo}") == 12182
+ assert r.keyslot(1337) == 4314
+
+ assert r.keyslot(125) == r.keyslot(b"125")
+ assert r.keyslot(125) == r.keyslot("\x31\x32\x35")
+ assert r.keyslot("大奖") == r.keyslot(b"\xe5\xa4\xa7\xe5\xa5\x96")
+ assert r.keyslot(u"大奖") == r.keyslot(b"\xe5\xa4\xa7\xe5\xa5\x96")
+ assert r.keyslot(1337.1234) == r.keyslot("1337.1234")
+ assert r.keyslot(1337) == r.keyslot("1337")
+ assert r.keyslot(b"abc") == r.keyslot("abc")
+
+ def test_get_node_name(self):
+ assert get_node_name(default_host, default_port) == \
+ "{0}:{1}".format(default_host, default_port)
+
+ def test_all_nodes(self, r):
+ """
+ Set a list of nodes and it should be possible to iterate over all of them
+ """
+ nodes = [node for node in r.nodes_manager.nodes_cache.values()]
+
+ for i, node in enumerate(r.get_nodes()):
+ assert node in nodes
+
+ def test_all_nodes_masters(self, r):
+ """
+ Set a list of nodes with a random primaries/replicas config and it
+ should be possible to iterate over all of them.
+ """
+ nodes = [node for node in r.nodes_manager.nodes_cache.values()
+ if node.server_type == PRIMARY]
+
+ for node in r.get_primaries():
+ assert node in nodes
+
+ def test_cluster_down_overreaches_retry_attempts(self):
+ """
+ When ClusterDownError is thrown, test that we retry executing the
+ command as many times as configured in cluster_error_retry_attempts
+ and then raise the exception
+ """
+ with patch.object(RedisCluster, '_execute_command') as execute_command:
+ def raise_cluster_down_error(target_node, *args, **kwargs):
+ execute_command.failed_calls += 1
+ raise ClusterDownError(
+ 'CLUSTERDOWN The cluster is down. Use CLUSTER INFO for '
+ 'more information')
+
+ execute_command.side_effect = raise_cluster_down_error
+
+ rc = get_mocked_redis_client(host=default_host, port=default_port)
+
+ with pytest.raises(ClusterDownError):
+ rc.get("bar")
+ assert execute_command.failed_calls == \
+ rc.cluster_error_retry_attempts
+
+ def test_connection_error_overreaches_retry_attempts(self):
+ """
+ When ConnectionError is thrown, test that we retry executing the
+ command as many times as configured in cluster_error_retry_attempts
+ and then raise the exception
+ """
+ with patch.object(RedisCluster, '_execute_command') as execute_command:
+ def raise_conn_error(target_node, *args, **kwargs):
+ execute_command.failed_calls += 1
+ raise ConnectionError()
+
+ execute_command.side_effect = raise_conn_error
+
+ rc = get_mocked_redis_client(host=default_host, port=default_port)
+
+ with pytest.raises(ConnectionError):
+ rc.get("bar")
+ assert execute_command.failed_calls == \
+ rc.cluster_error_retry_attempts
+
+ def test_user_on_connect_function(self, request):
+ """
+ Test support in passing on_connect function by the user
+ """
+
+ def on_connect(connection):
+ assert connection is not None
+
+ mock = Mock(side_effect=on_connect)
+
+ _get_client(RedisCluster, request, redis_connect_func=mock)
+ assert mock.called is True
+
+ def test_set_default_node_success(self, r):
+ """
+ test successful replacement of the default cluster node
+ """
+ default_node = r.get_default_node()
+ # get a different node
+ new_def_node = None
+ for node in r.get_nodes():
+ if node != default_node:
+ new_def_node = node
+ break
+ assert r.set_default_node(new_def_node) is True
+ assert r.get_default_node() == new_def_node
+
+ def test_set_default_node_failure(self, r):
+ """
+ test failed replacement of the default cluster node
+ """
+ default_node = r.get_default_node()
+ new_def_node = ClusterNode('1.1.1.1', 1111)
+ assert r.set_default_node(None) is False
+ assert r.set_default_node(new_def_node) is False
+ assert r.get_default_node() == default_node
+
+ def test_get_node_from_key(self, r):
+ """
+ Test that get_node_from_key function returns the correct node
+ """
+ key = 'bar'
+ slot = r.keyslot(key)
+ slot_nodes = r.nodes_manager.slots_cache.get(slot)
+ primary = slot_nodes[0]
+ assert r.get_node_from_key(key, replica=False) == primary
+ replica = r.get_node_from_key(key, replica=True)
+ if replica is not None:
+ assert replica.server_type == REPLICA
+ assert replica in slot_nodes
+
+
+@pytest.mark.onlycluster
+class TestClusterRedisCommands:
+ """
+ Tests for RedisCluster unique commands
+ """
+
+ def test_case_insensitive_command_names(self, r):
+ assert r.cluster_response_callbacks['cluster addslots'] == \
+ r.cluster_response_callbacks['CLUSTER ADDSLOTS']
+
+ def test_get_and_set(self, r):
+ # get and set can't be tested independently of each other
+ assert r.get('a') is None
+ byte_string = b'value'
+ integer = 5
+ unicode_string = chr(3456) + 'abcd' + chr(3421)
+ assert r.set('byte_string', byte_string)
+ assert r.set('integer', 5)
+ assert r.set('unicode_string', unicode_string)
+ assert r.get('byte_string') == byte_string
+ assert r.get('integer') == str(integer).encode()
+ assert r.get('unicode_string').decode('utf-8') == unicode_string
+
+ def test_mget_nonatomic(self, r):
+ assert r.mget_nonatomic([]) == []
+ assert r.mget_nonatomic(['a', 'b']) == [None, None]
+ r['a'] = '1'
+ r['b'] = '2'
+ r['c'] = '3'
+
+ assert (r.mget_nonatomic('a', 'other', 'b', 'c') ==
+ [b'1', None, b'2', b'3'])
+
+ def test_mset_nonatomic(self, r):
+ d = {'a': b'1', 'b': b'2', 'c': b'3', 'd': b'4'}
+ assert r.mset_nonatomic(d)
+ for k, v in d.items():
+ assert r[k] == v
+
+ def test_config_set(self, r):
+ assert r.config_set('slowlog-log-slower-than', 0)
+
+ def test_cluster_config_resetstat(self, r):
+ r.ping(target_nodes='all')
+ all_info = r.info(target_nodes='all')
+ prior_commands_processed = -1
+ for node_info in all_info.values():
+ prior_commands_processed = node_info['total_commands_processed']
+ assert prior_commands_processed >= 1
+ r.config_resetstat(target_nodes='all')
+ all_info = r.info(target_nodes='all')
+ for node_info in all_info.values():
+ reset_commands_processed = node_info['total_commands_processed']
+ assert reset_commands_processed < prior_commands_processed
+
+ def test_client_setname(self, r):
+ node = r.get_random_node()
+ r.client_setname('redis_py_test', target_nodes=node)
+ client_name = r.client_getname(target_nodes=node)
+ assert client_name == 'redis_py_test'
+
+ def test_exists(self, r):
+ d = {'a': b'1', 'b': b'2', 'c': b'3', 'd': b'4'}
+ r.mset_nonatomic(d)
+ assert r.exists(*d.keys()) == len(d)
+
+ def test_delete(self, r):
+ d = {'a': b'1', 'b': b'2', 'c': b'3', 'd': b'4'}
+ r.mset_nonatomic(d)
+ assert r.delete(*d.keys()) == len(d)
+ assert r.delete(*d.keys()) == 0
+
+ def test_touch(self, r):
+ d = {'a': b'1', 'b': b'2', 'c': b'3', 'd': b'4'}
+ r.mset_nonatomic(d)
+ assert r.touch(*d.keys()) == len(d)
+
+ def test_unlink(self, r):
+ d = {'a': b'1', 'b': b'2', 'c': b'3', 'd': b'4'}
+ r.mset_nonatomic(d)
+ assert r.unlink(*d.keys()) == len(d)
+ # Unlink is non-blocking so we sleep before
+ # verifying the deletion
+ sleep(0.1)
+ assert r.unlink(*d.keys()) == 0
+
+ def test_pubsub_channels_merge_results(self, r):
+ nodes = r.get_nodes()
+ channels = []
+ pubsub_nodes = []
+ i = 0
+ for node in nodes:
+ channel = "foo{0}".format(i)
+ # We will create different pubsub clients where each one is
+ # connected to a different node
+ p = r.pubsub(node)
+ pubsub_nodes.append(p)
+ p.subscribe(channel)
+ b_channel = channel.encode('utf-8')
+ channels.append(b_channel)
+ # Assert that each node returns only the channel it subscribed to
+ sub_channels = node.redis_connection.pubsub_channels()
+ if not sub_channels:
+ # Try again after a short sleep
+ sleep(0.3)
+ sub_channels = node.redis_connection.pubsub_channels()
+ assert sub_channels == [b_channel]
+ i += 1
+ # Assert that the cluster's pubsub_channels function returns ALL of
+ # the cluster's channels
+ result = r.pubsub_channels(target_nodes='all')
+ result.sort()
+ assert result == channels
+
+ def test_pubsub_numsub_merge_results(self, r):
+ nodes = r.get_nodes()
+ pubsub_nodes = []
+ channel = "foo"
+ b_channel = channel.encode('utf-8')
+ for node in nodes:
+ # We will create different pubsub clients where each one is
+ # connected to a different node
+ p = r.pubsub(node)
+ pubsub_nodes.append(p)
+ p.subscribe(channel)
+ # Assert that each node returns that only one client is subscribed
+ sub_chann_num = node.redis_connection.pubsub_numsub(channel)
+ if sub_chann_num == [(b_channel, 0)]:
+ sleep(0.3)
+ sub_chann_num = node.redis_connection.pubsub_numsub(channel)
+ assert sub_chann_num == [(b_channel, 1)]
+ # Assert that the cluster's pubsub_numsub function returns ALL clients
+ # subscribed to this channel in the entire cluster
+ assert r.pubsub_numsub(channel, target_nodes='all') == \
+ [(b_channel, len(nodes))]
+
+ def test_pubsub_numpat_merge_results(self, r):
+ nodes = r.get_nodes()
+ pubsub_nodes = []
+ pattern = "foo*"
+ for node in nodes:
+ # We will create different pubsub clients where each one is
+ # connected to a different node
+ p = r.pubsub(node)
+ pubsub_nodes.append(p)
+ p.psubscribe(pattern)
+ # Assert that each node returns that only one client is subscribed
+ sub_num_pat = node.redis_connection.pubsub_numpat()
+ if sub_num_pat == 0:
+ sleep(0.3)
+ sub_num_pat = node.redis_connection.pubsub_numpat()
+ assert sub_num_pat == 1
+ # Assert that the cluster's pubsub_numpat function returns the number
+ # of patterns subscribed to across the entire cluster
+ assert r.pubsub_numpat(target_nodes='all') == len(nodes)
+
+ @skip_if_server_version_lt('2.8.0')
+ def test_cluster_pubsub_channels(self, r):
+ p = r.pubsub()
+ p.subscribe('foo', 'bar', 'baz', 'quux')
+ for i in range(4):
+ assert wait_for_message(p, timeout=0.5)['type'] == 'subscribe'
+ expected = [b'bar', b'baz', b'foo', b'quux']
+ assert all([channel in r.pubsub_channels(target_nodes='all')
+ for channel in expected])
+
+ @skip_if_server_version_lt('2.8.0')
+ def test_cluster_pubsub_numsub(self, r):
+ p1 = r.pubsub()
+ p1.subscribe('foo', 'bar', 'baz')
+ for i in range(3):
+ assert wait_for_message(p1, timeout=0.5)['type'] == 'subscribe'
+ p2 = r.pubsub()
+ p2.subscribe('bar', 'baz')
+ for i in range(2):
+ assert wait_for_message(p2, timeout=0.5)['type'] == 'subscribe'
+ p3 = r.pubsub()
+ p3.subscribe('baz')
+ assert wait_for_message(p3, timeout=0.5)['type'] == 'subscribe'
+
+ channels = [(b'foo', 1), (b'bar', 2), (b'baz', 3)]
+ assert r.pubsub_numsub('foo', 'bar', 'baz', target_nodes='all') \
+ == channels
+
+ def test_cluster_slots(self, r):
+ mock_all_nodes_resp(r, default_cluster_slots)
+ cluster_slots = r.cluster_slots()
+ assert isinstance(cluster_slots, dict)
+ assert len(default_cluster_slots) == len(cluster_slots)
+ assert cluster_slots.get((0, 8191)) is not None
+ assert cluster_slots.get((0, 8191)).get('primary') == \
+ ('127.0.0.1', 7000)
+
+ def test_cluster_addslots(self, r):
+ node = r.get_random_node()
+ mock_node_resp(node, 'OK')
+ assert r.cluster_addslots(node, 1, 2, 3) is True
+
+ def test_cluster_countkeysinslot(self, r):
+ node = r.nodes_manager.get_node_from_slot(1)
+ mock_node_resp(node, 2)
+ assert r.cluster_countkeysinslot(1) == 2
+
+ def test_cluster_count_failure_report(self, r):
+ mock_all_nodes_resp(r, 0)
+ assert r.cluster_count_failure_report('node_0') == 0
+
+ def test_cluster_delslots(self):
+ cluster_slots = [
+ [
+ 0, 8191,
+ ['127.0.0.1', 7000, 'node_0'],
+ ],
+ [
+ 8192, 16383,
+ ['127.0.0.1', 7001, 'node_1'],
+ ]
+ ]
+ r = get_mocked_redis_client(host=default_host, port=default_port,
+ cluster_slots=cluster_slots)
+ mock_all_nodes_resp(r, 'OK')
+ node0 = r.get_node(default_host, 7000)
+ node1 = r.get_node(default_host, 7001)
+ assert r.cluster_delslots(0, 8192) == [True, True]
+ assert node0.redis_connection.connection.read_response.called
+ assert node1.redis_connection.connection.read_response.called
+
+ def test_cluster_failover(self, r):
+ node = r.get_random_node()
+ mock_node_resp(node, 'OK')
+ assert r.cluster_failover(node) is True
+ assert r.cluster_failover(node, 'FORCE') is True
+ assert r.cluster_failover(node, 'TAKEOVER') is True
+ with pytest.raises(RedisError):
+ r.cluster_failover(node, 'FORCT')
+
+ def test_cluster_info(self, r):
+ info = r.cluster_info()
+ assert isinstance(info, dict)
+ assert info['cluster_state'] == 'ok'
+
+ def test_cluster_keyslot(self, r):
+ mock_all_nodes_resp(r, 12182)
+ assert r.cluster_keyslot('foo') == 12182
+
+ def test_cluster_meet(self, r):
+ node = r.get_default_node()
+ mock_node_resp(node, 'OK')
+ assert r.cluster_meet('127.0.0.1', 6379) is True
+
+ def test_cluster_nodes(self, r):
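+ # Raw CLUSTER NODES output, one line per node:
+ # <id> <ip:port> <flags> <primary-id> <ping-sent> <pong-recv>
+ # <config-epoch> <link-state> [<slot ranges>]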
+ response = (
+ 'c8253bae761cb1ecb2b61857d85dfe455a0fec8b 172.17.0.7:7006 '
+ 'slave aa90da731f673a99617dfe930306549a09f83a6b 0 '
+ '1447836263059 5 connected\n'
+ '9bd595fe4821a0e8d6b99d70faa660638a7612b3 172.17.0.7:7008 '
+ 'master - 0 1447836264065 0 connected\n'
+ 'aa90da731f673a99617dfe930306549a09f83a6b 172.17.0.7:7003 '
+ 'myself,master - 0 0 2 connected 5461-10922\n'
+ '1df047e5a594f945d82fc140be97a1452bcbf93e 172.17.0.7:7007 '
+ 'slave 19efe5a631f3296fdf21a5441680f893e8cc96ec 0 '
+ '1447836262556 3 connected\n'
+ '4ad9a12e63e8f0207025eeba2354bcf4c85e5b22 172.17.0.7:7005 '
+ 'master - 0 1447836262555 7 connected 0-5460\n'
+ '19efe5a631f3296fdf21a5441680f893e8cc96ec 172.17.0.7:7004 '
+ 'master - 0 1447836263562 3 connected 10923-16383\n'
+ 'fbb23ed8cfa23f17eaf27ff7d0c410492a1093d6 172.17.0.7:7002 '
+ 'master,fail - 1447829446956 1447829444948 1 disconnected\n'
+ )
+ mock_all_nodes_resp(r, response)
+ nodes = r.cluster_nodes()
+ assert len(nodes) == 7
+ assert nodes.get('172.17.0.7:7006') is not None
+ assert nodes.get('172.17.0.7:7006').get('node_id') == \
+ "c8253bae761cb1ecb2b61857d85dfe455a0fec8b"
+
+ def test_cluster_replicate(self, r):
+ node = r.get_random_node()
+ all_replicas = r.get_replicas()
+ mock_all_nodes_resp(r, 'OK')
+ assert r.cluster_replicate(node, 'c8253bae761cb61857d') is True
+ results = r.cluster_replicate(all_replicas, 'c8253bae761cb61857d')
+ if isinstance(results, dict):
+ for res in results.values():
+ assert res is True
+ else:
+ assert results is True
+
+ def test_cluster_reset(self, r):
+ mock_all_nodes_resp(r, 'OK')
+ assert r.cluster_reset() is True
+ assert r.cluster_reset(False) is True
+ all_results = r.cluster_reset(False, target_nodes='all')
+ for res in all_results.values():
+ assert res is True
+
+ def test_cluster_save_config(self, r):
+ node = r.get_random_node()
+ all_nodes = r.get_nodes()
+ mock_all_nodes_resp(r, 'OK')
+ assert r.cluster_save_config(node) is True
+ all_results = r.cluster_save_config(all_nodes)
+ for res in all_results.values():
+ assert res is True
+
+ def test_cluster_get_keys_in_slot(self, r):
+ response = [b'{foo}1', b'{foo}2']
+ node = r.nodes_manager.get_node_from_slot(12182)
+ mock_node_resp(node, response)
+ keys = r.cluster_get_keys_in_slot(12182, 4)
+ assert keys == response
+
+ def test_cluster_set_config_epoch(self, r):
+ mock_all_nodes_resp(r, 'OK')
+ assert r.cluster_set_config_epoch(3) is True
+ all_results = r.cluster_set_config_epoch(3, target_nodes='all')
+ for res in all_results.values():
+ assert res is True
+
+ def test_cluster_setslot(self, r):
+ node = r.get_random_node()
+ mock_node_resp(node, 'OK')
+ assert r.cluster_setslot(node, 'node_0', 1218, 'IMPORTING') is True
+ assert r.cluster_setslot(node, 'node_0', 1218, 'NODE') is True
+ assert r.cluster_setslot(node, 'node_0', 1218, 'MIGRATING') is True
+ with pytest.raises(RedisError):
+ r.cluster_failover(node, 'STABLE')
+ with pytest.raises(RedisError):
+ r.cluster_failover(node, 'STATE')
+
+ def test_cluster_setslot_stable(self, r):
+ node = r.nodes_manager.get_node_from_slot(12182)
+ mock_node_resp(node, 'OK')
+ assert r.cluster_setslot_stable(12182) is True
+ assert node.redis_connection.connection.read_response.called
+
+ def test_cluster_replicas(self, r):
+ response = [b'01eca22229cf3c652b6fca0d09ff6941e0d2e3 '
+ b'127.0.0.1:6377@16377 slave '
+ b'52611e796814b78e90ad94be9d769a4f668f9a 0 '
+ b'1634550063436 4 connected',
+ b'r4xfga22229cf3c652b6fca0d09ff69f3e0d4d '
+ b'127.0.0.1:6378@16378 slave '
+ b'52611e796814b78e90ad94be9d769a4f668f9a 0 '
+ b'1634550063436 4 connected']
+ mock_all_nodes_resp(r, response)
+ replicas = r.cluster_replicas('52611e796814b78e90ad94be9d769a4f668f9a')
+ assert replicas.get('127.0.0.1:6377') is not None
+ assert replicas.get('127.0.0.1:6378') is not None
+ assert replicas.get('127.0.0.1:6378').get('node_id') == \
+ 'r4xfga22229cf3c652b6fca0d09ff69f3e0d4d'
+
+ def test_readonly(self):
+ r = get_mocked_redis_client(host=default_host, port=default_port)
+ mock_all_nodes_resp(r, 'OK')
+ assert r.readonly() is True
+ all_replicas_results = r.readonly(target_nodes='replicas')
+ for res in all_replicas_results.values():
+ assert res is True
+ for replica in r.get_replicas():
+ assert replica.redis_connection.connection.read_response.called
+
+ def test_readwrite(self):
+ r = get_mocked_redis_client(host=default_host, port=default_port)
+ mock_all_nodes_resp(r, 'OK')
+ assert r.readwrite() is True
+ all_replicas_results = r.readwrite(target_nodes='replicas')
+ for res in all_replicas_results.values():
+ assert res is True
+ for replica in r.get_replicas():
+ assert replica.redis_connection.connection.read_response.called
+
+ def test_bgsave(self, r):
+ assert r.bgsave()
+ sleep(0.3)
+ assert r.bgsave(True)
+
+ def test_info(self, r):
+ # Map keys to same slot
+ r.set('x{1}', 1)
+ r.set('y{1}', 2)
+ r.set('z{1}', 3)
+ # Get node that handles the slot
+ slot = r.keyslot('x{1}')
+ node = r.nodes_manager.get_node_from_slot(slot)
+ # Run info on that node
+ info = r.info(target_nodes=node)
+ assert isinstance(info, dict)
+ assert info['db0']['keys'] == 3
+
+ def _init_slowlog_test(self, r, node):
+ slowlog_lim = r.config_get('slowlog-log-slower-than',
+ target_nodes=node)
+ assert r.config_set('slowlog-log-slower-than', 0, target_nodes=node) \
+ is True
+ return slowlog_lim['slowlog-log-slower-than']
+
+ def _teardown_slowlog_test(self, r, node, prev_limit):
+ assert r.config_set('slowlog-log-slower-than', prev_limit,
+ target_nodes=node) is True
+
+ def test_slowlog_get(self, r, slowlog):
+ unicode_string = chr(3456) + 'abcd' + chr(3421)
+ node = r.get_node_from_key(unicode_string)
+ slowlog_limit = self._init_slowlog_test(r, node)
+ assert r.slowlog_reset(target_nodes=node)
+ r.get(unicode_string)
+ slowlog = r.slowlog_get(target_nodes=node)
+ assert isinstance(slowlog, list)
+ commands = [log['command'] for log in slowlog]
+
+ get_command = b' '.join((b'GET', unicode_string.encode('utf-8')))
+ assert get_command in commands
+ assert b'SLOWLOG RESET' in commands
+
+ # the order should be ['GET <uni string>', 'SLOWLOG RESET'],
+ # but if other clients are executing commands at the same time, there
+ # could be commands before, between, or after, so just check that
+ # the two we care about are in the appropriate order.
+ assert commands.index(get_command) < commands.index(b'SLOWLOG RESET')
+
+ # make sure other attributes are typed correctly
+ assert isinstance(slowlog[0]['start_time'], int)
+ assert isinstance(slowlog[0]['duration'], int)
+ # rollback the slowlog limit to its original value
+ self._teardown_slowlog_test(r, node, slowlog_limit)
+
+ def test_slowlog_get_limit(self, r, slowlog):
+ assert r.slowlog_reset()
+ node = r.get_node_from_key('foo')
+ slowlog_limit = self._init_slowlog_test(r, node)
+ r.get('foo')
+ slowlog = r.slowlog_get(1, target_nodes=node)
+ assert isinstance(slowlog, list)
+ # only one command, based on the number we passed to slowlog_get()
+ assert len(slowlog) == 1
+ self._teardown_slowlog_test(r, node, slowlog_limit)
+
+ def test_slowlog_length(self, r, slowlog):
+ r.get('foo')
+ node = r.nodes_manager.get_node_from_slot(key_slot(b'foo'))
+ slowlog_len = r.slowlog_len(target_nodes=node)
+ assert isinstance(slowlog_len, int)
+
+ def test_time(self, r):
+ t = r.time(target_nodes=r.get_primaries()[0])
+ assert len(t) == 2
+ assert isinstance(t[0], int)
+ assert isinstance(t[1], int)
+
+ @skip_if_server_version_lt('4.0.0')
+ def test_memory_usage(self, r):
+ r.set('foo', 'bar')
+ assert isinstance(r.memory_usage('foo'), int)
+
+ @skip_if_server_version_lt('4.0.0')
+ def test_memory_malloc_stats(self, r):
+ assert r.memory_malloc_stats()
+
+ @skip_if_server_version_lt('4.0.0')
+ def test_memory_stats(self, r):
+ # put a key into the current db to make sure that "db.<current-db>"
+ # has data
+ r.set('foo', 'bar')
+ node = r.nodes_manager.get_node_from_slot(key_slot(b'foo'))
+ stats = r.memory_stats(target_nodes=node)
+ assert isinstance(stats, dict)
+ for key, value in stats.items():
+ if key.startswith('db.'):
+ assert isinstance(value, dict)
+
+ @skip_if_server_version_lt('4.0.0')
+ def test_memory_help(self, r):
+ with pytest.raises(NotImplementedError):
+ r.memory_help()
+
+ @skip_if_server_version_lt('4.0.0')
+ def test_memory_doctor(self, r):
+ with pytest.raises(NotImplementedError):
+ r.memory_doctor()
+
+ def test_lastsave(self, r):
+ node = r.get_primaries()[0]
+ assert isinstance(r.lastsave(target_nodes=node),
+ datetime.datetime)
+
+ def test_cluster_echo(self, r):
+ node = r.get_primaries()[0]
+ assert r.echo('foo bar', node) == b'foo bar'
+
+ @skip_if_server_version_lt('1.0.0')
+ def test_debug_segfault(self, r):
+ with pytest.raises(NotImplementedError):
+ r.debug_segfault()
+
+ def test_config_resetstat(self, r):
+ node = r.get_primaries()[0]
+ r.ping(target_nodes=node)
+ prior_commands_processed = \
+ int(r.info(target_nodes=node)['total_commands_processed'])
+ assert prior_commands_processed >= 1
+ r.config_resetstat(target_nodes=node)
+ reset_commands_processed = \
+ int(r.info(target_nodes=node)['total_commands_processed'])
+ assert reset_commands_processed < prior_commands_processed
+
+ @skip_if_server_version_lt('6.2.0')
+ def test_client_trackinginfo(self, r):
+ node = r.get_primaries()[0]
+ res = r.client_trackinginfo(target_nodes=node)
+ assert len(res) > 2
+ assert 'prefixes' in res
+
+ @skip_if_server_version_lt('2.9.50')
+ def test_client_pause(self, r):
+ node = r.get_primaries()[0]
+ assert r.client_pause(1, target_nodes=node)
+ assert r.client_pause(timeout=1, target_nodes=node)
+ with pytest.raises(RedisError):
+ r.client_pause(timeout='not an integer', target_nodes=node)
+
+ @skip_if_server_version_lt('6.2.0')
+ def test_client_unpause(self, r):
+ assert r.client_unpause()
+
+ @skip_if_server_version_lt('5.0.0')
+ def test_client_id(self, r):
+ node = r.get_primaries()[0]
+ assert r.client_id(target_nodes=node) > 0
+
+ @skip_if_server_version_lt('5.0.0')
+ def test_client_unblock(self, r):
+ node = r.get_primaries()[0]
+ myid = r.client_id(target_nodes=node)
+ assert not r.client_unblock(myid, target_nodes=node)
+ assert not r.client_unblock(myid, error=True, target_nodes=node)
+ assert not r.client_unblock(myid, error=False, target_nodes=node)
+
+ @skip_if_server_version_lt('6.0.0')
+ def test_client_getredir(self, r):
+ node = r.get_primaries()[0]
+ assert isinstance(r.client_getredir(target_nodes=node), int)
+ assert r.client_getredir(target_nodes=node) == -1
+
+ @skip_if_server_version_lt('6.2.0')
+ def test_client_info(self, r):
+ node = r.get_primaries()[0]
+ info = r.client_info(target_nodes=node)
+ assert isinstance(info, dict)
+ assert 'addr' in info
+
+ @skip_if_server_version_lt('2.6.9')
+ def test_client_kill(self, r, r2):
+ node = r.get_primaries()[0]
+ r.client_setname('redis-py-c1', target_nodes='all')
+ r2.client_setname('redis-py-c2', target_nodes='all')
+ clients = [client for client in r.client_list(target_nodes=node)
+ if client.get('name') in ['redis-py-c1', 'redis-py-c2']]
+ assert len(clients) == 2
+ clients_by_name = dict([(client.get('name'), client)
+ for client in clients])
+
+ client_addr = clients_by_name['redis-py-c2'].get('addr')
+ assert r.client_kill(client_addr, target_nodes=node) is True
+
+ clients = [client for client in r.client_list(target_nodes=node)
+ if client.get('name') in ['redis-py-c1', 'redis-py-c2']]
+ assert len(clients) == 1
+ assert clients[0].get('name') == 'redis-py-c1'
+
+ @skip_if_server_version_lt('2.6.0')
+ def test_cluster_bitop_not_empty_string(self, r):
+ r['{foo}a'] = ''
+ r.bitop('not', '{foo}r', '{foo}a')
+ assert r.get('{foo}r') is None
+
+ @skip_if_server_version_lt('2.6.0')
+ def test_cluster_bitop_not(self, r):
+ test_str = b'\xAA\x00\xFF\x55'
+ correct = ~0xAA00FF55 & 0xFFFFFFFF
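+ # bitwise NOT of the four test bytes, masked back to 32 bits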
+ r['{foo}a'] = test_str
+ r.bitop('not', '{foo}r', '{foo}a')
+ assert int(binascii.hexlify(r['{foo}r']), 16) == correct
+
+ @skip_if_server_version_lt('2.6.0')
+ def test_cluster_bitop_not_in_place(self, r):
+ test_str = b'\xAA\x00\xFF\x55'
+ correct = ~0xAA00FF55 & 0xFFFFFFFF
+ r['{foo}a'] = test_str
+ r.bitop('not', '{foo}a', '{foo}a')
+ assert int(binascii.hexlify(r['{foo}a']), 16) == correct
+
+ @skip_if_server_version_lt('2.6.0')
+ def test_cluster_bitop_single_string(self, r):
+ test_str = b'\x01\x02\xFF'
+ r['{foo}a'] = test_str
+ r.bitop('and', '{foo}res1', '{foo}a')
+ r.bitop('or', '{foo}res2', '{foo}a')
+ r.bitop('xor', '{foo}res3', '{foo}a')
+ assert r['{foo}res1'] == test_str
+ assert r['{foo}res2'] == test_str
+ assert r['{foo}res3'] == test_str
+
+ @skip_if_server_version_lt('2.6.0')
+ def test_cluster_bitop_string_operands(self, r):
+ r['{foo}a'] = b'\x01\x02\xFF\xFF'
+ r['{foo}b'] = b'\x01\x02\xFF'
+ r.bitop('and', '{foo}res1', '{foo}a', '{foo}b')
+ r.bitop('or', '{foo}res2', '{foo}a', '{foo}b')
+ r.bitop('xor', '{foo}res3', '{foo}a', '{foo}b')
+ assert int(binascii.hexlify(r['{foo}res1']), 16) == 0x0102FF00
+ assert int(binascii.hexlify(r['{foo}res2']), 16) == 0x0102FFFF
+ assert int(binascii.hexlify(r['{foo}res3']), 16) == 0x000000FF
+
+ @skip_if_server_version_lt('6.2.0')
+ def test_cluster_copy(self, r):
+ assert r.copy("{foo}a", "{foo}b") == 0
+ r.set("{foo}a", "bar")
+ assert r.copy("{foo}a", "{foo}b") == 1
+ assert r.get("{foo}a") == b"bar"
+ assert r.get("{foo}b") == b"bar"
+
+ @skip_if_server_version_lt('6.2.0')
+ def test_cluster_copy_and_replace(self, r):
+ r.set("{foo}a", "foo1")
+ r.set("{foo}b", "foo2")
+ assert r.copy("{foo}a", "{foo}b") == 0
+ assert r.copy("{foo}a", "{foo}b", replace=True) == 1
+
+ @skip_if_server_version_lt('6.2.0')
+ def test_cluster_lmove(self, r):
+ r.rpush('{foo}a', 'one', 'two', 'three', 'four')
+ assert r.lmove('{foo}a', '{foo}b')
+ assert r.lmove('{foo}a', '{foo}b', 'right', 'left')
+
+ @skip_if_server_version_lt('6.2.0')
+ def test_cluster_blmove(self, r):
+ r.rpush('{foo}a', 'one', 'two', 'three', 'four')
+ assert r.blmove('{foo}a', '{foo}b', 5)
+ assert r.blmove('{foo}a', '{foo}b', 1, 'RIGHT', 'LEFT')
+
+ def test_cluster_msetnx(self, r):
+ d = {'{foo}a': b'1', '{foo}b': b'2', '{foo}c': b'3'}
+ assert r.msetnx(d)
+ d2 = {'{foo}a': b'x', '{foo}d': b'4'}
+ assert not r.msetnx(d2)
+ for k, v in d.items():
+ assert r[k] == v
+ assert r.get('{foo}d') is None
+
+ def test_cluster_rename(self, r):
+ r['{foo}a'] = '1'
+ assert r.rename('{foo}a', '{foo}b')
+ assert r.get('{foo}a') is None
+ assert r['{foo}b'] == b'1'
+
+ def test_cluster_renamenx(self, r):
+ r['{foo}a'] = '1'
+ r['{foo}b'] = '2'
+ assert not r.renamenx('{foo}a', '{foo}b')
+ assert r['{foo}a'] == b'1'
+ assert r['{foo}b'] == b'2'
+
+ # LIST COMMANDS
+ def test_cluster_blpop(self, r):
+ r.rpush('{foo}a', '1', '2')
+ r.rpush('{foo}b', '3', '4')
+ assert r.blpop(['{foo}b', '{foo}a'], timeout=1) == (b'{foo}b', b'3')
+ assert r.blpop(['{foo}b', '{foo}a'], timeout=1) == (b'{foo}b', b'4')
+ assert r.blpop(['{foo}b', '{foo}a'], timeout=1) == (b'{foo}a', b'1')
+ assert r.blpop(['{foo}b', '{foo}a'], timeout=1) == (b'{foo}a', b'2')
+ assert r.blpop(['{foo}b', '{foo}a'], timeout=1) is None
+ r.rpush('{foo}c', '1')
+ assert r.blpop('{foo}c', timeout=1) == (b'{foo}c', b'1')
+
+ def test_cluster_brpop(self, r):
+ r.rpush('{foo}a', '1', '2')
+ r.rpush('{foo}b', '3', '4')
+ assert r.brpop(['{foo}b', '{foo}a'], timeout=1) == (b'{foo}b', b'4')
+ assert r.brpop(['{foo}b', '{foo}a'], timeout=1) == (b'{foo}b', b'3')
+ assert r.brpop(['{foo}b', '{foo}a'], timeout=1) == (b'{foo}a', b'2')
+ assert r.brpop(['{foo}b', '{foo}a'], timeout=1) == (b'{foo}a', b'1')
+ assert r.brpop(['{foo}b', '{foo}a'], timeout=1) is None
+ r.rpush('{foo}c', '1')
+ assert r.brpop('{foo}c', timeout=1) == (b'{foo}c', b'1')
+
+ def test_cluster_brpoplpush(self, r):
+ r.rpush('{foo}a', '1', '2')
+ r.rpush('{foo}b', '3', '4')
+ assert r.brpoplpush('{foo}a', '{foo}b') == b'2'
+ assert r.brpoplpush('{foo}a', '{foo}b') == b'1'
+ assert r.brpoplpush('{foo}a', '{foo}b', timeout=1) is None
+ assert r.lrange('{foo}a', 0, -1) == []
+ assert r.lrange('{foo}b', 0, -1) == [b'1', b'2', b'3', b'4']
+
+ def test_cluster_brpoplpush_empty_string(self, r):
+ r.rpush('{foo}a', '')
+ assert r.brpoplpush('{foo}a', '{foo}b') == b''
+
+ def test_cluster_rpoplpush(self, r):
+ r.rpush('{foo}a', 'a1', 'a2', 'a3')
+ r.rpush('{foo}b', 'b1', 'b2', 'b3')
+ assert r.rpoplpush('{foo}a', '{foo}b') == b'a3'
+ assert r.lrange('{foo}a', 0, -1) == [b'a1', b'a2']
+ assert r.lrange('{foo}b', 0, -1) == [b'a3', b'b1', b'b2', b'b3']
+
+ def test_cluster_sdiff(self, r):
+ r.sadd('{foo}a', '1', '2', '3')
+ assert r.sdiff('{foo}a', '{foo}b') == {b'1', b'2', b'3'}
+ r.sadd('{foo}b', '2', '3')
+ assert r.sdiff('{foo}a', '{foo}b') == {b'1'}
+
+ def test_cluster_sdiffstore(self, r):
+ r.sadd('{foo}a', '1', '2', '3')
+ assert r.sdiffstore('{foo}c', '{foo}a', '{foo}b') == 3
+ assert r.smembers('{foo}c') == {b'1', b'2', b'3'}
+ r.sadd('{foo}b', '2', '3')
+ assert r.sdiffstore('{foo}c', '{foo}a', '{foo}b') == 1
+ assert r.smembers('{foo}c') == {b'1'}
+
+ def test_cluster_sinter(self, r):
+ r.sadd('{foo}a', '1', '2', '3')
+ assert r.sinter('{foo}a', '{foo}b') == set()
+ r.sadd('{foo}b', '2', '3')
+ assert r.sinter('{foo}a', '{foo}b') == {b'2', b'3'}
+
+ def test_cluster_sinterstore(self, r):
+ r.sadd('{foo}a', '1', '2', '3')
+ assert r.sinterstore('{foo}c', '{foo}a', '{foo}b') == 0
+ assert r.smembers('{foo}c') == set()
+ r.sadd('{foo}b', '2', '3')
+ assert r.sinterstore('{foo}c', '{foo}a', '{foo}b') == 2
+ assert r.smembers('{foo}c') == {b'2', b'3'}
+
+ def test_cluster_smove(self, r):
+ r.sadd('{foo}a', 'a1', 'a2')
+ r.sadd('{foo}b', 'b1', 'b2')
+ assert r.smove('{foo}a', '{foo}b', 'a1')
+ assert r.smembers('{foo}a') == {b'a2'}
+ assert r.smembers('{foo}b') == {b'b1', b'b2', b'a1'}
+
+ def test_cluster_sunion(self, r):
+ r.sadd('{foo}a', '1', '2')
+ r.sadd('{foo}b', '2', '3')
+ assert r.sunion('{foo}a', '{foo}b') == {b'1', b'2', b'3'}
+
+ def test_cluster_sunionstore(self, r):
+ r.sadd('{foo}a', '1', '2')
+ r.sadd('{foo}b', '2', '3')
+ assert r.sunionstore('{foo}c', '{foo}a', '{foo}b') == 3
+ assert r.smembers('{foo}c') == {b'1', b'2', b'3'}
+
+ @skip_if_server_version_lt('6.2.0')
+ def test_cluster_zdiff(self, r):
+ r.zadd('{foo}a', {'a1': 1, 'a2': 2, 'a3': 3})
+ r.zadd('{foo}b', {'a1': 1, 'a2': 2})
+ assert r.zdiff(['{foo}a', '{foo}b']) == [b'a3']
+ assert r.zdiff(['{foo}a', '{foo}b'], withscores=True) == [b'a3', b'3']
+
+ @skip_if_server_version_lt('6.2.0')
+ def test_cluster_zdiffstore(self, r):
+ r.zadd('{foo}a', {'a1': 1, 'a2': 2, 'a3': 3})
+ r.zadd('{foo}b', {'a1': 1, 'a2': 2})
+ assert r.zdiffstore("{foo}out", ['{foo}a', '{foo}b'])
+ assert r.zrange("{foo}out", 0, -1) == [b'a3']
+ assert r.zrange("{foo}out", 0, -1, withscores=True) == [(b'a3', 3.0)]
+
+ @skip_if_server_version_lt('6.2.0')
+ def test_cluster_zinter(self, r):
+ r.zadd('{foo}a', {'a1': 1, 'a2': 2, 'a3': 1})
+ r.zadd('{foo}b', {'a1': 2, 'a2': 2, 'a3': 2})
+ r.zadd('{foo}c', {'a1': 6, 'a3': 5, 'a4': 4})
+ assert r.zinter(['{foo}a', '{foo}b', '{foo}c']) == [b'a3', b'a1']
+ # invalid aggregation
+ with pytest.raises(DataError):
+ r.zinter(['{foo}a', '{foo}b', '{foo}c'],
+ aggregate='foo', withscores=True)
+ # aggregate with SUM
+ assert r.zinter(['{foo}a', '{foo}b', '{foo}c'], withscores=True) \
+ == [(b'a3', 8), (b'a1', 9)]
+ # aggregate with MAX
+ assert r.zinter(['{foo}a', '{foo}b', '{foo}c'], aggregate='MAX',
+ withscores=True) \
+ == [(b'a3', 5), (b'a1', 6)]
+ # aggregate with MIN
+ assert r.zinter(['{foo}a', '{foo}b', '{foo}c'], aggregate='MIN',
+ withscores=True) \
+ == [(b'a1', 1), (b'a3', 1)]
+ # with weights
+ assert r.zinter({'{foo}a': 1, '{foo}b': 2, '{foo}c': 3},
+ withscores=True) \
+ == [(b'a3', 20), (b'a1', 23)]
+
+ def test_cluster_zinterstore_sum(self, r):
+ r.zadd('{foo}a', {'a1': 1, 'a2': 1, 'a3': 1})
+ r.zadd('{foo}b', {'a1': 2, 'a2': 2, 'a3': 2})
+ r.zadd('{foo}c', {'a1': 6, 'a3': 5, 'a4': 4})
+ assert r.zinterstore('{foo}d', ['{foo}a', '{foo}b', '{foo}c']) == 2
+ assert r.zrange('{foo}d', 0, -1, withscores=True) == \
+ [(b'a3', 8), (b'a1', 9)]
+
+ def test_cluster_zinterstore_max(self, r):
+ r.zadd('{foo}a', {'a1': 1, 'a2': 1, 'a3': 1})
+ r.zadd('{foo}b', {'a1': 2, 'a2': 2, 'a3': 2})
+ r.zadd('{foo}c', {'a1': 6, 'a3': 5, 'a4': 4})
+ assert r.zinterstore(
+ '{foo}d', ['{foo}a', '{foo}b', '{foo}c'], aggregate='MAX') == 2
+ assert r.zrange('{foo}d', 0, -1, withscores=True) == \
+ [(b'a3', 5), (b'a1', 6)]
+
+ def test_cluster_zinterstore_min(self, r):
+ r.zadd('{foo}a', {'a1': 1, 'a2': 2, 'a3': 3})
+ r.zadd('{foo}b', {'a1': 2, 'a2': 3, 'a3': 5})
+ r.zadd('{foo}c', {'a1': 6, 'a3': 5, 'a4': 4})
+ assert r.zinterstore(
+ '{foo}d', ['{foo}a', '{foo}b', '{foo}c'], aggregate='MIN') == 2
+ assert r.zrange('{foo}d', 0, -1, withscores=True) == \
+ [(b'a1', 1), (b'a3', 3)]
+
+ def test_cluster_zinterstore_with_weight(self, r):
+ r.zadd('{foo}a', {'a1': 1, 'a2': 1, 'a3': 1})
+ r.zadd('{foo}b', {'a1': 2, 'a2': 2, 'a3': 2})
+ r.zadd('{foo}c', {'a1': 6, 'a3': 5, 'a4': 4})
+ assert r.zinterstore(
+ '{foo}d', {'{foo}a': 1, '{foo}b': 2, '{foo}c': 3}) == 2
+ assert r.zrange('{foo}d', 0, -1, withscores=True) == \
+ [(b'a3', 20), (b'a1', 23)]
+
+ @skip_if_server_version_lt('4.9.0')
+ def test_cluster_bzpopmax(self, r):
+ r.zadd('{foo}a', {'a1': 1, 'a2': 2})
+ r.zadd('{foo}b', {'b1': 10, 'b2': 20})
+ assert r.bzpopmax(['{foo}b', '{foo}a'], timeout=1) == (
+ b'{foo}b', b'b2', 20)
+ assert r.bzpopmax(['{foo}b', '{foo}a'], timeout=1) == (
+ b'{foo}b', b'b1', 10)
+ assert r.bzpopmax(['{foo}b', '{foo}a'], timeout=1) == (
+ b'{foo}a', b'a2', 2)
+ assert r.bzpopmax(['{foo}b', '{foo}a'], timeout=1) == (
+ b'{foo}a', b'a1', 1)
+ assert r.bzpopmax(['{foo}b', '{foo}a'], timeout=1) is None
+ r.zadd('{foo}c', {'c1': 100})
+ assert r.bzpopmax('{foo}c', timeout=1) == (b'{foo}c', b'c1', 100)
+
+ @skip_if_server_version_lt('4.9.0')
+ def test_cluster_bzpopmin(self, r):
+ r.zadd('{foo}a', {'a1': 1, 'a2': 2})
+ r.zadd('{foo}b', {'b1': 10, 'b2': 20})
+ assert r.bzpopmin(['{foo}b', '{foo}a'], timeout=1) == (
+ b'{foo}b', b'b1', 10)
+ assert r.bzpopmin(['{foo}b', '{foo}a'], timeout=1) == (
+ b'{foo}b', b'b2', 20)
+ assert r.bzpopmin(['{foo}b', '{foo}a'], timeout=1) == (
+ b'{foo}a', b'a1', 1)
+ assert r.bzpopmin(['{foo}b', '{foo}a'], timeout=1) == (
+ b'{foo}a', b'a2', 2)
+ assert r.bzpopmin(['{foo}b', '{foo}a'], timeout=1) is None
+ r.zadd('{foo}c', {'c1': 100})
+ assert r.bzpopmin('{foo}c', timeout=1) == (b'{foo}c', b'c1', 100)
+
+ @skip_if_server_version_lt('6.2.0')
+ def test_cluster_zrangestore(self, r):
+ r.zadd('{foo}a', {'a1': 1, 'a2': 2, 'a3': 3})
+ assert r.zrangestore('{foo}b', '{foo}a', 0, 1)
+ assert r.zrange('{foo}b', 0, -1) == [b'a1', b'a2']
+ assert r.zrangestore('{foo}b', '{foo}a', 1, 2)
+ assert r.zrange('{foo}b', 0, -1) == [b'a2', b'a3']
+ assert r.zrange('{foo}b', 0, -1, withscores=True) == \
+ [(b'a2', 2), (b'a3', 3)]
+ # reversed order
+ assert r.zrangestore('{foo}b', '{foo}a', 1, 2, desc=True)
+ assert r.zrange('{foo}b', 0, -1) == [b'a1', b'a2']
+ # by score
+ assert r.zrangestore('{foo}b', '{foo}a', 2, 1, byscore=True,
+ offset=0, num=1, desc=True)
+ assert r.zrange('{foo}b', 0, -1) == [b'a2']
+ # by lex
+ assert r.zrangestore('{foo}b', '{foo}a', '[a2', '(a3', bylex=True,
+ offset=0, num=1)
+ assert r.zrange('{foo}b', 0, -1) == [b'a2']
+
+ @skip_if_server_version_lt('6.2.0')
+ def test_cluster_zunion(self, r):
+ r.zadd('{foo}a', {'a1': 1, 'a2': 1, 'a3': 1})
+ r.zadd('{foo}b', {'a1': 2, 'a2': 2, 'a3': 2})
+ r.zadd('{foo}c', {'a1': 6, 'a3': 5, 'a4': 4})
+ # sum
+ assert r.zunion(['{foo}a', '{foo}b', '{foo}c']) == \
+ [b'a2', b'a4', b'a3', b'a1']
+ assert r.zunion(['{foo}a', '{foo}b', '{foo}c'], withscores=True) == \
+ [(b'a2', 3), (b'a4', 4), (b'a3', 8), (b'a1', 9)]
+ # max
+ assert r.zunion(['{foo}a', '{foo}b', '{foo}c'], aggregate='MAX',
+ withscores=True) \
+ == [(b'a2', 2), (b'a4', 4), (b'a3', 5), (b'a1', 6)]
+ # min
+ assert r.zunion(['{foo}a', '{foo}b', '{foo}c'], aggregate='MIN',
+ withscores=True) \
+ == [(b'a1', 1), (b'a2', 1), (b'a3', 1), (b'a4', 4)]
+ # with weight
+ assert r.zunion({'{foo}a': 1, '{foo}b': 2, '{foo}c': 3},
+ withscores=True) \
+ == [(b'a2', 5), (b'a4', 12), (b'a3', 20), (b'a1', 23)]
+
+ def test_cluster_zunionstore_sum(self, r):
+ r.zadd('{foo}a', {'a1': 1, 'a2': 1, 'a3': 1})
+ r.zadd('{foo}b', {'a1': 2, 'a2': 2, 'a3': 2})
+ r.zadd('{foo}c', {'a1': 6, 'a3': 5, 'a4': 4})
+ assert r.zunionstore('{foo}d', ['{foo}a', '{foo}b', '{foo}c']) == 4
+ assert r.zrange('{foo}d', 0, -1, withscores=True) == \
+ [(b'a2', 3), (b'a4', 4), (b'a3', 8), (b'a1', 9)]
+
+ def test_cluster_zunionstore_max(self, r):
+ r.zadd('{foo}a', {'a1': 1, 'a2': 1, 'a3': 1})
+ r.zadd('{foo}b', {'a1': 2, 'a2': 2, 'a3': 2})
+ r.zadd('{foo}c', {'a1': 6, 'a3': 5, 'a4': 4})
+ assert r.zunionstore(
+ '{foo}d', ['{foo}a', '{foo}b', '{foo}c'], aggregate='MAX') == 4
+ assert r.zrange('{foo}d', 0, -1, withscores=True) == \
+ [(b'a2', 2), (b'a4', 4), (b'a3', 5), (b'a1', 6)]
+
+ def test_cluster_zunionstore_min(self, r):
+ r.zadd('{foo}a', {'a1': 1, 'a2': 2, 'a3': 3})
+ r.zadd('{foo}b', {'a1': 2, 'a2': 2, 'a3': 4})
+ r.zadd('{foo}c', {'a1': 6, 'a3': 5, 'a4': 4})
+ assert r.zunionstore(
+ '{foo}d', ['{foo}a', '{foo}b', '{foo}c'], aggregate='MIN') == 4
+ assert r.zrange('{foo}d', 0, -1, withscores=True) == \
+ [(b'a1', 1), (b'a2', 2), (b'a3', 3), (b'a4', 4)]
+
+ def test_cluster_zunionstore_with_weight(self, r):
+ r.zadd('{foo}a', {'a1': 1, 'a2': 1, 'a3': 1})
+ r.zadd('{foo}b', {'a1': 2, 'a2': 2, 'a3': 2})
+ r.zadd('{foo}c', {'a1': 6, 'a3': 5, 'a4': 4})
+ assert r.zunionstore(
+ '{foo}d', {'{foo}a': 1, '{foo}b': 2, '{foo}c': 3}) == 4
+ assert r.zrange('{foo}d', 0, -1, withscores=True) == \
+ [(b'a2', 5), (b'a4', 12), (b'a3', 20), (b'a1', 23)]
+
+ @skip_if_server_version_lt('2.8.9')
+ def test_cluster_pfcount(self, r):
+ members = {b'1', b'2', b'3'}
+ r.pfadd('{foo}a', *members)
+ assert r.pfcount('{foo}a') == len(members)
+ members_b = {b'2', b'3', b'4'}
+ r.pfadd('{foo}b', *members_b)
+ assert r.pfcount('{foo}b') == len(members_b)
+ assert r.pfcount('{foo}a', '{foo}b') == len(members_b.union(members))
+
+ @skip_if_server_version_lt('2.8.9')
+ def test_cluster_pfmerge(self, r):
+ mema = {b'1', b'2', b'3'}
+ memb = {b'2', b'3', b'4'}
+ memc = {b'5', b'6', b'7'}
+ r.pfadd('{foo}a', *mema)
+ r.pfadd('{foo}b', *memb)
+ r.pfadd('{foo}c', *memc)
+ r.pfmerge('{foo}d', '{foo}c', '{foo}a')
+ assert r.pfcount('{foo}d') == 6
+ r.pfmerge('{foo}d', '{foo}b')
+ assert r.pfcount('{foo}d') == 7
+
+ def test_cluster_sort_store(self, r):
+ r.rpush('{foo}a', '2', '3', '1')
+ assert r.sort('{foo}a', store='{foo}sorted_values') == 3
+ assert r.lrange('{foo}sorted_values', 0, -1) == [b'1', b'2', b'3']
+
+ # GEO COMMANDS
+ @skip_if_server_version_lt('6.2.0')
+ def test_cluster_geosearchstore(self, r):
+ values = (2.1909389952632, 41.433791470673, 'place1') + \
+ (2.1873744593677, 41.406342043777, 'place2')
+
+ r.geoadd('{foo}barcelona', values)
+ r.geosearchstore('{foo}places_barcelona', '{foo}barcelona',
+ longitude=2.191, latitude=41.433, radius=1000)
+ assert r.zrange('{foo}places_barcelona', 0, -1) == [b'place1']
+
+ @skip_unless_arch_bits(64)
+ @skip_if_server_version_lt('6.2.0')
+ def test_geosearchstore_dist(self, r):
+ values = (2.1909389952632, 41.433791470673, 'place1') + \
+ (2.1873744593677, 41.406342043777, 'place2')
+
+ r.geoadd('{foo}barcelona', values)
+ r.geosearchstore('{foo}places_barcelona', '{foo}barcelona',
+ longitude=2.191, latitude=41.433,
+ radius=1000, storedist=True)
+        # instead of saving the geo score, the distance is saved.
+ assert r.zscore('{foo}places_barcelona', 'place1') == 88.05060698409301
+
+ @skip_if_server_version_lt('3.2.0')
+ def test_cluster_georadius_store(self, r):
+ values = (2.1909389952632, 41.433791470673, 'place1') + \
+ (2.1873744593677, 41.406342043777, 'place2')
+
+ r.geoadd('{foo}barcelona', values)
+ r.georadius('{foo}barcelona', 2.191, 41.433,
+ 1000, store='{foo}places_barcelona')
+ assert r.zrange('{foo}places_barcelona', 0, -1) == [b'place1']
+
+ @skip_unless_arch_bits(64)
+ @skip_if_server_version_lt('3.2.0')
+ def test_cluster_georadius_store_dist(self, r):
+ values = (2.1909389952632, 41.433791470673, 'place1') + \
+ (2.1873744593677, 41.406342043777, 'place2')
+
+ r.geoadd('{foo}barcelona', values)
+ r.georadius('{foo}barcelona', 2.191, 41.433, 1000,
+ store_dist='{foo}places_barcelona')
+        # instead of saving the geo score, the distance is saved.
+ assert r.zscore('{foo}places_barcelona', 'place1') == 88.05060698409301
+
+ def test_cluster_dbsize(self, r):
+ d = {'a': b'1', 'b': b'2', 'c': b'3', 'd': b'4'}
+ assert r.mset_nonatomic(d)
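+        # DBSIZE with target_nodes='primaries' is expected to sum the key
+        # counts of all primary nodes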
+ assert r.dbsize(target_nodes='primaries') == len(d)
+
+ def test_cluster_keys(self, r):
+ assert r.keys() == []
+ keys_with_underscores = {b'test_a', b'test_b'}
+ keys = keys_with_underscores.union({b'testc'})
+ for key in keys:
+ r[key] = 1
+ assert set(r.keys(pattern='test_*', target_nodes='primaries')) == \
+ keys_with_underscores
+ assert set(r.keys(pattern='test*', target_nodes='primaries')) == keys
+
+ # SCAN COMMANDS
+ @skip_if_server_version_lt('2.8.0')
+ def test_cluster_scan(self, r):
+ r.set('a', 1)
+ r.set('b', 2)
+ r.set('c', 3)
+ cursor, keys = r.scan(target_nodes='primaries')
+ assert cursor == 0
+ assert set(keys) == {b'a', b'b', b'c'}
+ _, keys = r.scan(match='a', target_nodes='primaries')
+ assert set(keys) == {b'a'}
+
+ @skip_if_server_version_lt("6.0.0")
+ def test_cluster_scan_type(self, r):
+ r.sadd('a-set', 1)
+ r.hset('a-hash', 'foo', 2)
+ r.lpush('a-list', 'aux', 3)
+ _, keys = r.scan(match='a*', _type='SET', target_nodes='primaries')
+ assert set(keys) == {b'a-set'}
+
+ @skip_if_server_version_lt('2.8.0')
+ def test_cluster_scan_iter(self, r):
+ r.set('a', 1)
+ r.set('b', 2)
+ r.set('c', 3)
+ keys = list(r.scan_iter(target_nodes='primaries'))
+ assert set(keys) == {b'a', b'b', b'c'}
+ keys = list(r.scan_iter(match='a', target_nodes='primaries'))
+ assert set(keys) == {b'a'}
+
+ def test_cluster_randomkey(self, r):
+ node = r.get_node_from_key('{foo}')
+ assert r.randomkey(target_nodes=node) is None
+ for key in ('{foo}a', '{foo}b', '{foo}c'):
+ r[key] = 1
+ assert r.randomkey(target_nodes=node) in \
+ (b'{foo}a', b'{foo}b', b'{foo}c')
+
+
+@pytest.mark.onlycluster
+class TestNodesManager:
+ """
+ Tests for the NodesManager class
+ """
+
+ def test_load_balancer(self, r):
+ n_manager = r.nodes_manager
+ lb = n_manager.read_load_balancer
+ slot_1 = 1257
+ slot_2 = 8975
+ node_1 = ClusterNode(default_host, 6379, PRIMARY)
+ node_2 = ClusterNode(default_host, 6378, REPLICA)
+ node_3 = ClusterNode(default_host, 6377, REPLICA)
+ node_4 = ClusterNode(default_host, 6376, PRIMARY)
+ node_5 = ClusterNode(default_host, 6375, REPLICA)
+ n_manager.slots_cache = {
+ slot_1: [node_1, node_2, node_3],
+ slot_2: [node_4, node_5]
+ }
+ primary1_name = n_manager.slots_cache[slot_1][0].name
+ primary2_name = n_manager.slots_cache[slot_2][0].name
+ list1_size = len(n_manager.slots_cache[slot_1])
+ list2_size = len(n_manager.slots_cache[slot_2])
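+        # the load balancer should round-robin over the primary and its
+        # replicas for each slot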
+ # slot 1
+ assert lb.get_server_index(primary1_name, list1_size) == 0
+ assert lb.get_server_index(primary1_name, list1_size) == 1
+ assert lb.get_server_index(primary1_name, list1_size) == 2
+ assert lb.get_server_index(primary1_name, list1_size) == 0
+ # slot 2
+ assert lb.get_server_index(primary2_name, list2_size) == 0
+ assert lb.get_server_index(primary2_name, list2_size) == 1
+ assert lb.get_server_index(primary2_name, list2_size) == 0
+
+ lb.reset()
+ assert lb.get_server_index(primary1_name, list1_size) == 0
+ assert lb.get_server_index(primary2_name, list2_size) == 0
+
+ def test_init_slots_cache_not_all_slots_covered(self):
+ """
+        Test that an exception is raised if not all slots are covered
+ """
+ # Missing slot 5460
+ cluster_slots = [
+ [0, 5459, ['127.0.0.1', 7000], ['127.0.0.1', 7003]],
+ [5461, 10922, ['127.0.0.1', 7001],
+ ['127.0.0.1', 7004]],
+ [10923, 16383, ['127.0.0.1', 7002],
+ ['127.0.0.1', 7005]],
+ ]
+ with pytest.raises(RedisClusterException) as ex:
+ get_mocked_redis_client(host=default_host, port=default_port,
+ cluster_slots=cluster_slots)
+ assert str(ex.value).startswith(
+ "All slots are not covered after query all startup_nodes.")
+
+ def test_init_slots_cache_not_require_full_coverage_error(self):
+ """
+        When require_full_coverage is set to False and not all slots are
+        covered, cluster initialization should fail if one of the nodes
+        has the 'cluster-require-full-coverage' config set to 'yes'
+ """
+ # Missing slot 5460
+ cluster_slots = [
+ [0, 5459, ['127.0.0.1', 7000], ['127.0.0.1', 7003]],
+ [5461, 10922, ['127.0.0.1', 7001],
+ ['127.0.0.1', 7004]],
+ [10923, 16383, ['127.0.0.1', 7002],
+ ['127.0.0.1', 7005]],
+ ]
+
+ with pytest.raises(RedisClusterException):
+ get_mocked_redis_client(host=default_host, port=default_port,
+ cluster_slots=cluster_slots,
+ require_full_coverage=False,
+ coverage_result='yes')
+
+ def test_init_slots_cache_not_require_full_coverage_success(self):
+ """
+        When require_full_coverage is set to False and not all slots are
+        covered, cluster initialization should succeed if all of the
+        nodes have the 'cluster-require-full-coverage' config set to 'no'
+ """
+ # Missing slot 5460
+ cluster_slots = [
+ [0, 5459, ['127.0.0.1', 7000], ['127.0.0.1', 7003]],
+ [5461, 10922, ['127.0.0.1', 7001],
+ ['127.0.0.1', 7004]],
+ [10923, 16383, ['127.0.0.1', 7002],
+ ['127.0.0.1', 7005]],
+ ]
+
+ rc = get_mocked_redis_client(host=default_host, port=default_port,
+ cluster_slots=cluster_slots,
+ require_full_coverage=False,
+ coverage_result='no')
+
+ assert 5460 not in rc.nodes_manager.slots_cache
+
+ def test_init_slots_cache_not_require_full_coverage_skips_check(self):
+ """
+        Test that when require_full_coverage is set to False and
+        skip_full_coverage_check is set to True, cluster initialization
+        succeeds without checking the nodes' Redis configurations
+ """
+ # Missing slot 5460
+ cluster_slots = [
+ [0, 5459, ['127.0.0.1', 7000], ['127.0.0.1', 7003]],
+ [5461, 10922, ['127.0.0.1', 7001],
+ ['127.0.0.1', 7004]],
+ [10923, 16383, ['127.0.0.1', 7002],
+ ['127.0.0.1', 7005]],
+ ]
+
+ with patch.object(NodesManager,
+ 'cluster_require_full_coverage') as conf_check_mock:
+ rc = get_mocked_redis_client(host=default_host, port=default_port,
+ cluster_slots=cluster_slots,
+ require_full_coverage=False,
+ skip_full_coverage_check=True,
+ coverage_result='no')
+
+ assert conf_check_mock.called is False
+ assert 5460 not in rc.nodes_manager.slots_cache
+
+ def test_init_slots_cache(self):
+ """
+        Test that the slots cache is initialized and all slots are covered
+ """
+ good_slots_resp = [
+ [0, 5460, ['127.0.0.1', 7000], ['127.0.0.2', 7003]],
+ [5461, 10922, ['127.0.0.1', 7001], ['127.0.0.2', 7004]],
+ [10923, 16383, ['127.0.0.1', 7002], ['127.0.0.2', 7005]],
+ ]
+
+ rc = get_mocked_redis_client(host=default_host, port=default_port,
+ cluster_slots=good_slots_resp)
+ n_manager = rc.nodes_manager
+ assert len(n_manager.slots_cache) == REDIS_CLUSTER_HASH_SLOTS
+ for slot_info in good_slots_resp:
+ all_hosts = ['127.0.0.1', '127.0.0.2']
+ all_ports = [7000, 7001, 7002, 7003, 7004, 7005]
+ slot_start = slot_info[0]
+ slot_end = slot_info[1]
+ for i in range(slot_start, slot_end + 1):
+ assert len(n_manager.slots_cache[i]) == len(slot_info[2:])
+ assert n_manager.slots_cache[i][0].host in all_hosts
+ assert n_manager.slots_cache[i][1].host in all_hosts
+ assert n_manager.slots_cache[i][0].port in all_ports
+ assert n_manager.slots_cache[i][1].port in all_ports
+
+ assert len(n_manager.nodes_cache) == 6
+
+ def test_empty_startup_nodes(self):
+ """
+ It should not be possible to create a node manager with no nodes
+ specified
+ """
+ with pytest.raises(RedisClusterException):
+ NodesManager([])
+
+ def test_wrong_startup_nodes_type(self):
+ """
+        If something other than a list-type iterable is provided, it
+        should fail
+ """
+ with pytest.raises(RedisClusterException):
+ NodesManager({})
+
+ def test_init_slots_cache_slots_collision(self, request):
+ """
+        Test that an error is raised if two nodes do not agree on the
+        same slots setup. In this test both nodes claim that the first
+        slots block is bound to different servers.
+ """
+ with patch.object(NodesManager,
+ 'create_redis_node') as create_redis_node:
+ def create_mocked_redis_node(host, port, **kwargs):
+ """
+ Helper function to return custom slots cache data from
+ different redis nodes
+ """
+ if port == 7000:
+ result = [
+ [
+ 0,
+ 5460,
+ ['127.0.0.1', 7000],
+ ['127.0.0.1', 7003],
+ ],
+ [
+ 5461,
+ 10922,
+ ['127.0.0.1', 7001],
+ ['127.0.0.1', 7004],
+ ],
+ ]
+
+ elif port == 7001:
+ result = [
+ [
+ 0,
+ 5460,
+ ['127.0.0.1', 7001],
+ ['127.0.0.1', 7003],
+ ],
+ [
+ 5461,
+ 10922,
+ ['127.0.0.1', 7000],
+ ['127.0.0.1', 7004],
+ ],
+ ]
+ else:
+ result = []
+
+ r_node = Redis(
+ host=host,
+ port=port
+ )
+
+ orig_execute_command = r_node.execute_command
+
+ def execute_command(*args, **kwargs):
+ if args[0] == 'CLUSTER SLOTS':
+ return result
+ elif args[1] == 'cluster-require-full-coverage':
+ return {'cluster-require-full-coverage': 'yes'}
+ else:
+ return orig_execute_command(*args, **kwargs)
+
+ r_node.execute_command = execute_command
+ return r_node
+
+ create_redis_node.side_effect = create_mocked_redis_node
+
+ with pytest.raises(RedisClusterException) as ex:
+ node_1 = ClusterNode('127.0.0.1', 7000)
+ node_2 = ClusterNode('127.0.0.1', 7001)
+ RedisCluster(startup_nodes=[node_1, node_2])
+ assert str(ex.value).startswith(
+ "startup_nodes could not agree on a valid slots cache"), str(
+ ex.value)
+
+ def test_cluster_one_instance(self):
+ """
+        If the cluster consists of only one node, there are some hacks
+        whose behavior must be validated.
+ """
+ node = ClusterNode(default_host, default_port)
+ cluster_slots = [[0, 16383, ['', default_port]]]
+ rc = get_mocked_redis_client(startup_nodes=[node],
+ cluster_slots=cluster_slots)
+
+ n = rc.nodes_manager
+ assert len(n.nodes_cache) == 1
+ n_node = rc.get_node(node_name=node.name)
+ assert n_node is not None
+ assert n_node == node
+ assert n_node.server_type == PRIMARY
+ assert len(n.slots_cache) == REDIS_CLUSTER_HASH_SLOTS
+ for i in range(0, REDIS_CLUSTER_HASH_SLOTS):
+ assert n.slots_cache[i] == [n_node]
+
+ def test_init_with_down_node(self):
+ """
+        If one of the nodes can't be reached, everything should still work.
+        But if none of the nodes can be reached, an exception should be thrown.
+ """
+ with patch.object(NodesManager,
+ 'create_redis_node') as create_redis_node:
+ def create_mocked_redis_node(host, port, **kwargs):
+ if port == 7000:
+ raise ConnectionError('mock connection error for 7000')
+
+ r_node = Redis(host=host, port=port, decode_responses=True)
+
+ def execute_command(*args, **kwargs):
+ if args[0] == 'CLUSTER SLOTS':
+ return [
+ [
+ 0, 8191,
+ ['127.0.0.1', 7001, 'node_1'],
+ ],
+ [
+ 8192, 16383,
+ ['127.0.0.1', 7002, 'node_2'],
+ ]
+ ]
+ elif args[1] == 'cluster-require-full-coverage':
+ return {'cluster-require-full-coverage': 'yes'}
+
+ r_node.execute_command = execute_command
+
+ return r_node
+
+ create_redis_node.side_effect = create_mocked_redis_node
+
+ node_1 = ClusterNode('127.0.0.1', 7000)
+ node_2 = ClusterNode('127.0.0.1', 7001)
+
+ # If all startup nodes fail to connect, connection error should be
+ # thrown
+ with pytest.raises(RedisClusterException) as e:
+ RedisCluster(startup_nodes=[node_1])
+ assert 'Redis Cluster cannot be connected' in str(e.value)
+
+ with patch.object(CommandsParser, 'initialize',
+ autospec=True) as cmd_parser_initialize:
+
+ def cmd_init_mock(self, r):
+ self.commands = {'get': {'name': 'get', 'arity': 2,
+ 'flags': ['readonly',
+ 'fast'],
+ 'first_key_pos': 1,
+ 'last_key_pos': 1,
+ 'step_count': 1}}
+
+ cmd_parser_initialize.side_effect = cmd_init_mock
+ # When at least one startup node is reachable, the cluster
+                # initialization should succeed
+ rc = RedisCluster(startup_nodes=[node_1, node_2])
+ assert rc.get_node(host=default_host, port=7001) is not None
+ assert rc.get_node(host=default_host, port=7002) is not None
+
+
+@pytest.mark.onlycluster
+class TestClusterPubSubObject:
+ """
+ Tests for the ClusterPubSub class
+ """
+
+ def test_init_pubsub_with_host_and_port(self, r):
+ """
+ Test creation of pubsub instance with passed host and port
+ """
+ node = r.get_default_node()
+ p = r.pubsub(host=node.host, port=node.port)
+ assert p.get_pubsub_node() == node
+
+ def test_init_pubsub_with_node(self, r):
+ """
+ Test creation of pubsub instance with passed node
+ """
+ node = r.get_default_node()
+ p = r.pubsub(node=node)
+ assert p.get_pubsub_node() == node
+
+    def test_init_pubsub_without_specifying_node(self, r):
+ """
+ Test creation of pubsub instance without specifying a node. The node
+ should be determined based on the keyslot of the first command
+ execution.
+ """
+ channel_name = 'foo'
+ node = r.get_node_from_key(channel_name)
+ p = r.pubsub()
+ assert p.get_pubsub_node() is None
+ p.subscribe(channel_name)
+ assert p.get_pubsub_node() == node
+
+ def test_init_pubsub_with_a_non_existent_node(self, r):
+ """
+        Test creation of pubsub instance with a node that doesn't exist in the
+ cluster. RedisClusterException should be raised.
+ """
+ node = ClusterNode('1.1.1.1', 1111)
+ with pytest.raises(RedisClusterException):
+ r.pubsub(node)
+
+ def test_init_pubsub_with_a_non_existent_host_port(self, r):
+ """
+ Test creation of pubsub instance with host and port that don't belong
+ to a node in the cluster.
+ RedisClusterException should be raised.
+ """
+ with pytest.raises(RedisClusterException):
+ r.pubsub(host='1.1.1.1', port=1111)
+
+ def test_init_pubsub_host_or_port(self, r):
+ """
+ Test creation of pubsub instance with host but without port, and vice
+ versa. DataError should be raised.
+ """
+ with pytest.raises(DataError):
+ r.pubsub(host='localhost')
+
+ with pytest.raises(DataError):
+ r.pubsub(port=16379)
+
+ def test_get_redis_connection(self, r):
+ """
+ Test that get_redis_connection() returns the redis connection of the
+ set pubsub node
+ """
+ node = r.get_default_node()
+ p = r.pubsub(node=node)
+ assert p.get_redis_connection() == node.redis_connection
+
+
+@pytest.mark.onlycluster
+class TestClusterPipeline:
+ """
+ Tests for the ClusterPipeline class
+ """
+
+ def test_blocked_methods(self, r):
+ """
+        Currently some method calls on a cluster pipeline
+        are blocked when used in cluster mode.
+        They may be implemented in the future.
+ """
+ pipe = r.pipeline()
+ with pytest.raises(RedisClusterException):
+ pipe.multi()
+
+ with pytest.raises(RedisClusterException):
+ pipe.immediate_execute_command()
+
+ with pytest.raises(RedisClusterException):
+ pipe._execute_transaction(None, None, None)
+
+ with pytest.raises(RedisClusterException):
+ pipe.load_scripts()
+
+ with pytest.raises(RedisClusterException):
+ pipe.watch()
+
+ with pytest.raises(RedisClusterException):
+ pipe.unwatch()
+
+ with pytest.raises(RedisClusterException):
+ pipe.script_load_for_pipeline(None)
+
+ with pytest.raises(RedisClusterException):
+ pipe.eval()
+
+ def test_blocked_arguments(self, r):
+ """
+        Currently some arguments are blocked when used in cluster mode.
+        They may be implemented in the future.
+ """
+ with pytest.raises(RedisClusterException) as ex:
+ r.pipeline(transaction=True)
+
+ assert str(ex.value).startswith(
+ "transaction is deprecated in cluster mode") is True
+
+ with pytest.raises(RedisClusterException) as ex:
+ r.pipeline(shard_hint=True)
+
+ assert str(ex.value).startswith(
+ "shard_hint is deprecated in cluster mode") is True
+
+ def test_redis_cluster_pipeline(self, r):
+ """
+ Test that we can use a pipeline with the RedisCluster class
+ """
+ with r.pipeline() as pipe:
+ pipe.set("foo", "bar")
+ pipe.get("foo")
+ assert pipe.execute() == [True, b'bar']
+
+ def test_mget_disabled(self, r):
+ """
+ Test that mget is disabled for ClusterPipeline
+ """
+ with r.pipeline() as pipe:
+ with pytest.raises(RedisClusterException):
+ pipe.mget(['a'])
+
+ def test_mset_disabled(self, r):
+ """
+ Test that mset is disabled for ClusterPipeline
+ """
+ with r.pipeline() as pipe:
+ with pytest.raises(RedisClusterException):
+ pipe.mset({'a': 1, 'b': 2})
+
+ def test_rename_disabled(self, r):
+ """
+ Test that rename is disabled for ClusterPipeline
+ """
+ with r.pipeline(transaction=False) as pipe:
+ with pytest.raises(RedisClusterException):
+ pipe.rename('a', 'b')
+
+ def test_renamenx_disabled(self, r):
+ """
+ Test that renamenx is disabled for ClusterPipeline
+ """
+ with r.pipeline(transaction=False) as pipe:
+ with pytest.raises(RedisClusterException):
+ pipe.renamenx('a', 'b')
+
+ def test_delete_single(self, r):
+ """
+ Test a single delete operation
+ """
+ r['a'] = 1
+ with r.pipeline(transaction=False) as pipe:
+ pipe.delete('a')
+ assert pipe.execute() == [1]
+
+ def test_multi_delete_unsupported(self, r):
+ """
+ Test that multi delete operation is unsupported
+ """
+ with r.pipeline(transaction=False) as pipe:
+ r['a'] = 1
+ r['b'] = 2
+ with pytest.raises(RedisClusterException):
+ pipe.delete('a', 'b')
+
+ def test_brpoplpush_disabled(self, r):
+ """
+ Test that brpoplpush is disabled for ClusterPipeline
+ """
+ with r.pipeline(transaction=False) as pipe:
+ with pytest.raises(RedisClusterException):
+ pipe.brpoplpush()
+
+ def test_rpoplpush_disabled(self, r):
+ """
+ Test that rpoplpush is disabled for ClusterPipeline
+ """
+ with r.pipeline(transaction=False) as pipe:
+ with pytest.raises(RedisClusterException):
+ pipe.rpoplpush()
+
+ def test_sort_disabled(self, r):
+ """
+ Test that sort is disabled for ClusterPipeline
+ """
+ with r.pipeline(transaction=False) as pipe:
+ with pytest.raises(RedisClusterException):
+ pipe.sort()
+
+ def test_sdiff_disabled(self, r):
+ """
+ Test that sdiff is disabled for ClusterPipeline
+ """
+ with r.pipeline(transaction=False) as pipe:
+ with pytest.raises(RedisClusterException):
+ pipe.sdiff()
+
+ def test_sdiffstore_disabled(self, r):
+ """
+ Test that sdiffstore is disabled for ClusterPipeline
+ """
+ with r.pipeline(transaction=False) as pipe:
+ with pytest.raises(RedisClusterException):
+ pipe.sdiffstore()
+
+ def test_sinter_disabled(self, r):
+ """
+ Test that sinter is disabled for ClusterPipeline
+ """
+ with r.pipeline(transaction=False) as pipe:
+ with pytest.raises(RedisClusterException):
+ pipe.sinter()
+
+ def test_sinterstore_disabled(self, r):
+ """
+ Test that sinterstore is disabled for ClusterPipeline
+ """
+ with r.pipeline(transaction=False) as pipe:
+ with pytest.raises(RedisClusterException):
+ pipe.sinterstore()
+
+ def test_smove_disabled(self, r):
+ """
+        Test that smove is disabled for ClusterPipeline
+ """
+ with r.pipeline(transaction=False) as pipe:
+ with pytest.raises(RedisClusterException):
+ pipe.smove()
+
+ def test_sunion_disabled(self, r):
+ """
+ Test that sunion is disabled for ClusterPipeline
+ """
+ with r.pipeline(transaction=False) as pipe:
+ with pytest.raises(RedisClusterException):
+ pipe.sunion()
+
+ def test_sunionstore_disabled(self, r):
+ """
+ Test that sunionstore is disabled for ClusterPipeline
+ """
+ with r.pipeline(transaction=False) as pipe:
+ with pytest.raises(RedisClusterException):
+ pipe.sunionstore()
+
+    def test_pfmerge_disabled(self, r):
+        """
+        Test that pfmerge is disabled for ClusterPipeline
+ """
+ with r.pipeline(transaction=False) as pipe:
+ with pytest.raises(RedisClusterException):
+ pipe.pfmerge()
+
+ def test_multi_key_operation_with_a_single_slot(self, r):
+ """
+ Test multi key operation with a single slot
+ """
+ pipe = r.pipeline(transaction=False)
+ pipe.set('a{foo}', 1)
+ pipe.set('b{foo}', 2)
+ pipe.set('c{foo}', 3)
+ pipe.get('a{foo}')
+ pipe.get('b{foo}')
+ pipe.get('c{foo}')
+
+ res = pipe.execute()
+ assert res == [True, True, True, b'1', b'2', b'3']
+
+ def test_multi_key_operation_with_multi_slots(self, r):
+ """
+ Test multi key operation with more than one slot
+ """
+ pipe = r.pipeline(transaction=False)
+ pipe.set('a{foo}', 1)
+ pipe.set('b{foo}', 2)
+ pipe.set('c{foo}', 3)
+ pipe.set('bar', 4)
+ pipe.set('bazz', 5)
+ pipe.get('a{foo}')
+ pipe.get('b{foo}')
+ pipe.get('c{foo}')
+ pipe.get('bar')
+ pipe.get('bazz')
+ res = pipe.execute()
+ assert res == [True, True, True, True, True, b'1', b'2', b'3', b'4',
+ b'5']
+
+ def test_connection_error_not_raised(self, r):
+ """
+ Test that the pipeline doesn't raise an error on connection error when
+ raise_on_error=False
+ """
+ key = 'foo'
+ node = r.get_node_from_key(key, False)
+
+ def raise_connection_error():
+ e = ConnectionError("error")
+ return e
+
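+        # with raise_on_error=False, the exception instance is returned in
+        # the result list instead of being raised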
+ with r.pipeline() as pipe:
+ mock_node_resp_func(node, raise_connection_error)
+ res = pipe.get(key).get(key).execute(raise_on_error=False)
+ assert node.redis_connection.connection.read_response.called
+ assert isinstance(res[0], ConnectionError)
+
+ def test_connection_error_raised(self, r):
+ """
+ Test that the pipeline raises an error on connection error when
+ raise_on_error=True
+ """
+ key = 'foo'
+ node = r.get_node_from_key(key, False)
+
+ def raise_connection_error():
+ e = ConnectionError("error")
+ return e
+
+ with r.pipeline() as pipe:
+ mock_node_resp_func(node, raise_connection_error)
+ with pytest.raises(ConnectionError):
+ pipe.get(key).get(key).execute(raise_on_error=True)
+
+ def test_asking_error(self, r):
+ """
+ Test redirection on ASK error
+ """
+ key = 'foo'
+ first_node = r.get_node_from_key(key, False)
+ ask_node = None
+ for node in r.get_nodes():
+ if node != first_node:
+ ask_node = node
+ break
+ if ask_node is None:
+ warnings.warn("skipping this test since the cluster has only one "
+ "node")
+ return
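+        # an ASK error carries '<slot> <host>:<port>'; the pipeline should
+        # retry the command on the node named in the error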
+ ask_msg = "{0} {1}:{2}".format(r.keyslot(key), ask_node.host,
+ ask_node.port)
+
+ def raise_ask_error():
+ raise AskError(ask_msg)
+
+ with r.pipeline() as pipe:
+ mock_node_resp_func(first_node, raise_ask_error)
+ mock_node_resp(ask_node, 'MOCK_OK')
+ res = pipe.get(key).execute()
+ assert first_node.redis_connection.connection.read_response.called
+ assert ask_node.redis_connection.connection.read_response.called
+ assert res == ['MOCK_OK']
+
+ def test_empty_stack(self, r):
+ """
+        If the pipeline is executed with no commands it should
+        return an empty list.
+ """
+ p = r.pipeline()
+ result = p.execute()
+ assert result == []
+
+
+@pytest.mark.onlycluster
+class TestReadOnlyPipeline:
+ """
+ Tests for ClusterPipeline class in readonly mode
+ """
+
+ def test_pipeline_readonly(self, r):
+ """
+        In readonly mode, only read commands are supported.
+ """
+ r.readonly(target_nodes='all')
+ r.set('foo71', 'a1') # we assume this key is set on 127.0.0.1:7001
+ r.zadd('foo88',
+ {'z1': 1}) # we assume this key is set on 127.0.0.1:7002
+ r.zadd('foo88', {'z2': 4})
+
+ with r.pipeline() as readonly_pipe:
+ readonly_pipe.get('foo71').zrange('foo88', 0, 5, withscores=True)
+ assert readonly_pipe.execute() == [
+ b'a1',
+ [(b'z1', 1.0), (b'z2', 4)],
+ ]
+
+ def test_moved_redirection_on_slave_with_default(self, r):
+ """
+ On Pipeline, we redirected once and finally get from master with
+ readonly client when data is completely moved.
+ """
+ key = 'bar'
+ r.set(key, 'foo')
+ # set read_from_replicas to True
+ r.read_from_replicas = True
+ primary = r.get_node_from_key(key, False)
+ replica = r.get_node_from_key(key, True)
+ with r.pipeline() as readwrite_pipe:
+ mock_node_resp(primary, "MOCK_FOO")
+ if replica is not None:
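+                # a MOVED error carries '<slot> <host>:<port>' and points
+                # the client back to the primary that owns the slot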
+ moved_error = "{0} {1}:{2}".format(r.keyslot(key),
+ primary.host,
+ primary.port)
+
+ def raise_moved_error():
+ raise MovedError(moved_error)
+
+ mock_node_resp_func(replica, raise_moved_error)
+ assert readwrite_pipe.reinitialize_counter == 0
+ readwrite_pipe.get(key).get(key)
+ assert readwrite_pipe.execute() == ["MOCK_FOO", "MOCK_FOO"]
+ if replica is not None:
+ # the slot has a replica as well, so MovedError should have
+ # occurred. If MovedError occurs, we should see the
+ # reinitialize_counter increase.
+ assert readwrite_pipe.reinitialize_counter == 1
+ conn = replica.redis_connection.connection
+ assert conn.read_response.called is True
+
+ def test_readonly_pipeline_from_readonly_client(self, request):
+ """
+ Test that the pipeline is initialized with readonly mode if the client
+ has it enabled
+ """
+        # Create a cluster client that reads from replicas
+ ro = _get_client(RedisCluster, request, read_from_replicas=True)
+ key = 'bar'
+ ro.set(key, 'foo')
+ import time
+ time.sleep(0.2)
+ with ro.pipeline() as readonly_pipe:
+ mock_all_nodes_resp(ro, 'MOCK_OK')
+ assert readonly_pipe.read_from_replicas is True
+ assert readonly_pipe.get(key).get(
+ key).execute() == ['MOCK_OK', 'MOCK_OK']
+ slot_nodes = ro.nodes_manager.slots_cache[ro.keyslot(key)]
+ if len(slot_nodes) > 1:
+ executed_on_replica = False
+ for node in slot_nodes:
+ if node.server_type == REPLICA:
+ conn = node.redis_connection.connection
+ executed_on_replica = conn.read_response.called
+ if executed_on_replica:
+ break
+ assert executed_on_replica is True
diff --git a/tests/test_command_parser.py b/tests/test_command_parser.py
new file mode 100644
index 0000000..ba129ba
--- /dev/null
+++ b/tests/test_command_parser.py
@@ -0,0 +1,62 @@
+import pytest
+
+from redis.commands import CommandsParser
+
+
+class TestCommandsParser:
+ def test_init_commands(self, r):
+ commands_parser = CommandsParser(r)
+ assert commands_parser.commands is not None
+ assert 'get' in commands_parser.commands
+
+ def test_get_keys_predetermined_key_location(self, r):
+ commands_parser = CommandsParser(r)
+ args1 = ['GET', 'foo']
+ args2 = ['OBJECT', 'encoding', 'foo']
+ args3 = ['MGET', 'foo', 'bar', 'foobar']
+ assert commands_parser.get_keys(r, *args1) == ['foo']
+ assert commands_parser.get_keys(r, *args2) == ['foo']
+ assert commands_parser.get_keys(r, *args3) == ['foo', 'bar', 'foobar']
+
+ @pytest.mark.filterwarnings("ignore:ResponseError")
+ def test_get_moveable_keys(self, r):
+ commands_parser = CommandsParser(r)
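+        # these commands have "movable" keys, so their key positions cannot
+        # be determined from fixed positions in the COMMAND reply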
+ args1 = ['EVAL', 'return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}', 2, 'key1',
+ 'key2', 'first', 'second']
+ args2 = ['XREAD', 'COUNT', 2, b'STREAMS', 'mystream', 'writers', 0, 0]
+ args3 = ['ZUNIONSTORE', 'out', 2, 'zset1', 'zset2', 'WEIGHTS', 2, 3]
+ args4 = ['GEORADIUS', 'Sicily', 15, 37, 200, 'km', 'WITHCOORD',
+ b'STORE', 'out']
+ args5 = ['MEMORY USAGE', 'foo']
+ args6 = ['MIGRATE', '192.168.1.34', 6379, "", 0, 5000, b'KEYS',
+ 'key1', 'key2', 'key3']
+ args7 = ['MIGRATE', '192.168.1.34', 6379, "key1", 0, 5000]
+ args8 = ['STRALGO', 'LCS', 'STRINGS', 'string_a', 'string_b']
+ args9 = ['STRALGO', 'LCS', 'KEYS', 'key1', 'key2']
+
+        assert sorted(commands_parser.get_keys(
+            r, *args1)) == sorted(['key1', 'key2'])
+        assert sorted(commands_parser.get_keys(
+            r, *args2)) == sorted(['mystream', 'writers'])
+        assert sorted(commands_parser.get_keys(
+            r, *args3)) == sorted(['out', 'zset1', 'zset2'])
+        assert sorted(commands_parser.get_keys(
+            r, *args4)) == sorted(['Sicily', 'out'])
+        assert sorted(commands_parser.get_keys(r, *args5)) == sorted(['foo'])
+        assert sorted(commands_parser.get_keys(
+            r, *args6)) == sorted(['key1', 'key2', 'key3'])
+        assert sorted(commands_parser.get_keys(r, *args7)) == sorted(['key1'])
+        assert commands_parser.get_keys(r, *args8) is None
+        assert sorted(commands_parser.get_keys(
+            r, *args9)) == sorted(['key1', 'key2'])
+
+ def test_get_pubsub_keys(self, r):
+ commands_parser = CommandsParser(r)
+ args1 = ['PUBLISH', 'foo', 'bar']
+ args2 = ['PUBSUB NUMSUB', 'foo1', 'foo2', 'foo3']
+ args3 = ['PUBSUB channels', '*']
+ args4 = ['SUBSCRIBE', 'foo1', 'foo2', 'foo3']
+ assert commands_parser.get_keys(r, *args1) == ['foo']
+ assert commands_parser.get_keys(r, *args2) == ['foo1', 'foo2', 'foo3']
+ assert commands_parser.get_keys(r, *args3) == ['*']
+ assert commands_parser.get_keys(r, *args4) == ['foo1', 'foo2', 'foo3']
diff --git a/tests/test_commands.py b/tests/test_commands.py
index dbd0442..f526ae5 100644
--- a/tests/test_commands.py
+++ b/tests/test_commands.py
@@ -47,6 +47,7 @@ def get_stream_message(client, stream, message_id):
# RESPONSE CALLBACKS
+@pytest.mark.onlynoncluster
class TestResponseCallbacks:
"Tests for the response callback system"
@@ -68,18 +69,21 @@ class TestRedisCommands:
r['a']
# SERVER INFORMATION
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt("6.0.0")
def test_acl_cat_no_category(self, r):
categories = r.acl_cat()
assert isinstance(categories, list)
assert 'read' in categories
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt("6.0.0")
def test_acl_cat_with_category(self, r):
commands = r.acl_cat('read')
assert isinstance(commands, list)
assert 'get' in commands
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt("6.0.0")
@skip_if_redis_enterprise
def test_acl_deluser(self, r, request):
@@ -105,6 +109,7 @@ class TestRedisCommands:
assert r.acl_getuser(users[3]) is None
assert r.acl_getuser(users[4]) is None
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt("6.0.0")
@skip_if_redis_enterprise
def test_acl_genpass(self, r):
@@ -119,6 +124,7 @@ class TestRedisCommands:
r.acl_genpass(555)
assert isinstance(password, str)
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt("6.0.0")
@skip_if_redis_enterprise
def test_acl_getuser_setuser(self, r, request):
@@ -207,12 +213,14 @@ class TestRedisCommands:
hashed_passwords=['-' + hashed_password])
assert len(r.acl_getuser(username)['passwords']) == 1
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt("6.0.0")
def test_acl_help(self, r):
res = r.acl_help()
assert isinstance(res, list)
assert len(res) != 0
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt("6.0.0")
@skip_if_redis_enterprise
def test_acl_list(self, r, request):
@@ -226,6 +234,7 @@ class TestRedisCommands:
users = r.acl_list()
assert len(users) == 2
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt("6.0.0")
@skip_if_redis_enterprise
def test_acl_log(self, r, request):
@@ -262,6 +271,7 @@ class TestRedisCommands:
assert 'client-info' in r.acl_log(count=1)[0]
assert r.acl_log_reset()
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt("6.0.0")
@skip_if_redis_enterprise
def test_acl_setuser_categories_without_prefix_fails(self, r, request):
@@ -274,6 +284,7 @@ class TestRedisCommands:
with pytest.raises(exceptions.DataError):
r.acl_setuser(username, categories=['list'])
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt("6.0.0")
@skip_if_redis_enterprise
def test_acl_setuser_commands_without_prefix_fails(self, r, request):
@@ -286,6 +297,7 @@ class TestRedisCommands:
with pytest.raises(exceptions.DataError):
r.acl_setuser(username, commands=['get'])
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt("6.0.0")
@skip_if_redis_enterprise
def test_acl_setuser_add_passwords_and_nopass_fails(self, r, request):
@@ -298,28 +310,33 @@ class TestRedisCommands:
with pytest.raises(exceptions.DataError):
r.acl_setuser(username, passwords='+mypass', nopass=True)
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt("6.0.0")
def test_acl_users(self, r):
users = r.acl_users()
assert isinstance(users, list)
assert len(users) > 0
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt("6.0.0")
def test_acl_whoami(self, r):
username = r.acl_whoami()
assert isinstance(username, str)
+ @pytest.mark.onlynoncluster
def test_client_list(self, r):
clients = r.client_list()
assert isinstance(clients[0], dict)
assert 'addr' in clients[0]
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('6.2.0')
def test_client_info(self, r):
info = r.client_info()
assert isinstance(info, dict)
assert 'addr' in info
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('5.0.0')
def test_client_list_types_not_replica(self, r):
with pytest.raises(exceptions.RedisError):
@@ -333,6 +350,7 @@ class TestRedisCommands:
clients = r.client_list(_type='replica')
assert isinstance(clients, list)
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('6.2.0')
def test_client_list_client_id(self, r, request):
clients = r.client_list()
@@ -347,16 +365,19 @@ class TestRedisCommands:
clients_listed = r.client_list(client_id=clients[:-1])
assert len(clients_listed) > 1
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('5.0.0')
def test_client_id(self, r):
assert r.client_id() > 0
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('6.2.0')
def test_client_trackinginfo(self, r):
res = r.client_trackinginfo()
assert len(res) > 2
assert 'prefixes' in res
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('5.0.0')
def test_client_unblock(self, r):
myid = r.client_id()
@@ -364,15 +385,18 @@ class TestRedisCommands:
assert not r.client_unblock(myid, error=True)
assert not r.client_unblock(myid, error=False)
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('2.6.9')
def test_client_getname(self, r):
assert r.client_getname() is None
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('2.6.9')
def test_client_setname(self, r):
assert r.client_setname('redis_py_test')
assert r.client_getname() == 'redis_py_test'
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('2.6.9')
def test_client_kill(self, r, r2):
r.client_setname('redis-py-c1')
@@ -406,6 +430,7 @@ class TestRedisCommands:
with pytest.raises(exceptions.DataError):
r.client_kill_filter(_type="caster")
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('2.8.12')
def test_client_kill_filter_by_id(self, r, r2):
r.client_setname('redis-py-c1')
@@ -426,6 +451,7 @@ class TestRedisCommands:
assert len(clients) == 1
assert clients[0].get('name') == 'redis-py-c1'
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('2.8.12')
def test_client_kill_filter_by_addr(self, r, r2):
r.client_setname('redis-py-c1')
@@ -481,6 +507,7 @@ class TestRedisCommands:
assert c['user'] != killuser
r.acl_deluser(killuser)
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('2.9.50')
@skip_if_redis_enterprise
def test_client_pause(self, r):
@@ -489,11 +516,13 @@ class TestRedisCommands:
with pytest.raises(exceptions.RedisError):
r.client_pause(timeout='not an integer')
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('6.2.0')
@skip_if_redis_enterprise
def test_client_unpause(self, r):
assert r.client_unpause() == b'OK'
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('3.2.0')
def test_client_reply(self, r, r_timeout):
assert r_timeout.client_reply('ON') == b'OK'
@@ -507,6 +536,7 @@ class TestRedisCommands:
# validate it was set
assert r.get('foo') == b'bar'
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('6.0.0')
@skip_if_redis_enterprise
def test_client_getredir(self, r):
@@ -519,6 +549,7 @@ class TestRedisCommands:
# # assert 'maxmemory' in data
# assert data['maxmemory'].isdigit()
+ @pytest.mark.onlynoncluster
@skip_if_redis_enterprise
def test_config_resetstat(self, r):
r.ping()
@@ -535,14 +566,17 @@ class TestRedisCommands:
assert r.config_set('timeout', 0)
assert r.config_get()['timeout'] == '0'
+ @pytest.mark.onlynoncluster
def test_dbsize(self, r):
r['a'] = 'foo'
r['b'] = 'bar'
assert r.dbsize() == 2
+ @pytest.mark.onlynoncluster
def test_echo(self, r):
assert r.echo('foo bar') == b'foo bar'
+ @pytest.mark.onlynoncluster
def test_info(self, r):
r['a'] = 'foo'
r['b'] = 'bar'
@@ -551,10 +585,12 @@ class TestRedisCommands:
assert 'arch_bits' in info.keys()
assert 'redis_version' in info.keys()
+ @pytest.mark.onlynoncluster
@skip_if_redis_enterprise
def test_lastsave(self, r):
assert isinstance(r.lastsave(), datetime.datetime)
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('5.0.0')
def test_lolwut(self, r):
lolwut = r.lolwut().decode('utf-8')
@@ -573,9 +609,11 @@ class TestRedisCommands:
def test_ping(self, r):
assert r.ping()
+ @pytest.mark.onlynoncluster
def test_quit(self, r):
assert r.quit()
+ @pytest.mark.onlynoncluster
def test_slowlog_get(self, r, slowlog):
assert r.slowlog_reset()
unicode_string = chr(3456) + 'abcd' + chr(3421)
@@ -613,6 +651,7 @@ class TestRedisCommands:
# Complexity info stored as fourth item in list
response.insert(3, COMPLEXITY_STATEMENT)
return r.response_callbacks[command_name](responses, **options)
+
r.parse_response = parse_response
# test
@@ -626,6 +665,7 @@ class TestRedisCommands:
# tear down monkeypatch
r.parse_response = old_parse_response
+ @pytest.mark.onlynoncluster
def test_slowlog_get_limit(self, r, slowlog):
assert r.slowlog_reset()
r.get('foo')
@@ -634,6 +674,7 @@ class TestRedisCommands:
# only one command, based on the number we passed to slowlog_get()
assert len(slowlog) == 1
+ @pytest.mark.onlynoncluster
def test_slowlog_length(self, r, slowlog):
r.get('foo')
assert isinstance(r.slowlog_len(), int)
@@ -677,12 +718,14 @@ class TestRedisCommands:
assert r.bitcount('a', -2, -1) == 2
assert r.bitcount('a', 1, 1) == 1
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('2.6.0')
def test_bitop_not_empty_string(self, r):
r['a'] = ''
r.bitop('not', 'r', 'a')
assert r.get('r') is None
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('2.6.0')
def test_bitop_not(self, r):
test_str = b'\xAA\x00\xFF\x55'
@@ -691,6 +734,7 @@ class TestRedisCommands:
r.bitop('not', 'r', 'a')
assert int(binascii.hexlify(r['r']), 16) == correct
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('2.6.0')
def test_bitop_not_in_place(self, r):
test_str = b'\xAA\x00\xFF\x55'
@@ -699,6 +743,7 @@ class TestRedisCommands:
r.bitop('not', 'a', 'a')
assert int(binascii.hexlify(r['a']), 16) == correct
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('2.6.0')
def test_bitop_single_string(self, r):
test_str = b'\x01\x02\xFF'
@@ -710,6 +755,7 @@ class TestRedisCommands:
assert r['res2'] == test_str
assert r['res3'] == test_str
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('2.6.0')
def test_bitop_string_operands(self, r):
r['a'] = b'\x01\x02\xFF\xFF'
@@ -721,6 +767,7 @@ class TestRedisCommands:
assert int(binascii.hexlify(r['res2']), 16) == 0x0102FFFF
assert int(binascii.hexlify(r['res3']), 16) == 0x000000FF
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('2.8.7')
def test_bitpos(self, r):
key = 'key:bitpos'
@@ -743,6 +790,7 @@ class TestRedisCommands:
with pytest.raises(exceptions.RedisError):
r.bitpos(key, 7) == 12
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('6.2.0')
def test_copy(self, r):
assert r.copy("a", "b") == 0
@@ -751,6 +799,7 @@ class TestRedisCommands:
assert r.get("a") == b"foo"
assert r.get("b") == b"foo"
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('6.2.0')
def test_copy_and_replace(self, r):
r.set("a", "foo1")
@@ -758,6 +807,7 @@ class TestRedisCommands:
assert r.copy("a", "b") == 0
assert r.copy("a", "b", replace=True) == 1
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('6.2.0')
def test_copy_to_another_database(self, request):
r0 = _get_client(redis.Redis, request, db=0)
@@ -973,6 +1023,7 @@ class TestRedisCommands:
assert r.incrbyfloat('a', 1.1) == 2.1
assert float(r['a']) == float(2.1)
+ @pytest.mark.onlynoncluster
def test_keys(self, r):
assert r.keys() == []
keys_with_underscores = {b'test_a', b'test_b'}
@@ -982,6 +1033,7 @@ class TestRedisCommands:
assert set(r.keys(pattern='test_*')) == keys_with_underscores
assert set(r.keys(pattern='test*')) == keys
+ @pytest.mark.onlynoncluster
def test_mget(self, r):
assert r.mget([]) == []
assert r.mget(['a', 'b']) == [None, None]
@@ -990,24 +1042,28 @@ class TestRedisCommands:
r['c'] = '3'
assert r.mget('a', 'other', 'b', 'c') == [b'1', None, b'2', b'3']
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('6.2.0')
def test_lmove(self, r):
r.rpush('a', 'one', 'two', 'three', 'four')
assert r.lmove('a', 'b')
assert r.lmove('a', 'b', 'right', 'left')
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('6.2.0')
def test_blmove(self, r):
r.rpush('a', 'one', 'two', 'three', 'four')
assert r.blmove('a', 'b', 5)
assert r.blmove('a', 'b', 1, 'RIGHT', 'LEFT')
+ @pytest.mark.onlynoncluster
def test_mset(self, r):
d = {'a': b'1', 'b': b'2', 'c': b'3'}
assert r.mset(d)
for k, v in d.items():
assert r[k] == v
+ @pytest.mark.onlynoncluster
def test_msetnx(self, r):
d = {'a': b'1', 'b': b'2', 'c': b'3'}
assert r.msetnx(d)
@@ -1086,18 +1142,21 @@ class TestRedisCommands:
# with duplications
assert len(r.hrandfield('key', -10)) == 10
+ @pytest.mark.onlynoncluster
def test_randomkey(self, r):
assert r.randomkey() is None
for key in ('a', 'b', 'c'):
r[key] = 1
assert r.randomkey() in (b'a', b'b', b'c')
+ @pytest.mark.onlynoncluster
def test_rename(self, r):
r['a'] = '1'
assert r.rename('a', 'b')
assert r.get('a') is None
assert r['b'] == b'1'
+ @pytest.mark.onlynoncluster
def test_renamenx(self, r):
r['a'] = '1'
r['b'] = '2'
@@ -1203,8 +1262,8 @@ class TestRedisCommands:
@skip_if_server_version_lt('6.0.0')
def test_stralgo_lcs(self, r):
- key1 = 'key1'
- key2 = 'key2'
+ key1 = '{foo}key1'
+ key2 = '{foo}key2'
value1 = 'ohmytext'
value2 = 'mynewtext'
res = 'mytext'
@@ -1294,6 +1353,7 @@ class TestRedisCommands:
assert r.type('a') == b'zset'
# LIST COMMANDS
+ @pytest.mark.onlynoncluster
def test_blpop(self, r):
r.rpush('a', '1', '2')
r.rpush('b', '3', '4')
@@ -1305,6 +1365,7 @@ class TestRedisCommands:
r.rpush('c', '1')
assert r.blpop('c', timeout=1) == (b'c', b'1')
+ @pytest.mark.onlynoncluster
def test_brpop(self, r):
r.rpush('a', '1', '2')
r.rpush('b', '3', '4')
@@ -1316,6 +1377,7 @@ class TestRedisCommands:
r.rpush('c', '1')
assert r.brpop('c', timeout=1) == (b'c', b'1')
+ @pytest.mark.onlynoncluster
def test_brpoplpush(self, r):
r.rpush('a', '1', '2')
r.rpush('b', '3', '4')
@@ -1325,6 +1387,7 @@ class TestRedisCommands:
assert r.lrange('a', 0, -1) == []
assert r.lrange('b', 0, -1) == [b'1', b'2', b'3', b'4']
+ @pytest.mark.onlynoncluster
def test_brpoplpush_empty_string(self, r):
r.rpush('a', '')
assert r.brpoplpush('a', 'b') == b''
@@ -1428,6 +1491,7 @@ class TestRedisCommands:
assert r.rpop('a') is None
assert r.rpop('a', 3) is None
+ @pytest.mark.onlynoncluster
def test_rpoplpush(self, r):
r.rpush('a', 'a1', 'a2', 'a3')
r.rpush('b', 'b1', 'b2', 'b3')
@@ -1481,6 +1545,7 @@ class TestRedisCommands:
assert r.lrange('a', 0, -1) == [b'1', b'2', b'3', b'4']
# SCAN COMMANDS
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('2.8.0')
def test_scan(self, r):
r.set('a', 1)
@@ -1492,6 +1557,7 @@ class TestRedisCommands:
_, keys = r.scan(match='a')
assert set(keys) == {b'a'}
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt("6.0.0")
def test_scan_type(self, r):
r.sadd('a-set', 1)
@@ -1500,6 +1566,7 @@ class TestRedisCommands:
_, keys = r.scan(match='a*', _type='SET')
assert set(keys) == {b'a-set'}
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('2.8.0')
def test_scan_iter(self, r):
r.set('a', 1)
@@ -1571,12 +1638,14 @@ class TestRedisCommands:
r.sadd('a', '1', '2', '3')
assert r.scard('a') == 3
+ @pytest.mark.onlynoncluster
def test_sdiff(self, r):
r.sadd('a', '1', '2', '3')
assert r.sdiff('a', 'b') == {b'1', b'2', b'3'}
r.sadd('b', '2', '3')
assert r.sdiff('a', 'b') == {b'1'}
+ @pytest.mark.onlynoncluster
def test_sdiffstore(self, r):
r.sadd('a', '1', '2', '3')
assert r.sdiffstore('c', 'a', 'b') == 3
@@ -1585,12 +1654,14 @@ class TestRedisCommands:
assert r.sdiffstore('c', 'a', 'b') == 1
assert r.smembers('c') == {b'1'}
+ @pytest.mark.onlynoncluster
def test_sinter(self, r):
r.sadd('a', '1', '2', '3')
assert r.sinter('a', 'b') == set()
r.sadd('b', '2', '3')
assert r.sinter('a', 'b') == {b'2', b'3'}
+ @pytest.mark.onlynoncluster
def test_sinterstore(self, r):
r.sadd('a', '1', '2', '3')
assert r.sinterstore('c', 'a', 'b') == 0
@@ -1617,6 +1688,7 @@ class TestRedisCommands:
assert r.smismember('a', '1', '4', '2', '3') == result_list
assert r.smismember('a', ['1', '4', '2', '3']) == result_list
+ @pytest.mark.onlynoncluster
def test_smove(self, r):
r.sadd('a', 'a1', 'a2')
r.sadd('b', 'b1', 'b2')
@@ -1662,11 +1734,13 @@ class TestRedisCommands:
assert r.srem('a', '2', '4') == 2
assert r.smembers('a') == {b'1', b'3'}
+ @pytest.mark.onlynoncluster
def test_sunion(self, r):
r.sadd('a', '1', '2')
r.sadd('b', '2', '3')
assert r.sunion('a', 'b') == {b'1', b'2', b'3'}
+ @pytest.mark.onlynoncluster
def test_sunionstore(self, r):
r.sadd('a', '1', '2')
r.sadd('b', '2', '3')
@@ -1678,6 +1752,7 @@ class TestRedisCommands:
with pytest.raises(NotImplementedError):
r.debug_segfault()
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('3.2.0')
def test_script_debug(self, r):
with pytest.raises(NotImplementedError):
@@ -1759,6 +1834,7 @@ class TestRedisCommands:
assert r.zcount('a', 1, '(' + str(2)) == 1
assert r.zcount('a', 10, 20) == 0
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('6.2.0')
def test_zdiff(self, r):
r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3})
@@ -1766,6 +1842,7 @@ class TestRedisCommands:
assert r.zdiff(['a', 'b']) == [b'a3']
assert r.zdiff(['a', 'b'], withscores=True) == [b'a3', b'3']
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('6.2.0')
def test_zdiffstore(self, r):
r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3})
@@ -1787,6 +1864,7 @@ class TestRedisCommands:
assert r.zlexcount('a', '-', '+') == 7
assert r.zlexcount('a', '[b', '[f') == 5
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('6.2.0')
def test_zinter(self, r):
r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 1})
@@ -1809,6 +1887,7 @@ class TestRedisCommands:
assert r.zinter({'a': 1, 'b': 2, 'c': 3}, withscores=True) \
== [(b'a3', 20), (b'a1', 23)]
+ @pytest.mark.onlynoncluster
def test_zinterstore_sum(self, r):
r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1})
r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2})
@@ -1817,6 +1896,7 @@ class TestRedisCommands:
assert r.zrange('d', 0, -1, withscores=True) == \
[(b'a3', 8), (b'a1', 9)]
+ @pytest.mark.onlynoncluster
def test_zinterstore_max(self, r):
r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1})
r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2})
@@ -1825,6 +1905,7 @@ class TestRedisCommands:
assert r.zrange('d', 0, -1, withscores=True) == \
[(b'a3', 5), (b'a1', 6)]
+ @pytest.mark.onlynoncluster
def test_zinterstore_min(self, r):
r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3})
r.zadd('b', {'a1': 2, 'a2': 3, 'a3': 5})
@@ -1833,6 +1914,7 @@ class TestRedisCommands:
assert r.zrange('d', 0, -1, withscores=True) == \
[(b'a1', 1), (b'a3', 3)]
+ @pytest.mark.onlynoncluster
def test_zinterstore_with_weight(self, r):
r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1})
r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2})
@@ -1871,6 +1953,7 @@ class TestRedisCommands:
# with duplications
assert len(r.zrandmember('a', -10)) == 10
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('4.9.0')
def test_bzpopmax(self, r):
r.zadd('a', {'a1': 1, 'a2': 2})
@@ -1883,6 +1966,7 @@ class TestRedisCommands:
r.zadd('c', {'c1': 100})
assert r.bzpopmax('c', timeout=1) == (b'c', b'c1', 100)
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('4.9.0')
def test_bzpopmin(self, r):
r.zadd('a', {'a1': 1, 'a2': 2})
@@ -1952,6 +2036,7 @@ class TestRedisCommands:
# rev
assert r.zrange('a', 0, 1, desc=True) == [b'a5', b'a4']
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('6.2.0')
def test_zrangestore(self, r):
r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3})
@@ -2089,6 +2174,7 @@ class TestRedisCommands:
assert r.zscore('a', 'a2') == 2.0
assert r.zscore('a', 'a4') is None
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('6.2.0')
def test_zunion(self, r):
r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1})
@@ -2109,6 +2195,7 @@ class TestRedisCommands:
assert r.zunion({'a': 1, 'b': 2, 'c': 3}, withscores=True)\
== [(b'a2', 5), (b'a4', 12), (b'a3', 20), (b'a1', 23)]
+ @pytest.mark.onlynoncluster
def test_zunionstore_sum(self, r):
r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1})
r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2})
@@ -2117,6 +2204,7 @@ class TestRedisCommands:
assert r.zrange('d', 0, -1, withscores=True) == \
[(b'a2', 3), (b'a4', 4), (b'a3', 8), (b'a1', 9)]
+ @pytest.mark.onlynoncluster
def test_zunionstore_max(self, r):
r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1})
r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2})
@@ -2125,6 +2213,7 @@ class TestRedisCommands:
assert r.zrange('d', 0, -1, withscores=True) == \
[(b'a2', 2), (b'a4', 4), (b'a3', 5), (b'a1', 6)]
+ @pytest.mark.onlynoncluster
def test_zunionstore_min(self, r):
r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3})
r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 4})
@@ -2133,6 +2222,7 @@ class TestRedisCommands:
assert r.zrange('d', 0, -1, withscores=True) == \
[(b'a1', 1), (b'a2', 2), (b'a3', 3), (b'a4', 4)]
+ @pytest.mark.onlynoncluster
def test_zunionstore_with_weight(self, r):
r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1})
r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2})
@@ -2160,6 +2250,7 @@ class TestRedisCommands:
assert r.pfadd('a', *members) == 0
assert r.pfcount('a') == len(members)
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('2.8.9')
def test_pfcount(self, r):
members = {b'1', b'2', b'3'}
@@ -2170,6 +2261,7 @@ class TestRedisCommands:
assert r.pfcount('b') == len(members_b)
assert r.pfcount('a', 'b') == len(members_b.union(members))
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('2.8.9')
def test_pfmerge(self, r):
mema = {b'1', b'2', b'3'}
@@ -2264,8 +2356,9 @@ class TestRedisCommands:
assert r.hmget('a', 'a', 'b', 'c') == [b'1', b'2', b'3']
def test_hmset(self, r):
- warning_message = (r'^Redis\.hmset\(\) is deprecated\. '
- r'Use Redis\.hset\(\) instead\.$')
+ redis_class = type(r).__name__
+ warning_message = (r'^{0}\.hmset\(\) is deprecated\. '
+ r'Use {0}\.hset\(\) instead\.$'.format(redis_class))
h = {b'a': b'1', b'b': b'2', b'c': b'3'}
with pytest.warns(DeprecationWarning, match=warning_message):
assert r.hmset('a', h)
@@ -2300,6 +2393,7 @@ class TestRedisCommands:
r.rpush('a', '3', '2', '1', '4')
assert r.sort('a', start=1, num=2) == [b'2', b'3']
+ @pytest.mark.onlynoncluster
def test_sort_by(self, r):
r['score:1'] = 8
r['score:2'] = 3
@@ -2307,6 +2401,7 @@ class TestRedisCommands:
r.rpush('a', '3', '2', '1')
assert r.sort('a', by='score:*') == [b'2', b'3', b'1']
+ @pytest.mark.onlynoncluster
def test_sort_get(self, r):
r['user:1'] = 'u1'
r['user:2'] = 'u2'
@@ -2314,6 +2409,7 @@ class TestRedisCommands:
r.rpush('a', '2', '3', '1')
assert r.sort('a', get='user:*') == [b'u1', b'u2', b'u3']
+ @pytest.mark.onlynoncluster
def test_sort_get_multi(self, r):
r['user:1'] = 'u1'
r['user:2'] = 'u2'
@@ -2322,6 +2418,7 @@ class TestRedisCommands:
assert r.sort('a', get=('user:*', '#')) == \
[b'u1', b'1', b'u2', b'2', b'u3', b'3']
+ @pytest.mark.onlynoncluster
def test_sort_get_groups_two(self, r):
r['user:1'] = 'u1'
r['user:2'] = 'u2'
@@ -2330,6 +2427,7 @@ class TestRedisCommands:
assert r.sort('a', get=('user:*', '#'), groups=True) == \
[(b'u1', b'1'), (b'u2', b'2'), (b'u3', b'3')]
+ @pytest.mark.onlynoncluster
def test_sort_groups_string_get(self, r):
r['user:1'] = 'u1'
r['user:2'] = 'u2'
@@ -2338,6 +2436,7 @@ class TestRedisCommands:
with pytest.raises(exceptions.DataError):
r.sort('a', get='user:*', groups=True)
+ @pytest.mark.onlynoncluster
def test_sort_groups_just_one_get(self, r):
r['user:1'] = 'u1'
r['user:2'] = 'u2'
@@ -2354,6 +2453,7 @@ class TestRedisCommands:
with pytest.raises(exceptions.DataError):
r.sort('a', groups=True)
+ @pytest.mark.onlynoncluster
def test_sort_groups_three_gets(self, r):
r['user:1'] = 'u1'
r['user:2'] = 'u2'
@@ -2378,11 +2478,13 @@ class TestRedisCommands:
assert r.sort('a', alpha=True) == \
[b'a', b'b', b'c', b'd', b'e']
+ @pytest.mark.onlynoncluster
def test_sort_store(self, r):
r.rpush('a', '2', '3', '1')
assert r.sort('a', store='sorted_values') == 3
assert r.lrange('sorted_values', 0, -1) == [b'1', b'2', b'3']
+ @pytest.mark.onlynoncluster
def test_sort_all_options(self, r):
r['user:1:username'] = 'zeus'
r['user:2:username'] = 'titan'
@@ -2415,66 +2517,84 @@ class TestRedisCommands:
r.execute_command('SADD', 'issue#924', 1)
r.execute_command('SORT', 'issue#924')
+ @pytest.mark.onlynoncluster
def test_cluster_addslots(self, mock_cluster_resp_ok):
assert mock_cluster_resp_ok.cluster('ADDSLOTS', 1) is True
+ @pytest.mark.onlynoncluster
def test_cluster_count_failure_reports(self, mock_cluster_resp_int):
assert isinstance(mock_cluster_resp_int.cluster(
'COUNT-FAILURE-REPORTS', 'node'), int)
+ @pytest.mark.onlynoncluster
def test_cluster_countkeysinslot(self, mock_cluster_resp_int):
assert isinstance(mock_cluster_resp_int.cluster(
'COUNTKEYSINSLOT', 2), int)
+ @pytest.mark.onlynoncluster
def test_cluster_delslots(self, mock_cluster_resp_ok):
assert mock_cluster_resp_ok.cluster('DELSLOTS', 1) is True
+ @pytest.mark.onlynoncluster
def test_cluster_failover(self, mock_cluster_resp_ok):
assert mock_cluster_resp_ok.cluster('FAILOVER', 1) is True
+ @pytest.mark.onlynoncluster
def test_cluster_forget(self, mock_cluster_resp_ok):
assert mock_cluster_resp_ok.cluster('FORGET', 1) is True
+ @pytest.mark.onlynoncluster
def test_cluster_info(self, mock_cluster_resp_info):
assert isinstance(mock_cluster_resp_info.cluster('info'), dict)
+ @pytest.mark.onlynoncluster
def test_cluster_keyslot(self, mock_cluster_resp_int):
assert isinstance(mock_cluster_resp_int.cluster(
'keyslot', 'asdf'), int)
+ @pytest.mark.onlynoncluster
def test_cluster_meet(self, mock_cluster_resp_ok):
assert mock_cluster_resp_ok.cluster('meet', 'ip', 'port', 1) is True
+ @pytest.mark.onlynoncluster
def test_cluster_nodes(self, mock_cluster_resp_nodes):
assert isinstance(mock_cluster_resp_nodes.cluster('nodes'), dict)
+ @pytest.mark.onlynoncluster
def test_cluster_replicate(self, mock_cluster_resp_ok):
assert mock_cluster_resp_ok.cluster('replicate', 'nodeid') is True
+ @pytest.mark.onlynoncluster
def test_cluster_reset(self, mock_cluster_resp_ok):
assert mock_cluster_resp_ok.cluster('reset', 'hard') is True
+ @pytest.mark.onlynoncluster
def test_cluster_saveconfig(self, mock_cluster_resp_ok):
assert mock_cluster_resp_ok.cluster('saveconfig') is True
+ @pytest.mark.onlynoncluster
def test_cluster_setslot(self, mock_cluster_resp_ok):
assert mock_cluster_resp_ok.cluster('setslot', 1,
'IMPORTING', 'nodeid') is True
+ @pytest.mark.onlynoncluster
def test_cluster_slaves(self, mock_cluster_resp_slaves):
assert isinstance(mock_cluster_resp_slaves.cluster(
'slaves', 'nodeid'), dict)
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('3.0.0')
@skip_if_redis_enterprise
def test_readwrite(self, r):
assert r.readwrite()
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('3.0.0')
def test_readonly_invalid_cluster_state(self, r):
with pytest.raises(exceptions.RedisError):
r.readonly()
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('3.0.0')
def test_readonly(self, mock_cluster_resp_ok):
assert mock_cluster_resp_ok.readonly() is True
@@ -2701,6 +2821,7 @@ class TestRedisCommands:
with pytest.raises(exceptions.DataError):
assert r.geosearch('barcelona', member='place3', radius=100, any=1)
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('6.2.0')
def test_geosearchstore(self, r):
values = (2.1909389952632, 41.433791470673, 'place1') + \
@@ -2711,6 +2832,7 @@ class TestRedisCommands:
longitude=2.191, latitude=41.433, radius=1000)
assert r.zrange('places_barcelona', 0, -1) == [b'place1']
+ @pytest.mark.onlynoncluster
@skip_unless_arch_bits(64)
@skip_if_server_version_lt('6.2.0')
def test_geosearchstore_dist(self, r):
@@ -2802,6 +2924,7 @@ class TestRedisCommands:
assert r.georadius('barcelona', 2.191, 41.433, 3000, sort='DESC') == \
[b'place2', b'place1']
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('3.2.0')
def test_georadius_store(self, r):
values = (2.1909389952632, 41.433791470673, 'place1') + \
@@ -2811,6 +2934,7 @@ class TestRedisCommands:
r.georadius('barcelona', 2.191, 41.433, 1000, store='places_barcelona')
assert r.zrange('places_barcelona', 0, -1) == [b'place1']
+ @pytest.mark.onlynoncluster
@skip_unless_arch_bits(64)
@skip_if_server_version_lt('3.2.0')
def test_georadius_store_dist(self, r):
@@ -3658,6 +3782,7 @@ class TestRedisCommands:
r.set('foo', 'bar')
assert isinstance(r.memory_usage('foo'), int)
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('4.0.0')
@skip_if_redis_enterprise
def test_module_list(self, r):
@@ -3671,14 +3796,25 @@ class TestRedisCommands:
assert isinstance(res, int)
assert res >= 100
+    @pytest.mark.onlynoncluster
+    @skip_if_server_version_lt('2.8.13')
+    def test_command_getkeys(self, r):
+        res = r.command_getkeys('MSET', 'a', 'b', 'c', 'd', 'e', 'f')
+        assert res == ['a', 'c', 'e']
+        res = r.command_getkeys('EVAL', '"not consulted"',
+                                '3', 'key1', 'key2', 'key3',
+                                'arg1', 'arg2', 'arg3', 'argN')
+        assert res == ['key1', 'key2', 'key3']
+
@skip_if_server_version_lt('2.8.13')
def test_command(self, r):
res = r.command()
assert len(res) >= 100
- cmds = [c[0].decode() for c in res]
+ cmds = list(res.keys())
assert 'set' in cmds
assert 'get' in cmds
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('4.0.0')
@skip_if_redis_enterprise
def test_module(self, r):
@@ -3734,6 +3870,7 @@ class TestRedisCommands:
assert r.restore(key, 0, dumpdata, frequency=5)
assert r.get(key) == b'blee!'
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('5.0.0')
@skip_if_redis_enterprise
def test_replicaof(self, r):
@@ -3742,6 +3879,7 @@ class TestRedisCommands:
assert r.replicaof("NO", "ONE")
+@pytest.mark.onlynoncluster
class TestBinarySave:
def test_binary_get_set(self, r):
diff --git a/tests/test_connection.py b/tests/test_connection.py
index 7c44768..cd8907d 100644
--- a/tests/test_connection.py
+++ b/tests/test_connection.py
@@ -8,6 +8,7 @@ from .conftest import skip_if_server_version_lt
@pytest.mark.skipif(HIREDIS_AVAILABLE, reason='PythonParser only')
+@pytest.mark.onlynoncluster
def test_invalid_response(r):
raw = b'x'
parser = r.connection._parser
diff --git a/tests/test_connection_pool.py b/tests/test_connection_pool.py
index 521f520..288d43d 100644
--- a/tests/test_connection_pool.py
+++ b/tests/test_connection_pool.py
@@ -484,6 +484,7 @@ class TestConnection:
assert len(pool._available_connections) == 1
assert not pool._available_connections[0]._sock
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('2.8.8')
@skip_if_redis_enterprise
def test_busy_loading_disconnects_socket(self, r):
@@ -495,6 +496,7 @@ class TestConnection:
r.execute_command('DEBUG', 'ERROR', 'LOADING fake message')
assert not r.connection._sock
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('2.8.8')
@skip_if_redis_enterprise
def test_busy_loading_from_pipeline_immediate_command(self, r):
@@ -511,6 +513,7 @@ class TestConnection:
assert len(pool._available_connections) == 1
assert not pool._available_connections[0]._sock
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('2.8.8')
@skip_if_redis_enterprise
def test_busy_loading_from_pipeline(self, r):
@@ -571,6 +574,7 @@ class TestConnection:
r.execute_command('DEBUG', 'ERROR', 'ERR invalid password')
+@pytest.mark.onlynoncluster
class TestMultiConnectionClient:
@pytest.fixture()
def r(self, request):
@@ -584,6 +588,7 @@ class TestMultiConnectionClient:
assert r.get('a') == b'123'
+@pytest.mark.onlynoncluster
class TestHealthCheck:
interval = 60
diff --git a/tests/test_helpers.py b/tests/test_helpers.py
index 467e00c..402eccf 100644
--- a/tests/test_helpers.py
+++ b/tests/test_helpers.py
@@ -5,7 +5,8 @@ from redis.commands.helpers import (
nativestr,
parse_to_list,
quote_string,
- random_string
+ random_string,
+ parse_to_dict
)
@@ -19,11 +20,34 @@ def test_list_or_args():
def test_parse_to_list():
+ assert parse_to_list(None) == []
r = ["hello", b"my name", "45", "555.55", "is simon!", None]
assert parse_to_list(r) == \
["hello", "my name", 45, 555.55, "is simon!", None]
+def test_parse_to_dict():
+    assert parse_to_dict(None) == {}
+    r = [['Some number', '1.0345'],
+         ['Some string', 'hello'],
+         ['Child iterators',
+          ['Time', '0.2089', 'Counter', 3, 'Child iterators',
+           ['Type', 'bar', 'Time', '0.0729', 'Counter', 3],
+           ['Type', 'barbar', 'Time', '0.058', 'Counter', 3]]]]
+    assert parse_to_dict(r) == {
+        'Child iterators': {
+            'Child iterators': [
+                {'Counter': 3.0, 'Time': 0.0729, 'Type': 'bar'},
+                {'Counter': 3.0, 'Time': 0.058, 'Type': 'barbar'}
+            ],
+            'Counter': 3.0,
+            'Time': 0.2089
+        },
+        'Some number': 1.0345,
+        'Some string': 'hello'
+    }
+
+
def test_nativestr():
assert nativestr('teststr') == 'teststr'
assert nativestr(b'teststr') == 'teststr'
diff --git a/tests/test_json.py b/tests/test_json.py
index abc5776..187bfe2 100644
--- a/tests/test_json.py
+++ b/tests/test_json.py
@@ -275,7 +275,6 @@ def test_objlen(client):
assert len(obj) == client.json().objlen("obj")
-@pytest.mark.pipeline
@pytest.mark.redismod
def test_json_commands_in_pipeline(client):
p = client.json().pipeline()
@@ -290,8 +289,9 @@ def test_json_commands_in_pipeline(client):
client.flushdb()
p = client.json().pipeline()
d = {"hello": "world", "oh": "snap"}
- p.jsonset("foo", Path.rootPath(), d)
- p.jsonget("foo")
+ with pytest.deprecated_call():
+ p.jsonset("foo", Path.rootPath(), d)
+ p.jsonget("foo")
p.exists("notarealkey")
p.delete("foo")
assert [True, d, 0, 1] == p.execute()
@@ -463,14 +463,18 @@ def test_numby_commands_dollar(client):
client.json().set("doc1", "$", {"a": "b", "b": [
{"a": 2}, {"a": 5.0}, {"a": "c"}]})
- assert client.json().nummultby("doc1", "$..a", 2) == \
- [None, 4, 10, None]
- assert client.json().nummultby("doc1", "$..a", 2.5) == \
- [None, 10.0, 25.0, None]
+ # test list
+ with pytest.deprecated_call():
+ assert client.json().nummultby("doc1", "$..a", 2) == \
+ [None, 4, 10, None]
+ assert client.json().nummultby("doc1", "$..a", 2.5) == \
+ [None, 10.0, 25.0, None]
+
# Test single
- assert client.json().nummultby("doc1", "$.b[1].a", 2) == [50.0]
- assert client.json().nummultby("doc1", "$.b[2].a", 2) == [None]
- assert client.json().nummultby("doc1", "$.b[1].a", 3) == [150.0]
+ with pytest.deprecated_call():
+ assert client.json().nummultby("doc1", "$.b[1].a", 2) == [50.0]
+ assert client.json().nummultby("doc1", "$.b[2].a", 2) == [None]
+ assert client.json().nummultby("doc1", "$.b[1].a", 3) == [150.0]
# test missing keys
with pytest.raises(exceptions.ResponseError):
@@ -485,7 +489,9 @@ def test_numby_commands_dollar(client):
# Test legacy NUMMULTBY
client.json().set("doc1", "$", {"a": "b", "b": [
{"a": 2}, {"a": 5.0}, {"a": "c"}]})
- client.json().nummultby("doc1", ".b[0].a", 3) == 6
+
+ with pytest.deprecated_call():
+ client.json().nummultby("doc1", ".b[0].a", 3) == 6
@pytest.mark.redismod
@@ -824,9 +830,11 @@ def test_objkeys_dollar(client):
# Test missing key
assert client.json().objkeys("non_existing_doc", "..a") is None
- # Test missing key
+ # Test non existing doc
with pytest.raises(exceptions.ResponseError):
- client.json().objkeys("doc1", "$.nowhere")
+ assert client.json().objkeys("non_existing_doc", "$..a") == []
+
+ assert client.json().objkeys("doc1", "$..nowhere") == []
@pytest.mark.redismod
@@ -845,12 +853,11 @@ def test_objlen_dollar(client):
# Test single
assert client.json().objlen("doc1", "$.nested1.a") == [2]
- # Test missing key
- assert client.json().objlen("non_existing_doc", "$..a") is None
-
- # Test missing path
+ # Test missing key, and path
with pytest.raises(exceptions.ResponseError):
- client.json().objlen("doc1", "$.nowhere")
+ client.json().objlen("non_existing_doc", "$..a")
+
+ assert client.json().objlen("doc1", "$.nowhere") == []
# Test legacy
assert client.json().objlen("doc1", ".*.a") == 2
@@ -862,8 +869,8 @@ def test_objlen_dollar(client):
assert client.json().objlen("non_existing_doc", "..a") is None
# Test missing path
- with pytest.raises(exceptions.ResponseError):
- client.json().objlen("doc1", ".nowhere")
+ # with pytest.raises(exceptions.ResponseError):
+ client.json().objlen("doc1", ".nowhere")
@pytest.mark.redismod
@@ -1143,11 +1150,11 @@ def test_resp_dollar(client):
]
# Test missing path
- with pytest.raises(exceptions.ResponseError):
- client.json().resp("doc1", "$.nowhere")
+ client.json().resp("doc1", "$.nowhere")
# Test missing key
- assert client.json().resp("non_existing_doc", "$..a") is None
+ # with pytest.raises(exceptions.ResponseError):
+ client.json().resp("non_existing_doc", "$..a")
@pytest.mark.redismod
@@ -1391,6 +1398,7 @@ def test_arrindex_dollar(client):
"None") == 0
+@pytest.mark.redismod
def test_decoders_and_unstring():
assert unstring("4") == 4
assert unstring("45.55") == 45.55
diff --git a/tests/test_lock.py b/tests/test_lock.py
index fa76385..66148ed 100644
--- a/tests/test_lock.py
+++ b/tests/test_lock.py
@@ -7,6 +7,7 @@ from redis.lock import Lock
from .conftest import _get_client
+@pytest.mark.onlynoncluster
class TestLock:
@pytest.fixture()
def r_decoded(self, request):
@@ -220,6 +221,7 @@ class TestLock:
lock.reacquire()
+@pytest.mark.onlynoncluster
class TestLockClassSelection:
def test_lock_class_argument(self, r):
class MyLock:
diff --git a/tests/test_monitor.py b/tests/test_monitor.py
index a8a535b..6c3ea33 100644
--- a/tests/test_monitor.py
+++ b/tests/test_monitor.py
@@ -1,3 +1,4 @@
+import pytest
from .conftest import (
skip_if_redis_enterprise,
skip_ifnot_redis_enterprise,
@@ -5,6 +6,7 @@ from .conftest import (
)
+@pytest.mark.onlynoncluster
class TestMonitor:
def test_wait_command_not_found(self, r):
"Make sure the wait_for_command func works when command is not found"
diff --git a/tests/test_pipeline.py b/tests/test_pipeline.py
index 08bd40b..a759bc9 100644
--- a/tests/test_pipeline.py
+++ b/tests/test_pipeline.py
@@ -59,6 +59,7 @@ class TestPipeline:
assert r['b'] == b'b1'
assert r['c'] == b'c1'
+ @pytest.mark.onlynoncluster
def test_pipeline_no_transaction_watch(self, r):
r['a'] = 0
@@ -70,6 +71,7 @@ class TestPipeline:
pipe.set('a', int(a) + 1)
assert pipe.execute() == [True]
+ @pytest.mark.onlynoncluster
def test_pipeline_no_transaction_watch_failure(self, r):
r['a'] = 0
@@ -129,6 +131,7 @@ class TestPipeline:
assert pipe.set('z', 'zzz').execute() == [True]
assert r['z'] == b'zzz'
+ @pytest.mark.onlynoncluster
def test_transaction_with_empty_error_command(self, r):
"""
Commands with custom EMPTY_ERROR functionality return their default
@@ -143,6 +146,7 @@ class TestPipeline:
assert result[1] == []
assert result[2]
+ @pytest.mark.onlynoncluster
def test_pipeline_with_empty_error_command(self, r):
"""
Commands with custom EMPTY_ERROR functionality return their default
@@ -171,6 +175,7 @@ class TestPipeline:
assert pipe.set('z', 'zzz').execute() == [True]
assert r['z'] == b'zzz'
+ @pytest.mark.onlynoncluster
def test_parse_error_raised_transaction(self, r):
with r.pipeline() as pipe:
pipe.multi()
@@ -186,6 +191,7 @@ class TestPipeline:
assert pipe.set('z', 'zzz').execute() == [True]
assert r['z'] == b'zzz'
+ @pytest.mark.onlynoncluster
def test_watch_succeed(self, r):
r['a'] = 1
r['b'] = 2
@@ -203,6 +209,7 @@ class TestPipeline:
assert pipe.execute() == [True]
assert not pipe.watching
+ @pytest.mark.onlynoncluster
def test_watch_failure(self, r):
r['a'] = 1
r['b'] = 2
@@ -217,6 +224,7 @@ class TestPipeline:
assert not pipe.watching
+ @pytest.mark.onlynoncluster
def test_watch_failure_in_empty_transaction(self, r):
r['a'] = 1
r['b'] = 2
@@ -230,6 +238,7 @@ class TestPipeline:
assert not pipe.watching
+ @pytest.mark.onlynoncluster
def test_unwatch(self, r):
r['a'] = 1
r['b'] = 2
@@ -242,6 +251,7 @@ class TestPipeline:
pipe.get('a')
assert pipe.execute() == [b'1']
+ @pytest.mark.onlynoncluster
def test_watch_exec_no_unwatch(self, r):
r['a'] = 1
r['b'] = 2
@@ -262,6 +272,7 @@ class TestPipeline:
unwatch_command = wait_for_command(r, m, 'UNWATCH')
assert unwatch_command is None, "should not send UNWATCH"
+ @pytest.mark.onlynoncluster
def test_watch_reset_unwatch(self, r):
r['a'] = 1
@@ -276,6 +287,7 @@ class TestPipeline:
assert unwatch_command is not None
assert unwatch_command['command'] == 'UNWATCH'
+ @pytest.mark.onlynoncluster
def test_transaction_callable(self, r):
r['a'] = 1
r['b'] = 2
@@ -300,6 +312,7 @@ class TestPipeline:
assert result == [True]
assert r['c'] == b'4'
+ @pytest.mark.onlynoncluster
def test_transaction_callable_returns_value_from_callable(self, r):
def callback(pipe):
# No need to do anything here since we only want the return value
@@ -354,6 +367,7 @@ class TestPipeline:
assert pipe == pipe2
assert response == [True, [0, 0, 15, 15, 14], b'1']
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('2.0.0')
def test_pipeline_discard(self, r):
diff --git a/tests/test_pubsub.py b/tests/test_pubsub.py
index e242459..95513a0 100644
--- a/tests/test_pubsub.py
+++ b/tests/test_pubsub.py
@@ -123,6 +123,7 @@ class TestPubSubSubscribeUnsubscribe:
kwargs = make_subscribe_test_data(r.pubsub(), 'channel')
self._test_resubscribe_on_reconnection(**kwargs)
+ @pytest.mark.onlynoncluster
def test_resubscribe_to_patterns_on_reconnection(self, r):
kwargs = make_subscribe_test_data(r.pubsub(), 'pattern')
self._test_resubscribe_on_reconnection(**kwargs)
@@ -177,6 +178,7 @@ class TestPubSubSubscribeUnsubscribe:
kwargs = make_subscribe_test_data(r.pubsub(), 'channel')
self._test_subscribed_property(**kwargs)
+ @pytest.mark.onlynoncluster
def test_subscribe_property_with_patterns(self, r):
kwargs = make_subscribe_test_data(r.pubsub(), 'pattern')
self._test_subscribed_property(**kwargs)
@@ -220,6 +222,7 @@ class TestPubSubSubscribeUnsubscribe:
kwargs = make_subscribe_test_data(r.pubsub(), 'channel')
self._test_sub_unsub_resub(**kwargs)
+ @pytest.mark.onlynoncluster
def test_sub_unsub_resub_patterns(self, r):
kwargs = make_subscribe_test_data(r.pubsub(), 'pattern')
self._test_sub_unsub_resub(**kwargs)
@@ -307,6 +310,7 @@ class TestPubSubMessages:
assert wait_for_message(p) is None
assert self.message == make_message('message', 'foo', 'test message')
+ @pytest.mark.onlynoncluster
def test_pattern_message_handler(self, r):
p = r.pubsub(ignore_subscribe_messages=True)
p.psubscribe(**{'f*': self.message_handler})
@@ -326,6 +330,9 @@ class TestPubSubMessages:
assert wait_for_message(p) is None
assert self.message == make_message('message', channel, 'test message')
+ @pytest.mark.onlynoncluster
+ # see: https://redis-py-cluster.readthedocs.io/en/stable/pubsub.html
+ # #known-limitations-with-pubsub
def test_unicode_pattern_message_handler(self, r):
p = r.pubsub(ignore_subscribe_messages=True)
pattern = 'uni' + chr(4456) + '*'
@@ -401,6 +408,7 @@ class TestPubSubAutoDecoding:
self.channel,
self.data)
+ @pytest.mark.onlynoncluster
def test_pattern_publish(self, r):
p = r.pubsub()
p.psubscribe(self.pattern)
@@ -473,6 +481,7 @@ class TestPubSubRedisDown:
class TestPubSubSubcommands:
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('2.8.0')
def test_pubsub_channels(self, r):
p = r.pubsub()
@@ -482,6 +491,7 @@ class TestPubSubSubcommands:
expected = [b'bar', b'baz', b'foo', b'quux']
assert all([channel in r.pubsub_channels() for channel in expected])
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('2.8.0')
def test_pubsub_numsub(self, r):
p1 = r.pubsub()
@@ -497,7 +507,7 @@ class TestPubSubSubcommands:
assert wait_for_message(p3)['type'] == 'subscribe'
channels = [(b'foo', 1), (b'bar', 2), (b'baz', 3)]
- assert channels == r.pubsub_numsub('foo', 'bar', 'baz')
+ assert r.pubsub_numsub('foo', 'bar', 'baz') == channels
@skip_if_server_version_lt('2.8.0')
def test_pubsub_numpat(self, r):
@@ -529,6 +539,7 @@ class TestPubSubPings:
pattern=None)
+@pytest.mark.onlynoncluster
class TestPubSubConnectionKilled:
@skip_if_server_version_lt('3.0.0')
diff --git a/tests/test_scripting.py b/tests/test_scripting.py
index 352f3ba..7614b12 100644
--- a/tests/test_scripting.py
+++ b/tests/test_scripting.py
@@ -22,6 +22,7 @@ return "hello " .. name
"""
+@pytest.mark.onlynoncluster
class TestScripting:
@pytest.fixture(autouse=True)
def reset_scripts(self, r):
diff --git a/tests/test_search.py b/tests/test_search.py
index d1fc75f..b65ac8d 100644
--- a/tests/test_search.py
+++ b/tests/test_search.py
@@ -82,8 +82,8 @@ def createIndex(client, num_docs=100, definition=None):
try:
client.create_index(
(TextField("play", weight=5.0),
- TextField("txt"),
- NumericField("chapter")),
+ TextField("txt"),
+ NumericField("chapter")),
definition=definition,
)
except redis.ResponseError:
@@ -320,8 +320,8 @@ def test_stopwords(client):
def test_filters(client):
client.ft().create_index(
(TextField("txt"),
- NumericField("num"),
- GeoField("loc"))
+ NumericField("num"),
+ GeoField("loc"))
)
client.ft().add_document(
"doc1",
@@ -379,7 +379,7 @@ def test_payloads_with_no_content(client):
def test_sort_by(client):
client.ft().create_index(
(TextField("txt"),
- NumericField("num", sortable=True))
+ NumericField("num", sortable=True))
)
client.ft().add_document("doc1", txt="foo bar", num=1)
client.ft().add_document("doc2", txt="foo baz", num=2)
@@ -424,7 +424,7 @@ def test_example(client):
# Creating the index definition and schema
client.ft().create_index(
(TextField("title", weight=5.0),
- TextField("body"))
+ TextField("body"))
)
# Indexing a document
@@ -552,8 +552,8 @@ def test_no_index(client):
def test_partial(client):
client.ft().create_index(
(TextField("f1"),
- TextField("f2"),
- TextField("f3"))
+ TextField("f2"),
+ TextField("f3"))
)
client.ft().add_document("doc1", f1="f1_val", f2="f2_val")
client.ft().add_document("doc2", f1="f1_val", f2="f2_val")
@@ -574,8 +574,8 @@ def test_partial(client):
def test_no_create(client):
client.ft().create_index(
(TextField("f1"),
- TextField("f2"),
- TextField("f3"))
+ TextField("f2"),
+ TextField("f3"))
)
client.ft().add_document("doc1", f1="f1_val", f2="f2_val")
client.ft().add_document("doc2", f1="f1_val", f2="f2_val")
@@ -604,8 +604,8 @@ def test_no_create(client):
def test_explain(client):
client.ft().create_index(
(TextField("f1"),
- TextField("f2"),
- TextField("f3"))
+ TextField("f2"),
+ TextField("f3"))
)
res = client.ft().explain("@f3:f3_val @f2:f2_val @f1:f1_val")
assert res
@@ -629,8 +629,8 @@ def test_summarize(client):
doc = sorted(client.ft().search(q).docs)[0]
assert "<b>Henry</b> IV" == doc.play
assert (
- "ACT I SCENE I. London. The palace. Enter <b>KING</b> <b>HENRY</b>, LORD JOHN OF LANCASTER, the EARL of WESTMORELAND, SIR... " # noqa
- == doc.txt
+ "ACT I SCENE I. London. The palace. Enter <b>KING</b> <b>HENRY</b>, LORD JOHN OF LANCASTER, the EARL of WESTMORELAND, SIR... " # noqa
+ == doc.txt
)
q = Query("king henry").paging(0, 1).summarize().highlight()
@@ -638,8 +638,8 @@ def test_summarize(client):
doc = sorted(client.ft().search(q).docs)[0]
assert "<b>Henry</b> ... " == doc.play
assert (
- "ACT I SCENE I. London. The palace. Enter <b>KING</b> <b>HENRY</b>, LORD JOHN OF LANCASTER, the EARL of WESTMORELAND, SIR... " # noqa
- == doc.txt
+ "ACT I SCENE I. London. The palace. Enter <b>KING</b> <b>HENRY</b>, LORD JOHN OF LANCASTER, the EARL of WESTMORELAND, SIR... " # noqa
+ == doc.txt
)
@@ -812,10 +812,10 @@ def test_spell_check(client):
res = client.ft().spellcheck("lorm", include="dict")
assert len(res["lorm"]) == 3
assert (
- res["lorm"][0]["suggestion"],
- res["lorm"][1]["suggestion"],
- res["lorm"][2]["suggestion"],
- ) == ("lorem", "lore", "lorm")
+ res["lorm"][0]["suggestion"],
+ res["lorm"][1]["suggestion"],
+ res["lorm"][2]["suggestion"],
+ ) == ("lorem", "lore", "lorm")
assert (res["lorm"][0]["score"], res["lorm"][1]["score"]) == ("0.5", "0")
# test spellcheck exclude
@@ -873,7 +873,7 @@ def test_scorer(client):
)
client.ft().add_document(
"doc2",
- description="Quick alice was beginning to get very tired of sitting by her quick sister on the bank, and of having nothing to do.", # noqa
+ description="Quick alice was beginning to get very tired of sitting by her quick sister on the bank, and of having nothing to do.", # noqa
)
# default scorer is TFIDF
@@ -930,7 +930,7 @@ def test_config(client):
@pytest.mark.redismod
-def test_aggregations(client):
+def test_aggregations_groupby(client):
# Creating the index definition and schema
client.ft().create_index(
(
@@ -967,36 +967,242 @@ def test_aggregations(client):
req = aggregations.AggregateRequest("redis").group_by(
"@parent",
reducers.count(),
+ )
+
+ res = client.ft().aggregate(req).rows[0]
+ assert res[1] == "redis"
+ assert res[3] == "3"
+
+ req = aggregations.AggregateRequest("redis").group_by(
+ "@parent",
reducers.count_distinct("@title"),
+ )
+
+ res = client.ft().aggregate(req).rows[0]
+ assert res[1] == "redis"
+ assert res[3] == "3"
+
+ req = aggregations.AggregateRequest("redis").group_by(
+ "@parent",
reducers.count_distinctish("@title"),
+ )
+
+ res = client.ft().aggregate(req).rows[0]
+ assert res[1] == "redis"
+ assert res[3] == "3"
+
+ req = aggregations.AggregateRequest("redis").group_by(
+ "@parent",
reducers.sum("@random_num"),
+ )
+
+ res = client.ft().aggregate(req).rows[0]
+ assert res[1] == "redis"
+ assert res[3] == "21" # 10+8+3
+
+ req = aggregations.AggregateRequest("redis").group_by(
+ "@parent",
reducers.min("@random_num"),
+ )
+
+ res = client.ft().aggregate(req).rows[0]
+ assert res[1] == "redis"
+ assert res[3] == "3" # min(10,8,3)
+
+ req = aggregations.AggregateRequest("redis").group_by(
+ "@parent",
reducers.max("@random_num"),
+ )
+
+ res = client.ft().aggregate(req).rows[0]
+ assert res[1] == "redis"
+ assert res[3] == "10" # max(10,8,3)
+
+ req = aggregations.AggregateRequest("redis").group_by(
+ "@parent",
reducers.avg("@random_num"),
+ )
+
+ res = client.ft().aggregate(req).rows[0]
+ assert res[1] == "redis"
+ assert res[3] == "7" # (10+3+8)/3
+
+ req = aggregations.AggregateRequest("redis").group_by(
+ "@parent",
reducers.stddev("random_num"),
+ )
+
+ res = client.ft().aggregate(req).rows[0]
+ assert res[1] == "redis"
+ assert res[3] == "3.60555127546"
+
+ req = aggregations.AggregateRequest("redis").group_by(
+ "@parent",
reducers.quantile("@random_num", 0.5),
+ )
+
+ res = client.ft().aggregate(req).rows[0]
+ assert res[1] == "redis"
+ assert res[3] == "10"
+
+ req = aggregations.AggregateRequest("redis").group_by(
+ "@parent",
reducers.tolist("@title"),
- reducers.first_value("@title"),
- reducers.random_sample("@title", 2),
)
+ res = client.ft().aggregate(req).rows[0]
+ assert res[1] == "redis"
+ assert res[3] == ["RediSearch", "RedisAI", "RedisJson"]
+
+ req = aggregations.AggregateRequest("redis").group_by(
+ "@parent",
+ reducers.first_value("@title").alias("first"),
+ )
+
+ res = client.ft().aggregate(req).rows[0]
+ assert res == ['parent', 'redis', 'first', 'RediSearch']
+
+ req = aggregations.AggregateRequest("redis").group_by(
+ "@parent",
+ reducers.random_sample("@title", 2).alias("random"),
+ )
+
+ res = client.ft().aggregate(req).rows[0]
+ assert res[1] == "redis"
+ assert res[2] == "random"
+ assert len(res[3]) == 2
+ assert res[3][0] in ["RediSearch", "RedisAI", "RedisJson"]
+
+
+@pytest.mark.redismod
+def test_aggregations_sort_by_and_limit(client):
+    client.ft().create_index(
+        (
+            TextField("t1"),
+            TextField("t2"),
+        )
+    )
+
+    client.ft().client.hset("doc1", mapping={'t1': 'a', 't2': 'b'})
+    client.ft().client.hset("doc2", mapping={'t1': 'b', 't2': 'a'})
+
+    # test sort_by using SortDirection
+    req = aggregations.AggregateRequest("*") \
+        .sort_by(aggregations.Asc("@t2"), aggregations.Desc("@t1"))
+    res = client.ft().aggregate(req)
+    assert res.rows[0] == ['t2', 'a', 't1', 'b']
+    assert res.rows[1] == ['t2', 'b', 't1', 'a']
+
+    # test sort_by without SortDirection
+    req = aggregations.AggregateRequest("*") \
+        .sort_by("@t1")
+    res = client.ft().aggregate(req)
+    assert res.rows[0] == ['t1', 'a']
+    assert res.rows[1] == ['t1', 'b']
+
+    # test sort_by with max
+    req = aggregations.AggregateRequest("*") \
+        .sort_by("@t1", max=1)
+    res = client.ft().aggregate(req)
+    assert len(res.rows) == 1
+
+    # test limit
+    req = aggregations.AggregateRequest("*") \
+        .sort_by("@t1").limit(1, 1)
+    res = client.ft().aggregate(req)
+    assert len(res.rows) == 1
+    assert res.rows[0] == ['t1', 'b']
+
+
+@pytest.mark.redismod
+def test_aggregations_load(client):
+    client.ft().create_index(
+        (
+            TextField("t1"),
+            TextField("t2"),
+        )
+    )
+
+    client.ft().client.hset("doc1", mapping={'t1': 'hello', 't2': 'world'})
+
+    # load t1
+    req = aggregations.AggregateRequest("*").load("t1")
+    res = client.ft().aggregate(req)
+    assert res.rows[0] == ['t1', 'hello']
+
+    # load t2
+    req = aggregations.AggregateRequest("*").load("t2")
+    res = client.ft().aggregate(req)
+    assert res.rows[0] == ['t2', 'world']
+
+
+@pytest.mark.redismod
+def test_aggregations_apply(client):
+ client.ft().create_index(
+ (
+ TextField("PrimaryKey", sortable=True),
+ NumericField("CreatedDateTimeUTC", sortable=True),
+ )
+ )
+
+ client.ft().client.hset(
+ "doc1",
+ mapping={
+ 'PrimaryKey': '9::362330',
+ 'CreatedDateTimeUTC': '637387878524969984'
+ }
+ )
+ client.ft().client.hset(
+ "doc2",
+ mapping={
+ 'PrimaryKey': '9::362329',
+ 'CreatedDateTimeUTC': '637387875859270016'
+ }
+ )
+
+ req = aggregations.AggregateRequest("*") \
+ .apply(CreatedDateTimeUTC='@CreatedDateTimeUTC * 10')
res = client.ft().aggregate(req)
+ assert res.rows[0] == ['CreatedDateTimeUTC', '6373878785249699840']
+ assert res.rows[1] == ['CreatedDateTimeUTC', '6373878758592700416']
- res = res.rows[0]
- assert len(res) == 26
- assert "redis" == res[1]
- assert "3" == res[3]
- assert "3" == res[5]
- assert "3" == res[7]
- assert "21" == res[9]
- assert "3" == res[11]
- assert "10" == res[13]
- assert "7" == res[15]
- assert "3.60555127546" == res[17]
- assert "10" == res[19]
- assert ["RediSearch", "RedisAI", "RedisJson"] == res[21]
- assert "RediSearch" == res[23]
- assert 2 == len(res[25])
+
+@pytest.mark.redismod
+def test_aggregations_filter(client):
+    client.ft().create_index(
+        (
+            TextField("name", sortable=True),
+            NumericField("age", sortable=True),
+        )
+    )
+
+    client.ft().client.hset(
+        "doc1",
+        mapping={
+            'name': 'bar',
+            'age': '25'
+        }
+    )
+    client.ft().client.hset(
+        "doc2",
+        mapping={
+            'name': 'foo',
+            'age': '19'
+        }
+    )
+
+    req = aggregations.AggregateRequest("*") \
+        .filter("@name=='foo' && @age < 20")
+    res = client.ft().aggregate(req)
+    assert len(res.rows) == 1
+    assert res.rows[0] == ['name', 'foo', 'age', '19']
+
+    req = aggregations.AggregateRequest("*") \
+        .filter("@age > 15").sort_by("@age")
+    res = client.ft().aggregate(req)
+    assert len(res.rows) == 2
+    assert res.rows[0] == ['age', '19']
+    assert res.rows[1] == ['age', '25']
@pytest.mark.redismod
@@ -1020,25 +1226,25 @@ def test_index_definition(client):
)
assert [
- "ON",
- "JSON",
- "PREFIX",
- 2,
- "hset:",
- "henry",
- "FILTER",
- "@f1==32",
- "LANGUAGE_FIELD",
- "play",
- "LANGUAGE",
- "English",
- "SCORE_FIELD",
- "chapter",
- "SCORE",
- 0.5,
- "PAYLOAD_FIELD",
- "txt",
- ] == definition.args
+ "ON",
+ "JSON",
+ "PREFIX",
+ 2,
+ "hset:",
+ "henry",
+ "FILTER",
+ "@f1==32",
+ "LANGUAGE_FIELD",
+ "play",
+ "LANGUAGE",
+ "English",
+ "SCORE_FIELD",
+ "chapter",
+ "SCORE",
+ 0.5,
+ "PAYLOAD_FIELD",
+ "txt",
+ ] == definition.args
createIndex(client.ft(), num_docs=500, definition=definition)
@@ -1313,3 +1519,46 @@ def test_json_with_jsonpath(client):
assert res.docs[0].id == "doc:1"
with pytest.raises(Exception):
res.docs[0].name_unsupported
+
+
+@pytest.mark.redismod
+def test_profile(client):
+    client.ft().create_index((TextField('t'),))
+    client.ft().client.hset('1', 't', 'hello')
+    client.ft().client.hset('2', 't', 'world')
+
+    # check using Query
+    q = Query('hello|world').no_content()
+    res, det = client.ft().profile(q)
+    assert det['Iterators profile']['Counter'] == 2.0
+    assert len(det['Iterators profile']['Child iterators']) == 2
+    assert det['Iterators profile']['Type'] == 'UNION'
+    assert det['Parsing time'] < 0.3
+    assert len(res.docs) == 2 # check also the search result
+
+    # check using AggregateRequest
+    req = aggregations.AggregateRequest("*").load("t")\
+        .apply(prefix="startswith(@t, 'hel')")
+    res, det = client.ft().profile(req)
+    assert det['Iterators profile']['Counter'] == 2.0
+    assert det['Iterators profile']['Type'] == 'WILDCARD'
+    assert det['Parsing time'] < 0.3
+    assert len(res.rows) == 2 # check also the search result
+
+
+@pytest.mark.redismod
+def test_profile_limited(client):
+    client.ft().create_index((TextField('t'),))
+    client.ft().client.hset('1', 't', 'hello')
+    client.ft().client.hset('2', 't', 'hell')
+    client.ft().client.hset('3', 't', 'help')
+    client.ft().client.hset('4', 't', 'helowa')
+
+    q = Query('%hell% hel*')
+    res, det = client.ft().profile(q, limited=True)
+    assert det['Iterators profile']['Child iterators'][0]['Child iterators'] \
+        == 'The number of iterators in the union is 3'
+    assert det['Iterators profile']['Child iterators'][1]['Child iterators'] \
+        == 'The number of iterators in the union is 4'
+    assert det['Iterators profile']['Type'] == 'INTERSECT'
+    assert len(res.docs) == 3 # check also the search result
diff --git a/tests/test_sentinel.py b/tests/test_sentinel.py
index 7f3ff0a..9377d5b 100644
--- a/tests/test_sentinel.py
+++ b/tests/test_sentinel.py
@@ -81,16 +81,19 @@ def sentinel(request, cluster):
return Sentinel([('foo', 26379), ('bar', 26379)])
+@pytest.mark.onlynoncluster
def test_discover_master(sentinel, master_ip):
address = sentinel.discover_master('mymaster')
assert address == (master_ip, 6379)
+@pytest.mark.onlynoncluster
def test_discover_master_error(sentinel):
with pytest.raises(MasterNotFoundError):
sentinel.discover_master('xxx')
+@pytest.mark.onlynoncluster
def test_discover_master_sentinel_down(cluster, sentinel, master_ip):
# Put first sentinel 'foo' down
cluster.nodes_down.add(('foo', 26379))
@@ -100,6 +103,7 @@ def test_discover_master_sentinel_down(cluster, sentinel, master_ip):
assert sentinel.sentinels[0].id == ('bar', 26379)
+@pytest.mark.onlynoncluster
def test_discover_master_sentinel_timeout(cluster, sentinel, master_ip):
# Put first sentinel 'foo' down
cluster.nodes_timeout.add(('foo', 26379))
@@ -109,6 +113,7 @@ def test_discover_master_sentinel_timeout(cluster, sentinel, master_ip):
assert sentinel.sentinels[0].id == ('bar', 26379)
+@pytest.mark.onlynoncluster
def test_master_min_other_sentinels(cluster, master_ip):
sentinel = Sentinel([('foo', 26379)], min_other_sentinels=1)
# min_other_sentinels
@@ -119,18 +124,21 @@ def test_master_min_other_sentinels(cluster, master_ip):
assert address == (master_ip, 6379)
+@pytest.mark.onlynoncluster
def test_master_odown(cluster, sentinel):
cluster.master['is_odown'] = True
with pytest.raises(MasterNotFoundError):
sentinel.discover_master('mymaster')
+@pytest.mark.onlynoncluster
def test_master_sdown(cluster, sentinel):
cluster.master['is_sdown'] = True
with pytest.raises(MasterNotFoundError):
sentinel.discover_master('mymaster')
+@pytest.mark.onlynoncluster
def test_discover_slaves(cluster, sentinel):
assert sentinel.discover_slaves('mymaster') == []
@@ -165,6 +173,7 @@ def test_discover_slaves(cluster, sentinel):
('slave0', 1234), ('slave1', 1234)]
+@pytest.mark.onlynoncluster
def test_master_for(cluster, sentinel, master_ip):
master = sentinel.master_for('mymaster', db=9)
assert master.ping()
@@ -175,6 +184,7 @@ def test_master_for(cluster, sentinel, master_ip):
assert master.ping()
+@pytest.mark.onlynoncluster
def test_slave_for(cluster, sentinel):
cluster.slaves = [
{'ip': '127.0.0.1', 'port': 6379,
@@ -184,6 +194,7 @@ def test_slave_for(cluster, sentinel):
assert slave.ping()
+@pytest.mark.onlynoncluster
def test_slave_for_slave_not_found_error(cluster, sentinel):
cluster.master['is_odown'] = True
slave = sentinel.slave_for('mymaster', db=9)
@@ -191,6 +202,7 @@ def test_slave_for_slave_not_found_error(cluster, sentinel):
slave.ping()
+@pytest.mark.onlynoncluster
def test_slave_round_robin(cluster, sentinel, master_ip):
cluster.slaves = [
{'ip': 'slave0', 'port': 6379, 'is_odown': False, 'is_sdown': False},
@@ -206,14 +218,17 @@ def test_slave_round_robin(cluster, sentinel, master_ip):
next(rotator)
+@pytest.mark.onlynoncluster
def test_ckquorum(cluster, sentinel):
assert sentinel.sentinel_ckquorum("mymaster")
+@pytest.mark.onlynoncluster
def test_flushconfig(cluster, sentinel):
assert sentinel.sentinel_flushconfig()
+@pytest.mark.onlynoncluster
def test_reset(cluster, sentinel):
cluster.master['is_odown'] = True
assert sentinel.sentinel_reset('mymaster')
diff --git a/tests/test_timeseries.py b/tests/test_timeseries.py
index 99c6083..c0fb09e 100644
--- a/tests/test_timeseries.py
+++ b/tests/test_timeseries.py
@@ -565,7 +565,6 @@ def test_query_index(client):
@pytest.mark.redismod
-@pytest.mark.pipeline
def test_pipeline(client):
pipeline = client.ts().pipeline()
pipeline.create("with_pipeline")