summaryrefslogtreecommitdiff
path: root/tests
diff options
context:
space:
mode:
Diffstat (limited to 'tests')
-rw-r--r--tests/conftest.py168
-rw-r--r--tests/test_cluster.py1749
-rw-r--r--tests/test_command_parser.py100
-rw-r--r--tests/test_commands.py4464
-rw-r--r--tests/test_connection.py23
-rw-r--r--tests/test_connection_pool.py569
-rw-r--r--tests/test_encoding.py81
-rw-r--r--tests/test_helpers.py59
-rw-r--r--tests/test_json.py106
-rw-r--r--tests/test_lock.py117
-rw-r--r--tests/test_monitor.py47
-rw-r--r--tests/test_multiprocessing.py78
-rw-r--r--tests/test_pipeline.py289
-rw-r--r--tests/test_pubsub.py340
-rw-r--r--tests/test_retry.py7
-rw-r--r--tests/test_scripting.py58
-rw-r--r--tests/test_search.py410
-rw-r--r--tests/test_sentinel.py120
-rw-r--r--tests/test_timeseries.py115
19 files changed, 4558 insertions, 4342 deletions
diff --git a/tests/conftest.py b/tests/conftest.py
index 8ed39ab..24783c0 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -1,15 +1,16 @@
-from redis.backoff import NoBackoff
-from redis.retry import Retry
-import pytest
import random
-import redis
import time
from distutils.version import LooseVersion
-from redis.connection import parse_url
-from redis.exceptions import RedisClusterException
from unittest.mock import Mock
from urllib.parse import urlparse
+import pytest
+
+import redis
+from redis.backoff import NoBackoff
+from redis.connection import parse_url
+from redis.exceptions import RedisClusterException
+from redis.retry import Retry
REDIS_INFO = {}
default_redis_url = "redis://localhost:6379/9"
@@ -19,29 +20,37 @@ default_cluster_nodes = 6
def pytest_addoption(parser):
- parser.addoption('--redis-url', default=default_redis_url,
- action="store",
- help="Redis connection string,"
- " defaults to `%(default)s`")
-
- parser.addoption('--redismod-url', default=default_redismod_url,
- action="store",
- help="Connection string to redis server"
- " with loaded modules,"
- " defaults to `%(default)s`")
-
- parser.addoption('--redis-cluster-nodes', default=default_cluster_nodes,
- action="store",
- help="The number of cluster nodes that need to be "
- "available before the test can start,"
- " defaults to `%(default)s`")
+ parser.addoption(
+ "--redis-url",
+ default=default_redis_url,
+ action="store",
+ help="Redis connection string," " defaults to `%(default)s`",
+ )
+
+ parser.addoption(
+ "--redismod-url",
+ default=default_redismod_url,
+ action="store",
+ help="Connection string to redis server"
+ " with loaded modules,"
+ " defaults to `%(default)s`",
+ )
+
+ parser.addoption(
+ "--redis-cluster-nodes",
+ default=default_cluster_nodes,
+ action="store",
+ help="The number of cluster nodes that need to be "
+ "available before the test can start,"
+ " defaults to `%(default)s`",
+ )
def _get_info(redis_url):
client = redis.Redis.from_url(redis_url)
info = client.info()
cmds = [command.upper() for command in client.command().keys()]
- if 'dping' in cmds:
+ if "dping" in cmds:
info["enterprise"] = True
else:
info["enterprise"] = False
@@ -102,42 +111,39 @@ def wait_for_cluster_creation(redis_url, cluster_nodes, timeout=20):
available_nodes = 0 if client is None else len(client.get_nodes())
raise RedisClusterException(
f"The cluster did not become available after {timeout} seconds. "
- f"Only {available_nodes} nodes out of {cluster_nodes} are available")
+ f"Only {available_nodes} nodes out of {cluster_nodes} are available"
+ )
def skip_if_server_version_lt(min_version):
redis_version = REDIS_INFO["version"]
check = LooseVersion(redis_version) < LooseVersion(min_version)
- return pytest.mark.skipif(
- check,
- reason=f"Redis version required >= {min_version}")
+ return pytest.mark.skipif(check, reason=f"Redis version required >= {min_version}")
def skip_if_server_version_gte(min_version):
redis_version = REDIS_INFO["version"]
check = LooseVersion(redis_version) >= LooseVersion(min_version)
- return pytest.mark.skipif(
- check,
- reason=f"Redis version required < {min_version}")
+ return pytest.mark.skipif(check, reason=f"Redis version required < {min_version}")
def skip_unless_arch_bits(arch_bits):
- return pytest.mark.skipif(REDIS_INFO["arch_bits"] != arch_bits,
- reason=f"server is not {arch_bits}-bit")
+ return pytest.mark.skipif(
+ REDIS_INFO["arch_bits"] != arch_bits, reason=f"server is not {arch_bits}-bit"
+ )
def skip_ifmodversion_lt(min_version: str, module_name: str):
try:
modules = REDIS_INFO["modules"]
except KeyError:
- return pytest.mark.skipif(True,
- reason="Redis server does not have modules")
+ return pytest.mark.skipif(True, reason="Redis server does not have modules")
if modules == []:
return pytest.mark.skipif(True, reason="No redis modules found")
for j in modules:
- if module_name == j.get('name'):
- version = j.get('ver')
+ if module_name == j.get("name"):
+ version = j.get("ver")
mv = int(min_version.replace(".", ""))
check = version < mv
return pytest.mark.skipif(check, reason="Redis module version")
@@ -155,9 +161,9 @@ def skip_ifnot_redis_enterprise(func):
return pytest.mark.skipif(check, reason="Not running in redis enterprise")
-def _get_client(cls, request, single_connection_client=True, flushdb=True,
- from_url=None,
- **kwargs):
+def _get_client(
+ cls, request, single_connection_client=True, flushdb=True, from_url=None, **kwargs
+):
"""
Helper for fixtures or tests that need a Redis client
@@ -181,6 +187,7 @@ def _get_client(cls, request, single_connection_client=True, flushdb=True,
if single_connection_client:
client = client.client()
if request:
+
def teardown():
if not cluster_mode:
if flushdb:
@@ -194,6 +201,7 @@ def _get_client(cls, request, single_connection_client=True, flushdb=True,
client.connection_pool.disconnect()
else:
cluster_teardown(client, flushdb)
+
request.addfinalizer(teardown)
return client
@@ -201,11 +209,11 @@ def _get_client(cls, request, single_connection_client=True, flushdb=True,
def cluster_teardown(client, flushdb):
if flushdb:
try:
- client.flushdb(target_nodes='primaries')
+ client.flushdb(target_nodes="primaries")
except redis.ConnectionError:
# handle cases where a test disconnected a client
# just manually retry the flushdb
- client.flushdb(target_nodes='primaries')
+ client.flushdb(target_nodes="primaries")
client.close()
client.disconnect_connection_pools()
@@ -214,9 +222,10 @@ def cluster_teardown(client, flushdb):
# an index on db != 0 raises a ResponseError in redis
@pytest.fixture()
def modclient(request, **kwargs):
- rmurl = request.config.getoption('--redismod-url')
- with _get_client(redis.Redis, request, from_url=rmurl,
- decode_responses=True, **kwargs) as client:
+ rmurl = request.config.getoption("--redismod-url")
+ with _get_client(
+ redis.Redis, request, from_url=rmurl, decode_responses=True, **kwargs
+ ) as client:
yield client
@@ -250,56 +259,61 @@ def _gen_cluster_mock_resp(r, response):
@pytest.fixture()
def mock_cluster_resp_ok(request, **kwargs):
r = _get_client(redis.Redis, request, **kwargs)
- return _gen_cluster_mock_resp(r, 'OK')
+ return _gen_cluster_mock_resp(r, "OK")
@pytest.fixture()
def mock_cluster_resp_int(request, **kwargs):
r = _get_client(redis.Redis, request, **kwargs)
- return _gen_cluster_mock_resp(r, '2')
+ return _gen_cluster_mock_resp(r, "2")
@pytest.fixture()
def mock_cluster_resp_info(request, **kwargs):
r = _get_client(redis.Redis, request, **kwargs)
- response = ('cluster_state:ok\r\ncluster_slots_assigned:16384\r\n'
- 'cluster_slots_ok:16384\r\ncluster_slots_pfail:0\r\n'
- 'cluster_slots_fail:0\r\ncluster_known_nodes:7\r\n'
- 'cluster_size:3\r\ncluster_current_epoch:7\r\n'
- 'cluster_my_epoch:2\r\ncluster_stats_messages_sent:170262\r\n'
- 'cluster_stats_messages_received:105653\r\n')
+ response = (
+ "cluster_state:ok\r\ncluster_slots_assigned:16384\r\n"
+ "cluster_slots_ok:16384\r\ncluster_slots_pfail:0\r\n"
+ "cluster_slots_fail:0\r\ncluster_known_nodes:7\r\n"
+ "cluster_size:3\r\ncluster_current_epoch:7\r\n"
+ "cluster_my_epoch:2\r\ncluster_stats_messages_sent:170262\r\n"
+ "cluster_stats_messages_received:105653\r\n"
+ )
return _gen_cluster_mock_resp(r, response)
@pytest.fixture()
def mock_cluster_resp_nodes(request, **kwargs):
r = _get_client(redis.Redis, request, **kwargs)
- response = ('c8253bae761cb1ecb2b61857d85dfe455a0fec8b 172.17.0.7:7006 '
- 'slave aa90da731f673a99617dfe930306549a09f83a6b 0 '
- '1447836263059 5 connected\n'
- '9bd595fe4821a0e8d6b99d70faa660638a7612b3 172.17.0.7:7008 '
- 'master - 0 1447836264065 0 connected\n'
- 'aa90da731f673a99617dfe930306549a09f83a6b 172.17.0.7:7003 '
- 'myself,master - 0 0 2 connected 5461-10922\n'
- '1df047e5a594f945d82fc140be97a1452bcbf93e 172.17.0.7:7007 '
- 'slave 19efe5a631f3296fdf21a5441680f893e8cc96ec 0 '
- '1447836262556 3 connected\n'
- '4ad9a12e63e8f0207025eeba2354bcf4c85e5b22 172.17.0.7:7005 '
- 'master - 0 1447836262555 7 connected 0-5460\n'
- '19efe5a631f3296fdf21a5441680f893e8cc96ec 172.17.0.7:7004 '
- 'master - 0 1447836263562 3 connected 10923-16383\n'
- 'fbb23ed8cfa23f17eaf27ff7d0c410492a1093d6 172.17.0.7:7002 '
- 'master,fail - 1447829446956 1447829444948 1 disconnected\n'
- )
+ response = (
+ "c8253bae761cb1ecb2b61857d85dfe455a0fec8b 172.17.0.7:7006 "
+ "slave aa90da731f673a99617dfe930306549a09f83a6b 0 "
+ "1447836263059 5 connected\n"
+ "9bd595fe4821a0e8d6b99d70faa660638a7612b3 172.17.0.7:7008 "
+ "master - 0 1447836264065 0 connected\n"
+ "aa90da731f673a99617dfe930306549a09f83a6b 172.17.0.7:7003 "
+ "myself,master - 0 0 2 connected 5461-10922\n"
+ "1df047e5a594f945d82fc140be97a1452bcbf93e 172.17.0.7:7007 "
+ "slave 19efe5a631f3296fdf21a5441680f893e8cc96ec 0 "
+ "1447836262556 3 connected\n"
+ "4ad9a12e63e8f0207025eeba2354bcf4c85e5b22 172.17.0.7:7005 "
+ "master - 0 1447836262555 7 connected 0-5460\n"
+ "19efe5a631f3296fdf21a5441680f893e8cc96ec 172.17.0.7:7004 "
+ "master - 0 1447836263562 3 connected 10923-16383\n"
+ "fbb23ed8cfa23f17eaf27ff7d0c410492a1093d6 172.17.0.7:7002 "
+ "master,fail - 1447829446956 1447829444948 1 disconnected\n"
+ )
return _gen_cluster_mock_resp(r, response)
@pytest.fixture()
def mock_cluster_resp_slaves(request, **kwargs):
r = _get_client(redis.Redis, request, **kwargs)
- response = ("['1df047e5a594f945d82fc140be97a1452bcbf93e 172.17.0.7:7007 "
- "slave 19efe5a631f3296fdf21a5441680f893e8cc96ec 0 "
- "1447836789290 3 connected']")
+ response = (
+ "['1df047e5a594f945d82fc140be97a1452bcbf93e 172.17.0.7:7007 "
+ "slave 19efe5a631f3296fdf21a5441680f893e8cc96ec 0 "
+ "1447836789290 3 connected']"
+ )
return _gen_cluster_mock_resp(r, response)
@@ -315,15 +329,15 @@ def wait_for_command(client, monitor, command):
# if we find a command with our key before the command we're waiting
# for, something went wrong
redis_version = REDIS_INFO["version"]
- if LooseVersion(redis_version) >= LooseVersion('5.0.0'):
+ if LooseVersion(redis_version) >= LooseVersion("5.0.0"):
id_str = str(client.client_id())
else:
- id_str = f'{random.randrange(2 ** 32):08x}'
- key = f'__REDIS-PY-{id_str}__'
+ id_str = f"{random.randrange(2 ** 32):08x}"
+ key = f"__REDIS-PY-{id_str}__"
client.get(key)
while True:
monitor_response = monitor.next_command()
- if command in monitor_response['command']:
+ if command in monitor_response["command"]:
return monitor_response
- if key in monitor_response['command']:
+ if key in monitor_response["command"]:
return None
diff --git a/tests/test_cluster.py b/tests/test_cluster.py
index d12e47e..84d74bd 100644
--- a/tests/test_cluster.py
+++ b/tests/test_cluster.py
@@ -1,46 +1,47 @@
import binascii
import datetime
-import pytest
import warnings
-
from time import sleep
-from tests.test_pubsub import wait_for_message
-from unittest.mock import call, patch, DEFAULT, Mock
+from unittest.mock import DEFAULT, Mock, call, patch
+
+import pytest
+
from redis import Redis
-from redis.cluster import get_node_name, ClusterNode, \
- RedisCluster, NodesManager, PRIMARY, REDIS_CLUSTER_HASH_SLOTS, REPLICA
+from redis.cluster import (
+ PRIMARY,
+ REDIS_CLUSTER_HASH_SLOTS,
+ REPLICA,
+ ClusterNode,
+ NodesManager,
+ RedisCluster,
+ get_node_name,
+)
from redis.commands import CommandsParser
from redis.connection import Connection
-from redis.utils import str_if_bytes
+from redis.crc import key_slot
from redis.exceptions import (
AskError,
ClusterDownError,
DataError,
MovedError,
RedisClusterException,
- RedisError
+ RedisError,
)
+from redis.utils import str_if_bytes
+from tests.test_pubsub import wait_for_message
-from redis.crc import key_slot
-from .conftest import (
- _get_client,
- skip_if_server_version_lt,
- skip_unless_arch_bits
-)
+from .conftest import _get_client, skip_if_server_version_lt, skip_unless_arch_bits
default_host = "127.0.0.1"
default_port = 7000
default_cluster_slots = [
[
- 0, 8191,
- ['127.0.0.1', 7000, 'node_0'],
- ['127.0.0.1', 7003, 'node_3'],
+ 0,
+ 8191,
+ ["127.0.0.1", 7000, "node_0"],
+ ["127.0.0.1", 7003, "node_3"],
],
- [
- 8192, 16383,
- ['127.0.0.1', 7001, 'node_1'],
- ['127.0.0.1', 7002, 'node_2']
- ]
+ [8192, 16383, ["127.0.0.1", 7001, "node_1"], ["127.0.0.1", 7002, "node_2"]],
]
@@ -53,21 +54,20 @@ def slowlog(request, r):
to test it
"""
# Save old values
- current_config = r.config_get(
- target_nodes=r.get_primaries()[0])
- old_slower_than_value = current_config['slowlog-log-slower-than']
- old_max_legnth_value = current_config['slowlog-max-len']
+ current_config = r.config_get(target_nodes=r.get_primaries()[0])
+ old_slower_than_value = current_config["slowlog-log-slower-than"]
+ old_max_legnth_value = current_config["slowlog-max-len"]
# Function to restore the old values
def cleanup():
- r.config_set('slowlog-log-slower-than', old_slower_than_value)
- r.config_set('slowlog-max-len', old_max_legnth_value)
+ r.config_set("slowlog-log-slower-than", old_slower_than_value)
+ r.config_set("slowlog-max-len", old_max_legnth_value)
request.addfinalizer(cleanup)
# Set the new values
- r.config_set('slowlog-log-slower-than', 0)
- r.config_set('slowlog-max-len', 128)
+ r.config_set("slowlog-log-slower-than", 0)
+ r.config_set("slowlog-max-len", 128)
def get_mocked_redis_client(func=None, *args, **kwargs):
@@ -76,17 +76,18 @@ def get_mocked_redis_client(func=None, *args, **kwargs):
nodes and slots setup to remove the problem of different IP addresses
on different installations and machines.
"""
- cluster_slots = kwargs.pop('cluster_slots', default_cluster_slots)
- coverage_res = kwargs.pop('coverage_result', 'yes')
- with patch.object(Redis, 'execute_command') as execute_command_mock:
+ cluster_slots = kwargs.pop("cluster_slots", default_cluster_slots)
+ coverage_res = kwargs.pop("coverage_result", "yes")
+ with patch.object(Redis, "execute_command") as execute_command_mock:
+
def execute_command(*_args, **_kwargs):
- if _args[0] == 'CLUSTER SLOTS':
+ if _args[0] == "CLUSTER SLOTS":
mock_cluster_slots = cluster_slots
return mock_cluster_slots
- elif _args[0] == 'COMMAND':
- return {'get': [], 'set': []}
- elif _args[1] == 'cluster-require-full-coverage':
- return {'cluster-require-full-coverage': coverage_res}
+ elif _args[0] == "COMMAND":
+ return {"get": [], "set": []}
+ elif _args[1] == "cluster-require-full-coverage":
+ return {"cluster-require-full-coverage": coverage_res}
elif func is not None:
return func(*args, **kwargs)
else:
@@ -94,16 +95,21 @@ def get_mocked_redis_client(func=None, *args, **kwargs):
execute_command_mock.side_effect = execute_command
- with patch.object(CommandsParser, 'initialize',
- autospec=True) as cmd_parser_initialize:
+ with patch.object(
+ CommandsParser, "initialize", autospec=True
+ ) as cmd_parser_initialize:
def cmd_init_mock(self, r):
- self.commands = {'get': {'name': 'get', 'arity': 2,
- 'flags': ['readonly',
- 'fast'],
- 'first_key_pos': 1,
- 'last_key_pos': 1,
- 'step_count': 1}}
+ self.commands = {
+ "get": {
+ "name": "get",
+ "arity": 2,
+ "flags": ["readonly", "fast"],
+ "first_key_pos": 1,
+ "last_key_pos": 1,
+ "step_count": 1,
+ }
+ }
cmd_parser_initialize.side_effect = cmd_init_mock
@@ -138,21 +144,21 @@ def find_node_ip_based_on_port(cluster_client, port):
def moved_redirection_helper(request, failover=False):
"""
- Test that the client handles MOVED response after a failover.
- Redirection after a failover means that the redirection address is of a
- replica that was promoted to a primary.
+ Test that the client handles MOVED response after a failover.
+ Redirection after a failover means that the redirection address is of a
+ replica that was promoted to a primary.
- At first call it should return a MOVED ResponseError that will point
- the client to the next server it should talk to.
+ At first call it should return a MOVED ResponseError that will point
+ the client to the next server it should talk to.
- Verify that:
- 1. it tries to talk to the redirected node
- 2. it updates the slot's primary to the redirected node
+ Verify that:
+ 1. it tries to talk to the redirected node
+ 2. it updates the slot's primary to the redirected node
- For a failover, also verify:
- 3. the redirected node's server type updated to 'primary'
- 4. the server type of the previous slot owner updated to 'replica'
- """
+ For a failover, also verify:
+ 3. the redirected node's server type updated to 'primary'
+ 4. the server type of the previous slot owner updated to 'replica'
+ """
rc = _get_client(RedisCluster, request, flushdb=False)
slot = 12182
redirect_node = None
@@ -160,8 +166,7 @@ def moved_redirection_helper(request, failover=False):
prev_primary = rc.nodes_manager.get_node_from_slot(slot)
if failover:
if len(rc.nodes_manager.slots_cache[slot]) < 2:
- warnings.warn("Skipping this test since it requires to have a "
- "replica")
+ warnings.warn("Skipping this test since it requires to have a " "replica")
return
redirect_node = rc.nodes_manager.slots_cache[slot][1]
else:
@@ -169,7 +174,8 @@ def moved_redirection_helper(request, failover=False):
redirect_node = rc.get_primaries()[0]
r_host = redirect_node.host
r_port = redirect_node.port
- with patch.object(Redis, 'parse_response') as parse_response:
+ with patch.object(Redis, "parse_response") as parse_response:
+
def moved_redirect_effect(connection, *args, **options):
def ok_response(connection, *args, **options):
assert connection.host == r_host
@@ -201,8 +207,7 @@ class TestRedisClusterObj:
args
"""
cluster = get_mocked_redis_client(host=default_host, port=default_port)
- assert cluster.get_node(host=default_host,
- port=default_port) is not None
+ assert cluster.get_node(host=default_host, port=default_port) is not None
def test_startup_nodes(self):
"""
@@ -211,11 +216,15 @@ class TestRedisClusterObj:
"""
port_1 = 7000
port_2 = 7001
- startup_nodes = [ClusterNode(default_host, port_1),
- ClusterNode(default_host, port_2)]
+ startup_nodes = [
+ ClusterNode(default_host, port_1),
+ ClusterNode(default_host, port_2),
+ ]
cluster = get_mocked_redis_client(startup_nodes=startup_nodes)
- assert cluster.get_node(host=default_host, port=port_1) is not None \
- and cluster.get_node(host=default_host, port=port_2) is not None
+ assert (
+ cluster.get_node(host=default_host, port=port_1) is not None
+ and cluster.get_node(host=default_host, port=port_2) is not None
+ )
def test_empty_startup_nodes(self):
"""
@@ -225,19 +234,19 @@ class TestRedisClusterObj:
RedisCluster(startup_nodes=[])
assert str(ex.value).startswith(
- "RedisCluster requires at least one node to discover the "
- "cluster"), str_if_bytes(ex.value)
+ "RedisCluster requires at least one node to discover the " "cluster"
+ ), str_if_bytes(ex.value)
def test_from_url(self, r):
redis_url = f"redis://{default_host}:{default_port}/0"
- with patch.object(RedisCluster, 'from_url') as from_url:
+ with patch.object(RedisCluster, "from_url") as from_url:
+
def from_url_mocked(_url, **_kwargs):
return get_mocked_redis_client(url=_url, **_kwargs)
from_url.side_effect = from_url_mocked
cluster = RedisCluster.from_url(redis_url)
- assert cluster.get_node(host=default_host,
- port=default_port) is not None
+ assert cluster.get_node(host=default_host, port=default_port) is not None
def test_execute_command_errors(self, r):
"""
@@ -245,8 +254,9 @@ class TestRedisClusterObj:
"""
with pytest.raises(RedisClusterException) as ex:
r.execute_command("GET")
- assert str(ex.value).startswith("No way to dispatch this command to "
- "Redis Cluster. Missing key.")
+ assert str(ex.value).startswith(
+ "No way to dispatch this command to " "Redis Cluster. Missing key."
+ )
def test_execute_command_node_flag_primaries(self, r):
"""
@@ -254,7 +264,7 @@ class TestRedisClusterObj:
"""
primaries = r.get_primaries()
replicas = r.get_replicas()
- mock_all_nodes_resp(r, 'PONG')
+ mock_all_nodes_resp(r, "PONG")
assert r.ping(RedisCluster.PRIMARIES) is True
for primary in primaries:
conn = primary.redis_connection.connection
@@ -271,7 +281,7 @@ class TestRedisClusterObj:
if not replicas:
r = get_mocked_redis_client(default_host, default_port)
primaries = r.get_primaries()
- mock_all_nodes_resp(r, 'PONG')
+ mock_all_nodes_resp(r, "PONG")
assert r.ping(RedisCluster.REPLICAS) is True
for replica in replicas:
conn = replica.redis_connection.connection
@@ -284,7 +294,7 @@ class TestRedisClusterObj:
"""
Test command execution with nodes flag ALL_NODES
"""
- mock_all_nodes_resp(r, 'PONG')
+ mock_all_nodes_resp(r, "PONG")
assert r.ping(RedisCluster.ALL_NODES) is True
for node in r.get_nodes():
conn = node.redis_connection.connection
@@ -294,7 +304,7 @@ class TestRedisClusterObj:
"""
Test command execution with nodes flag RANDOM
"""
- mock_all_nodes_resp(r, 'PONG')
+ mock_all_nodes_resp(r, "PONG")
assert r.ping(RedisCluster.RANDOM) is True
called_count = 0
for node in r.get_nodes():
@@ -309,7 +319,7 @@ class TestRedisClusterObj:
default node
"""
def_node = r.get_default_node()
- mock_node_resp(def_node, 'PONG')
+ mock_node_resp(def_node, "PONG")
assert r.ping() is True
conn = def_node.redis_connection.connection
assert conn.read_response.called
@@ -324,7 +334,8 @@ class TestRedisClusterObj:
Important thing to verify is that it tries to talk to the second node.
"""
redirect_node = r.get_nodes()[0]
- with patch.object(Redis, 'parse_response') as parse_response:
+ with patch.object(Redis, "parse_response") as parse_response:
+
def ask_redirect_effect(connection, *args, **options):
def ok_response(connection, *args, **options):
assert connection.host == redirect_node.host
@@ -356,26 +367,22 @@ class TestRedisClusterObj:
Test making calls on specific nodes when the cluster has failed over to
another node
"""
- node_7006 = ClusterNode(host=default_host, port=7006,
- server_type=PRIMARY)
- node_7007 = ClusterNode(host=default_host, port=7007,
- server_type=PRIMARY)
- with patch.object(Redis, 'parse_response') as parse_response:
- with patch.object(NodesManager, 'initialize', autospec=True) as \
- initialize:
- with patch.multiple(Connection,
- send_command=DEFAULT,
- connect=DEFAULT,
- can_read=DEFAULT) as mocks:
+ node_7006 = ClusterNode(host=default_host, port=7006, server_type=PRIMARY)
+ node_7007 = ClusterNode(host=default_host, port=7007, server_type=PRIMARY)
+ with patch.object(Redis, "parse_response") as parse_response:
+ with patch.object(NodesManager, "initialize", autospec=True) as initialize:
+ with patch.multiple(
+ Connection, send_command=DEFAULT, connect=DEFAULT, can_read=DEFAULT
+ ) as mocks:
# simulate 7006 as a failed node
- def parse_response_mock(connection, command_name,
- **options):
+ def parse_response_mock(connection, command_name, **options):
if connection.port == 7006:
parse_response.failed_calls += 1
raise ClusterDownError(
- 'CLUSTERDOWN The cluster is '
- 'down. Use CLUSTER INFO for '
- 'more information')
+ "CLUSTERDOWN The cluster is "
+ "down. Use CLUSTER INFO for "
+ "more information"
+ )
elif connection.port == 7007:
parse_response.successful_calls += 1
@@ -391,8 +398,7 @@ class TestRedisClusterObj:
# After the first connection fails, a reinitialize
# should follow the cluster to 7007
def map_7007(self):
- self.nodes_cache = {
- node_7007.name: node_7007}
+ self.nodes_cache = {node_7007.name: node_7007}
self.default_node = node_7007
self.slots_cache = {}
@@ -406,44 +412,52 @@ class TestRedisClusterObj:
parse_response.successful_calls = 0
parse_response.failed_calls = 0
initialize.side_effect = initialize_mock
- mocks['can_read'].return_value = False
- mocks['send_command'].return_value = "MOCK_OK"
- mocks['connect'].return_value = None
- with patch.object(CommandsParser, 'initialize',
- autospec=True) as cmd_parser_initialize:
+ mocks["can_read"].return_value = False
+ mocks["send_command"].return_value = "MOCK_OK"
+ mocks["connect"].return_value = None
+ with patch.object(
+ CommandsParser, "initialize", autospec=True
+ ) as cmd_parser_initialize:
def cmd_init_mock(self, r):
- self.commands = {'get': {'name': 'get', 'arity': 2,
- 'flags': ['readonly',
- 'fast'],
- 'first_key_pos': 1,
- 'last_key_pos': 1,
- 'step_count': 1}}
+ self.commands = {
+ "get": {
+ "name": "get",
+ "arity": 2,
+ "flags": ["readonly", "fast"],
+ "first_key_pos": 1,
+ "last_key_pos": 1,
+ "step_count": 1,
+ }
+ }
cmd_parser_initialize.side_effect = cmd_init_mock
- rc = _get_client(
- RedisCluster, request, flushdb=False)
+ rc = _get_client(RedisCluster, request, flushdb=False)
assert len(rc.get_nodes()) == 1
- assert rc.get_node(node_name=node_7006.name) is not \
- None
+ assert rc.get_node(node_name=node_7006.name) is not None
- rc.get('foo')
+ rc.get("foo")
# Cluster should now point to 7007, and there should be
# one failed and one successful call
assert len(rc.get_nodes()) == 1
- assert rc.get_node(node_name=node_7007.name) is not \
- None
+ assert rc.get_node(node_name=node_7007.name) is not None
assert rc.get_node(node_name=node_7006.name) is None
assert parse_response.failed_calls == 1
assert parse_response.successful_calls == 1
def test_reading_from_replicas_in_round_robin(self):
- with patch.multiple(Connection, send_command=DEFAULT,
- read_response=DEFAULT, _connect=DEFAULT,
- can_read=DEFAULT, on_connect=DEFAULT) as mocks:
- with patch.object(Redis, 'parse_response') as parse_response:
+ with patch.multiple(
+ Connection,
+ send_command=DEFAULT,
+ read_response=DEFAULT,
+ _connect=DEFAULT,
+ can_read=DEFAULT,
+ on_connect=DEFAULT,
+ ) as mocks:
+ with patch.object(Redis, "parse_response") as parse_response:
+
def parse_response_mock_first(connection, *args, **options):
# Primary
assert connection.port == 7001
@@ -465,16 +479,16 @@ class TestRedisClusterObj:
# do want RedisCluster.on_connect function to get called,
# so we'll mock some of the Connection's functions to allow it
parse_response.side_effect = parse_response_mock_first
- mocks['send_command'].return_value = True
- mocks['read_response'].return_value = "OK"
- mocks['_connect'].return_value = True
- mocks['can_read'].return_value = False
- mocks['on_connect'].return_value = True
+ mocks["send_command"].return_value = True
+ mocks["read_response"].return_value = "OK"
+ mocks["_connect"].return_value = True
+ mocks["can_read"].return_value = False
+ mocks["on_connect"].return_value = True
# Create a cluster with reading from replications
- read_cluster = get_mocked_redis_client(host=default_host,
- port=default_port,
- read_from_replicas=True)
+ read_cluster = get_mocked_redis_client(
+ host=default_host, port=default_port, read_from_replicas=True
+ )
assert read_cluster.read_from_replicas is True
# Check that we read from the slot's nodes in a round robin
# matter.
@@ -483,7 +497,7 @@ class TestRedisClusterObj:
read_cluster.get("foo")
read_cluster.get("foo")
read_cluster.get("foo")
- mocks['send_command'].assert_has_calls([call('READONLY')])
+ mocks["send_command"].assert_has_calls([call("READONLY")])
def test_keyslot(self, r):
"""
@@ -503,8 +517,10 @@ class TestRedisClusterObj:
assert r.keyslot(b"abc") == r.keyslot("abc")
def test_get_node_name(self):
- assert get_node_name(default_host, default_port) == \
- f"{default_host}:{default_port}"
+ assert (
+ get_node_name(default_host, default_port)
+ == f"{default_host}:{default_port}"
+ )
def test_all_nodes(self, r):
"""
@@ -520,8 +536,11 @@ class TestRedisClusterObj:
Set a list of nodes with random primaries/replicas config and it shold
be possible to iterate over all of them.
"""
- nodes = [node for node in r.nodes_manager.nodes_cache.values()
- if node.server_type == PRIMARY]
+ nodes = [
+ node
+ for node in r.nodes_manager.nodes_cache.values()
+ if node.server_type == PRIMARY
+ ]
for node in r.get_primaries():
assert node in nodes
@@ -532,12 +551,14 @@ class TestRedisClusterObj:
command as many times as configured in cluster_error_retry_attempts
and then raise the exception
"""
- with patch.object(RedisCluster, '_execute_command') as execute_command:
+ with patch.object(RedisCluster, "_execute_command") as execute_command:
+
def raise_cluster_down_error(target_node, *args, **kwargs):
execute_command.failed_calls += 1
raise ClusterDownError(
- 'CLUSTERDOWN The cluster is down. Use CLUSTER INFO for '
- 'more information')
+ "CLUSTERDOWN The cluster is down. Use CLUSTER INFO for "
+ "more information"
+ )
execute_command.side_effect = raise_cluster_down_error
@@ -545,8 +566,7 @@ class TestRedisClusterObj:
with pytest.raises(ClusterDownError):
rc.get("bar")
- assert execute_command.failed_calls == \
- rc.cluster_error_retry_attempts
+ assert execute_command.failed_calls == rc.cluster_error_retry_attempts
def test_connection_error_overreaches_retry_attempts(self):
"""
@@ -554,7 +574,8 @@ class TestRedisClusterObj:
command as many times as configured in cluster_error_retry_attempts
and then raise the exception
"""
- with patch.object(RedisCluster, '_execute_command') as execute_command:
+ with patch.object(RedisCluster, "_execute_command") as execute_command:
+
def raise_conn_error(target_node, *args, **kwargs):
execute_command.failed_calls += 1
raise ConnectionError()
@@ -565,8 +586,7 @@ class TestRedisClusterObj:
with pytest.raises(ConnectionError):
rc.get("bar")
- assert execute_command.failed_calls == \
- rc.cluster_error_retry_attempts
+ assert execute_command.failed_calls == rc.cluster_error_retry_attempts
def test_user_on_connect_function(self, request):
"""
@@ -600,7 +620,7 @@ class TestRedisClusterObj:
test failed replacement of the default cluster node
"""
default_node = r.get_default_node()
- new_def_node = ClusterNode('1.1.1.1', 1111)
+ new_def_node = ClusterNode("1.1.1.1", 1111)
assert r.set_default_node(None) is False
assert r.set_default_node(new_def_node) is False
assert r.get_default_node() == default_node
@@ -609,7 +629,7 @@ class TestRedisClusterObj:
"""
Test that get_node_from_key function returns the correct node
"""
- key = 'bar'
+ key = "bar"
slot = r.keyslot(key)
slot_nodes = r.nodes_manager.slots_cache.get(slot)
primary = slot_nodes[0]
@@ -627,78 +647,79 @@ class TestClusterRedisCommands:
"""
def test_case_insensitive_command_names(self, r):
- assert r.cluster_response_callbacks['cluster addslots'] == \
- r.cluster_response_callbacks['CLUSTER ADDSLOTS']
+ assert (
+ r.cluster_response_callbacks["cluster addslots"]
+ == r.cluster_response_callbacks["CLUSTER ADDSLOTS"]
+ )
def test_get_and_set(self, r):
# get and set can't be tested independently of each other
- assert r.get('a') is None
- byte_string = b'value'
+ assert r.get("a") is None
+ byte_string = b"value"
integer = 5
- unicode_string = chr(3456) + 'abcd' + chr(3421)
- assert r.set('byte_string', byte_string)
- assert r.set('integer', 5)
- assert r.set('unicode_string', unicode_string)
- assert r.get('byte_string') == byte_string
- assert r.get('integer') == str(integer).encode()
- assert r.get('unicode_string').decode('utf-8') == unicode_string
+ unicode_string = chr(3456) + "abcd" + chr(3421)
+ assert r.set("byte_string", byte_string)
+ assert r.set("integer", 5)
+ assert r.set("unicode_string", unicode_string)
+ assert r.get("byte_string") == byte_string
+ assert r.get("integer") == str(integer).encode()
+ assert r.get("unicode_string").decode("utf-8") == unicode_string
def test_mget_nonatomic(self, r):
assert r.mget_nonatomic([]) == []
- assert r.mget_nonatomic(['a', 'b']) == [None, None]
- r['a'] = '1'
- r['b'] = '2'
- r['c'] = '3'
+ assert r.mget_nonatomic(["a", "b"]) == [None, None]
+ r["a"] = "1"
+ r["b"] = "2"
+ r["c"] = "3"
- assert (r.mget_nonatomic('a', 'other', 'b', 'c') ==
- [b'1', None, b'2', b'3'])
+ assert r.mget_nonatomic("a", "other", "b", "c") == [b"1", None, b"2", b"3"]
def test_mset_nonatomic(self, r):
- d = {'a': b'1', 'b': b'2', 'c': b'3', 'd': b'4'}
+ d = {"a": b"1", "b": b"2", "c": b"3", "d": b"4"}
assert r.mset_nonatomic(d)
for k, v in d.items():
assert r[k] == v
def test_config_set(self, r):
- assert r.config_set('slowlog-log-slower-than', 0)
+ assert r.config_set("slowlog-log-slower-than", 0)
def test_cluster_config_resetstat(self, r):
- r.ping(target_nodes='all')
- all_info = r.info(target_nodes='all')
+ r.ping(target_nodes="all")
+ all_info = r.info(target_nodes="all")
prior_commands_processed = -1
for node_info in all_info.values():
- prior_commands_processed = node_info['total_commands_processed']
+ prior_commands_processed = node_info["total_commands_processed"]
assert prior_commands_processed >= 1
- r.config_resetstat(target_nodes='all')
- all_info = r.info(target_nodes='all')
+ r.config_resetstat(target_nodes="all")
+ all_info = r.info(target_nodes="all")
for node_info in all_info.values():
- reset_commands_processed = node_info['total_commands_processed']
+ reset_commands_processed = node_info["total_commands_processed"]
assert reset_commands_processed < prior_commands_processed
def test_client_setname(self, r):
node = r.get_random_node()
- r.client_setname('redis_py_test', target_nodes=node)
+ r.client_setname("redis_py_test", target_nodes=node)
client_name = r.client_getname(target_nodes=node)
- assert client_name == 'redis_py_test'
+ assert client_name == "redis_py_test"
def test_exists(self, r):
- d = {'a': b'1', 'b': b'2', 'c': b'3', 'd': b'4'}
+ d = {"a": b"1", "b": b"2", "c": b"3", "d": b"4"}
r.mset_nonatomic(d)
assert r.exists(*d.keys()) == len(d)
def test_delete(self, r):
- d = {'a': b'1', 'b': b'2', 'c': b'3', 'd': b'4'}
+ d = {"a": b"1", "b": b"2", "c": b"3", "d": b"4"}
r.mset_nonatomic(d)
assert r.delete(*d.keys()) == len(d)
assert r.delete(*d.keys()) == 0
def test_touch(self, r):
- d = {'a': b'1', 'b': b'2', 'c': b'3', 'd': b'4'}
+ d = {"a": b"1", "b": b"2", "c": b"3", "d": b"4"}
r.mset_nonatomic(d)
assert r.touch(*d.keys()) == len(d)
def test_unlink(self, r):
- d = {'a': b'1', 'b': b'2', 'c': b'3', 'd': b'4'}
+ d = {"a": b"1", "b": b"2", "c": b"3", "d": b"4"}
r.mset_nonatomic(d)
assert r.unlink(*d.keys()) == len(d)
# Unlink is non-blocking so we sleep before
@@ -718,7 +739,7 @@ class TestClusterRedisCommands:
p = r.pubsub(node)
pubsub_nodes.append(p)
p.subscribe(channel)
- b_channel = channel.encode('utf-8')
+ b_channel = channel.encode("utf-8")
channels.append(b_channel)
# Assert that each node returns only the channel it subscribed to
sub_channels = node.redis_connection.pubsub_channels()
@@ -730,7 +751,7 @@ class TestClusterRedisCommands:
i += 1
# Assert that the cluster's pubsub_channels function returns ALL of
# the cluster's channels
- result = r.pubsub_channels(target_nodes='all')
+ result = r.pubsub_channels(target_nodes="all")
result.sort()
assert result == channels
@@ -738,7 +759,7 @@ class TestClusterRedisCommands:
nodes = r.get_nodes()
pubsub_nodes = []
channel = "foo"
- b_channel = channel.encode('utf-8')
+ b_channel = channel.encode("utf-8")
for node in nodes:
# We will create different pubsub clients where each one is
# connected to a different node
@@ -753,8 +774,7 @@ class TestClusterRedisCommands:
assert sub_chann_num == [(b_channel, 1)]
# Assert that the cluster's pubsub_numsub function returns ALL clients
# subscribed to this channel in the entire cluster
- assert r.pubsub_numsub(channel, target_nodes='all') == \
- [(b_channel, len(nodes))]
+ assert r.pubsub_numsub(channel, target_nodes="all") == [(b_channel, len(nodes))]
def test_pubsub_numpat_merge_results(self, r):
nodes = r.get_nodes()
@@ -774,35 +794,35 @@ class TestClusterRedisCommands:
assert sub_num_pat == 1
# Assert that the cluster's pubsub_numsub function returns ALL clients
# subscribed to this channel in the entire cluster
- assert r.pubsub_numpat(target_nodes='all') == len(nodes)
+ assert r.pubsub_numpat(target_nodes="all") == len(nodes)
- @skip_if_server_version_lt('2.8.0')
+ @skip_if_server_version_lt("2.8.0")
def test_cluster_pubsub_channels(self, r):
p = r.pubsub()
- p.subscribe('foo', 'bar', 'baz', 'quux')
+ p.subscribe("foo", "bar", "baz", "quux")
for i in range(4):
- assert wait_for_message(p, timeout=0.5)['type'] == 'subscribe'
- expected = [b'bar', b'baz', b'foo', b'quux']
- assert all([channel in r.pubsub_channels(target_nodes='all')
- for channel in expected])
+ assert wait_for_message(p, timeout=0.5)["type"] == "subscribe"
+ expected = [b"bar", b"baz", b"foo", b"quux"]
+ assert all(
+ [channel in r.pubsub_channels(target_nodes="all") for channel in expected]
+ )
- @skip_if_server_version_lt('2.8.0')
+ @skip_if_server_version_lt("2.8.0")
def test_cluster_pubsub_numsub(self, r):
p1 = r.pubsub()
- p1.subscribe('foo', 'bar', 'baz')
+ p1.subscribe("foo", "bar", "baz")
for i in range(3):
- assert wait_for_message(p1, timeout=0.5)['type'] == 'subscribe'
+ assert wait_for_message(p1, timeout=0.5)["type"] == "subscribe"
p2 = r.pubsub()
- p2.subscribe('bar', 'baz')
+ p2.subscribe("bar", "baz")
for i in range(2):
- assert wait_for_message(p2, timeout=0.5)['type'] == 'subscribe'
+ assert wait_for_message(p2, timeout=0.5)["type"] == "subscribe"
p3 = r.pubsub()
- p3.subscribe('baz')
- assert wait_for_message(p3, timeout=0.5)['type'] == 'subscribe'
+ p3.subscribe("baz")
+ assert wait_for_message(p3, timeout=0.5)["type"] == "subscribe"
- channels = [(b'foo', 1), (b'bar', 2), (b'baz', 3)]
- assert r.pubsub_numsub('foo', 'bar', 'baz', target_nodes='all') \
- == channels
+ channels = [(b"foo", 1), (b"bar", 2), (b"baz", 3)]
+ assert r.pubsub_numsub("foo", "bar", "baz", target_nodes="all") == channels
def test_cluster_slots(self, r):
mock_all_nodes_resp(r, default_cluster_slots)
@@ -810,12 +830,11 @@ class TestClusterRedisCommands:
assert isinstance(cluster_slots, dict)
assert len(default_cluster_slots) == len(cluster_slots)
assert cluster_slots.get((0, 8191)) is not None
- assert cluster_slots.get((0, 8191)).get('primary') == \
- ('127.0.0.1', 7000)
+ assert cluster_slots.get((0, 8191)).get("primary") == ("127.0.0.1", 7000)
def test_cluster_addslots(self, r):
node = r.get_random_node()
- mock_node_resp(node, 'OK')
+ mock_node_resp(node, "OK")
assert r.cluster_addslots(node, 1, 2, 3) is True
def test_cluster_countkeysinslot(self, r):
@@ -825,22 +844,25 @@ class TestClusterRedisCommands:
def test_cluster_count_failure_report(self, r):
mock_all_nodes_resp(r, 0)
- assert r.cluster_count_failure_report('node_0') == 0
+ assert r.cluster_count_failure_report("node_0") == 0
def test_cluster_delslots(self):
cluster_slots = [
[
- 0, 8191,
- ['127.0.0.1', 7000, 'node_0'],
+ 0,
+ 8191,
+ ["127.0.0.1", 7000, "node_0"],
],
[
- 8192, 16383,
- ['127.0.0.1', 7001, 'node_1'],
- ]
+ 8192,
+ 16383,
+ ["127.0.0.1", 7001, "node_1"],
+ ],
]
- r = get_mocked_redis_client(host=default_host, port=default_port,
- cluster_slots=cluster_slots)
- mock_all_nodes_resp(r, 'OK')
+ r = get_mocked_redis_client(
+ host=default_host, port=default_port, cluster_slots=cluster_slots
+ )
+ mock_all_nodes_resp(r, "OK")
node0 = r.get_node(default_host, 7000)
node1 = r.get_node(default_host, 7001)
assert r.cluster_delslots(0, 8192) == [True, True]
@@ -849,59 +871,61 @@ class TestClusterRedisCommands:
def test_cluster_failover(self, r):
node = r.get_random_node()
- mock_node_resp(node, 'OK')
+ mock_node_resp(node, "OK")
assert r.cluster_failover(node) is True
- assert r.cluster_failover(node, 'FORCE') is True
- assert r.cluster_failover(node, 'TAKEOVER') is True
+ assert r.cluster_failover(node, "FORCE") is True
+ assert r.cluster_failover(node, "TAKEOVER") is True
with pytest.raises(RedisError):
- r.cluster_failover(node, 'FORCT')
+ r.cluster_failover(node, "FORCT")
def test_cluster_info(self, r):
info = r.cluster_info()
assert isinstance(info, dict)
- assert info['cluster_state'] == 'ok'
+ assert info["cluster_state"] == "ok"
def test_cluster_keyslot(self, r):
mock_all_nodes_resp(r, 12182)
- assert r.cluster_keyslot('foo') == 12182
+ assert r.cluster_keyslot("foo") == 12182
def test_cluster_meet(self, r):
node = r.get_default_node()
- mock_node_resp(node, 'OK')
- assert r.cluster_meet('127.0.0.1', 6379) is True
+ mock_node_resp(node, "OK")
+ assert r.cluster_meet("127.0.0.1", 6379) is True
def test_cluster_nodes(self, r):
response = (
- 'c8253bae761cb1ecb2b61857d85dfe455a0fec8b 172.17.0.7:7006 '
- 'slave aa90da731f673a99617dfe930306549a09f83a6b 0 '
- '1447836263059 5 connected\n'
- '9bd595fe4821a0e8d6b99d70faa660638a7612b3 172.17.0.7:7008 '
- 'master - 0 1447836264065 0 connected\n'
- 'aa90da731f673a99617dfe930306549a09f83a6b 172.17.0.7:7003 '
- 'myself,master - 0 0 2 connected 5461-10922\n'
- '1df047e5a594f945d82fc140be97a1452bcbf93e 172.17.0.7:7007 '
- 'slave 19efe5a631f3296fdf21a5441680f893e8cc96ec 0 '
- '1447836262556 3 connected\n'
- '4ad9a12e63e8f0207025eeba2354bcf4c85e5b22 172.17.0.7:7005 '
- 'master - 0 1447836262555 7 connected 0-5460\n'
- '19efe5a631f3296fdf21a5441680f893e8cc96ec 172.17.0.7:7004 '
- 'master - 0 1447836263562 3 connected 10923-16383\n'
- 'fbb23ed8cfa23f17eaf27ff7d0c410492a1093d6 172.17.0.7:7002 '
- 'master,fail - 1447829446956 1447829444948 1 disconnected\n'
+ "c8253bae761cb1ecb2b61857d85dfe455a0fec8b 172.17.0.7:7006 "
+ "slave aa90da731f673a99617dfe930306549a09f83a6b 0 "
+ "1447836263059 5 connected\n"
+ "9bd595fe4821a0e8d6b99d70faa660638a7612b3 172.17.0.7:7008 "
+ "master - 0 1447836264065 0 connected\n"
+ "aa90da731f673a99617dfe930306549a09f83a6b 172.17.0.7:7003 "
+ "myself,master - 0 0 2 connected 5461-10922\n"
+ "1df047e5a594f945d82fc140be97a1452bcbf93e 172.17.0.7:7007 "
+ "slave 19efe5a631f3296fdf21a5441680f893e8cc96ec 0 "
+ "1447836262556 3 connected\n"
+ "4ad9a12e63e8f0207025eeba2354bcf4c85e5b22 172.17.0.7:7005 "
+ "master - 0 1447836262555 7 connected 0-5460\n"
+ "19efe5a631f3296fdf21a5441680f893e8cc96ec 172.17.0.7:7004 "
+ "master - 0 1447836263562 3 connected 10923-16383\n"
+ "fbb23ed8cfa23f17eaf27ff7d0c410492a1093d6 172.17.0.7:7002 "
+ "master,fail - 1447829446956 1447829444948 1 disconnected\n"
)
mock_all_nodes_resp(r, response)
nodes = r.cluster_nodes()
assert len(nodes) == 7
- assert nodes.get('172.17.0.7:7006') is not None
- assert nodes.get('172.17.0.7:7006').get('node_id') == \
- "c8253bae761cb1ecb2b61857d85dfe455a0fec8b"
+ assert nodes.get("172.17.0.7:7006") is not None
+ assert (
+ nodes.get("172.17.0.7:7006").get("node_id")
+ == "c8253bae761cb1ecb2b61857d85dfe455a0fec8b"
+ )
def test_cluster_replicate(self, r):
node = r.get_random_node()
all_replicas = r.get_replicas()
- mock_all_nodes_resp(r, 'OK')
- assert r.cluster_replicate(node, 'c8253bae761cb61857d') is True
- results = r.cluster_replicate(all_replicas, 'c8253bae761cb61857d')
+ mock_all_nodes_resp(r, "OK")
+ assert r.cluster_replicate(node, "c8253bae761cb61857d") is True
+ results = r.cluster_replicate(all_replicas, "c8253bae761cb61857d")
if isinstance(results, dict):
for res in results.values():
assert res is True
@@ -909,74 +933,78 @@ class TestClusterRedisCommands:
assert results is True
def test_cluster_reset(self, r):
- mock_all_nodes_resp(r, 'OK')
+ mock_all_nodes_resp(r, "OK")
assert r.cluster_reset() is True
assert r.cluster_reset(False) is True
- all_results = r.cluster_reset(False, target_nodes='all')
+ all_results = r.cluster_reset(False, target_nodes="all")
for res in all_results.values():
assert res is True
def test_cluster_save_config(self, r):
node = r.get_random_node()
all_nodes = r.get_nodes()
- mock_all_nodes_resp(r, 'OK')
+ mock_all_nodes_resp(r, "OK")
assert r.cluster_save_config(node) is True
all_results = r.cluster_save_config(all_nodes)
for res in all_results.values():
assert res is True
def test_cluster_get_keys_in_slot(self, r):
- response = [b'{foo}1', b'{foo}2']
+ response = [b"{foo}1", b"{foo}2"]
node = r.nodes_manager.get_node_from_slot(12182)
mock_node_resp(node, response)
keys = r.cluster_get_keys_in_slot(12182, 4)
assert keys == response
def test_cluster_set_config_epoch(self, r):
- mock_all_nodes_resp(r, 'OK')
+ mock_all_nodes_resp(r, "OK")
assert r.cluster_set_config_epoch(3) is True
- all_results = r.cluster_set_config_epoch(3, target_nodes='all')
+ all_results = r.cluster_set_config_epoch(3, target_nodes="all")
for res in all_results.values():
assert res is True
def test_cluster_setslot(self, r):
node = r.get_random_node()
- mock_node_resp(node, 'OK')
- assert r.cluster_setslot(node, 'node_0', 1218, 'IMPORTING') is True
- assert r.cluster_setslot(node, 'node_0', 1218, 'NODE') is True
- assert r.cluster_setslot(node, 'node_0', 1218, 'MIGRATING') is True
+ mock_node_resp(node, "OK")
+ assert r.cluster_setslot(node, "node_0", 1218, "IMPORTING") is True
+ assert r.cluster_setslot(node, "node_0", 1218, "NODE") is True
+ assert r.cluster_setslot(node, "node_0", 1218, "MIGRATING") is True
with pytest.raises(RedisError):
- r.cluster_failover(node, 'STABLE')
+ r.cluster_failover(node, "STABLE")
with pytest.raises(RedisError):
- r.cluster_failover(node, 'STATE')
+ r.cluster_failover(node, "STATE")
def test_cluster_setslot_stable(self, r):
node = r.nodes_manager.get_node_from_slot(12182)
- mock_node_resp(node, 'OK')
+ mock_node_resp(node, "OK")
assert r.cluster_setslot_stable(12182) is True
assert node.redis_connection.connection.read_response.called
def test_cluster_replicas(self, r):
- response = [b'01eca22229cf3c652b6fca0d09ff6941e0d2e3 '
- b'127.0.0.1:6377@16377 slave '
- b'52611e796814b78e90ad94be9d769a4f668f9a 0 '
- b'1634550063436 4 connected',
- b'r4xfga22229cf3c652b6fca0d09ff69f3e0d4d '
- b'127.0.0.1:6378@16378 slave '
- b'52611e796814b78e90ad94be9d769a4f668f9a 0 '
- b'1634550063436 4 connected']
+ response = [
+ b"01eca22229cf3c652b6fca0d09ff6941e0d2e3 "
+ b"127.0.0.1:6377@16377 slave "
+ b"52611e796814b78e90ad94be9d769a4f668f9a 0 "
+ b"1634550063436 4 connected",
+ b"r4xfga22229cf3c652b6fca0d09ff69f3e0d4d "
+ b"127.0.0.1:6378@16378 slave "
+ b"52611e796814b78e90ad94be9d769a4f668f9a 0 "
+ b"1634550063436 4 connected",
+ ]
mock_all_nodes_resp(r, response)
- replicas = r.cluster_replicas('52611e796814b78e90ad94be9d769a4f668f9a')
- assert replicas.get('127.0.0.1:6377') is not None
- assert replicas.get('127.0.0.1:6378') is not None
- assert replicas.get('127.0.0.1:6378').get('node_id') == \
- 'r4xfga22229cf3c652b6fca0d09ff69f3e0d4d'
+ replicas = r.cluster_replicas("52611e796814b78e90ad94be9d769a4f668f9a")
+ assert replicas.get("127.0.0.1:6377") is not None
+ assert replicas.get("127.0.0.1:6378") is not None
+ assert (
+ replicas.get("127.0.0.1:6378").get("node_id")
+ == "r4xfga22229cf3c652b6fca0d09ff69f3e0d4d"
+ )
def test_readonly(self):
r = get_mocked_redis_client(host=default_host, port=default_port)
- mock_all_nodes_resp(r, 'OK')
+ mock_all_nodes_resp(r, "OK")
assert r.readonly() is True
- all_replicas_results = r.readonly(target_nodes='replicas')
+ all_replicas_results = r.readonly(target_nodes="replicas")
for res in all_replicas_results.values():
assert res is True
for replica in r.get_replicas():
@@ -984,9 +1012,9 @@ class TestClusterRedisCommands:
def test_readwrite(self):
r = get_mocked_redis_client(host=default_host, port=default_port)
- mock_all_nodes_resp(r, 'OK')
+ mock_all_nodes_resp(r, "OK")
assert r.readwrite() is True
- all_replicas_results = r.readwrite(target_nodes='replicas')
+ all_replicas_results = r.readwrite(target_nodes="replicas")
for res in all_replicas_results.values():
assert res is True
for replica in r.get_replicas():
@@ -999,59 +1027,59 @@ class TestClusterRedisCommands:
def test_info(self, r):
# Map keys to same slot
- r.set('x{1}', 1)
- r.set('y{1}', 2)
- r.set('z{1}', 3)
+ r.set("x{1}", 1)
+ r.set("y{1}", 2)
+ r.set("z{1}", 3)
# Get node that handles the slot
- slot = r.keyslot('x{1}')
+ slot = r.keyslot("x{1}")
node = r.nodes_manager.get_node_from_slot(slot)
# Run info on that node
info = r.info(target_nodes=node)
assert isinstance(info, dict)
- assert info['db0']['keys'] == 3
+ assert info["db0"]["keys"] == 3
def _init_slowlog_test(self, r, node):
- slowlog_lim = r.config_get('slowlog-log-slower-than',
- target_nodes=node)
- assert r.config_set('slowlog-log-slower-than', 0, target_nodes=node) \
- is True
- return slowlog_lim['slowlog-log-slower-than']
+ slowlog_lim = r.config_get("slowlog-log-slower-than", target_nodes=node)
+ assert r.config_set("slowlog-log-slower-than", 0, target_nodes=node) is True
+ return slowlog_lim["slowlog-log-slower-than"]
def _teardown_slowlog_test(self, r, node, prev_limit):
- assert r.config_set('slowlog-log-slower-than', prev_limit,
- target_nodes=node) is True
+ assert (
+ r.config_set("slowlog-log-slower-than", prev_limit, target_nodes=node)
+ is True
+ )
def test_slowlog_get(self, r, slowlog):
- unicode_string = chr(3456) + 'abcd' + chr(3421)
+ unicode_string = chr(3456) + "abcd" + chr(3421)
node = r.get_node_from_key(unicode_string)
slowlog_limit = self._init_slowlog_test(r, node)
assert r.slowlog_reset(target_nodes=node)
r.get(unicode_string)
slowlog = r.slowlog_get(target_nodes=node)
assert isinstance(slowlog, list)
- commands = [log['command'] for log in slowlog]
+ commands = [log["command"] for log in slowlog]
- get_command = b' '.join((b'GET', unicode_string.encode('utf-8')))
+ get_command = b" ".join((b"GET", unicode_string.encode("utf-8")))
assert get_command in commands
- assert b'SLOWLOG RESET' in commands
+ assert b"SLOWLOG RESET" in commands
# the order should be ['GET <uni string>', 'SLOWLOG RESET'],
# but if other clients are executing commands at the same time, there
# could be commands, before, between, or after, so just check that
# the two we care about are in the appropriate order.
- assert commands.index(get_command) < commands.index(b'SLOWLOG RESET')
+ assert commands.index(get_command) < commands.index(b"SLOWLOG RESET")
# make sure other attributes are typed correctly
- assert isinstance(slowlog[0]['start_time'], int)
- assert isinstance(slowlog[0]['duration'], int)
+ assert isinstance(slowlog[0]["start_time"], int)
+ assert isinstance(slowlog[0]["duration"], int)
# rollback the slowlog limit to its original value
self._teardown_slowlog_test(r, node, slowlog_limit)
def test_slowlog_get_limit(self, r, slowlog):
assert r.slowlog_reset()
- node = r.get_node_from_key('foo')
+ node = r.get_node_from_key("foo")
slowlog_limit = self._init_slowlog_test(r, node)
- r.get('foo')
+ r.get("foo")
slowlog = r.slowlog_get(1, target_nodes=node)
assert isinstance(slowlog, list)
# only one command, based on the number we passed to slowlog_get()
@@ -1059,8 +1087,8 @@ class TestClusterRedisCommands:
self._teardown_slowlog_test(r, node, slowlog_limit)
def test_slowlog_length(self, r, slowlog):
- r.get('foo')
- node = r.nodes_manager.get_node_from_slot(key_slot(b'foo'))
+ r.get("foo")
+ node = r.nodes_manager.get_node_from_slot(key_slot(b"foo"))
slowlog_len = r.slowlog_len(target_nodes=node)
assert isinstance(slowlog_len, int)
@@ -1070,47 +1098,46 @@ class TestClusterRedisCommands:
assert isinstance(t[0], int)
assert isinstance(t[1], int)
- @skip_if_server_version_lt('4.0.0')
+ @skip_if_server_version_lt("4.0.0")
def test_memory_usage(self, r):
- r.set('foo', 'bar')
- assert isinstance(r.memory_usage('foo'), int)
+ r.set("foo", "bar")
+ assert isinstance(r.memory_usage("foo"), int)
- @skip_if_server_version_lt('4.0.0')
+ @skip_if_server_version_lt("4.0.0")
def test_memory_malloc_stats(self, r):
assert r.memory_malloc_stats()
- @skip_if_server_version_lt('4.0.0')
+ @skip_if_server_version_lt("4.0.0")
def test_memory_stats(self, r):
# put a key into the current db to make sure that "db.<current-db>"
# has data
- r.set('foo', 'bar')
- node = r.nodes_manager.get_node_from_slot(key_slot(b'foo'))
+ r.set("foo", "bar")
+ node = r.nodes_manager.get_node_from_slot(key_slot(b"foo"))
stats = r.memory_stats(target_nodes=node)
assert isinstance(stats, dict)
for key, value in stats.items():
- if key.startswith('db.'):
+ if key.startswith("db."):
assert isinstance(value, dict)
- @skip_if_server_version_lt('4.0.0')
+ @skip_if_server_version_lt("4.0.0")
def test_memory_help(self, r):
with pytest.raises(NotImplementedError):
r.memory_help()
- @skip_if_server_version_lt('4.0.0')
+ @skip_if_server_version_lt("4.0.0")
def test_memory_doctor(self, r):
with pytest.raises(NotImplementedError):
r.memory_doctor()
def test_lastsave(self, r):
node = r.get_primaries()[0]
- assert isinstance(r.lastsave(target_nodes=node),
- datetime.datetime)
+ assert isinstance(r.lastsave(target_nodes=node), datetime.datetime)
def test_cluster_echo(self, r):
node = r.get_primaries()[0]
- assert r.echo('foo bar', node) == b'foo bar'
+ assert r.echo("foo bar", node) == b"foo bar"
- @skip_if_server_version_lt('1.0.0')
+ @skip_if_server_version_lt("1.0.0")
def test_debug_segfault(self, r):
with pytest.raises(NotImplementedError):
r.debug_segfault()
@@ -1118,39 +1145,41 @@ class TestClusterRedisCommands:
def test_config_resetstat(self, r):
node = r.get_primaries()[0]
r.ping(target_nodes=node)
- prior_commands_processed = \
- int(r.info(target_nodes=node)['total_commands_processed'])
+ prior_commands_processed = int(
+ r.info(target_nodes=node)["total_commands_processed"]
+ )
assert prior_commands_processed >= 1
r.config_resetstat(target_nodes=node)
- reset_commands_processed = \
- int(r.info(target_nodes=node)['total_commands_processed'])
+ reset_commands_processed = int(
+ r.info(target_nodes=node)["total_commands_processed"]
+ )
assert reset_commands_processed < prior_commands_processed
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_client_trackinginfo(self, r):
node = r.get_primaries()[0]
res = r.client_trackinginfo(target_nodes=node)
assert len(res) > 2
- assert 'prefixes' in res
+ assert "prefixes" in res
- @skip_if_server_version_lt('2.9.50')
+ @skip_if_server_version_lt("2.9.50")
def test_client_pause(self, r):
node = r.get_primaries()[0]
assert r.client_pause(1, target_nodes=node)
assert r.client_pause(timeout=1, target_nodes=node)
with pytest.raises(RedisError):
- r.client_pause(timeout='not an integer', target_nodes=node)
+ r.client_pause(timeout="not an integer", target_nodes=node)
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_client_unpause(self, r):
assert r.client_unpause()
- @skip_if_server_version_lt('5.0.0')
+ @skip_if_server_version_lt("5.0.0")
def test_client_id(self, r):
node = r.get_primaries()[0]
assert r.client_id(target_nodes=node) > 0
- @skip_if_server_version_lt('5.0.0')
+ @skip_if_server_version_lt("5.0.0")
def test_client_unblock(self, r):
node = r.get_primaries()[0]
myid = r.client_id(target_nodes=node)
@@ -1158,82 +1187,88 @@ class TestClusterRedisCommands:
assert not r.client_unblock(myid, error=True, target_nodes=node)
assert not r.client_unblock(myid, error=False, target_nodes=node)
- @skip_if_server_version_lt('6.0.0')
+ @skip_if_server_version_lt("6.0.0")
def test_client_getredir(self, r):
node = r.get_primaries()[0]
assert isinstance(r.client_getredir(target_nodes=node), int)
assert r.client_getredir(target_nodes=node) == -1
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_client_info(self, r):
node = r.get_primaries()[0]
info = r.client_info(target_nodes=node)
assert isinstance(info, dict)
- assert 'addr' in info
+ assert "addr" in info
- @skip_if_server_version_lt('2.6.9')
+ @skip_if_server_version_lt("2.6.9")
def test_client_kill(self, r, r2):
node = r.get_primaries()[0]
- r.client_setname('redis-py-c1', target_nodes='all')
- r2.client_setname('redis-py-c2', target_nodes='all')
- clients = [client for client in r.client_list(target_nodes=node)
- if client.get('name') in ['redis-py-c1', 'redis-py-c2']]
+ r.client_setname("redis-py-c1", target_nodes="all")
+ r2.client_setname("redis-py-c2", target_nodes="all")
+ clients = [
+ client
+ for client in r.client_list(target_nodes=node)
+ if client.get("name") in ["redis-py-c1", "redis-py-c2"]
+ ]
assert len(clients) == 2
- clients_by_name = {client.get('name'): client for client in clients}
+ clients_by_name = {client.get("name"): client for client in clients}
- client_addr = clients_by_name['redis-py-c2'].get('addr')
+ client_addr = clients_by_name["redis-py-c2"].get("addr")
assert r.client_kill(client_addr, target_nodes=node) is True
- clients = [client for client in r.client_list(target_nodes=node)
- if client.get('name') in ['redis-py-c1', 'redis-py-c2']]
+ clients = [
+ client
+ for client in r.client_list(target_nodes=node)
+ if client.get("name") in ["redis-py-c1", "redis-py-c2"]
+ ]
assert len(clients) == 1
- assert clients[0].get('name') == 'redis-py-c1'
+ assert clients[0].get("name") == "redis-py-c1"
- @skip_if_server_version_lt('2.6.0')
+ @skip_if_server_version_lt("2.6.0")
def test_cluster_bitop_not_empty_string(self, r):
- r['{foo}a'] = ''
- r.bitop('not', '{foo}r', '{foo}a')
- assert r.get('{foo}r') is None
+ r["{foo}a"] = ""
+ r.bitop("not", "{foo}r", "{foo}a")
+ assert r.get("{foo}r") is None
- @skip_if_server_version_lt('2.6.0')
+ @skip_if_server_version_lt("2.6.0")
def test_cluster_bitop_not(self, r):
- test_str = b'\xAA\x00\xFF\x55'
+ test_str = b"\xAA\x00\xFF\x55"
correct = ~0xAA00FF55 & 0xFFFFFFFF
- r['{foo}a'] = test_str
- r.bitop('not', '{foo}r', '{foo}a')
- assert int(binascii.hexlify(r['{foo}r']), 16) == correct
+ r["{foo}a"] = test_str
+ r.bitop("not", "{foo}r", "{foo}a")
+ assert int(binascii.hexlify(r["{foo}r"]), 16) == correct
- @skip_if_server_version_lt('2.6.0')
+ @skip_if_server_version_lt("2.6.0")
def test_cluster_bitop_not_in_place(self, r):
- test_str = b'\xAA\x00\xFF\x55'
+ test_str = b"\xAA\x00\xFF\x55"
correct = ~0xAA00FF55 & 0xFFFFFFFF
- r['{foo}a'] = test_str
- r.bitop('not', '{foo}a', '{foo}a')
- assert int(binascii.hexlify(r['{foo}a']), 16) == correct
+ r["{foo}a"] = test_str
+ r.bitop("not", "{foo}a", "{foo}a")
+ assert int(binascii.hexlify(r["{foo}a"]), 16) == correct
- @skip_if_server_version_lt('2.6.0')
+ @skip_if_server_version_lt("2.6.0")
def test_cluster_bitop_single_string(self, r):
- test_str = b'\x01\x02\xFF'
- r['{foo}a'] = test_str
- r.bitop('and', '{foo}res1', '{foo}a')
- r.bitop('or', '{foo}res2', '{foo}a')
- r.bitop('xor', '{foo}res3', '{foo}a')
- assert r['{foo}res1'] == test_str
- assert r['{foo}res2'] == test_str
- assert r['{foo}res3'] == test_str
-
- @skip_if_server_version_lt('2.6.0')
+ test_str = b"\x01\x02\xFF"
+ r["{foo}a"] = test_str
+ r.bitop("and", "{foo}res1", "{foo}a")
+ r.bitop("or", "{foo}res2", "{foo}a")
+ r.bitop("xor", "{foo}res3", "{foo}a")
+ assert r["{foo}res1"] == test_str
+ assert r["{foo}res2"] == test_str
+ assert r["{foo}res3"] == test_str
+
+ @skip_if_server_version_lt("2.6.0")
def test_cluster_bitop_string_operands(self, r):
- r['{foo}a'] = b'\x01\x02\xFF\xFF'
- r['{foo}b'] = b'\x01\x02\xFF'
- r.bitop('and', '{foo}res1', '{foo}a', '{foo}b')
- r.bitop('or', '{foo}res2', '{foo}a', '{foo}b')
- r.bitop('xor', '{foo}res3', '{foo}a', '{foo}b')
- assert int(binascii.hexlify(r['{foo}res1']), 16) == 0x0102FF00
- assert int(binascii.hexlify(r['{foo}res2']), 16) == 0x0102FFFF
- assert int(binascii.hexlify(r['{foo}res3']), 16) == 0x000000FF
-
- @skip_if_server_version_lt('6.2.0')
+ r["{foo}a"] = b"\x01\x02\xFF\xFF"
+ r["{foo}b"] = b"\x01\x02\xFF"
+ r.bitop("and", "{foo}res1", "{foo}a", "{foo}b")
+ r.bitop("or", "{foo}res2", "{foo}a", "{foo}b")
+ r.bitop("xor", "{foo}res3", "{foo}a", "{foo}b")
+ assert int(binascii.hexlify(r["{foo}res1"]), 16) == 0x0102FF00
+ assert int(binascii.hexlify(r["{foo}res2"]), 16) == 0x0102FFFF
+ assert int(binascii.hexlify(r["{foo}res3"]), 16) == 0x000000FF
+
+ @skip_if_server_version_lt("6.2.0")
def test_cluster_copy(self, r):
assert r.copy("{foo}a", "{foo}b") == 0
r.set("{foo}a", "bar")
@@ -1241,449 +1276,493 @@ class TestClusterRedisCommands:
assert r.get("{foo}a") == b"bar"
assert r.get("{foo}b") == b"bar"
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_cluster_copy_and_replace(self, r):
r.set("{foo}a", "foo1")
r.set("{foo}b", "foo2")
assert r.copy("{foo}a", "{foo}b") == 0
assert r.copy("{foo}a", "{foo}b", replace=True) == 1
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_cluster_lmove(self, r):
- r.rpush('{foo}a', 'one', 'two', 'three', 'four')
- assert r.lmove('{foo}a', '{foo}b')
- assert r.lmove('{foo}a', '{foo}b', 'right', 'left')
+ r.rpush("{foo}a", "one", "two", "three", "four")
+ assert r.lmove("{foo}a", "{foo}b")
+ assert r.lmove("{foo}a", "{foo}b", "right", "left")
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_cluster_blmove(self, r):
- r.rpush('{foo}a', 'one', 'two', 'three', 'four')
- assert r.blmove('{foo}a', '{foo}b', 5)
- assert r.blmove('{foo}a', '{foo}b', 1, 'RIGHT', 'LEFT')
+ r.rpush("{foo}a", "one", "two", "three", "four")
+ assert r.blmove("{foo}a", "{foo}b", 5)
+ assert r.blmove("{foo}a", "{foo}b", 1, "RIGHT", "LEFT")
def test_cluster_msetnx(self, r):
- d = {'{foo}a': b'1', '{foo}b': b'2', '{foo}c': b'3'}
+ d = {"{foo}a": b"1", "{foo}b": b"2", "{foo}c": b"3"}
assert r.msetnx(d)
- d2 = {'{foo}a': b'x', '{foo}d': b'4'}
+ d2 = {"{foo}a": b"x", "{foo}d": b"4"}
assert not r.msetnx(d2)
for k, v in d.items():
assert r[k] == v
- assert r.get('{foo}d') is None
+ assert r.get("{foo}d") is None
def test_cluster_rename(self, r):
- r['{foo}a'] = '1'
- assert r.rename('{foo}a', '{foo}b')
- assert r.get('{foo}a') is None
- assert r['{foo}b'] == b'1'
+ r["{foo}a"] = "1"
+ assert r.rename("{foo}a", "{foo}b")
+ assert r.get("{foo}a") is None
+ assert r["{foo}b"] == b"1"
def test_cluster_renamenx(self, r):
- r['{foo}a'] = '1'
- r['{foo}b'] = '2'
- assert not r.renamenx('{foo}a', '{foo}b')
- assert r['{foo}a'] == b'1'
- assert r['{foo}b'] == b'2'
+ r["{foo}a"] = "1"
+ r["{foo}b"] = "2"
+ assert not r.renamenx("{foo}a", "{foo}b")
+ assert r["{foo}a"] == b"1"
+ assert r["{foo}b"] == b"2"
# LIST COMMANDS
def test_cluster_blpop(self, r):
- r.rpush('{foo}a', '1', '2')
- r.rpush('{foo}b', '3', '4')
- assert r.blpop(['{foo}b', '{foo}a'], timeout=1) == (b'{foo}b', b'3')
- assert r.blpop(['{foo}b', '{foo}a'], timeout=1) == (b'{foo}b', b'4')
- assert r.blpop(['{foo}b', '{foo}a'], timeout=1) == (b'{foo}a', b'1')
- assert r.blpop(['{foo}b', '{foo}a'], timeout=1) == (b'{foo}a', b'2')
- assert r.blpop(['{foo}b', '{foo}a'], timeout=1) is None
- r.rpush('{foo}c', '1')
- assert r.blpop('{foo}c', timeout=1) == (b'{foo}c', b'1')
+ r.rpush("{foo}a", "1", "2")
+ r.rpush("{foo}b", "3", "4")
+ assert r.blpop(["{foo}b", "{foo}a"], timeout=1) == (b"{foo}b", b"3")
+ assert r.blpop(["{foo}b", "{foo}a"], timeout=1) == (b"{foo}b", b"4")
+ assert r.blpop(["{foo}b", "{foo}a"], timeout=1) == (b"{foo}a", b"1")
+ assert r.blpop(["{foo}b", "{foo}a"], timeout=1) == (b"{foo}a", b"2")
+ assert r.blpop(["{foo}b", "{foo}a"], timeout=1) is None
+ r.rpush("{foo}c", "1")
+ assert r.blpop("{foo}c", timeout=1) == (b"{foo}c", b"1")
def test_cluster_brpop(self, r):
- r.rpush('{foo}a', '1', '2')
- r.rpush('{foo}b', '3', '4')
- assert r.brpop(['{foo}b', '{foo}a'], timeout=1) == (b'{foo}b', b'4')
- assert r.brpop(['{foo}b', '{foo}a'], timeout=1) == (b'{foo}b', b'3')
- assert r.brpop(['{foo}b', '{foo}a'], timeout=1) == (b'{foo}a', b'2')
- assert r.brpop(['{foo}b', '{foo}a'], timeout=1) == (b'{foo}a', b'1')
- assert r.brpop(['{foo}b', '{foo}a'], timeout=1) is None
- r.rpush('{foo}c', '1')
- assert r.brpop('{foo}c', timeout=1) == (b'{foo}c', b'1')
+ r.rpush("{foo}a", "1", "2")
+ r.rpush("{foo}b", "3", "4")
+ assert r.brpop(["{foo}b", "{foo}a"], timeout=1) == (b"{foo}b", b"4")
+ assert r.brpop(["{foo}b", "{foo}a"], timeout=1) == (b"{foo}b", b"3")
+ assert r.brpop(["{foo}b", "{foo}a"], timeout=1) == (b"{foo}a", b"2")
+ assert r.brpop(["{foo}b", "{foo}a"], timeout=1) == (b"{foo}a", b"1")
+ assert r.brpop(["{foo}b", "{foo}a"], timeout=1) is None
+ r.rpush("{foo}c", "1")
+ assert r.brpop("{foo}c", timeout=1) == (b"{foo}c", b"1")
def test_cluster_brpoplpush(self, r):
- r.rpush('{foo}a', '1', '2')
- r.rpush('{foo}b', '3', '4')
- assert r.brpoplpush('{foo}a', '{foo}b') == b'2'
- assert r.brpoplpush('{foo}a', '{foo}b') == b'1'
- assert r.brpoplpush('{foo}a', '{foo}b', timeout=1) is None
- assert r.lrange('{foo}a', 0, -1) == []
- assert r.lrange('{foo}b', 0, -1) == [b'1', b'2', b'3', b'4']
+ r.rpush("{foo}a", "1", "2")
+ r.rpush("{foo}b", "3", "4")
+ assert r.brpoplpush("{foo}a", "{foo}b") == b"2"
+ assert r.brpoplpush("{foo}a", "{foo}b") == b"1"
+ assert r.brpoplpush("{foo}a", "{foo}b", timeout=1) is None
+ assert r.lrange("{foo}a", 0, -1) == []
+ assert r.lrange("{foo}b", 0, -1) == [b"1", b"2", b"3", b"4"]
def test_cluster_brpoplpush_empty_string(self, r):
- r.rpush('{foo}a', '')
- assert r.brpoplpush('{foo}a', '{foo}b') == b''
+ r.rpush("{foo}a", "")
+ assert r.brpoplpush("{foo}a", "{foo}b") == b""
def test_cluster_rpoplpush(self, r):
- r.rpush('{foo}a', 'a1', 'a2', 'a3')
- r.rpush('{foo}b', 'b1', 'b2', 'b3')
- assert r.rpoplpush('{foo}a', '{foo}b') == b'a3'
- assert r.lrange('{foo}a', 0, -1) == [b'a1', b'a2']
- assert r.lrange('{foo}b', 0, -1) == [b'a3', b'b1', b'b2', b'b3']
+ r.rpush("{foo}a", "a1", "a2", "a3")
+ r.rpush("{foo}b", "b1", "b2", "b3")
+ assert r.rpoplpush("{foo}a", "{foo}b") == b"a3"
+ assert r.lrange("{foo}a", 0, -1) == [b"a1", b"a2"]
+ assert r.lrange("{foo}b", 0, -1) == [b"a3", b"b1", b"b2", b"b3"]
def test_cluster_sdiff(self, r):
- r.sadd('{foo}a', '1', '2', '3')
- assert r.sdiff('{foo}a', '{foo}b') == {b'1', b'2', b'3'}
- r.sadd('{foo}b', '2', '3')
- assert r.sdiff('{foo}a', '{foo}b') == {b'1'}
+ r.sadd("{foo}a", "1", "2", "3")
+ assert r.sdiff("{foo}a", "{foo}b") == {b"1", b"2", b"3"}
+ r.sadd("{foo}b", "2", "3")
+ assert r.sdiff("{foo}a", "{foo}b") == {b"1"}
def test_cluster_sdiffstore(self, r):
- r.sadd('{foo}a', '1', '2', '3')
- assert r.sdiffstore('{foo}c', '{foo}a', '{foo}b') == 3
- assert r.smembers('{foo}c') == {b'1', b'2', b'3'}
- r.sadd('{foo}b', '2', '3')
- assert r.sdiffstore('{foo}c', '{foo}a', '{foo}b') == 1
- assert r.smembers('{foo}c') == {b'1'}
+ r.sadd("{foo}a", "1", "2", "3")
+ assert r.sdiffstore("{foo}c", "{foo}a", "{foo}b") == 3
+ assert r.smembers("{foo}c") == {b"1", b"2", b"3"}
+ r.sadd("{foo}b", "2", "3")
+ assert r.sdiffstore("{foo}c", "{foo}a", "{foo}b") == 1
+ assert r.smembers("{foo}c") == {b"1"}
def test_cluster_sinter(self, r):
- r.sadd('{foo}a', '1', '2', '3')
- assert r.sinter('{foo}a', '{foo}b') == set()
- r.sadd('{foo}b', '2', '3')
- assert r.sinter('{foo}a', '{foo}b') == {b'2', b'3'}
+ r.sadd("{foo}a", "1", "2", "3")
+ assert r.sinter("{foo}a", "{foo}b") == set()
+ r.sadd("{foo}b", "2", "3")
+ assert r.sinter("{foo}a", "{foo}b") == {b"2", b"3"}
def test_cluster_sinterstore(self, r):
- r.sadd('{foo}a', '1', '2', '3')
- assert r.sinterstore('{foo}c', '{foo}a', '{foo}b') == 0
- assert r.smembers('{foo}c') == set()
- r.sadd('{foo}b', '2', '3')
- assert r.sinterstore('{foo}c', '{foo}a', '{foo}b') == 2
- assert r.smembers('{foo}c') == {b'2', b'3'}
+ r.sadd("{foo}a", "1", "2", "3")
+ assert r.sinterstore("{foo}c", "{foo}a", "{foo}b") == 0
+ assert r.smembers("{foo}c") == set()
+ r.sadd("{foo}b", "2", "3")
+ assert r.sinterstore("{foo}c", "{foo}a", "{foo}b") == 2
+ assert r.smembers("{foo}c") == {b"2", b"3"}
def test_cluster_smove(self, r):
- r.sadd('{foo}a', 'a1', 'a2')
- r.sadd('{foo}b', 'b1', 'b2')
- assert r.smove('{foo}a', '{foo}b', 'a1')
- assert r.smembers('{foo}a') == {b'a2'}
- assert r.smembers('{foo}b') == {b'b1', b'b2', b'a1'}
+ r.sadd("{foo}a", "a1", "a2")
+ r.sadd("{foo}b", "b1", "b2")
+ assert r.smove("{foo}a", "{foo}b", "a1")
+ assert r.smembers("{foo}a") == {b"a2"}
+ assert r.smembers("{foo}b") == {b"b1", b"b2", b"a1"}
def test_cluster_sunion(self, r):
- r.sadd('{foo}a', '1', '2')
- r.sadd('{foo}b', '2', '3')
- assert r.sunion('{foo}a', '{foo}b') == {b'1', b'2', b'3'}
+ r.sadd("{foo}a", "1", "2")
+ r.sadd("{foo}b", "2", "3")
+ assert r.sunion("{foo}a", "{foo}b") == {b"1", b"2", b"3"}
def test_cluster_sunionstore(self, r):
- r.sadd('{foo}a', '1', '2')
- r.sadd('{foo}b', '2', '3')
- assert r.sunionstore('{foo}c', '{foo}a', '{foo}b') == 3
- assert r.smembers('{foo}c') == {b'1', b'2', b'3'}
+ r.sadd("{foo}a", "1", "2")
+ r.sadd("{foo}b", "2", "3")
+ assert r.sunionstore("{foo}c", "{foo}a", "{foo}b") == 3
+ assert r.smembers("{foo}c") == {b"1", b"2", b"3"}
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_cluster_zdiff(self, r):
- r.zadd('{foo}a', {'a1': 1, 'a2': 2, 'a3': 3})
- r.zadd('{foo}b', {'a1': 1, 'a2': 2})
- assert r.zdiff(['{foo}a', '{foo}b']) == [b'a3']
- assert r.zdiff(['{foo}a', '{foo}b'], withscores=True) == [b'a3', b'3']
+ r.zadd("{foo}a", {"a1": 1, "a2": 2, "a3": 3})
+ r.zadd("{foo}b", {"a1": 1, "a2": 2})
+ assert r.zdiff(["{foo}a", "{foo}b"]) == [b"a3"]
+ assert r.zdiff(["{foo}a", "{foo}b"], withscores=True) == [b"a3", b"3"]
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_cluster_zdiffstore(self, r):
- r.zadd('{foo}a', {'a1': 1, 'a2': 2, 'a3': 3})
- r.zadd('{foo}b', {'a1': 1, 'a2': 2})
- assert r.zdiffstore("{foo}out", ['{foo}a', '{foo}b'])
- assert r.zrange("{foo}out", 0, -1) == [b'a3']
- assert r.zrange("{foo}out", 0, -1, withscores=True) == [(b'a3', 3.0)]
+ r.zadd("{foo}a", {"a1": 1, "a2": 2, "a3": 3})
+ r.zadd("{foo}b", {"a1": 1, "a2": 2})
+ assert r.zdiffstore("{foo}out", ["{foo}a", "{foo}b"])
+ assert r.zrange("{foo}out", 0, -1) == [b"a3"]
+ assert r.zrange("{foo}out", 0, -1, withscores=True) == [(b"a3", 3.0)]
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_cluster_zinter(self, r):
- r.zadd('{foo}a', {'a1': 1, 'a2': 2, 'a3': 1})
- r.zadd('{foo}b', {'a1': 2, 'a2': 2, 'a3': 2})
- r.zadd('{foo}c', {'a1': 6, 'a3': 5, 'a4': 4})
- assert r.zinter(['{foo}a', '{foo}b', '{foo}c']) == [b'a3', b'a1']
+ r.zadd("{foo}a", {"a1": 1, "a2": 2, "a3": 1})
+ r.zadd("{foo}b", {"a1": 2, "a2": 2, "a3": 2})
+ r.zadd("{foo}c", {"a1": 6, "a3": 5, "a4": 4})
+ assert r.zinter(["{foo}a", "{foo}b", "{foo}c"]) == [b"a3", b"a1"]
# invalid aggregation
with pytest.raises(DataError):
- r.zinter(['{foo}a', '{foo}b', '{foo}c'],
- aggregate='foo', withscores=True)
+ r.zinter(["{foo}a", "{foo}b", "{foo}c"], aggregate="foo", withscores=True)
# aggregate with SUM
- assert r.zinter(['{foo}a', '{foo}b', '{foo}c'], withscores=True) \
- == [(b'a3', 8), (b'a1', 9)]
+ assert r.zinter(["{foo}a", "{foo}b", "{foo}c"], withscores=True) == [
+ (b"a3", 8),
+ (b"a1", 9),
+ ]
# aggregate with MAX
- assert r.zinter(['{foo}a', '{foo}b', '{foo}c'], aggregate='MAX',
- withscores=True) \
- == [(b'a3', 5), (b'a1', 6)]
+ assert r.zinter(
+ ["{foo}a", "{foo}b", "{foo}c"], aggregate="MAX", withscores=True
+ ) == [(b"a3", 5), (b"a1", 6)]
# aggregate with MIN
- assert r.zinter(['{foo}a', '{foo}b', '{foo}c'], aggregate='MIN',
- withscores=True) \
- == [(b'a1', 1), (b'a3', 1)]
+ assert r.zinter(
+ ["{foo}a", "{foo}b", "{foo}c"], aggregate="MIN", withscores=True
+ ) == [(b"a1", 1), (b"a3", 1)]
# with weights
- assert r.zinter({'{foo}a': 1, '{foo}b': 2, '{foo}c': 3},
- withscores=True) \
- == [(b'a3', 20), (b'a1', 23)]
+ assert r.zinter({"{foo}a": 1, "{foo}b": 2, "{foo}c": 3}, withscores=True) == [
+ (b"a3", 20),
+ (b"a1", 23),
+ ]
def test_cluster_zinterstore_sum(self, r):
- r.zadd('{foo}a', {'a1': 1, 'a2': 1, 'a3': 1})
- r.zadd('{foo}b', {'a1': 2, 'a2': 2, 'a3': 2})
- r.zadd('{foo}c', {'a1': 6, 'a3': 5, 'a4': 4})
- assert r.zinterstore('{foo}d', ['{foo}a', '{foo}b', '{foo}c']) == 2
- assert r.zrange('{foo}d', 0, -1, withscores=True) == \
- [(b'a3', 8), (b'a1', 9)]
+ r.zadd("{foo}a", {"a1": 1, "a2": 1, "a3": 1})
+ r.zadd("{foo}b", {"a1": 2, "a2": 2, "a3": 2})
+ r.zadd("{foo}c", {"a1": 6, "a3": 5, "a4": 4})
+ assert r.zinterstore("{foo}d", ["{foo}a", "{foo}b", "{foo}c"]) == 2
+ assert r.zrange("{foo}d", 0, -1, withscores=True) == [(b"a3", 8), (b"a1", 9)]
def test_cluster_zinterstore_max(self, r):
- r.zadd('{foo}a', {'a1': 1, 'a2': 1, 'a3': 1})
- r.zadd('{foo}b', {'a1': 2, 'a2': 2, 'a3': 2})
- r.zadd('{foo}c', {'a1': 6, 'a3': 5, 'a4': 4})
- assert r.zinterstore(
- '{foo}d', ['{foo}a', '{foo}b', '{foo}c'], aggregate='MAX') == 2
- assert r.zrange('{foo}d', 0, -1, withscores=True) == \
- [(b'a3', 5), (b'a1', 6)]
+ r.zadd("{foo}a", {"a1": 1, "a2": 1, "a3": 1})
+ r.zadd("{foo}b", {"a1": 2, "a2": 2, "a3": 2})
+ r.zadd("{foo}c", {"a1": 6, "a3": 5, "a4": 4})
+ assert (
+ r.zinterstore("{foo}d", ["{foo}a", "{foo}b", "{foo}c"], aggregate="MAX")
+ == 2
+ )
+ assert r.zrange("{foo}d", 0, -1, withscores=True) == [(b"a3", 5), (b"a1", 6)]
def test_cluster_zinterstore_min(self, r):
- r.zadd('{foo}a', {'a1': 1, 'a2': 2, 'a3': 3})
- r.zadd('{foo}b', {'a1': 2, 'a2': 3, 'a3': 5})
- r.zadd('{foo}c', {'a1': 6, 'a3': 5, 'a4': 4})
- assert r.zinterstore(
- '{foo}d', ['{foo}a', '{foo}b', '{foo}c'], aggregate='MIN') == 2
- assert r.zrange('{foo}d', 0, -1, withscores=True) == \
- [(b'a1', 1), (b'a3', 3)]
+ r.zadd("{foo}a", {"a1": 1, "a2": 2, "a3": 3})
+ r.zadd("{foo}b", {"a1": 2, "a2": 3, "a3": 5})
+ r.zadd("{foo}c", {"a1": 6, "a3": 5, "a4": 4})
+ assert (
+ r.zinterstore("{foo}d", ["{foo}a", "{foo}b", "{foo}c"], aggregate="MIN")
+ == 2
+ )
+ assert r.zrange("{foo}d", 0, -1, withscores=True) == [(b"a1", 1), (b"a3", 3)]
def test_cluster_zinterstore_with_weight(self, r):
- r.zadd('{foo}a', {'a1': 1, 'a2': 1, 'a3': 1})
- r.zadd('{foo}b', {'a1': 2, 'a2': 2, 'a3': 2})
- r.zadd('{foo}c', {'a1': 6, 'a3': 5, 'a4': 4})
- assert r.zinterstore(
- '{foo}d', {'{foo}a': 1, '{foo}b': 2, '{foo}c': 3}) == 2
- assert r.zrange('{foo}d', 0, -1, withscores=True) == \
- [(b'a3', 20), (b'a1', 23)]
-
- @skip_if_server_version_lt('4.9.0')
+ r.zadd("{foo}a", {"a1": 1, "a2": 1, "a3": 1})
+ r.zadd("{foo}b", {"a1": 2, "a2": 2, "a3": 2})
+ r.zadd("{foo}c", {"a1": 6, "a3": 5, "a4": 4})
+ assert r.zinterstore("{foo}d", {"{foo}a": 1, "{foo}b": 2, "{foo}c": 3}) == 2
+ assert r.zrange("{foo}d", 0, -1, withscores=True) == [(b"a3", 20), (b"a1", 23)]
+
+ @skip_if_server_version_lt("4.9.0")
def test_cluster_bzpopmax(self, r):
- r.zadd('{foo}a', {'a1': 1, 'a2': 2})
- r.zadd('{foo}b', {'b1': 10, 'b2': 20})
- assert r.bzpopmax(['{foo}b', '{foo}a'], timeout=1) == (
- b'{foo}b', b'b2', 20)
- assert r.bzpopmax(['{foo}b', '{foo}a'], timeout=1) == (
- b'{foo}b', b'b1', 10)
- assert r.bzpopmax(['{foo}b', '{foo}a'], timeout=1) == (
- b'{foo}a', b'a2', 2)
- assert r.bzpopmax(['{foo}b', '{foo}a'], timeout=1) == (
- b'{foo}a', b'a1', 1)
- assert r.bzpopmax(['{foo}b', '{foo}a'], timeout=1) is None
- r.zadd('{foo}c', {'c1': 100})
- assert r.bzpopmax('{foo}c', timeout=1) == (b'{foo}c', b'c1', 100)
-
- @skip_if_server_version_lt('4.9.0')
+ r.zadd("{foo}a", {"a1": 1, "a2": 2})
+ r.zadd("{foo}b", {"b1": 10, "b2": 20})
+ assert r.bzpopmax(["{foo}b", "{foo}a"], timeout=1) == (b"{foo}b", b"b2", 20)
+ assert r.bzpopmax(["{foo}b", "{foo}a"], timeout=1) == (b"{foo}b", b"b1", 10)
+ assert r.bzpopmax(["{foo}b", "{foo}a"], timeout=1) == (b"{foo}a", b"a2", 2)
+ assert r.bzpopmax(["{foo}b", "{foo}a"], timeout=1) == (b"{foo}a", b"a1", 1)
+ assert r.bzpopmax(["{foo}b", "{foo}a"], timeout=1) is None
+ r.zadd("{foo}c", {"c1": 100})
+ assert r.bzpopmax("{foo}c", timeout=1) == (b"{foo}c", b"c1", 100)
+
+ @skip_if_server_version_lt("4.9.0")
def test_cluster_bzpopmin(self, r):
- r.zadd('{foo}a', {'a1': 1, 'a2': 2})
- r.zadd('{foo}b', {'b1': 10, 'b2': 20})
- assert r.bzpopmin(['{foo}b', '{foo}a'], timeout=1) == (
- b'{foo}b', b'b1', 10)
- assert r.bzpopmin(['{foo}b', '{foo}a'], timeout=1) == (
- b'{foo}b', b'b2', 20)
- assert r.bzpopmin(['{foo}b', '{foo}a'], timeout=1) == (
- b'{foo}a', b'a1', 1)
- assert r.bzpopmin(['{foo}b', '{foo}a'], timeout=1) == (
- b'{foo}a', b'a2', 2)
- assert r.bzpopmin(['{foo}b', '{foo}a'], timeout=1) is None
- r.zadd('{foo}c', {'c1': 100})
- assert r.bzpopmin('{foo}c', timeout=1) == (b'{foo}c', b'c1', 100)
-
- @skip_if_server_version_lt('6.2.0')
+ r.zadd("{foo}a", {"a1": 1, "a2": 2})
+ r.zadd("{foo}b", {"b1": 10, "b2": 20})
+ assert r.bzpopmin(["{foo}b", "{foo}a"], timeout=1) == (b"{foo}b", b"b1", 10)
+ assert r.bzpopmin(["{foo}b", "{foo}a"], timeout=1) == (b"{foo}b", b"b2", 20)
+ assert r.bzpopmin(["{foo}b", "{foo}a"], timeout=1) == (b"{foo}a", b"a1", 1)
+ assert r.bzpopmin(["{foo}b", "{foo}a"], timeout=1) == (b"{foo}a", b"a2", 2)
+ assert r.bzpopmin(["{foo}b", "{foo}a"], timeout=1) is None
+ r.zadd("{foo}c", {"c1": 100})
+ assert r.bzpopmin("{foo}c", timeout=1) == (b"{foo}c", b"c1", 100)
+
+ @skip_if_server_version_lt("6.2.0")
def test_cluster_zrangestore(self, r):
- r.zadd('{foo}a', {'a1': 1, 'a2': 2, 'a3': 3})
- assert r.zrangestore('{foo}b', '{foo}a', 0, 1)
- assert r.zrange('{foo}b', 0, -1) == [b'a1', b'a2']
- assert r.zrangestore('{foo}b', '{foo}a', 1, 2)
- assert r.zrange('{foo}b', 0, -1) == [b'a2', b'a3']
- assert r.zrange('{foo}b', 0, -1, withscores=True) == \
- [(b'a2', 2), (b'a3', 3)]
+ r.zadd("{foo}a", {"a1": 1, "a2": 2, "a3": 3})
+ assert r.zrangestore("{foo}b", "{foo}a", 0, 1)
+ assert r.zrange("{foo}b", 0, -1) == [b"a1", b"a2"]
+ assert r.zrangestore("{foo}b", "{foo}a", 1, 2)
+ assert r.zrange("{foo}b", 0, -1) == [b"a2", b"a3"]
+ assert r.zrange("{foo}b", 0, -1, withscores=True) == [(b"a2", 2), (b"a3", 3)]
# reversed order
- assert r.zrangestore('{foo}b', '{foo}a', 1, 2, desc=True)
- assert r.zrange('{foo}b', 0, -1) == [b'a1', b'a2']
+ assert r.zrangestore("{foo}b", "{foo}a", 1, 2, desc=True)
+ assert r.zrange("{foo}b", 0, -1) == [b"a1", b"a2"]
# by score
- assert r.zrangestore('{foo}b', '{foo}a', 2, 1, byscore=True,
- offset=0, num=1, desc=True)
- assert r.zrange('{foo}b', 0, -1) == [b'a2']
+ assert r.zrangestore(
+ "{foo}b", "{foo}a", 2, 1, byscore=True, offset=0, num=1, desc=True
+ )
+ assert r.zrange("{foo}b", 0, -1) == [b"a2"]
# by lex
- assert r.zrangestore('{foo}b', '{foo}a', '[a2', '(a3', bylex=True,
- offset=0, num=1)
- assert r.zrange('{foo}b', 0, -1) == [b'a2']
+ assert r.zrangestore(
+ "{foo}b", "{foo}a", "[a2", "(a3", bylex=True, offset=0, num=1
+ )
+ assert r.zrange("{foo}b", 0, -1) == [b"a2"]
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_cluster_zunion(self, r):
- r.zadd('{foo}a', {'a1': 1, 'a2': 1, 'a3': 1})
- r.zadd('{foo}b', {'a1': 2, 'a2': 2, 'a3': 2})
- r.zadd('{foo}c', {'a1': 6, 'a3': 5, 'a4': 4})
+ r.zadd("{foo}a", {"a1": 1, "a2": 1, "a3": 1})
+ r.zadd("{foo}b", {"a1": 2, "a2": 2, "a3": 2})
+ r.zadd("{foo}c", {"a1": 6, "a3": 5, "a4": 4})
# sum
- assert r.zunion(['{foo}a', '{foo}b', '{foo}c']) == \
- [b'a2', b'a4', b'a3', b'a1']
- assert r.zunion(['{foo}a', '{foo}b', '{foo}c'], withscores=True) == \
- [(b'a2', 3), (b'a4', 4), (b'a3', 8), (b'a1', 9)]
+ assert r.zunion(["{foo}a", "{foo}b", "{foo}c"]) == [b"a2", b"a4", b"a3", b"a1"]
+ assert r.zunion(["{foo}a", "{foo}b", "{foo}c"], withscores=True) == [
+ (b"a2", 3),
+ (b"a4", 4),
+ (b"a3", 8),
+ (b"a1", 9),
+ ]
# max
- assert r.zunion(['{foo}a', '{foo}b', '{foo}c'], aggregate='MAX',
- withscores=True) \
- == [(b'a2', 2), (b'a4', 4), (b'a3', 5), (b'a1', 6)]
+ assert r.zunion(
+ ["{foo}a", "{foo}b", "{foo}c"], aggregate="MAX", withscores=True
+ ) == [(b"a2", 2), (b"a4", 4), (b"a3", 5), (b"a1", 6)]
# min
- assert r.zunion(['{foo}a', '{foo}b', '{foo}c'], aggregate='MIN',
- withscores=True) \
- == [(b'a1', 1), (b'a2', 1), (b'a3', 1), (b'a4', 4)]
+ assert r.zunion(
+ ["{foo}a", "{foo}b", "{foo}c"], aggregate="MIN", withscores=True
+ ) == [(b"a1", 1), (b"a2", 1), (b"a3", 1), (b"a4", 4)]
# with weight
- assert r.zunion({'{foo}a': 1, '{foo}b': 2, '{foo}c': 3},
- withscores=True) \
- == [(b'a2', 5), (b'a4', 12), (b'a3', 20), (b'a1', 23)]
+ assert r.zunion({"{foo}a": 1, "{foo}b": 2, "{foo}c": 3}, withscores=True) == [
+ (b"a2", 5),
+ (b"a4", 12),
+ (b"a3", 20),
+ (b"a1", 23),
+ ]
def test_cluster_zunionstore_sum(self, r):
- r.zadd('{foo}a', {'a1': 1, 'a2': 1, 'a3': 1})
- r.zadd('{foo}b', {'a1': 2, 'a2': 2, 'a3': 2})
- r.zadd('{foo}c', {'a1': 6, 'a3': 5, 'a4': 4})
- assert r.zunionstore('{foo}d', ['{foo}a', '{foo}b', '{foo}c']) == 4
- assert r.zrange('{foo}d', 0, -1, withscores=True) == \
- [(b'a2', 3), (b'a4', 4), (b'a3', 8), (b'a1', 9)]
+ r.zadd("{foo}a", {"a1": 1, "a2": 1, "a3": 1})
+ r.zadd("{foo}b", {"a1": 2, "a2": 2, "a3": 2})
+ r.zadd("{foo}c", {"a1": 6, "a3": 5, "a4": 4})
+ assert r.zunionstore("{foo}d", ["{foo}a", "{foo}b", "{foo}c"]) == 4
+ assert r.zrange("{foo}d", 0, -1, withscores=True) == [
+ (b"a2", 3),
+ (b"a4", 4),
+ (b"a3", 8),
+ (b"a1", 9),
+ ]
def test_cluster_zunionstore_max(self, r):
- r.zadd('{foo}a', {'a1': 1, 'a2': 1, 'a3': 1})
- r.zadd('{foo}b', {'a1': 2, 'a2': 2, 'a3': 2})
- r.zadd('{foo}c', {'a1': 6, 'a3': 5, 'a4': 4})
- assert r.zunionstore(
- '{foo}d', ['{foo}a', '{foo}b', '{foo}c'], aggregate='MAX') == 4
- assert r.zrange('{foo}d', 0, -1, withscores=True) == \
- [(b'a2', 2), (b'a4', 4), (b'a3', 5), (b'a1', 6)]
+ r.zadd("{foo}a", {"a1": 1, "a2": 1, "a3": 1})
+ r.zadd("{foo}b", {"a1": 2, "a2": 2, "a3": 2})
+ r.zadd("{foo}c", {"a1": 6, "a3": 5, "a4": 4})
+ assert (
+ r.zunionstore("{foo}d", ["{foo}a", "{foo}b", "{foo}c"], aggregate="MAX")
+ == 4
+ )
+ assert r.zrange("{foo}d", 0, -1, withscores=True) == [
+ (b"a2", 2),
+ (b"a4", 4),
+ (b"a3", 5),
+ (b"a1", 6),
+ ]
def test_cluster_zunionstore_min(self, r):
- r.zadd('{foo}a', {'a1': 1, 'a2': 2, 'a3': 3})
- r.zadd('{foo}b', {'a1': 2, 'a2': 2, 'a3': 4})
- r.zadd('{foo}c', {'a1': 6, 'a3': 5, 'a4': 4})
- assert r.zunionstore(
- '{foo}d', ['{foo}a', '{foo}b', '{foo}c'], aggregate='MIN') == 4
- assert r.zrange('{foo}d', 0, -1, withscores=True) == \
- [(b'a1', 1), (b'a2', 2), (b'a3', 3), (b'a4', 4)]
+ r.zadd("{foo}a", {"a1": 1, "a2": 2, "a3": 3})
+ r.zadd("{foo}b", {"a1": 2, "a2": 2, "a3": 4})
+ r.zadd("{foo}c", {"a1": 6, "a3": 5, "a4": 4})
+ assert (
+ r.zunionstore("{foo}d", ["{foo}a", "{foo}b", "{foo}c"], aggregate="MIN")
+ == 4
+ )
+ assert r.zrange("{foo}d", 0, -1, withscores=True) == [
+ (b"a1", 1),
+ (b"a2", 2),
+ (b"a3", 3),
+ (b"a4", 4),
+ ]
def test_cluster_zunionstore_with_weight(self, r):
- r.zadd('{foo}a', {'a1': 1, 'a2': 1, 'a3': 1})
- r.zadd('{foo}b', {'a1': 2, 'a2': 2, 'a3': 2})
- r.zadd('{foo}c', {'a1': 6, 'a3': 5, 'a4': 4})
- assert r.zunionstore(
- '{foo}d', {'{foo}a': 1, '{foo}b': 2, '{foo}c': 3}) == 4
- assert r.zrange('{foo}d', 0, -1, withscores=True) == \
- [(b'a2', 5), (b'a4', 12), (b'a3', 20), (b'a1', 23)]
-
- @skip_if_server_version_lt('2.8.9')
+ r.zadd("{foo}a", {"a1": 1, "a2": 1, "a3": 1})
+ r.zadd("{foo}b", {"a1": 2, "a2": 2, "a3": 2})
+ r.zadd("{foo}c", {"a1": 6, "a3": 5, "a4": 4})
+ assert r.zunionstore("{foo}d", {"{foo}a": 1, "{foo}b": 2, "{foo}c": 3}) == 4
+ assert r.zrange("{foo}d", 0, -1, withscores=True) == [
+ (b"a2", 5),
+ (b"a4", 12),
+ (b"a3", 20),
+ (b"a1", 23),
+ ]
+
+ @skip_if_server_version_lt("2.8.9")
def test_cluster_pfcount(self, r):
- members = {b'1', b'2', b'3'}
- r.pfadd('{foo}a', *members)
- assert r.pfcount('{foo}a') == len(members)
- members_b = {b'2', b'3', b'4'}
- r.pfadd('{foo}b', *members_b)
- assert r.pfcount('{foo}b') == len(members_b)
- assert r.pfcount('{foo}a', '{foo}b') == len(members_b.union(members))
-
- @skip_if_server_version_lt('2.8.9')
+ members = {b"1", b"2", b"3"}
+ r.pfadd("{foo}a", *members)
+ assert r.pfcount("{foo}a") == len(members)
+ members_b = {b"2", b"3", b"4"}
+ r.pfadd("{foo}b", *members_b)
+ assert r.pfcount("{foo}b") == len(members_b)
+ assert r.pfcount("{foo}a", "{foo}b") == len(members_b.union(members))
+
+ @skip_if_server_version_lt("2.8.9")
def test_cluster_pfmerge(self, r):
- mema = {b'1', b'2', b'3'}
- memb = {b'2', b'3', b'4'}
- memc = {b'5', b'6', b'7'}
- r.pfadd('{foo}a', *mema)
- r.pfadd('{foo}b', *memb)
- r.pfadd('{foo}c', *memc)
- r.pfmerge('{foo}d', '{foo}c', '{foo}a')
- assert r.pfcount('{foo}d') == 6
- r.pfmerge('{foo}d', '{foo}b')
- assert r.pfcount('{foo}d') == 7
+ mema = {b"1", b"2", b"3"}
+ memb = {b"2", b"3", b"4"}
+ memc = {b"5", b"6", b"7"}
+ r.pfadd("{foo}a", *mema)
+ r.pfadd("{foo}b", *memb)
+ r.pfadd("{foo}c", *memc)
+ r.pfmerge("{foo}d", "{foo}c", "{foo}a")
+ assert r.pfcount("{foo}d") == 6
+ r.pfmerge("{foo}d", "{foo}b")
+ assert r.pfcount("{foo}d") == 7
def test_cluster_sort_store(self, r):
- r.rpush('{foo}a', '2', '3', '1')
- assert r.sort('{foo}a', store='{foo}sorted_values') == 3
- assert r.lrange('{foo}sorted_values', 0, -1) == [b'1', b'2', b'3']
+ r.rpush("{foo}a", "2", "3", "1")
+ assert r.sort("{foo}a", store="{foo}sorted_values") == 3
+ assert r.lrange("{foo}sorted_values", 0, -1) == [b"1", b"2", b"3"]
# GEO COMMANDS
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_cluster_geosearchstore(self, r):
- values = (2.1909389952632, 41.433791470673, 'place1') + \
- (2.1873744593677, 41.406342043777, 'place2')
+ values = (2.1909389952632, 41.433791470673, "place1") + (
+ 2.1873744593677,
+ 41.406342043777,
+ "place2",
+ )
- r.geoadd('{foo}barcelona', values)
- r.geosearchstore('{foo}places_barcelona', '{foo}barcelona',
- longitude=2.191, latitude=41.433, radius=1000)
- assert r.zrange('{foo}places_barcelona', 0, -1) == [b'place1']
+ r.geoadd("{foo}barcelona", values)
+ r.geosearchstore(
+ "{foo}places_barcelona",
+ "{foo}barcelona",
+ longitude=2.191,
+ latitude=41.433,
+ radius=1000,
+ )
+ assert r.zrange("{foo}places_barcelona", 0, -1) == [b"place1"]
@skip_unless_arch_bits(64)
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_geosearchstore_dist(self, r):
- values = (2.1909389952632, 41.433791470673, 'place1') + \
- (2.1873744593677, 41.406342043777, 'place2')
+ values = (2.1909389952632, 41.433791470673, "place1") + (
+ 2.1873744593677,
+ 41.406342043777,
+ "place2",
+ )
- r.geoadd('{foo}barcelona', values)
- r.geosearchstore('{foo}places_barcelona', '{foo}barcelona',
- longitude=2.191, latitude=41.433,
- radius=1000, storedist=True)
+ r.geoadd("{foo}barcelona", values)
+ r.geosearchstore(
+ "{foo}places_barcelona",
+ "{foo}barcelona",
+ longitude=2.191,
+ latitude=41.433,
+ radius=1000,
+ storedist=True,
+ )
# instead of save the geo score, the distance is saved.
- assert r.zscore('{foo}places_barcelona', 'place1') == 88.05060698409301
+ assert r.zscore("{foo}places_barcelona", "place1") == 88.05060698409301
- @skip_if_server_version_lt('3.2.0')
+ @skip_if_server_version_lt("3.2.0")
def test_cluster_georadius_store(self, r):
- values = (2.1909389952632, 41.433791470673, 'place1') + \
- (2.1873744593677, 41.406342043777, 'place2')
+ values = (2.1909389952632, 41.433791470673, "place1") + (
+ 2.1873744593677,
+ 41.406342043777,
+ "place2",
+ )
- r.geoadd('{foo}barcelona', values)
- r.georadius('{foo}barcelona', 2.191, 41.433,
- 1000, store='{foo}places_barcelona')
- assert r.zrange('{foo}places_barcelona', 0, -1) == [b'place1']
+ r.geoadd("{foo}barcelona", values)
+ r.georadius(
+ "{foo}barcelona", 2.191, 41.433, 1000, store="{foo}places_barcelona"
+ )
+ assert r.zrange("{foo}places_barcelona", 0, -1) == [b"place1"]
@skip_unless_arch_bits(64)
- @skip_if_server_version_lt('3.2.0')
+ @skip_if_server_version_lt("3.2.0")
def test_cluster_georadius_store_dist(self, r):
- values = (2.1909389952632, 41.433791470673, 'place1') + \
- (2.1873744593677, 41.406342043777, 'place2')
+ values = (2.1909389952632, 41.433791470673, "place1") + (
+ 2.1873744593677,
+ 41.406342043777,
+ "place2",
+ )
- r.geoadd('{foo}barcelona', values)
- r.georadius('{foo}barcelona', 2.191, 41.433, 1000,
- store_dist='{foo}places_barcelona')
+ r.geoadd("{foo}barcelona", values)
+ r.georadius(
+ "{foo}barcelona", 2.191, 41.433, 1000, store_dist="{foo}places_barcelona"
+ )
# instead of save the geo score, the distance is saved.
- assert r.zscore('{foo}places_barcelona', 'place1') == 88.05060698409301
+ assert r.zscore("{foo}places_barcelona", "place1") == 88.05060698409301
def test_cluster_dbsize(self, r):
- d = {'a': b'1', 'b': b'2', 'c': b'3', 'd': b'4'}
+ d = {"a": b"1", "b": b"2", "c": b"3", "d": b"4"}
assert r.mset_nonatomic(d)
- assert r.dbsize(target_nodes='primaries') == len(d)
+ assert r.dbsize(target_nodes="primaries") == len(d)
def test_cluster_keys(self, r):
assert r.keys() == []
- keys_with_underscores = {b'test_a', b'test_b'}
- keys = keys_with_underscores.union({b'testc'})
+ keys_with_underscores = {b"test_a", b"test_b"}
+ keys = keys_with_underscores.union({b"testc"})
for key in keys:
r[key] = 1
- assert set(r.keys(pattern='test_*', target_nodes='primaries')) == \
- keys_with_underscores
- assert set(r.keys(pattern='test*', target_nodes='primaries')) == keys
+ assert (
+ set(r.keys(pattern="test_*", target_nodes="primaries"))
+ == keys_with_underscores
+ )
+ assert set(r.keys(pattern="test*", target_nodes="primaries")) == keys
# SCAN COMMANDS
- @skip_if_server_version_lt('2.8.0')
+ @skip_if_server_version_lt("2.8.0")
def test_cluster_scan(self, r):
- r.set('a', 1)
- r.set('b', 2)
- r.set('c', 3)
- cursor, keys = r.scan(target_nodes='primaries')
+ r.set("a", 1)
+ r.set("b", 2)
+ r.set("c", 3)
+ cursor, keys = r.scan(target_nodes="primaries")
assert cursor == 0
- assert set(keys) == {b'a', b'b', b'c'}
- _, keys = r.scan(match='a', target_nodes='primaries')
- assert set(keys) == {b'a'}
+ assert set(keys) == {b"a", b"b", b"c"}
+ _, keys = r.scan(match="a", target_nodes="primaries")
+ assert set(keys) == {b"a"}
@skip_if_server_version_lt("6.0.0")
def test_cluster_scan_type(self, r):
- r.sadd('a-set', 1)
- r.hset('a-hash', 'foo', 2)
- r.lpush('a-list', 'aux', 3)
- _, keys = r.scan(match='a*', _type='SET', target_nodes='primaries')
- assert set(keys) == {b'a-set'}
+ r.sadd("a-set", 1)
+ r.hset("a-hash", "foo", 2)
+ r.lpush("a-list", "aux", 3)
+ _, keys = r.scan(match="a*", _type="SET", target_nodes="primaries")
+ assert set(keys) == {b"a-set"}
- @skip_if_server_version_lt('2.8.0')
+ @skip_if_server_version_lt("2.8.0")
def test_cluster_scan_iter(self, r):
- r.set('a', 1)
- r.set('b', 2)
- r.set('c', 3)
- keys = list(r.scan_iter(target_nodes='primaries'))
- assert set(keys) == {b'a', b'b', b'c'}
- keys = list(r.scan_iter(match='a', target_nodes='primaries'))
- assert set(keys) == {b'a'}
+ r.set("a", 1)
+ r.set("b", 2)
+ r.set("c", 3)
+ keys = list(r.scan_iter(target_nodes="primaries"))
+ assert set(keys) == {b"a", b"b", b"c"}
+ keys = list(r.scan_iter(match="a", target_nodes="primaries"))
+ assert set(keys) == {b"a"}
def test_cluster_randomkey(self, r):
- node = r.get_node_from_key('{foo}')
+ node = r.get_node_from_key("{foo}")
assert r.randomkey(target_nodes=node) is None
- for key in ('{foo}a', '{foo}b', '{foo}c'):
+ for key in ("{foo}a", "{foo}b", "{foo}c"):
r[key] = 1
- assert r.randomkey(target_nodes=node) in \
- (b'{foo}a', b'{foo}b', b'{foo}c')
+ assert r.randomkey(target_nodes=node) in (b"{foo}a", b"{foo}b", b"{foo}c")
@pytest.mark.onlycluster
@@ -1704,7 +1783,7 @@ class TestNodesManager:
node_5 = ClusterNode(default_host, 6375, REPLICA)
n_manager.slots_cache = {
slot_1: [node_1, node_2, node_3],
- slot_2: [node_4, node_5]
+ slot_2: [node_4, node_5],
}
primary1_name = n_manager.slots_cache[slot_1][0].name
primary2_name = n_manager.slots_cache[slot_2][0].name
@@ -1730,17 +1809,17 @@ class TestNodesManager:
"""
# Missing slot 5460
cluster_slots = [
- [0, 5459, ['127.0.0.1', 7000], ['127.0.0.1', 7003]],
- [5461, 10922, ['127.0.0.1', 7001],
- ['127.0.0.1', 7004]],
- [10923, 16383, ['127.0.0.1', 7002],
- ['127.0.0.1', 7005]],
+ [0, 5459, ["127.0.0.1", 7000], ["127.0.0.1", 7003]],
+ [5461, 10922, ["127.0.0.1", 7001], ["127.0.0.1", 7004]],
+ [10923, 16383, ["127.0.0.1", 7002], ["127.0.0.1", 7005]],
]
with pytest.raises(RedisClusterException) as ex:
- get_mocked_redis_client(host=default_host, port=default_port,
- cluster_slots=cluster_slots)
+ get_mocked_redis_client(
+ host=default_host, port=default_port, cluster_slots=cluster_slots
+ )
assert str(ex.value).startswith(
- "All slots are not covered after query all startup_nodes.")
+ "All slots are not covered after query all startup_nodes."
+ )
def test_init_slots_cache_not_require_full_coverage_error(self):
"""
@@ -1750,18 +1829,19 @@ class TestNodesManager:
"""
# Missing slot 5460
cluster_slots = [
- [0, 5459, ['127.0.0.1', 7000], ['127.0.0.1', 7003]],
- [5461, 10922, ['127.0.0.1', 7001],
- ['127.0.0.1', 7004]],
- [10923, 16383, ['127.0.0.1', 7002],
- ['127.0.0.1', 7005]],
+ [0, 5459, ["127.0.0.1", 7000], ["127.0.0.1", 7003]],
+ [5461, 10922, ["127.0.0.1", 7001], ["127.0.0.1", 7004]],
+ [10923, 16383, ["127.0.0.1", 7002], ["127.0.0.1", 7005]],
]
with pytest.raises(RedisClusterException):
- get_mocked_redis_client(host=default_host, port=default_port,
- cluster_slots=cluster_slots,
- require_full_coverage=False,
- coverage_result='yes')
+ get_mocked_redis_client(
+ host=default_host,
+ port=default_port,
+ cluster_slots=cluster_slots,
+ require_full_coverage=False,
+ coverage_result="yes",
+ )
def test_init_slots_cache_not_require_full_coverage_success(self):
"""
@@ -1771,17 +1851,18 @@ class TestNodesManager:
"""
# Missing slot 5460
cluster_slots = [
- [0, 5459, ['127.0.0.1', 7000], ['127.0.0.1', 7003]],
- [5461, 10922, ['127.0.0.1', 7001],
- ['127.0.0.1', 7004]],
- [10923, 16383, ['127.0.0.1', 7002],
- ['127.0.0.1', 7005]],
+ [0, 5459, ["127.0.0.1", 7000], ["127.0.0.1", 7003]],
+ [5461, 10922, ["127.0.0.1", 7001], ["127.0.0.1", 7004]],
+ [10923, 16383, ["127.0.0.1", 7002], ["127.0.0.1", 7005]],
]
- rc = get_mocked_redis_client(host=default_host, port=default_port,
- cluster_slots=cluster_slots,
- require_full_coverage=False,
- coverage_result='no')
+ rc = get_mocked_redis_client(
+ host=default_host,
+ port=default_port,
+ cluster_slots=cluster_slots,
+ require_full_coverage=False,
+ coverage_result="no",
+ )
assert 5460 not in rc.nodes_manager.slots_cache
@@ -1793,20 +1874,22 @@ class TestNodesManager:
"""
# Missing slot 5460
cluster_slots = [
- [0, 5459, ['127.0.0.1', 7000], ['127.0.0.1', 7003]],
- [5461, 10922, ['127.0.0.1', 7001],
- ['127.0.0.1', 7004]],
- [10923, 16383, ['127.0.0.1', 7002],
- ['127.0.0.1', 7005]],
+ [0, 5459, ["127.0.0.1", 7000], ["127.0.0.1", 7003]],
+ [5461, 10922, ["127.0.0.1", 7001], ["127.0.0.1", 7004]],
+ [10923, 16383, ["127.0.0.1", 7002], ["127.0.0.1", 7005]],
]
- with patch.object(NodesManager,
- 'cluster_require_full_coverage') as conf_check_mock:
- rc = get_mocked_redis_client(host=default_host, port=default_port,
- cluster_slots=cluster_slots,
- require_full_coverage=False,
- skip_full_coverage_check=True,
- coverage_result='no')
+ with patch.object(
+ NodesManager, "cluster_require_full_coverage"
+ ) as conf_check_mock:
+ rc = get_mocked_redis_client(
+ host=default_host,
+ port=default_port,
+ cluster_slots=cluster_slots,
+ require_full_coverage=False,
+ skip_full_coverage_check=True,
+ coverage_result="no",
+ )
assert conf_check_mock.called is False
assert 5460 not in rc.nodes_manager.slots_cache
@@ -1816,17 +1899,18 @@ class TestNodesManager:
Test that slots cache can in initialized and all slots are covered
"""
good_slots_resp = [
- [0, 5460, ['127.0.0.1', 7000], ['127.0.0.2', 7003]],
- [5461, 10922, ['127.0.0.1', 7001], ['127.0.0.2', 7004]],
- [10923, 16383, ['127.0.0.1', 7002], ['127.0.0.2', 7005]],
+ [0, 5460, ["127.0.0.1", 7000], ["127.0.0.2", 7003]],
+ [5461, 10922, ["127.0.0.1", 7001], ["127.0.0.2", 7004]],
+ [10923, 16383, ["127.0.0.1", 7002], ["127.0.0.2", 7005]],
]
- rc = get_mocked_redis_client(host=default_host, port=default_port,
- cluster_slots=good_slots_resp)
+ rc = get_mocked_redis_client(
+ host=default_host, port=default_port, cluster_slots=good_slots_resp
+ )
n_manager = rc.nodes_manager
assert len(n_manager.slots_cache) == REDIS_CLUSTER_HASH_SLOTS
for slot_info in good_slots_resp:
- all_hosts = ['127.0.0.1', '127.0.0.2']
+ all_hosts = ["127.0.0.1", "127.0.0.2"]
all_ports = [7000, 7001, 7002, 7003, 7004, 7005]
slot_start = slot_info[0]
slot_end = slot_info[1]
@@ -1861,8 +1945,8 @@ class TestNodesManager:
raise an error. In this test both nodes will say that the first
slots block should be bound to different servers.
"""
- with patch.object(NodesManager,
- 'create_redis_node') as create_redis_node:
+ with patch.object(NodesManager, "create_redis_node") as create_redis_node:
+
def create_mocked_redis_node(host, port, **kwargs):
"""
Helper function to return custom slots cache data from
@@ -1873,14 +1957,14 @@ class TestNodesManager:
[
0,
5460,
- ['127.0.0.1', 7000],
- ['127.0.0.1', 7003],
+ ["127.0.0.1", 7000],
+ ["127.0.0.1", 7003],
],
[
5461,
10922,
- ['127.0.0.1', 7001],
- ['127.0.0.1', 7004],
+ ["127.0.0.1", 7001],
+ ["127.0.0.1", 7004],
],
]
@@ -1889,31 +1973,28 @@ class TestNodesManager:
[
0,
5460,
- ['127.0.0.1', 7001],
- ['127.0.0.1', 7003],
+ ["127.0.0.1", 7001],
+ ["127.0.0.1", 7003],
],
[
5461,
10922,
- ['127.0.0.1', 7000],
- ['127.0.0.1', 7004],
+ ["127.0.0.1", 7000],
+ ["127.0.0.1", 7004],
],
]
else:
result = []
- r_node = Redis(
- host=host,
- port=port
- )
+ r_node = Redis(host=host, port=port)
orig_execute_command = r_node.execute_command
def execute_command(*args, **kwargs):
- if args[0] == 'CLUSTER SLOTS':
+ if args[0] == "CLUSTER SLOTS":
return result
- elif args[1] == 'cluster-require-full-coverage':
- return {'cluster-require-full-coverage': 'yes'}
+ elif args[1] == "cluster-require-full-coverage":
+ return {"cluster-require-full-coverage": "yes"}
else:
return orig_execute_command(*args, **kwargs)
@@ -1923,12 +2004,12 @@ class TestNodesManager:
create_redis_node.side_effect = create_mocked_redis_node
with pytest.raises(RedisClusterException) as ex:
- node_1 = ClusterNode('127.0.0.1', 7000)
- node_2 = ClusterNode('127.0.0.1', 7001)
+ node_1 = ClusterNode("127.0.0.1", 7000)
+ node_2 = ClusterNode("127.0.0.1", 7001)
RedisCluster(startup_nodes=[node_1, node_2])
assert str(ex.value).startswith(
- "startup_nodes could not agree on a valid slots cache"), str(
- ex.value)
+ "startup_nodes could not agree on a valid slots cache"
+ ), str(ex.value)
def test_cluster_one_instance(self):
"""
@@ -1936,9 +2017,8 @@ class TestNodesManager:
be validated they work.
"""
node = ClusterNode(default_host, default_port)
- cluster_slots = [[0, 16383, ['', default_port]]]
- rc = get_mocked_redis_client(startup_nodes=[node],
- cluster_slots=cluster_slots)
+ cluster_slots = [[0, 16383, ["", default_port]]]
+ rc = get_mocked_redis_client(startup_nodes=[node], cluster_slots=cluster_slots)
n = rc.nodes_manager
assert len(n.nodes_cache) == 1
@@ -1955,28 +2035,30 @@ class TestNodesManager:
If I can't connect to one of the nodes, everything should still work.
But if I can't connect to any of the nodes, exception should be thrown.
"""
- with patch.object(NodesManager,
- 'create_redis_node') as create_redis_node:
+ with patch.object(NodesManager, "create_redis_node") as create_redis_node:
+
def create_mocked_redis_node(host, port, **kwargs):
if port == 7000:
- raise ConnectionError('mock connection error for 7000')
+ raise ConnectionError("mock connection error for 7000")
r_node = Redis(host=host, port=port, decode_responses=True)
def execute_command(*args, **kwargs):
- if args[0] == 'CLUSTER SLOTS':
+ if args[0] == "CLUSTER SLOTS":
return [
[
- 0, 8191,
- ['127.0.0.1', 7001, 'node_1'],
+ 0,
+ 8191,
+ ["127.0.0.1", 7001, "node_1"],
],
[
- 8192, 16383,
- ['127.0.0.1', 7002, 'node_2'],
- ]
+ 8192,
+ 16383,
+ ["127.0.0.1", 7002, "node_2"],
+ ],
]
- elif args[1] == 'cluster-require-full-coverage':
- return {'cluster-require-full-coverage': 'yes'}
+ elif args[1] == "cluster-require-full-coverage":
+ return {"cluster-require-full-coverage": "yes"}
r_node.execute_command = execute_command
@@ -1984,25 +2066,30 @@ class TestNodesManager:
create_redis_node.side_effect = create_mocked_redis_node
- node_1 = ClusterNode('127.0.0.1', 7000)
- node_2 = ClusterNode('127.0.0.1', 7001)
+ node_1 = ClusterNode("127.0.0.1", 7000)
+ node_2 = ClusterNode("127.0.0.1", 7001)
# If all startup nodes fail to connect, connection error should be
# thrown
with pytest.raises(RedisClusterException) as e:
RedisCluster(startup_nodes=[node_1])
- assert 'Redis Cluster cannot be connected' in str(e.value)
+ assert "Redis Cluster cannot be connected" in str(e.value)
- with patch.object(CommandsParser, 'initialize',
- autospec=True) as cmd_parser_initialize:
+ with patch.object(
+ CommandsParser, "initialize", autospec=True
+ ) as cmd_parser_initialize:
def cmd_init_mock(self, r):
- self.commands = {'get': {'name': 'get', 'arity': 2,
- 'flags': ['readonly',
- 'fast'],
- 'first_key_pos': 1,
- 'last_key_pos': 1,
- 'step_count': 1}}
+ self.commands = {
+ "get": {
+ "name": "get",
+ "arity": 2,
+ "flags": ["readonly", "fast"],
+ "first_key_pos": 1,
+ "last_key_pos": 1,
+ "step_count": 1,
+ }
+ }
cmd_parser_initialize.side_effect = cmd_init_mock
# When at least one startup node is reachable, the cluster
@@ -2040,7 +2127,7 @@ class TestClusterPubSubObject:
should be determined based on the keyslot of the first command
execution.
"""
- channel_name = 'foo'
+ channel_name = "foo"
node = r.get_node_from_key(channel_name)
p = r.pubsub()
assert p.get_pubsub_node() is None
@@ -2052,7 +2139,7 @@ class TestClusterPubSubObject:
Test creation of pubsub instance with node that doesn't exists in the
cluster. RedisClusterException should be raised.
"""
- node = ClusterNode('1.1.1.1', 1111)
+ node = ClusterNode("1.1.1.1", 1111)
with pytest.raises(RedisClusterException):
r.pubsub(node)
@@ -2063,7 +2150,7 @@ class TestClusterPubSubObject:
RedisClusterException should be raised.
"""
with pytest.raises(RedisClusterException):
- r.pubsub(host='1.1.1.1', port=1111)
+ r.pubsub(host="1.1.1.1", port=1111)
def test_init_pubsub_host_or_port(self, r):
"""
@@ -2071,7 +2158,7 @@ class TestClusterPubSubObject:
versa. DataError should be raised.
"""
with pytest.raises(DataError):
- r.pubsub(host='localhost')
+ r.pubsub(host="localhost")
with pytest.raises(DataError):
r.pubsub(port=16379)
@@ -2131,14 +2218,17 @@ class TestClusterPipeline:
with pytest.raises(RedisClusterException) as ex:
r.pipeline(transaction=True)
- assert str(ex.value).startswith(
- "transaction is deprecated in cluster mode") is True
+ assert (
+ str(ex.value).startswith("transaction is deprecated in cluster mode")
+ is True
+ )
with pytest.raises(RedisClusterException) as ex:
r.pipeline(shard_hint=True)
- assert str(ex.value).startswith(
- "shard_hint is deprecated in cluster mode") is True
+ assert (
+ str(ex.value).startswith("shard_hint is deprecated in cluster mode") is True
+ )
def test_redis_cluster_pipeline(self, r):
"""
@@ -2147,7 +2237,7 @@ class TestClusterPipeline:
with r.pipeline() as pipe:
pipe.set("foo", "bar")
pipe.get("foo")
- assert pipe.execute() == [True, b'bar']
+ assert pipe.execute() == [True, b"bar"]
def test_mget_disabled(self, r):
"""
@@ -2155,7 +2245,7 @@ class TestClusterPipeline:
"""
with r.pipeline() as pipe:
with pytest.raises(RedisClusterException):
- pipe.mget(['a'])
+ pipe.mget(["a"])
def test_mset_disabled(self, r):
"""
@@ -2163,7 +2253,7 @@ class TestClusterPipeline:
"""
with r.pipeline() as pipe:
with pytest.raises(RedisClusterException):
- pipe.mset({'a': 1, 'b': 2})
+ pipe.mset({"a": 1, "b": 2})
def test_rename_disabled(self, r):
"""
@@ -2171,7 +2261,7 @@ class TestClusterPipeline:
"""
with r.pipeline(transaction=False) as pipe:
with pytest.raises(RedisClusterException):
- pipe.rename('a', 'b')
+ pipe.rename("a", "b")
def test_renamenx_disabled(self, r):
"""
@@ -2179,15 +2269,15 @@ class TestClusterPipeline:
"""
with r.pipeline(transaction=False) as pipe:
with pytest.raises(RedisClusterException):
- pipe.renamenx('a', 'b')
+ pipe.renamenx("a", "b")
def test_delete_single(self, r):
"""
Test a single delete operation
"""
- r['a'] = 1
+ r["a"] = 1
with r.pipeline(transaction=False) as pipe:
- pipe.delete('a')
+ pipe.delete("a")
assert pipe.execute() == [1]
def test_multi_delete_unsupported(self, r):
@@ -2195,10 +2285,10 @@ class TestClusterPipeline:
Test that multi delete operation is unsupported
"""
with r.pipeline(transaction=False) as pipe:
- r['a'] = 1
- r['b'] = 2
+ r["a"] = 1
+ r["b"] = 2
with pytest.raises(RedisClusterException):
- pipe.delete('a', 'b')
+ pipe.delete("a", "b")
def test_brpoplpush_disabled(self, r):
"""
@@ -2293,41 +2383,40 @@ class TestClusterPipeline:
Test multi key operation with a single slot
"""
pipe = r.pipeline(transaction=False)
- pipe.set('a{foo}', 1)
- pipe.set('b{foo}', 2)
- pipe.set('c{foo}', 3)
- pipe.get('a{foo}')
- pipe.get('b{foo}')
- pipe.get('c{foo}')
+ pipe.set("a{foo}", 1)
+ pipe.set("b{foo}", 2)
+ pipe.set("c{foo}", 3)
+ pipe.get("a{foo}")
+ pipe.get("b{foo}")
+ pipe.get("c{foo}")
res = pipe.execute()
- assert res == [True, True, True, b'1', b'2', b'3']
+ assert res == [True, True, True, b"1", b"2", b"3"]
def test_multi_key_operation_with_multi_slots(self, r):
"""
Test multi key operation with more than one slot
"""
pipe = r.pipeline(transaction=False)
- pipe.set('a{foo}', 1)
- pipe.set('b{foo}', 2)
- pipe.set('c{foo}', 3)
- pipe.set('bar', 4)
- pipe.set('bazz', 5)
- pipe.get('a{foo}')
- pipe.get('b{foo}')
- pipe.get('c{foo}')
- pipe.get('bar')
- pipe.get('bazz')
+ pipe.set("a{foo}", 1)
+ pipe.set("b{foo}", 2)
+ pipe.set("c{foo}", 3)
+ pipe.set("bar", 4)
+ pipe.set("bazz", 5)
+ pipe.get("a{foo}")
+ pipe.get("b{foo}")
+ pipe.get("c{foo}")
+ pipe.get("bar")
+ pipe.get("bazz")
res = pipe.execute()
- assert res == [True, True, True, True, True, b'1', b'2', b'3', b'4',
- b'5']
+ assert res == [True, True, True, True, True, b"1", b"2", b"3", b"4", b"5"]
def test_connection_error_not_raised(self, r):
"""
Test that the pipeline doesn't raise an error on connection error when
raise_on_error=False
"""
- key = 'foo'
+ key = "foo"
node = r.get_node_from_key(key, False)
def raise_connection_error():
@@ -2345,7 +2434,7 @@ class TestClusterPipeline:
Test that the pipeline raises an error on connection error when
raise_on_error=True
"""
- key = 'foo'
+ key = "foo"
node = r.get_node_from_key(key, False)
def raise_connection_error():
@@ -2361,7 +2450,7 @@ class TestClusterPipeline:
"""
Test redirection on ASK error
"""
- key = 'foo'
+ key = "foo"
first_node = r.get_node_from_key(key, False)
ask_node = None
for node in r.get_nodes():
@@ -2369,8 +2458,7 @@ class TestClusterPipeline:
ask_node = node
break
if ask_node is None:
- warnings.warn("skipping this test since the cluster has only one "
- "node")
+ warnings.warn("skipping this test since the cluster has only one " "node")
return
ask_msg = f"{r.keyslot(key)} {ask_node.host}:{ask_node.port}"
@@ -2379,11 +2467,11 @@ class TestClusterPipeline:
with r.pipeline() as pipe:
mock_node_resp_func(first_node, raise_ask_error)
- mock_node_resp(ask_node, 'MOCK_OK')
+ mock_node_resp(ask_node, "MOCK_OK")
res = pipe.get(key).execute()
assert first_node.redis_connection.connection.read_response.called
assert ask_node.redis_connection.connection.read_response.called
- assert res == ['MOCK_OK']
+ assert res == ["MOCK_OK"]
def test_empty_stack(self, r):
"""
@@ -2405,17 +2493,16 @@ class TestReadOnlyPipeline:
"""
On readonly mode, we supports get related stuff only.
"""
- r.readonly(target_nodes='all')
- r.set('foo71', 'a1') # we assume this key is set on 127.0.0.1:7001
- r.zadd('foo88',
- {'z1': 1}) # we assume this key is set on 127.0.0.1:7002
- r.zadd('foo88', {'z2': 4})
+ r.readonly(target_nodes="all")
+ r.set("foo71", "a1") # we assume this key is set on 127.0.0.1:7001
+ r.zadd("foo88", {"z1": 1}) # we assume this key is set on 127.0.0.1:7002
+ r.zadd("foo88", {"z2": 4})
with r.pipeline() as readonly_pipe:
- readonly_pipe.get('foo71').zrange('foo88', 0, 5, withscores=True)
+ readonly_pipe.get("foo71").zrange("foo88", 0, 5, withscores=True)
assert readonly_pipe.execute() == [
- b'a1',
- [(b'z1', 1.0), (b'z2', 4)],
+ b"a1",
+ [(b"z1", 1.0), (b"z2", 4)],
]
def test_moved_redirection_on_slave_with_default(self, r):
@@ -2423,8 +2510,8 @@ class TestReadOnlyPipeline:
On Pipeline, we redirected once and finally get from master with
readonly client when data is completely moved.
"""
- key = 'bar'
- r.set(key, 'foo')
+ key = "bar"
+ r.set(key, "foo")
# set read_from_replicas to True
r.read_from_replicas = True
primary = r.get_node_from_key(key, False)
@@ -2456,15 +2543,15 @@ class TestReadOnlyPipeline:
"""
# Create a cluster with reading from replications
ro = _get_client(RedisCluster, request, read_from_replicas=True)
- key = 'bar'
- ro.set(key, 'foo')
+ key = "bar"
+ ro.set(key, "foo")
import time
+
time.sleep(0.2)
with ro.pipeline() as readonly_pipe:
- mock_all_nodes_resp(ro, 'MOCK_OK')
+ mock_all_nodes_resp(ro, "MOCK_OK")
assert readonly_pipe.read_from_replicas is True
- assert readonly_pipe.get(key).get(
- key).execute() == ['MOCK_OK', 'MOCK_OK']
+ assert readonly_pipe.get(key).get(key).execute() == ["MOCK_OK", "MOCK_OK"]
slot_nodes = ro.nodes_manager.slots_cache[ro.keyslot(key)]
if len(slot_nodes) > 1:
executed_on_replica = False
diff --git a/tests/test_command_parser.py b/tests/test_command_parser.py
index ba129ba..ad29e69 100644
--- a/tests/test_command_parser.py
+++ b/tests/test_command_parser.py
@@ -7,56 +7,74 @@ class TestCommandsParser:
def test_init_commands(self, r):
commands_parser = CommandsParser(r)
assert commands_parser.commands is not None
- assert 'get' in commands_parser.commands
+ assert "get" in commands_parser.commands
def test_get_keys_predetermined_key_location(self, r):
commands_parser = CommandsParser(r)
- args1 = ['GET', 'foo']
- args2 = ['OBJECT', 'encoding', 'foo']
- args3 = ['MGET', 'foo', 'bar', 'foobar']
- assert commands_parser.get_keys(r, *args1) == ['foo']
- assert commands_parser.get_keys(r, *args2) == ['foo']
- assert commands_parser.get_keys(r, *args3) == ['foo', 'bar', 'foobar']
+ args1 = ["GET", "foo"]
+ args2 = ["OBJECT", "encoding", "foo"]
+ args3 = ["MGET", "foo", "bar", "foobar"]
+ assert commands_parser.get_keys(r, *args1) == ["foo"]
+ assert commands_parser.get_keys(r, *args2) == ["foo"]
+ assert commands_parser.get_keys(r, *args3) == ["foo", "bar", "foobar"]
@pytest.mark.filterwarnings("ignore:ResponseError")
def test_get_moveable_keys(self, r):
commands_parser = CommandsParser(r)
- args1 = ['EVAL', 'return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}', 2, 'key1',
- 'key2', 'first', 'second']
- args2 = ['XREAD', 'COUNT', 2, b'STREAMS', 'mystream', 'writers', 0, 0]
- args3 = ['ZUNIONSTORE', 'out', 2, 'zset1', 'zset2', 'WEIGHTS', 2, 3]
- args4 = ['GEORADIUS', 'Sicily', 15, 37, 200, 'km', 'WITHCOORD',
- b'STORE', 'out']
- args5 = ['MEMORY USAGE', 'foo']
- args6 = ['MIGRATE', '192.168.1.34', 6379, "", 0, 5000, b'KEYS',
- 'key1', 'key2', 'key3']
- args7 = ['MIGRATE', '192.168.1.34', 6379, "key1", 0, 5000]
- args8 = ['STRALGO', 'LCS', 'STRINGS', 'string_a', 'string_b']
- args9 = ['STRALGO', 'LCS', 'KEYS', 'key1', 'key2']
+ args1 = [
+ "EVAL",
+ "return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}",
+ 2,
+ "key1",
+ "key2",
+ "first",
+ "second",
+ ]
+ args2 = ["XREAD", "COUNT", 2, b"STREAMS", "mystream", "writers", 0, 0]
+ args3 = ["ZUNIONSTORE", "out", 2, "zset1", "zset2", "WEIGHTS", 2, 3]
+ args4 = ["GEORADIUS", "Sicily", 15, 37, 200, "km", "WITHCOORD", b"STORE", "out"]
+ args5 = ["MEMORY USAGE", "foo"]
+ args6 = [
+ "MIGRATE",
+ "192.168.1.34",
+ 6379,
+ "",
+ 0,
+ 5000,
+ b"KEYS",
+ "key1",
+ "key2",
+ "key3",
+ ]
+ args7 = ["MIGRATE", "192.168.1.34", 6379, "key1", 0, 5000]
+ args8 = ["STRALGO", "LCS", "STRINGS", "string_a", "string_b"]
+ args9 = ["STRALGO", "LCS", "KEYS", "key1", "key2"]
- assert commands_parser.get_keys(
- r, *args1).sort() == ['key1', 'key2'].sort()
- assert commands_parser.get_keys(
- r, *args2).sort() == ['mystream', 'writers'].sort()
- assert commands_parser.get_keys(
- r, *args3).sort() == ['out', 'zset1', 'zset2'].sort()
- assert commands_parser.get_keys(
- r, *args4).sort() == ['Sicily', 'out'].sort()
- assert commands_parser.get_keys(r, *args5).sort() == ['foo'].sort()
- assert commands_parser.get_keys(
- r, *args6).sort() == ['key1', 'key2', 'key3'].sort()
- assert commands_parser.get_keys(r, *args7).sort() == ['key1'].sort()
+ assert commands_parser.get_keys(r, *args1).sort() == ["key1", "key2"].sort()
+ assert (
+ commands_parser.get_keys(r, *args2).sort() == ["mystream", "writers"].sort()
+ )
+ assert (
+ commands_parser.get_keys(r, *args3).sort()
+ == ["out", "zset1", "zset2"].sort()
+ )
+ assert commands_parser.get_keys(r, *args4).sort() == ["Sicily", "out"].sort()
+ assert commands_parser.get_keys(r, *args5).sort() == ["foo"].sort()
+ assert (
+ commands_parser.get_keys(r, *args6).sort()
+ == ["key1", "key2", "key3"].sort()
+ )
+ assert commands_parser.get_keys(r, *args7).sort() == ["key1"].sort()
assert commands_parser.get_keys(r, *args8) is None
- assert commands_parser.get_keys(
- r, *args9).sort() == ['key1', 'key2'].sort()
+ assert commands_parser.get_keys(r, *args9).sort() == ["key1", "key2"].sort()
def test_get_pubsub_keys(self, r):
commands_parser = CommandsParser(r)
- args1 = ['PUBLISH', 'foo', 'bar']
- args2 = ['PUBSUB NUMSUB', 'foo1', 'foo2', 'foo3']
- args3 = ['PUBSUB channels', '*']
- args4 = ['SUBSCRIBE', 'foo1', 'foo2', 'foo3']
- assert commands_parser.get_keys(r, *args1) == ['foo']
- assert commands_parser.get_keys(r, *args2) == ['foo1', 'foo2', 'foo3']
- assert commands_parser.get_keys(r, *args3) == ['*']
- assert commands_parser.get_keys(r, *args4) == ['foo1', 'foo2', 'foo3']
+ args1 = ["PUBLISH", "foo", "bar"]
+ args2 = ["PUBSUB NUMSUB", "foo1", "foo2", "foo3"]
+ args3 = ["PUBSUB channels", "*"]
+ args4 = ["SUBSCRIBE", "foo1", "foo2", "foo3"]
+ assert commands_parser.get_keys(r, *args1) == ["foo"]
+ assert commands_parser.get_keys(r, *args2) == ["foo1", "foo2", "foo3"]
+ assert commands_parser.get_keys(r, *args3) == ["*"]
+ assert commands_parser.get_keys(r, *args4) == ["foo1", "foo2", "foo3"]
diff --git a/tests/test_commands.py b/tests/test_commands.py
index 444a163..1eb35f8 100644
--- a/tests/test_commands.py
+++ b/tests/test_commands.py
@@ -1,19 +1,20 @@
import binascii
import datetime
-import pytest
import re
-import redis
import time
from string import ascii_letters
-from redis.client import parse_info
+import pytest
+
+import redis
from redis import exceptions
+from redis.client import parse_info
from .conftest import (
_get_client,
+ skip_if_redis_enterprise,
skip_if_server_version_gte,
skip_if_server_version_lt,
- skip_if_redis_enterprise,
skip_unless_arch_bits,
)
@@ -21,21 +22,22 @@ from .conftest import (
@pytest.fixture()
def slowlog(request, r):
current_config = r.config_get()
- old_slower_than_value = current_config['slowlog-log-slower-than']
- old_max_legnth_value = current_config['slowlog-max-len']
+ old_slower_than_value = current_config["slowlog-log-slower-than"]
+ old_max_legnth_value = current_config["slowlog-max-len"]
def cleanup():
- r.config_set('slowlog-log-slower-than', old_slower_than_value)
- r.config_set('slowlog-max-len', old_max_legnth_value)
+ r.config_set("slowlog-log-slower-than", old_slower_than_value)
+ r.config_set("slowlog-max-len", old_max_legnth_value)
+
request.addfinalizer(cleanup)
- r.config_set('slowlog-log-slower-than', 0)
- r.config_set('slowlog-max-len', 128)
+ r.config_set("slowlog-log-slower-than", 0)
+ r.config_set("slowlog-max-len", 128)
def redis_server_time(client):
seconds, milliseconds = client.time()
- timestamp = float(f'{seconds}.{milliseconds}')
+ timestamp = float(f"{seconds}.{milliseconds}")
return datetime.datetime.fromtimestamp(timestamp)
@@ -54,19 +56,19 @@ class TestResponseCallbacks:
def test_response_callbacks(self, r):
assert r.response_callbacks == redis.Redis.RESPONSE_CALLBACKS
assert id(r.response_callbacks) != id(redis.Redis.RESPONSE_CALLBACKS)
- r.set_response_callback('GET', lambda x: 'static')
- r['a'] = 'foo'
- assert r['a'] == 'static'
+ r.set_response_callback("GET", lambda x: "static")
+ r["a"] = "foo"
+ assert r["a"] == "static"
def test_case_insensitive_command_names(self, r):
- assert r.response_callbacks['del'] == r.response_callbacks['DEL']
+ assert r.response_callbacks["del"] == r.response_callbacks["DEL"]
class TestRedisCommands:
def test_command_on_invalid_key_type(self, r):
- r.lpush('a', '1')
+ r.lpush("a", "1")
with pytest.raises(redis.ResponseError):
- r['a']
+ r["a"]
# SERVER INFORMATION
@pytest.mark.onlynoncluster
@@ -74,20 +76,20 @@ class TestRedisCommands:
def test_acl_cat_no_category(self, r):
categories = r.acl_cat()
assert isinstance(categories, list)
- assert 'read' in categories
+ assert "read" in categories
@pytest.mark.onlynoncluster
@skip_if_server_version_lt("6.0.0")
def test_acl_cat_with_category(self, r):
- commands = r.acl_cat('read')
+ commands = r.acl_cat("read")
assert isinstance(commands, list)
- assert 'get' in commands
+ assert "get" in commands
@pytest.mark.onlynoncluster
@skip_if_server_version_lt("6.0.0")
@skip_if_redis_enterprise
def test_acl_deluser(self, r, request):
- username = 'redis-py-user'
+ username = "redis-py-user"
def teardown():
r.acl_deluser(username)
@@ -99,7 +101,7 @@ class TestRedisCommands:
assert r.acl_deluser(username) == 1
# now, a group of users
- users = [f'bogususer_{r}' for r in range(0, 5)]
+ users = [f"bogususer_{r}" for r in range(0, 5)]
for u in users:
r.acl_setuser(u, enabled=False, reset=True)
assert r.acl_deluser(*users) > 1
@@ -117,7 +119,7 @@ class TestRedisCommands:
assert isinstance(password, str)
with pytest.raises(exceptions.DataError):
- r.acl_genpass('value')
+ r.acl_genpass("value")
r.acl_genpass(-5)
r.acl_genpass(5555)
@@ -128,90 +130,109 @@ class TestRedisCommands:
@skip_if_server_version_lt("6.0.0")
@skip_if_redis_enterprise
def test_acl_getuser_setuser(self, r, request):
- username = 'redis-py-user'
+ username = "redis-py-user"
def teardown():
r.acl_deluser(username)
+
request.addfinalizer(teardown)
# test enabled=False
assert r.acl_setuser(username, enabled=False, reset=True)
acl = r.acl_getuser(username)
- assert acl['categories'] == ['-@all']
- assert acl['commands'] == []
- assert acl['keys'] == []
- assert acl['passwords'] == []
- assert 'off' in acl['flags']
- assert acl['enabled'] is False
+ assert acl["categories"] == ["-@all"]
+ assert acl["commands"] == []
+ assert acl["keys"] == []
+ assert acl["passwords"] == []
+ assert "off" in acl["flags"]
+ assert acl["enabled"] is False
# test nopass=True
assert r.acl_setuser(username, enabled=True, reset=True, nopass=True)
acl = r.acl_getuser(username)
- assert acl['categories'] == ['-@all']
- assert acl['commands'] == []
- assert acl['keys'] == []
- assert acl['passwords'] == []
- assert 'on' in acl['flags']
- assert 'nopass' in acl['flags']
- assert acl['enabled'] is True
+ assert acl["categories"] == ["-@all"]
+ assert acl["commands"] == []
+ assert acl["keys"] == []
+ assert acl["passwords"] == []
+ assert "on" in acl["flags"]
+ assert "nopass" in acl["flags"]
+ assert acl["enabled"] is True
# test all args
- assert r.acl_setuser(username, enabled=True, reset=True,
- passwords=['+pass1', '+pass2'],
- categories=['+set', '+@hash', '-geo'],
- commands=['+get', '+mget', '-hset'],
- keys=['cache:*', 'objects:*'])
+ assert r.acl_setuser(
+ username,
+ enabled=True,
+ reset=True,
+ passwords=["+pass1", "+pass2"],
+ categories=["+set", "+@hash", "-geo"],
+ commands=["+get", "+mget", "-hset"],
+ keys=["cache:*", "objects:*"],
+ )
acl = r.acl_getuser(username)
- assert set(acl['categories']) == {'-@all', '+@set', '+@hash'}
- assert set(acl['commands']) == {'+get', '+mget', '-hset'}
- assert acl['enabled'] is True
- assert 'on' in acl['flags']
- assert set(acl['keys']) == {b'cache:*', b'objects:*'}
- assert len(acl['passwords']) == 2
+ assert set(acl["categories"]) == {"-@all", "+@set", "+@hash"}
+ assert set(acl["commands"]) == {"+get", "+mget", "-hset"}
+ assert acl["enabled"] is True
+ assert "on" in acl["flags"]
+ assert set(acl["keys"]) == {b"cache:*", b"objects:*"}
+ assert len(acl["passwords"]) == 2
# test reset=False keeps existing ACL and applies new ACL on top
- assert r.acl_setuser(username, enabled=True, reset=True,
- passwords=['+pass1'],
- categories=['+@set'],
- commands=['+get'],
- keys=['cache:*'])
- assert r.acl_setuser(username, enabled=True,
- passwords=['+pass2'],
- categories=['+@hash'],
- commands=['+mget'],
- keys=['objects:*'])
+ assert r.acl_setuser(
+ username,
+ enabled=True,
+ reset=True,
+ passwords=["+pass1"],
+ categories=["+@set"],
+ commands=["+get"],
+ keys=["cache:*"],
+ )
+ assert r.acl_setuser(
+ username,
+ enabled=True,
+ passwords=["+pass2"],
+ categories=["+@hash"],
+ commands=["+mget"],
+ keys=["objects:*"],
+ )
acl = r.acl_getuser(username)
- assert set(acl['categories']) == {'-@all', '+@set', '+@hash'}
- assert set(acl['commands']) == {'+get', '+mget'}
- assert acl['enabled'] is True
- assert 'on' in acl['flags']
- assert set(acl['keys']) == {b'cache:*', b'objects:*'}
- assert len(acl['passwords']) == 2
+ assert set(acl["categories"]) == {"-@all", "+@set", "+@hash"}
+ assert set(acl["commands"]) == {"+get", "+mget"}
+ assert acl["enabled"] is True
+ assert "on" in acl["flags"]
+ assert set(acl["keys"]) == {b"cache:*", b"objects:*"}
+ assert len(acl["passwords"]) == 2
# test removal of passwords
- assert r.acl_setuser(username, enabled=True, reset=True,
- passwords=['+pass1', '+pass2'])
- assert len(r.acl_getuser(username)['passwords']) == 2
- assert r.acl_setuser(username, enabled=True,
- passwords=['-pass2'])
- assert len(r.acl_getuser(username)['passwords']) == 1
+ assert r.acl_setuser(
+ username, enabled=True, reset=True, passwords=["+pass1", "+pass2"]
+ )
+ assert len(r.acl_getuser(username)["passwords"]) == 2
+ assert r.acl_setuser(username, enabled=True, passwords=["-pass2"])
+ assert len(r.acl_getuser(username)["passwords"]) == 1
# Resets and tests that hashed passwords are set properly.
- hashed_password = ('5e884898da28047151d0e56f8dc629'
- '2773603d0d6aabbdd62a11ef721d1542d8')
- assert r.acl_setuser(username, enabled=True, reset=True,
- hashed_passwords=['+' + hashed_password])
+ hashed_password = (
+ "5e884898da28047151d0e56f8dc629" "2773603d0d6aabbdd62a11ef721d1542d8"
+ )
+ assert r.acl_setuser(
+ username, enabled=True, reset=True, hashed_passwords=["+" + hashed_password]
+ )
acl = r.acl_getuser(username)
- assert acl['passwords'] == [hashed_password]
+ assert acl["passwords"] == [hashed_password]
# test removal of hashed passwords
- assert r.acl_setuser(username, enabled=True, reset=True,
- hashed_passwords=['+' + hashed_password],
- passwords=['+pass1'])
- assert len(r.acl_getuser(username)['passwords']) == 2
- assert r.acl_setuser(username, enabled=True,
- hashed_passwords=['-' + hashed_password])
- assert len(r.acl_getuser(username)['passwords']) == 1
+ assert r.acl_setuser(
+ username,
+ enabled=True,
+ reset=True,
+ hashed_passwords=["+" + hashed_password],
+ passwords=["+pass1"],
+ )
+ assert len(r.acl_getuser(username)["passwords"]) == 2
+ assert r.acl_setuser(
+ username, enabled=True, hashed_passwords=["-" + hashed_password]
+ )
+ assert len(r.acl_getuser(username)["passwords"]) == 1
@pytest.mark.onlynoncluster
@skip_if_server_version_lt("6.0.0")
@@ -224,10 +245,11 @@ class TestRedisCommands:
@skip_if_server_version_lt("6.0.0")
@skip_if_redis_enterprise
def test_acl_list(self, r, request):
- username = 'redis-py-user'
+ username = "redis-py-user"
def teardown():
r.acl_deluser(username)
+
request.addfinalizer(teardown)
assert r.acl_setuser(username, enabled=False, reset=True)
@@ -238,77 +260,86 @@ class TestRedisCommands:
@skip_if_server_version_lt("6.0.0")
@skip_if_redis_enterprise
def test_acl_log(self, r, request):
- username = 'redis-py-user'
+ username = "redis-py-user"
def teardown():
r.acl_deluser(username)
request.addfinalizer(teardown)
- r.acl_setuser(username, enabled=True, reset=True,
- commands=['+get', '+set', '+select'],
- keys=['cache:*'], nopass=True)
+ r.acl_setuser(
+ username,
+ enabled=True,
+ reset=True,
+ commands=["+get", "+set", "+select"],
+ keys=["cache:*"],
+ nopass=True,
+ )
r.acl_log_reset()
- user_client = _get_client(redis.Redis, request, flushdb=False,
- username=username)
+ user_client = _get_client(
+ redis.Redis, request, flushdb=False, username=username
+ )
# Valid operation and key
- assert user_client.set('cache:0', 1)
- assert user_client.get('cache:0') == b'1'
+ assert user_client.set("cache:0", 1)
+ assert user_client.get("cache:0") == b"1"
# Invalid key
with pytest.raises(exceptions.NoPermissionError):
- user_client.get('violated_cache:0')
+ user_client.get("violated_cache:0")
# Invalid operation
with pytest.raises(exceptions.NoPermissionError):
- user_client.hset('cache:0', 'hkey', 'hval')
+ user_client.hset("cache:0", "hkey", "hval")
assert isinstance(r.acl_log(), list)
assert len(r.acl_log()) == 2
assert len(r.acl_log(count=1)) == 1
assert isinstance(r.acl_log()[0], dict)
- assert 'client-info' in r.acl_log(count=1)[0]
+ assert "client-info" in r.acl_log(count=1)[0]
assert r.acl_log_reset()
@pytest.mark.onlynoncluster
@skip_if_server_version_lt("6.0.0")
@skip_if_redis_enterprise
def test_acl_setuser_categories_without_prefix_fails(self, r, request):
- username = 'redis-py-user'
+ username = "redis-py-user"
def teardown():
r.acl_deluser(username)
+
request.addfinalizer(teardown)
with pytest.raises(exceptions.DataError):
- r.acl_setuser(username, categories=['list'])
+ r.acl_setuser(username, categories=["list"])
@pytest.mark.onlynoncluster
@skip_if_server_version_lt("6.0.0")
@skip_if_redis_enterprise
def test_acl_setuser_commands_without_prefix_fails(self, r, request):
- username = 'redis-py-user'
+ username = "redis-py-user"
def teardown():
r.acl_deluser(username)
+
request.addfinalizer(teardown)
with pytest.raises(exceptions.DataError):
- r.acl_setuser(username, commands=['get'])
+ r.acl_setuser(username, commands=["get"])
@pytest.mark.onlynoncluster
@skip_if_server_version_lt("6.0.0")
@skip_if_redis_enterprise
def test_acl_setuser_add_passwords_and_nopass_fails(self, r, request):
- username = 'redis-py-user'
+ username = "redis-py-user"
def teardown():
r.acl_deluser(username)
+
request.addfinalizer(teardown)
with pytest.raises(exceptions.DataError):
- r.acl_setuser(username, passwords='+mypass', nopass=True)
+ r.acl_setuser(username, passwords="+mypass", nopass=True)
@pytest.mark.onlynoncluster
@skip_if_server_version_lt("6.0.0")
@@ -327,36 +358,36 @@ class TestRedisCommands:
def test_client_list(self, r):
clients = r.client_list()
assert isinstance(clients[0], dict)
- assert 'addr' in clients[0]
+ assert "addr" in clients[0]
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_client_info(self, r):
info = r.client_info()
assert isinstance(info, dict)
- assert 'addr' in info
+ assert "addr" in info
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('5.0.0')
+ @skip_if_server_version_lt("5.0.0")
def test_client_list_types_not_replica(self, r):
with pytest.raises(exceptions.RedisError):
- r.client_list(_type='not a client type')
- for client_type in ['normal', 'master', 'pubsub']:
+ r.client_list(_type="not a client type")
+ for client_type in ["normal", "master", "pubsub"]:
clients = r.client_list(_type=client_type)
assert isinstance(clients, list)
@skip_if_redis_enterprise
def test_client_list_replica(self, r):
- clients = r.client_list(_type='replica')
+ clients = r.client_list(_type="replica")
assert isinstance(clients, list)
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_client_list_client_id(self, r, request):
clients = r.client_list()
- clients = r.client_list(client_id=[clients[0]['id']])
+ clients = r.client_list(client_id=[clients[0]["id"]])
assert len(clients) == 1
- assert 'addr' in clients[0]
+ assert "addr" in clients[0]
# testing multiple client ids
_get_client(redis.Redis, request, flushdb=False)
@@ -366,19 +397,19 @@ class TestRedisCommands:
assert len(clients_listed) > 1
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('5.0.0')
+ @skip_if_server_version_lt("5.0.0")
def test_client_id(self, r):
assert r.client_id() > 0
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_client_trackinginfo(self, r):
res = r.client_trackinginfo()
assert len(res) > 2
- assert 'prefixes' in res
+ assert "prefixes" in res
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('5.0.0')
+ @skip_if_server_version_lt("5.0.0")
def test_client_unblock(self, r):
myid = r.client_id()
assert not r.client_unblock(myid)
@@ -386,36 +417,42 @@ class TestRedisCommands:
assert not r.client_unblock(myid, error=False)
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('2.6.9')
+ @skip_if_server_version_lt("2.6.9")
def test_client_getname(self, r):
assert r.client_getname() is None
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('2.6.9')
+ @skip_if_server_version_lt("2.6.9")
def test_client_setname(self, r):
- assert r.client_setname('redis_py_test')
- assert r.client_getname() == 'redis_py_test'
+ assert r.client_setname("redis_py_test")
+ assert r.client_getname() == "redis_py_test"
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('2.6.9')
+ @skip_if_server_version_lt("2.6.9")
def test_client_kill(self, r, r2):
- r.client_setname('redis-py-c1')
- r2.client_setname('redis-py-c2')
- clients = [client for client in r.client_list()
- if client.get('name') in ['redis-py-c1', 'redis-py-c2']]
+ r.client_setname("redis-py-c1")
+ r2.client_setname("redis-py-c2")
+ clients = [
+ client
+ for client in r.client_list()
+ if client.get("name") in ["redis-py-c1", "redis-py-c2"]
+ ]
assert len(clients) == 2
- clients_by_name = {client.get('name'): client for client in clients}
+ clients_by_name = {client.get("name"): client for client in clients}
- client_addr = clients_by_name['redis-py-c2'].get('addr')
+ client_addr = clients_by_name["redis-py-c2"].get("addr")
assert r.client_kill(client_addr) is True
- clients = [client for client in r.client_list()
- if client.get('name') in ['redis-py-c1', 'redis-py-c2']]
+ clients = [
+ client
+ for client in r.client_list()
+ if client.get("name") in ["redis-py-c1", "redis-py-c2"]
+ ]
assert len(clients) == 1
- assert clients[0].get('name') == 'redis-py-c1'
+ assert clients[0].get("name") == "redis-py-c1"
- @skip_if_server_version_lt('2.8.12')
+ @skip_if_server_version_lt("2.8.12")
def test_client_kill_filter_invalid_params(self, r):
# empty
with pytest.raises(exceptions.DataError):
@@ -430,110 +467,130 @@ class TestRedisCommands:
r.client_kill_filter(_type="caster")
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('2.8.12')
+ @skip_if_server_version_lt("2.8.12")
def test_client_kill_filter_by_id(self, r, r2):
- r.client_setname('redis-py-c1')
- r2.client_setname('redis-py-c2')
- clients = [client for client in r.client_list()
- if client.get('name') in ['redis-py-c1', 'redis-py-c2']]
+ r.client_setname("redis-py-c1")
+ r2.client_setname("redis-py-c2")
+ clients = [
+ client
+ for client in r.client_list()
+ if client.get("name") in ["redis-py-c1", "redis-py-c2"]
+ ]
assert len(clients) == 2
- clients_by_name = {client.get('name'): client for client in clients}
+ clients_by_name = {client.get("name"): client for client in clients}
- client_2_id = clients_by_name['redis-py-c2'].get('id')
+ client_2_id = clients_by_name["redis-py-c2"].get("id")
resp = r.client_kill_filter(_id=client_2_id)
assert resp == 1
- clients = [client for client in r.client_list()
- if client.get('name') in ['redis-py-c1', 'redis-py-c2']]
+ clients = [
+ client
+ for client in r.client_list()
+ if client.get("name") in ["redis-py-c1", "redis-py-c2"]
+ ]
assert len(clients) == 1
- assert clients[0].get('name') == 'redis-py-c1'
+ assert clients[0].get("name") == "redis-py-c1"
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('2.8.12')
+ @skip_if_server_version_lt("2.8.12")
def test_client_kill_filter_by_addr(self, r, r2):
- r.client_setname('redis-py-c1')
- r2.client_setname('redis-py-c2')
- clients = [client for client in r.client_list()
- if client.get('name') in ['redis-py-c1', 'redis-py-c2']]
+ r.client_setname("redis-py-c1")
+ r2.client_setname("redis-py-c2")
+ clients = [
+ client
+ for client in r.client_list()
+ if client.get("name") in ["redis-py-c1", "redis-py-c2"]
+ ]
assert len(clients) == 2
- clients_by_name = {client.get('name'): client for client in clients}
+ clients_by_name = {client.get("name"): client for client in clients}
- client_2_addr = clients_by_name['redis-py-c2'].get('addr')
+ client_2_addr = clients_by_name["redis-py-c2"].get("addr")
resp = r.client_kill_filter(addr=client_2_addr)
assert resp == 1
- clients = [client for client in r.client_list()
- if client.get('name') in ['redis-py-c1', 'redis-py-c2']]
+ clients = [
+ client
+ for client in r.client_list()
+ if client.get("name") in ["redis-py-c1", "redis-py-c2"]
+ ]
assert len(clients) == 1
- assert clients[0].get('name') == 'redis-py-c1'
+ assert clients[0].get("name") == "redis-py-c1"
- @skip_if_server_version_lt('2.6.9')
+ @skip_if_server_version_lt("2.6.9")
def test_client_list_after_client_setname(self, r):
- r.client_setname('redis_py_test')
+ r.client_setname("redis_py_test")
clients = r.client_list()
# we don't know which client ours will be
- assert 'redis_py_test' in [c['name'] for c in clients]
+ assert "redis_py_test" in [c["name"] for c in clients]
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_client_kill_filter_by_laddr(self, r, r2):
- r.client_setname('redis-py-c1')
- r2.client_setname('redis-py-c2')
- clients = [client for client in r.client_list()
- if client.get('name') in ['redis-py-c1', 'redis-py-c2']]
+ r.client_setname("redis-py-c1")
+ r2.client_setname("redis-py-c2")
+ clients = [
+ client
+ for client in r.client_list()
+ if client.get("name") in ["redis-py-c1", "redis-py-c2"]
+ ]
assert len(clients) == 2
- clients_by_name = {client.get('name'): client for client in clients}
+ clients_by_name = {client.get("name"): client for client in clients}
- client_2_addr = clients_by_name['redis-py-c2'].get('laddr')
+ client_2_addr = clients_by_name["redis-py-c2"].get("laddr")
assert r.client_kill_filter(laddr=client_2_addr)
- @skip_if_server_version_lt('6.0.0')
+ @skip_if_server_version_lt("6.0.0")
@skip_if_redis_enterprise
def test_client_kill_filter_by_user(self, r, request):
- killuser = 'user_to_kill'
- r.acl_setuser(killuser, enabled=True, reset=True,
- commands=['+get', '+set', '+select'],
- keys=['cache:*'], nopass=True)
+ killuser = "user_to_kill"
+ r.acl_setuser(
+ killuser,
+ enabled=True,
+ reset=True,
+ commands=["+get", "+set", "+select"],
+ keys=["cache:*"],
+ nopass=True,
+ )
_get_client(redis.Redis, request, flushdb=False, username=killuser)
r.client_kill_filter(user=killuser)
clients = r.client_list()
for c in clients:
- assert c['user'] != killuser
+ assert c["user"] != killuser
r.acl_deluser(killuser)
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('2.9.50')
+ @skip_if_server_version_lt("2.9.50")
@skip_if_redis_enterprise
def test_client_pause(self, r):
assert r.client_pause(1)
assert r.client_pause(timeout=1)
with pytest.raises(exceptions.RedisError):
- r.client_pause(timeout='not an integer')
+ r.client_pause(timeout="not an integer")
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
@skip_if_redis_enterprise
def test_client_unpause(self, r):
- assert r.client_unpause() == b'OK'
+ assert r.client_unpause() == b"OK"
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('3.2.0')
+ @skip_if_server_version_lt("3.2.0")
def test_client_reply(self, r, r_timeout):
- assert r_timeout.client_reply('ON') == b'OK'
+ assert r_timeout.client_reply("ON") == b"OK"
with pytest.raises(exceptions.TimeoutError):
- r_timeout.client_reply('OFF')
+ r_timeout.client_reply("OFF")
- r_timeout.client_reply('SKIP')
+ r_timeout.client_reply("SKIP")
- assert r_timeout.set('foo', 'bar')
+ assert r_timeout.set("foo", "bar")
# validate it was set
- assert r.get('foo') == b'bar'
+ assert r.get("foo") == b"bar"
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('6.0.0')
+ @skip_if_server_version_lt("6.0.0")
@skip_if_redis_enterprise
def test_client_getredir(self, r):
assert isinstance(r.client_getredir(), int)
@@ -549,37 +606,37 @@ class TestRedisCommands:
@skip_if_redis_enterprise
def test_config_resetstat(self, r):
r.ping()
- prior_commands_processed = int(r.info()['total_commands_processed'])
+ prior_commands_processed = int(r.info()["total_commands_processed"])
assert prior_commands_processed >= 1
r.config_resetstat()
- reset_commands_processed = int(r.info()['total_commands_processed'])
+ reset_commands_processed = int(r.info()["total_commands_processed"])
assert reset_commands_processed < prior_commands_processed
@skip_if_redis_enterprise
def test_config_set(self, r):
- r.config_set('timeout', 70)
- assert r.config_get()['timeout'] == '70'
- assert r.config_set('timeout', 0)
- assert r.config_get()['timeout'] == '0'
+ r.config_set("timeout", 70)
+ assert r.config_get()["timeout"] == "70"
+ assert r.config_set("timeout", 0)
+ assert r.config_get()["timeout"] == "0"
@pytest.mark.onlynoncluster
def test_dbsize(self, r):
- r['a'] = 'foo'
- r['b'] = 'bar'
+ r["a"] = "foo"
+ r["b"] = "bar"
assert r.dbsize() == 2
@pytest.mark.onlynoncluster
def test_echo(self, r):
- assert r.echo('foo bar') == b'foo bar'
+ assert r.echo("foo bar") == b"foo bar"
@pytest.mark.onlynoncluster
def test_info(self, r):
- r['a'] = 'foo'
- r['b'] = 'bar'
+ r["a"] = "foo"
+ r["b"] = "bar"
info = r.info()
assert isinstance(info, dict)
- assert 'arch_bits' in info.keys()
- assert 'redis_version' in info.keys()
+ assert "arch_bits" in info.keys()
+ assert "redis_version" in info.keys()
@pytest.mark.onlynoncluster
@skip_if_redis_enterprise
@@ -587,20 +644,20 @@ class TestRedisCommands:
assert isinstance(r.lastsave(), datetime.datetime)
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('5.0.0')
+ @skip_if_server_version_lt("5.0.0")
def test_lolwut(self, r):
- lolwut = r.lolwut().decode('utf-8')
- assert 'Redis ver.' in lolwut
+ lolwut = r.lolwut().decode("utf-8")
+ assert "Redis ver." in lolwut
- lolwut = r.lolwut(5, 6, 7, 8).decode('utf-8')
- assert 'Redis ver.' in lolwut
+ lolwut = r.lolwut(5, 6, 7, 8).decode("utf-8")
+ assert "Redis ver." in lolwut
def test_object(self, r):
- r['a'] = 'foo'
- assert isinstance(r.object('refcount', 'a'), int)
- assert isinstance(r.object('idletime', 'a'), int)
- assert r.object('encoding', 'a') in (b'raw', b'embstr')
- assert r.object('idletime', 'invalid-key') is None
+ r["a"] = "foo"
+ assert isinstance(r.object("refcount", "a"), int)
+ assert isinstance(r.object("idletime", "a"), int)
+ assert r.object("encoding", "a") in (b"raw", b"embstr")
+ assert r.object("idletime", "invalid-key") is None
def test_ping(self, r):
assert r.ping()
@@ -612,36 +669,34 @@ class TestRedisCommands:
@pytest.mark.onlynoncluster
def test_slowlog_get(self, r, slowlog):
assert r.slowlog_reset()
- unicode_string = chr(3456) + 'abcd' + chr(3421)
+ unicode_string = chr(3456) + "abcd" + chr(3421)
r.get(unicode_string)
slowlog = r.slowlog_get()
assert isinstance(slowlog, list)
- commands = [log['command'] for log in slowlog]
+ commands = [log["command"] for log in slowlog]
- get_command = b' '.join((b'GET', unicode_string.encode('utf-8')))
+ get_command = b" ".join((b"GET", unicode_string.encode("utf-8")))
assert get_command in commands
- assert b'SLOWLOG RESET' in commands
+ assert b"SLOWLOG RESET" in commands
# the order should be ['GET <uni string>', 'SLOWLOG RESET'],
# but if other clients are executing commands at the same time, there
# could be commands, before, between, or after, so just check that
# the two we care about are in the appropriate order.
- assert commands.index(get_command) < commands.index(b'SLOWLOG RESET')
+ assert commands.index(get_command) < commands.index(b"SLOWLOG RESET")
# make sure other attributes are typed correctly
- assert isinstance(slowlog[0]['start_time'], int)
- assert isinstance(slowlog[0]['duration'], int)
+ assert isinstance(slowlog[0]["start_time"], int)
+ assert isinstance(slowlog[0]["duration"], int)
# Mock result if we didn't get slowlog complexity info.
- if 'complexity' not in slowlog[0]:
+ if "complexity" not in slowlog[0]:
# monkey patch parse_response()
COMPLEXITY_STATEMENT = "Complexity info: N:4712,M:3788"
old_parse_response = r.parse_response
def parse_response(connection, command_name, **options):
- if command_name != 'SLOWLOG GET':
- return old_parse_response(connection,
- command_name,
- **options)
+ if command_name != "SLOWLOG GET":
+ return old_parse_response(connection, command_name, **options)
responses = connection.read_response()
for response in responses:
# Complexity info stored as fourth item in list
@@ -653,10 +708,10 @@ class TestRedisCommands:
# test
slowlog = r.slowlog_get()
assert isinstance(slowlog, list)
- commands = [log['command'] for log in slowlog]
+ commands = [log["command"] for log in slowlog]
assert get_command in commands
idx = commands.index(get_command)
- assert slowlog[idx]['complexity'] == COMPLEXITY_STATEMENT
+ assert slowlog[idx]["complexity"] == COMPLEXITY_STATEMENT
# tear down monkeypatch
r.parse_response = old_parse_response
@@ -664,7 +719,7 @@ class TestRedisCommands:
@pytest.mark.onlynoncluster
def test_slowlog_get_limit(self, r, slowlog):
assert r.slowlog_reset()
- r.get('foo')
+ r.get("foo")
slowlog = r.slowlog_get(1)
assert isinstance(slowlog, list)
# only one command, based on the number we passed to slowlog_get()
@@ -672,10 +727,10 @@ class TestRedisCommands:
@pytest.mark.onlynoncluster
def test_slowlog_length(self, r, slowlog):
- r.get('foo')
+ r.get("foo")
assert isinstance(r.slowlog_len(), int)
- @skip_if_server_version_lt('2.6.0')
+ @skip_if_server_version_lt("2.6.0")
def test_time(self, r):
t = r.time()
assert len(t) == 2
@@ -690,104 +745,104 @@ class TestRedisCommands:
# BASIC KEY COMMANDS
def test_append(self, r):
- assert r.append('a', 'a1') == 2
- assert r['a'] == b'a1'
- assert r.append('a', 'a2') == 4
- assert r['a'] == b'a1a2'
+ assert r.append("a", "a1") == 2
+ assert r["a"] == b"a1"
+ assert r.append("a", "a2") == 4
+ assert r["a"] == b"a1a2"
- @skip_if_server_version_lt('2.6.0')
+ @skip_if_server_version_lt("2.6.0")
def test_bitcount(self, r):
- r.setbit('a', 5, True)
- assert r.bitcount('a') == 1
- r.setbit('a', 6, True)
- assert r.bitcount('a') == 2
- r.setbit('a', 5, False)
- assert r.bitcount('a') == 1
- r.setbit('a', 9, True)
- r.setbit('a', 17, True)
- r.setbit('a', 25, True)
- r.setbit('a', 33, True)
- assert r.bitcount('a') == 5
- assert r.bitcount('a', 0, -1) == 5
- assert r.bitcount('a', 2, 3) == 2
- assert r.bitcount('a', 2, -1) == 3
- assert r.bitcount('a', -2, -1) == 2
- assert r.bitcount('a', 1, 1) == 1
-
- @pytest.mark.onlynoncluster
- @skip_if_server_version_lt('2.6.0')
+ r.setbit("a", 5, True)
+ assert r.bitcount("a") == 1
+ r.setbit("a", 6, True)
+ assert r.bitcount("a") == 2
+ r.setbit("a", 5, False)
+ assert r.bitcount("a") == 1
+ r.setbit("a", 9, True)
+ r.setbit("a", 17, True)
+ r.setbit("a", 25, True)
+ r.setbit("a", 33, True)
+ assert r.bitcount("a") == 5
+ assert r.bitcount("a", 0, -1) == 5
+ assert r.bitcount("a", 2, 3) == 2
+ assert r.bitcount("a", 2, -1) == 3
+ assert r.bitcount("a", -2, -1) == 2
+ assert r.bitcount("a", 1, 1) == 1
+
+ @pytest.mark.onlynoncluster
+ @skip_if_server_version_lt("2.6.0")
def test_bitop_not_empty_string(self, r):
- r['a'] = ''
- r.bitop('not', 'r', 'a')
- assert r.get('r') is None
+ r["a"] = ""
+ r.bitop("not", "r", "a")
+ assert r.get("r") is None
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('2.6.0')
+ @skip_if_server_version_lt("2.6.0")
def test_bitop_not(self, r):
- test_str = b'\xAA\x00\xFF\x55'
+ test_str = b"\xAA\x00\xFF\x55"
correct = ~0xAA00FF55 & 0xFFFFFFFF
- r['a'] = test_str
- r.bitop('not', 'r', 'a')
- assert int(binascii.hexlify(r['r']), 16) == correct
+ r["a"] = test_str
+ r.bitop("not", "r", "a")
+ assert int(binascii.hexlify(r["r"]), 16) == correct
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('2.6.0')
+ @skip_if_server_version_lt("2.6.0")
def test_bitop_not_in_place(self, r):
- test_str = b'\xAA\x00\xFF\x55'
+ test_str = b"\xAA\x00\xFF\x55"
correct = ~0xAA00FF55 & 0xFFFFFFFF
- r['a'] = test_str
- r.bitop('not', 'a', 'a')
- assert int(binascii.hexlify(r['a']), 16) == correct
+ r["a"] = test_str
+ r.bitop("not", "a", "a")
+ assert int(binascii.hexlify(r["a"]), 16) == correct
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('2.6.0')
+ @skip_if_server_version_lt("2.6.0")
def test_bitop_single_string(self, r):
- test_str = b'\x01\x02\xFF'
- r['a'] = test_str
- r.bitop('and', 'res1', 'a')
- r.bitop('or', 'res2', 'a')
- r.bitop('xor', 'res3', 'a')
- assert r['res1'] == test_str
- assert r['res2'] == test_str
- assert r['res3'] == test_str
+ test_str = b"\x01\x02\xFF"
+ r["a"] = test_str
+ r.bitop("and", "res1", "a")
+ r.bitop("or", "res2", "a")
+ r.bitop("xor", "res3", "a")
+ assert r["res1"] == test_str
+ assert r["res2"] == test_str
+ assert r["res3"] == test_str
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('2.6.0')
+ @skip_if_server_version_lt("2.6.0")
def test_bitop_string_operands(self, r):
- r['a'] = b'\x01\x02\xFF\xFF'
- r['b'] = b'\x01\x02\xFF'
- r.bitop('and', 'res1', 'a', 'b')
- r.bitop('or', 'res2', 'a', 'b')
- r.bitop('xor', 'res3', 'a', 'b')
- assert int(binascii.hexlify(r['res1']), 16) == 0x0102FF00
- assert int(binascii.hexlify(r['res2']), 16) == 0x0102FFFF
- assert int(binascii.hexlify(r['res3']), 16) == 0x000000FF
+ r["a"] = b"\x01\x02\xFF\xFF"
+ r["b"] = b"\x01\x02\xFF"
+ r.bitop("and", "res1", "a", "b")
+ r.bitop("or", "res2", "a", "b")
+ r.bitop("xor", "res3", "a", "b")
+ assert int(binascii.hexlify(r["res1"]), 16) == 0x0102FF00
+ assert int(binascii.hexlify(r["res2"]), 16) == 0x0102FFFF
+ assert int(binascii.hexlify(r["res3"]), 16) == 0x000000FF
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('2.8.7')
+ @skip_if_server_version_lt("2.8.7")
def test_bitpos(self, r):
- key = 'key:bitpos'
- r.set(key, b'\xff\xf0\x00')
+ key = "key:bitpos"
+ r.set(key, b"\xff\xf0\x00")
assert r.bitpos(key, 0) == 12
assert r.bitpos(key, 0, 2, -1) == 16
assert r.bitpos(key, 0, -2, -1) == 12
- r.set(key, b'\x00\xff\xf0')
+ r.set(key, b"\x00\xff\xf0")
assert r.bitpos(key, 1, 0) == 8
assert r.bitpos(key, 1, 1) == 8
- r.set(key, b'\x00\x00\x00')
+ r.set(key, b"\x00\x00\x00")
assert r.bitpos(key, 1) == -1
- @skip_if_server_version_lt('2.8.7')
+ @skip_if_server_version_lt("2.8.7")
def test_bitpos_wrong_arguments(self, r):
- key = 'key:bitpos:wrong:args'
- r.set(key, b'\xff\xf0\x00')
+ key = "key:bitpos:wrong:args"
+ r.set(key, b"\xff\xf0\x00")
with pytest.raises(exceptions.RedisError):
r.bitpos(key, 0, end=1) == 12
with pytest.raises(exceptions.RedisError):
r.bitpos(key, 7) == 12
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_copy(self, r):
assert r.copy("a", "b") == 0
r.set("a", "foo")
@@ -796,7 +851,7 @@ class TestRedisCommands:
assert r.get("b") == b"foo"
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_copy_and_replace(self, r):
r.set("a", "foo1")
r.set("b", "foo2")
@@ -804,7 +859,7 @@ class TestRedisCommands:
assert r.copy("a", "b", replace=True) == 1
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_copy_to_another_database(self, request):
r0 = _get_client(redis.Redis, request, db=0)
r1 = _get_client(redis.Redis, request, db=1)
@@ -813,2268 +868,2477 @@ class TestRedisCommands:
assert r1.get("b") == b"foo"
def test_decr(self, r):
- assert r.decr('a') == -1
- assert r['a'] == b'-1'
- assert r.decr('a') == -2
- assert r['a'] == b'-2'
- assert r.decr('a', amount=5) == -7
- assert r['a'] == b'-7'
+ assert r.decr("a") == -1
+ assert r["a"] == b"-1"
+ assert r.decr("a") == -2
+ assert r["a"] == b"-2"
+ assert r.decr("a", amount=5) == -7
+ assert r["a"] == b"-7"
def test_decrby(self, r):
- assert r.decrby('a', amount=2) == -2
- assert r.decrby('a', amount=3) == -5
- assert r['a'] == b'-5'
+ assert r.decrby("a", amount=2) == -2
+ assert r.decrby("a", amount=3) == -5
+ assert r["a"] == b"-5"
def test_delete(self, r):
- assert r.delete('a') == 0
- r['a'] = 'foo'
- assert r.delete('a') == 1
+ assert r.delete("a") == 0
+ r["a"] = "foo"
+ assert r.delete("a") == 1
def test_delete_with_multiple_keys(self, r):
- r['a'] = 'foo'
- r['b'] = 'bar'
- assert r.delete('a', 'b') == 2
- assert r.get('a') is None
- assert r.get('b') is None
+ r["a"] = "foo"
+ r["b"] = "bar"
+ assert r.delete("a", "b") == 2
+ assert r.get("a") is None
+ assert r.get("b") is None
def test_delitem(self, r):
- r['a'] = 'foo'
- del r['a']
- assert r.get('a') is None
+ r["a"] = "foo"
+ del r["a"]
+ assert r.get("a") is None
- @skip_if_server_version_lt('4.0.0')
+ @skip_if_server_version_lt("4.0.0")
def test_unlink(self, r):
- assert r.unlink('a') == 0
- r['a'] = 'foo'
- assert r.unlink('a') == 1
- assert r.get('a') is None
+ assert r.unlink("a") == 0
+ r["a"] = "foo"
+ assert r.unlink("a") == 1
+ assert r.get("a") is None
- @skip_if_server_version_lt('4.0.0')
+ @skip_if_server_version_lt("4.0.0")
def test_unlink_with_multiple_keys(self, r):
- r['a'] = 'foo'
- r['b'] = 'bar'
- assert r.unlink('a', 'b') == 2
- assert r.get('a') is None
- assert r.get('b') is None
+ r["a"] = "foo"
+ r["b"] = "bar"
+ assert r.unlink("a", "b") == 2
+ assert r.get("a") is None
+ assert r.get("b") is None
- @skip_if_server_version_lt('2.6.0')
+ @skip_if_server_version_lt("2.6.0")
def test_dump_and_restore(self, r):
- r['a'] = 'foo'
- dumped = r.dump('a')
- del r['a']
- r.restore('a', 0, dumped)
- assert r['a'] == b'foo'
+ r["a"] = "foo"
+ dumped = r.dump("a")
+ del r["a"]
+ r.restore("a", 0, dumped)
+ assert r["a"] == b"foo"
- @skip_if_server_version_lt('3.0.0')
+ @skip_if_server_version_lt("3.0.0")
def test_dump_and_restore_and_replace(self, r):
- r['a'] = 'bar'
- dumped = r.dump('a')
+ r["a"] = "bar"
+ dumped = r.dump("a")
with pytest.raises(redis.ResponseError):
- r.restore('a', 0, dumped)
+ r.restore("a", 0, dumped)
- r.restore('a', 0, dumped, replace=True)
- assert r['a'] == b'bar'
+ r.restore("a", 0, dumped, replace=True)
+ assert r["a"] == b"bar"
- @skip_if_server_version_lt('5.0.0')
+ @skip_if_server_version_lt("5.0.0")
def test_dump_and_restore_absttl(self, r):
- r['a'] = 'foo'
- dumped = r.dump('a')
- del r['a']
+ r["a"] = "foo"
+ dumped = r.dump("a")
+ del r["a"]
ttl = int(
- (redis_server_time(r) + datetime.timedelta(minutes=1)).timestamp()
- * 1000
+ (redis_server_time(r) + datetime.timedelta(minutes=1)).timestamp() * 1000
)
- r.restore('a', ttl, dumped, absttl=True)
- assert r['a'] == b'foo'
- assert 0 < r.ttl('a') <= 61
+ r.restore("a", ttl, dumped, absttl=True)
+ assert r["a"] == b"foo"
+ assert 0 < r.ttl("a") <= 61
def test_exists(self, r):
- assert r.exists('a') == 0
- r['a'] = 'foo'
- r['b'] = 'bar'
- assert r.exists('a') == 1
- assert r.exists('a', 'b') == 2
+ assert r.exists("a") == 0
+ r["a"] = "foo"
+ r["b"] = "bar"
+ assert r.exists("a") == 1
+ assert r.exists("a", "b") == 2
def test_exists_contains(self, r):
- assert 'a' not in r
- r['a'] = 'foo'
- assert 'a' in r
+ assert "a" not in r
+ r["a"] = "foo"
+ assert "a" in r
def test_expire(self, r):
- assert r.expire('a', 10) is False
- r['a'] = 'foo'
- assert r.expire('a', 10) is True
- assert 0 < r.ttl('a') <= 10
- assert r.persist('a')
- assert r.ttl('a') == -1
+ assert r.expire("a", 10) is False
+ r["a"] = "foo"
+ assert r.expire("a", 10) is True
+ assert 0 < r.ttl("a") <= 10
+ assert r.persist("a")
+ assert r.ttl("a") == -1
def test_expireat_datetime(self, r):
expire_at = redis_server_time(r) + datetime.timedelta(minutes=1)
- r['a'] = 'foo'
- assert r.expireat('a', expire_at) is True
- assert 0 < r.ttl('a') <= 61
+ r["a"] = "foo"
+ assert r.expireat("a", expire_at) is True
+ assert 0 < r.ttl("a") <= 61
def test_expireat_no_key(self, r):
expire_at = redis_server_time(r) + datetime.timedelta(minutes=1)
- assert r.expireat('a', expire_at) is False
+ assert r.expireat("a", expire_at) is False
def test_expireat_unixtime(self, r):
expire_at = redis_server_time(r) + datetime.timedelta(minutes=1)
- r['a'] = 'foo'
+ r["a"] = "foo"
expire_at_seconds = int(time.mktime(expire_at.timetuple()))
- assert r.expireat('a', expire_at_seconds) is True
- assert 0 < r.ttl('a') <= 61
+ assert r.expireat("a", expire_at_seconds) is True
+ assert 0 < r.ttl("a") <= 61
def test_get_and_set(self, r):
# get and set can't be tested independently of each other
- assert r.get('a') is None
- byte_string = b'value'
+ assert r.get("a") is None
+ byte_string = b"value"
integer = 5
- unicode_string = chr(3456) + 'abcd' + chr(3421)
- assert r.set('byte_string', byte_string)
- assert r.set('integer', 5)
- assert r.set('unicode_string', unicode_string)
- assert r.get('byte_string') == byte_string
- assert r.get('integer') == str(integer).encode()
- assert r.get('unicode_string').decode('utf-8') == unicode_string
-
- @skip_if_server_version_lt('6.2.0')
+ unicode_string = chr(3456) + "abcd" + chr(3421)
+ assert r.set("byte_string", byte_string)
+ assert r.set("integer", 5)
+ assert r.set("unicode_string", unicode_string)
+ assert r.get("byte_string") == byte_string
+ assert r.get("integer") == str(integer).encode()
+ assert r.get("unicode_string").decode("utf-8") == unicode_string
+
+ @skip_if_server_version_lt("6.2.0")
def test_getdel(self, r):
- assert r.getdel('a') is None
- r.set('a', 1)
- assert r.getdel('a') == b'1'
- assert r.getdel('a') is None
+ assert r.getdel("a") is None
+ r.set("a", 1)
+ assert r.getdel("a") == b"1"
+ assert r.getdel("a") is None
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_getex(self, r):
- r.set('a', 1)
- assert r.getex('a') == b'1'
- assert r.ttl('a') == -1
- assert r.getex('a', ex=60) == b'1'
- assert r.ttl('a') == 60
- assert r.getex('a', px=6000) == b'1'
- assert r.ttl('a') == 6
+ r.set("a", 1)
+ assert r.getex("a") == b"1"
+ assert r.ttl("a") == -1
+ assert r.getex("a", ex=60) == b"1"
+ assert r.ttl("a") == 60
+ assert r.getex("a", px=6000) == b"1"
+ assert r.ttl("a") == 6
expire_at = redis_server_time(r) + datetime.timedelta(minutes=1)
- assert r.getex('a', pxat=expire_at) == b'1'
- assert r.ttl('a') <= 61
- assert r.getex('a', persist=True) == b'1'
- assert r.ttl('a') == -1
+ assert r.getex("a", pxat=expire_at) == b"1"
+ assert r.ttl("a") <= 61
+ assert r.getex("a", persist=True) == b"1"
+ assert r.ttl("a") == -1
def test_getitem_and_setitem(self, r):
- r['a'] = 'bar'
- assert r['a'] == b'bar'
+ r["a"] = "bar"
+ assert r["a"] == b"bar"
def test_getitem_raises_keyerror_for_missing_key(self, r):
with pytest.raises(KeyError):
- r['a']
+ r["a"]
def test_getitem_does_not_raise_keyerror_for_empty_string(self, r):
- r['a'] = b""
- assert r['a'] == b""
+ r["a"] = b""
+ assert r["a"] == b""
def test_get_set_bit(self, r):
# no value
- assert not r.getbit('a', 5)
+ assert not r.getbit("a", 5)
# set bit 5
- assert not r.setbit('a', 5, True)
- assert r.getbit('a', 5)
+ assert not r.setbit("a", 5, True)
+ assert r.getbit("a", 5)
# unset bit 4
- assert not r.setbit('a', 4, False)
- assert not r.getbit('a', 4)
+ assert not r.setbit("a", 4, False)
+ assert not r.getbit("a", 4)
# set bit 4
- assert not r.setbit('a', 4, True)
- assert r.getbit('a', 4)
+ assert not r.setbit("a", 4, True)
+ assert r.getbit("a", 4)
# set bit 5 again
- assert r.setbit('a', 5, True)
- assert r.getbit('a', 5)
+ assert r.setbit("a", 5, True)
+ assert r.getbit("a", 5)
def test_getrange(self, r):
- r['a'] = 'foo'
- assert r.getrange('a', 0, 0) == b'f'
- assert r.getrange('a', 0, 2) == b'foo'
- assert r.getrange('a', 3, 4) == b''
+ r["a"] = "foo"
+ assert r.getrange("a", 0, 0) == b"f"
+ assert r.getrange("a", 0, 2) == b"foo"
+ assert r.getrange("a", 3, 4) == b""
def test_getset(self, r):
- assert r.getset('a', 'foo') is None
- assert r.getset('a', 'bar') == b'foo'
- assert r.get('a') == b'bar'
+ assert r.getset("a", "foo") is None
+ assert r.getset("a", "bar") == b"foo"
+ assert r.get("a") == b"bar"
def test_incr(self, r):
- assert r.incr('a') == 1
- assert r['a'] == b'1'
- assert r.incr('a') == 2
- assert r['a'] == b'2'
- assert r.incr('a', amount=5) == 7
- assert r['a'] == b'7'
+ assert r.incr("a") == 1
+ assert r["a"] == b"1"
+ assert r.incr("a") == 2
+ assert r["a"] == b"2"
+ assert r.incr("a", amount=5) == 7
+ assert r["a"] == b"7"
def test_incrby(self, r):
- assert r.incrby('a') == 1
- assert r.incrby('a', 4) == 5
- assert r['a'] == b'5'
+ assert r.incrby("a") == 1
+ assert r.incrby("a", 4) == 5
+ assert r["a"] == b"5"
- @skip_if_server_version_lt('2.6.0')
+ @skip_if_server_version_lt("2.6.0")
def test_incrbyfloat(self, r):
- assert r.incrbyfloat('a') == 1.0
- assert r['a'] == b'1'
- assert r.incrbyfloat('a', 1.1) == 2.1
- assert float(r['a']) == float(2.1)
+ assert r.incrbyfloat("a") == 1.0
+ assert r["a"] == b"1"
+ assert r.incrbyfloat("a", 1.1) == 2.1
+ assert float(r["a"]) == float(2.1)
@pytest.mark.onlynoncluster
def test_keys(self, r):
assert r.keys() == []
- keys_with_underscores = {b'test_a', b'test_b'}
- keys = keys_with_underscores.union({b'testc'})
+ keys_with_underscores = {b"test_a", b"test_b"}
+ keys = keys_with_underscores.union({b"testc"})
for key in keys:
r[key] = 1
- assert set(r.keys(pattern='test_*')) == keys_with_underscores
- assert set(r.keys(pattern='test*')) == keys
+ assert set(r.keys(pattern="test_*")) == keys_with_underscores
+ assert set(r.keys(pattern="test*")) == keys
@pytest.mark.onlynoncluster
def test_mget(self, r):
assert r.mget([]) == []
- assert r.mget(['a', 'b']) == [None, None]
- r['a'] = '1'
- r['b'] = '2'
- r['c'] = '3'
- assert r.mget('a', 'other', 'b', 'c') == [b'1', None, b'2', b'3']
+ assert r.mget(["a", "b"]) == [None, None]
+ r["a"] = "1"
+ r["b"] = "2"
+ r["c"] = "3"
+ assert r.mget("a", "other", "b", "c") == [b"1", None, b"2", b"3"]
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_lmove(self, r):
- r.rpush('a', 'one', 'two', 'three', 'four')
- assert r.lmove('a', 'b')
- assert r.lmove('a', 'b', 'right', 'left')
+ r.rpush("a", "one", "two", "three", "four")
+ assert r.lmove("a", "b")
+ assert r.lmove("a", "b", "right", "left")
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_blmove(self, r):
- r.rpush('a', 'one', 'two', 'three', 'four')
- assert r.blmove('a', 'b', 5)
- assert r.blmove('a', 'b', 1, 'RIGHT', 'LEFT')
+ r.rpush("a", "one", "two", "three", "four")
+ assert r.blmove("a", "b", 5)
+ assert r.blmove("a", "b", 1, "RIGHT", "LEFT")
@pytest.mark.onlynoncluster
def test_mset(self, r):
- d = {'a': b'1', 'b': b'2', 'c': b'3'}
+ d = {"a": b"1", "b": b"2", "c": b"3"}
assert r.mset(d)
for k, v in d.items():
assert r[k] == v
@pytest.mark.onlynoncluster
def test_msetnx(self, r):
- d = {'a': b'1', 'b': b'2', 'c': b'3'}
+ d = {"a": b"1", "b": b"2", "c": b"3"}
assert r.msetnx(d)
- d2 = {'a': b'x', 'd': b'4'}
+ d2 = {"a": b"x", "d": b"4"}
assert not r.msetnx(d2)
for k, v in d.items():
assert r[k] == v
- assert r.get('d') is None
+ assert r.get("d") is None
- @skip_if_server_version_lt('2.6.0')
+ @skip_if_server_version_lt("2.6.0")
def test_pexpire(self, r):
- assert r.pexpire('a', 60000) is False
- r['a'] = 'foo'
- assert r.pexpire('a', 60000) is True
- assert 0 < r.pttl('a') <= 60000
- assert r.persist('a')
- assert r.pttl('a') == -1
-
- @skip_if_server_version_lt('2.6.0')
+ assert r.pexpire("a", 60000) is False
+ r["a"] = "foo"
+ assert r.pexpire("a", 60000) is True
+ assert 0 < r.pttl("a") <= 60000
+ assert r.persist("a")
+ assert r.pttl("a") == -1
+
+ @skip_if_server_version_lt("2.6.0")
def test_pexpireat_datetime(self, r):
expire_at = redis_server_time(r) + datetime.timedelta(minutes=1)
- r['a'] = 'foo'
- assert r.pexpireat('a', expire_at) is True
- assert 0 < r.pttl('a') <= 61000
+ r["a"] = "foo"
+ assert r.pexpireat("a", expire_at) is True
+ assert 0 < r.pttl("a") <= 61000
- @skip_if_server_version_lt('2.6.0')
+ @skip_if_server_version_lt("2.6.0")
def test_pexpireat_no_key(self, r):
expire_at = redis_server_time(r) + datetime.timedelta(minutes=1)
- assert r.pexpireat('a', expire_at) is False
+ assert r.pexpireat("a", expire_at) is False
- @skip_if_server_version_lt('2.6.0')
+ @skip_if_server_version_lt("2.6.0")
def test_pexpireat_unixtime(self, r):
expire_at = redis_server_time(r) + datetime.timedelta(minutes=1)
- r['a'] = 'foo'
+ r["a"] = "foo"
expire_at_seconds = int(time.mktime(expire_at.timetuple())) * 1000
- assert r.pexpireat('a', expire_at_seconds) is True
- assert 0 < r.pttl('a') <= 61000
+ assert r.pexpireat("a", expire_at_seconds) is True
+ assert 0 < r.pttl("a") <= 61000
- @skip_if_server_version_lt('2.6.0')
+ @skip_if_server_version_lt("2.6.0")
def test_psetex(self, r):
- assert r.psetex('a', 1000, 'value')
- assert r['a'] == b'value'
- assert 0 < r.pttl('a') <= 1000
+ assert r.psetex("a", 1000, "value")
+ assert r["a"] == b"value"
+ assert 0 < r.pttl("a") <= 1000
- @skip_if_server_version_lt('2.6.0')
+ @skip_if_server_version_lt("2.6.0")
def test_psetex_timedelta(self, r):
expire_at = datetime.timedelta(milliseconds=1000)
- assert r.psetex('a', expire_at, 'value')
- assert r['a'] == b'value'
- assert 0 < r.pttl('a') <= 1000
+ assert r.psetex("a", expire_at, "value")
+ assert r["a"] == b"value"
+ assert 0 < r.pttl("a") <= 1000
- @skip_if_server_version_lt('2.6.0')
+ @skip_if_server_version_lt("2.6.0")
def test_pttl(self, r):
- assert r.pexpire('a', 10000) is False
- r['a'] = '1'
- assert r.pexpire('a', 10000) is True
- assert 0 < r.pttl('a') <= 10000
- assert r.persist('a')
- assert r.pttl('a') == -1
-
- @skip_if_server_version_lt('2.8.0')
+ assert r.pexpire("a", 10000) is False
+ r["a"] = "1"
+ assert r.pexpire("a", 10000) is True
+ assert 0 < r.pttl("a") <= 10000
+ assert r.persist("a")
+ assert r.pttl("a") == -1
+
+ @skip_if_server_version_lt("2.8.0")
def test_pttl_no_key(self, r):
"PTTL on servers 2.8 and after return -2 when the key doesn't exist"
- assert r.pttl('a') == -2
+ assert r.pttl("a") == -2
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_hrandfield(self, r):
- assert r.hrandfield('key') is None
- r.hset('key', mapping={'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5})
- assert r.hrandfield('key') is not None
- assert len(r.hrandfield('key', 2)) == 2
+ assert r.hrandfield("key") is None
+ r.hset("key", mapping={"a": 1, "b": 2, "c": 3, "d": 4, "e": 5})
+ assert r.hrandfield("key") is not None
+ assert len(r.hrandfield("key", 2)) == 2
# with values
- assert len(r.hrandfield('key', 2, True)) == 4
+ assert len(r.hrandfield("key", 2, True)) == 4
# without duplications
- assert len(r.hrandfield('key', 10)) == 5
+ assert len(r.hrandfield("key", 10)) == 5
# with duplications
- assert len(r.hrandfield('key', -10)) == 10
+ assert len(r.hrandfield("key", -10)) == 10
@pytest.mark.onlynoncluster
def test_randomkey(self, r):
assert r.randomkey() is None
- for key in ('a', 'b', 'c'):
+ for key in ("a", "b", "c"):
r[key] = 1
- assert r.randomkey() in (b'a', b'b', b'c')
+ assert r.randomkey() in (b"a", b"b", b"c")
@pytest.mark.onlynoncluster
def test_rename(self, r):
- r['a'] = '1'
- assert r.rename('a', 'b')
- assert r.get('a') is None
- assert r['b'] == b'1'
+ r["a"] = "1"
+ assert r.rename("a", "b")
+ assert r.get("a") is None
+ assert r["b"] == b"1"
@pytest.mark.onlynoncluster
def test_renamenx(self, r):
- r['a'] = '1'
- r['b'] = '2'
- assert not r.renamenx('a', 'b')
- assert r['a'] == b'1'
- assert r['b'] == b'2'
+ r["a"] = "1"
+ r["b"] = "2"
+ assert not r.renamenx("a", "b")
+ assert r["a"] == b"1"
+ assert r["b"] == b"2"
- @skip_if_server_version_lt('2.6.0')
+ @skip_if_server_version_lt("2.6.0")
def test_set_nx(self, r):
- assert r.set('a', '1', nx=True)
- assert not r.set('a', '2', nx=True)
- assert r['a'] == b'1'
+ assert r.set("a", "1", nx=True)
+ assert not r.set("a", "2", nx=True)
+ assert r["a"] == b"1"
- @skip_if_server_version_lt('2.6.0')
+ @skip_if_server_version_lt("2.6.0")
def test_set_xx(self, r):
- assert not r.set('a', '1', xx=True)
- assert r.get('a') is None
- r['a'] = 'bar'
- assert r.set('a', '2', xx=True)
- assert r.get('a') == b'2'
+ assert not r.set("a", "1", xx=True)
+ assert r.get("a") is None
+ r["a"] = "bar"
+ assert r.set("a", "2", xx=True)
+ assert r.get("a") == b"2"
- @skip_if_server_version_lt('2.6.0')
+ @skip_if_server_version_lt("2.6.0")
def test_set_px(self, r):
- assert r.set('a', '1', px=10000)
- assert r['a'] == b'1'
- assert 0 < r.pttl('a') <= 10000
- assert 0 < r.ttl('a') <= 10
+ assert r.set("a", "1", px=10000)
+ assert r["a"] == b"1"
+ assert 0 < r.pttl("a") <= 10000
+ assert 0 < r.ttl("a") <= 10
with pytest.raises(exceptions.DataError):
- assert r.set('a', '1', px=10.0)
+ assert r.set("a", "1", px=10.0)
- @skip_if_server_version_lt('2.6.0')
+ @skip_if_server_version_lt("2.6.0")
def test_set_px_timedelta(self, r):
expire_at = datetime.timedelta(milliseconds=1000)
- assert r.set('a', '1', px=expire_at)
- assert 0 < r.pttl('a') <= 1000
- assert 0 < r.ttl('a') <= 1
+ assert r.set("a", "1", px=expire_at)
+ assert 0 < r.pttl("a") <= 1000
+ assert 0 < r.ttl("a") <= 1
- @skip_if_server_version_lt('2.6.0')
+ @skip_if_server_version_lt("2.6.0")
def test_set_ex(self, r):
- assert r.set('a', '1', ex=10)
- assert 0 < r.ttl('a') <= 10
+ assert r.set("a", "1", ex=10)
+ assert 0 < r.ttl("a") <= 10
with pytest.raises(exceptions.DataError):
- assert r.set('a', '1', ex=10.0)
+ assert r.set("a", "1", ex=10.0)
- @skip_if_server_version_lt('2.6.0')
+ @skip_if_server_version_lt("2.6.0")
def test_set_ex_timedelta(self, r):
expire_at = datetime.timedelta(seconds=60)
- assert r.set('a', '1', ex=expire_at)
- assert 0 < r.ttl('a') <= 60
+ assert r.set("a", "1", ex=expire_at)
+ assert 0 < r.ttl("a") <= 60
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_set_exat_timedelta(self, r):
expire_at = redis_server_time(r) + datetime.timedelta(seconds=10)
- assert r.set('a', '1', exat=expire_at)
- assert 0 < r.ttl('a') <= 10
+ assert r.set("a", "1", exat=expire_at)
+ assert 0 < r.ttl("a") <= 10
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_set_pxat_timedelta(self, r):
expire_at = redis_server_time(r) + datetime.timedelta(seconds=50)
- assert r.set('a', '1', pxat=expire_at)
- assert 0 < r.ttl('a') <= 100
+ assert r.set("a", "1", pxat=expire_at)
+ assert 0 < r.ttl("a") <= 100
- @skip_if_server_version_lt('2.6.0')
+ @skip_if_server_version_lt("2.6.0")
def test_set_multipleoptions(self, r):
- r['a'] = 'val'
- assert r.set('a', '1', xx=True, px=10000)
- assert 0 < r.ttl('a') <= 10
+ r["a"] = "val"
+ assert r.set("a", "1", xx=True, px=10000)
+ assert 0 < r.ttl("a") <= 10
@skip_if_server_version_lt("6.0.0")
def test_set_keepttl(self, r):
- r['a'] = 'val'
- assert r.set('a', '1', xx=True, px=10000)
- assert 0 < r.ttl('a') <= 10
- r.set('a', '2', keepttl=True)
- assert r.get('a') == b'2'
- assert 0 < r.ttl('a') <= 10
-
- @skip_if_server_version_lt('6.2.0')
+ r["a"] = "val"
+ assert r.set("a", "1", xx=True, px=10000)
+ assert 0 < r.ttl("a") <= 10
+ r.set("a", "2", keepttl=True)
+ assert r.get("a") == b"2"
+ assert 0 < r.ttl("a") <= 10
+
+ @skip_if_server_version_lt("6.2.0")
def test_set_get(self, r):
- assert r.set('a', 'True', get=True) is None
- assert r.set('a', 'True', get=True) == b'True'
- assert r.set('a', 'foo') is True
- assert r.set('a', 'bar', get=True) == b'foo'
- assert r.get('a') == b'bar'
+ assert r.set("a", "True", get=True) is None
+ assert r.set("a", "True", get=True) == b"True"
+ assert r.set("a", "foo") is True
+ assert r.set("a", "bar", get=True) == b"foo"
+ assert r.get("a") == b"bar"
def test_setex(self, r):
- assert r.setex('a', 60, '1')
- assert r['a'] == b'1'
- assert 0 < r.ttl('a') <= 60
+ assert r.setex("a", 60, "1")
+ assert r["a"] == b"1"
+ assert 0 < r.ttl("a") <= 60
def test_setnx(self, r):
- assert r.setnx('a', '1')
- assert r['a'] == b'1'
- assert not r.setnx('a', '2')
- assert r['a'] == b'1'
+ assert r.setnx("a", "1")
+ assert r["a"] == b"1"
+ assert not r.setnx("a", "2")
+ assert r["a"] == b"1"
def test_setrange(self, r):
- assert r.setrange('a', 5, 'foo') == 8
- assert r['a'] == b'\0\0\0\0\0foo'
- r['a'] = 'abcdefghijh'
- assert r.setrange('a', 6, '12345') == 11
- assert r['a'] == b'abcdef12345'
+ assert r.setrange("a", 5, "foo") == 8
+ assert r["a"] == b"\0\0\0\0\0foo"
+ r["a"] = "abcdefghijh"
+ assert r.setrange("a", 6, "12345") == 11
+ assert r["a"] == b"abcdef12345"
- @skip_if_server_version_lt('6.0.0')
+ @skip_if_server_version_lt("6.0.0")
def test_stralgo_lcs(self, r):
- key1 = '{foo}key1'
- key2 = '{foo}key2'
- value1 = 'ohmytext'
- value2 = 'mynewtext'
- res = 'mytext'
+ key1 = "{foo}key1"
+ key2 = "{foo}key2"
+ value1 = "ohmytext"
+ value2 = "mynewtext"
+ res = "mytext"
if skip_if_redis_enterprise(None).args[0] is True:
with pytest.raises(redis.exceptions.ResponseError):
- assert r.stralgo('LCS', value1, value2) == res
+ assert r.stralgo("LCS", value1, value2) == res
return
# test LCS of strings
- assert r.stralgo('LCS', value1, value2) == res
+ assert r.stralgo("LCS", value1, value2) == res
# test using keys
r.mset({key1: value1, key2: value2})
- assert r.stralgo('LCS', key1, key2, specific_argument="keys") == res
+ assert r.stralgo("LCS", key1, key2, specific_argument="keys") == res
# test other labels
- assert r.stralgo('LCS', value1, value2, len=True) == len(res)
- assert r.stralgo('LCS', value1, value2, idx=True) == \
- {
- 'len': len(res),
- 'matches': [[(4, 7), (5, 8)], [(2, 3), (0, 1)]]
- }
- assert r.stralgo('LCS', value1, value2,
- idx=True, withmatchlen=True) == \
- {
- 'len': len(res),
- 'matches': [[4, (4, 7), (5, 8)], [2, (2, 3), (0, 1)]]
- }
- assert r.stralgo('LCS', value1, value2,
- idx=True, minmatchlen=4, withmatchlen=True) == \
- {
- 'len': len(res),
- 'matches': [[4, (4, 7), (5, 8)]]
- }
-
- @skip_if_server_version_lt('6.0.0')
+ assert r.stralgo("LCS", value1, value2, len=True) == len(res)
+ assert r.stralgo("LCS", value1, value2, idx=True) == {
+ "len": len(res),
+ "matches": [[(4, 7), (5, 8)], [(2, 3), (0, 1)]],
+ }
+ assert r.stralgo("LCS", value1, value2, idx=True, withmatchlen=True) == {
+ "len": len(res),
+ "matches": [[4, (4, 7), (5, 8)], [2, (2, 3), (0, 1)]],
+ }
+ assert r.stralgo(
+ "LCS", value1, value2, idx=True, minmatchlen=4, withmatchlen=True
+ ) == {"len": len(res), "matches": [[4, (4, 7), (5, 8)]]}
+
+ @skip_if_server_version_lt("6.0.0")
def test_stralgo_negative(self, r):
with pytest.raises(exceptions.DataError):
- r.stralgo('ISSUB', 'value1', 'value2')
+ r.stralgo("ISSUB", "value1", "value2")
with pytest.raises(exceptions.DataError):
- r.stralgo('LCS', 'value1', 'value2', len=True, idx=True)
+ r.stralgo("LCS", "value1", "value2", len=True, idx=True)
with pytest.raises(exceptions.DataError):
- r.stralgo('LCS', 'value1', 'value2', specific_argument="INT")
+ r.stralgo("LCS", "value1", "value2", specific_argument="INT")
with pytest.raises(ValueError):
- r.stralgo('LCS', 'value1', 'value2', idx=True, minmatchlen="one")
+ r.stralgo("LCS", "value1", "value2", idx=True, minmatchlen="one")
def test_strlen(self, r):
- r['a'] = 'foo'
- assert r.strlen('a') == 3
+ r["a"] = "foo"
+ assert r.strlen("a") == 3
def test_substr(self, r):
- r['a'] = '0123456789'
+ r["a"] = "0123456789"
if skip_if_redis_enterprise(None).args[0] is True:
with pytest.raises(redis.exceptions.ResponseError):
- assert r.substr('a', 0) == b'0123456789'
+ assert r.substr("a", 0) == b"0123456789"
return
- assert r.substr('a', 0) == b'0123456789'
- assert r.substr('a', 2) == b'23456789'
- assert r.substr('a', 3, 5) == b'345'
- assert r.substr('a', 3, -2) == b'345678'
+ assert r.substr("a", 0) == b"0123456789"
+ assert r.substr("a", 2) == b"23456789"
+ assert r.substr("a", 3, 5) == b"345"
+ assert r.substr("a", 3, -2) == b"345678"
def test_ttl(self, r):
- r['a'] = '1'
- assert r.expire('a', 10)
- assert 0 < r.ttl('a') <= 10
- assert r.persist('a')
- assert r.ttl('a') == -1
+ r["a"] = "1"
+ assert r.expire("a", 10)
+ assert 0 < r.ttl("a") <= 10
+ assert r.persist("a")
+ assert r.ttl("a") == -1
- @skip_if_server_version_lt('2.8.0')
+ @skip_if_server_version_lt("2.8.0")
def test_ttl_nokey(self, r):
"TTL on servers 2.8 and after return -2 when the key doesn't exist"
- assert r.ttl('a') == -2
+ assert r.ttl("a") == -2
def test_type(self, r):
- assert r.type('a') == b'none'
- r['a'] = '1'
- assert r.type('a') == b'string'
- del r['a']
- r.lpush('a', '1')
- assert r.type('a') == b'list'
- del r['a']
- r.sadd('a', '1')
- assert r.type('a') == b'set'
- del r['a']
- r.zadd('a', {'1': 1})
- assert r.type('a') == b'zset'
+ assert r.type("a") == b"none"
+ r["a"] = "1"
+ assert r.type("a") == b"string"
+ del r["a"]
+ r.lpush("a", "1")
+ assert r.type("a") == b"list"
+ del r["a"]
+ r.sadd("a", "1")
+ assert r.type("a") == b"set"
+ del r["a"]
+ r.zadd("a", {"1": 1})
+ assert r.type("a") == b"zset"
# LIST COMMANDS
@pytest.mark.onlynoncluster
def test_blpop(self, r):
- r.rpush('a', '1', '2')
- r.rpush('b', '3', '4')
- assert r.blpop(['b', 'a'], timeout=1) == (b'b', b'3')
- assert r.blpop(['b', 'a'], timeout=1) == (b'b', b'4')
- assert r.blpop(['b', 'a'], timeout=1) == (b'a', b'1')
- assert r.blpop(['b', 'a'], timeout=1) == (b'a', b'2')
- assert r.blpop(['b', 'a'], timeout=1) is None
- r.rpush('c', '1')
- assert r.blpop('c', timeout=1) == (b'c', b'1')
+ r.rpush("a", "1", "2")
+ r.rpush("b", "3", "4")
+ assert r.blpop(["b", "a"], timeout=1) == (b"b", b"3")
+ assert r.blpop(["b", "a"], timeout=1) == (b"b", b"4")
+ assert r.blpop(["b", "a"], timeout=1) == (b"a", b"1")
+ assert r.blpop(["b", "a"], timeout=1) == (b"a", b"2")
+ assert r.blpop(["b", "a"], timeout=1) is None
+ r.rpush("c", "1")
+ assert r.blpop("c", timeout=1) == (b"c", b"1")
@pytest.mark.onlynoncluster
def test_brpop(self, r):
- r.rpush('a', '1', '2')
- r.rpush('b', '3', '4')
- assert r.brpop(['b', 'a'], timeout=1) == (b'b', b'4')
- assert r.brpop(['b', 'a'], timeout=1) == (b'b', b'3')
- assert r.brpop(['b', 'a'], timeout=1) == (b'a', b'2')
- assert r.brpop(['b', 'a'], timeout=1) == (b'a', b'1')
- assert r.brpop(['b', 'a'], timeout=1) is None
- r.rpush('c', '1')
- assert r.brpop('c', timeout=1) == (b'c', b'1')
+ r.rpush("a", "1", "2")
+ r.rpush("b", "3", "4")
+ assert r.brpop(["b", "a"], timeout=1) == (b"b", b"4")
+ assert r.brpop(["b", "a"], timeout=1) == (b"b", b"3")
+ assert r.brpop(["b", "a"], timeout=1) == (b"a", b"2")
+ assert r.brpop(["b", "a"], timeout=1) == (b"a", b"1")
+ assert r.brpop(["b", "a"], timeout=1) is None
+ r.rpush("c", "1")
+ assert r.brpop("c", timeout=1) == (b"c", b"1")
@pytest.mark.onlynoncluster
def test_brpoplpush(self, r):
- r.rpush('a', '1', '2')
- r.rpush('b', '3', '4')
- assert r.brpoplpush('a', 'b') == b'2'
- assert r.brpoplpush('a', 'b') == b'1'
- assert r.brpoplpush('a', 'b', timeout=1) is None
- assert r.lrange('a', 0, -1) == []
- assert r.lrange('b', 0, -1) == [b'1', b'2', b'3', b'4']
+ r.rpush("a", "1", "2")
+ r.rpush("b", "3", "4")
+ assert r.brpoplpush("a", "b") == b"2"
+ assert r.brpoplpush("a", "b") == b"1"
+ assert r.brpoplpush("a", "b", timeout=1) is None
+ assert r.lrange("a", 0, -1) == []
+ assert r.lrange("b", 0, -1) == [b"1", b"2", b"3", b"4"]
@pytest.mark.onlynoncluster
def test_brpoplpush_empty_string(self, r):
- r.rpush('a', '')
- assert r.brpoplpush('a', 'b') == b''
+ r.rpush("a", "")
+ assert r.brpoplpush("a", "b") == b""
def test_lindex(self, r):
- r.rpush('a', '1', '2', '3')
- assert r.lindex('a', '0') == b'1'
- assert r.lindex('a', '1') == b'2'
- assert r.lindex('a', '2') == b'3'
+ r.rpush("a", "1", "2", "3")
+ assert r.lindex("a", "0") == b"1"
+ assert r.lindex("a", "1") == b"2"
+ assert r.lindex("a", "2") == b"3"
def test_linsert(self, r):
- r.rpush('a', '1', '2', '3')
- assert r.linsert('a', 'after', '2', '2.5') == 4
- assert r.lrange('a', 0, -1) == [b'1', b'2', b'2.5', b'3']
- assert r.linsert('a', 'before', '2', '1.5') == 5
- assert r.lrange('a', 0, -1) == \
- [b'1', b'1.5', b'2', b'2.5', b'3']
+ r.rpush("a", "1", "2", "3")
+ assert r.linsert("a", "after", "2", "2.5") == 4
+ assert r.lrange("a", 0, -1) == [b"1", b"2", b"2.5", b"3"]
+ assert r.linsert("a", "before", "2", "1.5") == 5
+ assert r.lrange("a", 0, -1) == [b"1", b"1.5", b"2", b"2.5", b"3"]
def test_llen(self, r):
- r.rpush('a', '1', '2', '3')
- assert r.llen('a') == 3
+ r.rpush("a", "1", "2", "3")
+ assert r.llen("a") == 3
def test_lpop(self, r):
- r.rpush('a', '1', '2', '3')
- assert r.lpop('a') == b'1'
- assert r.lpop('a') == b'2'
- assert r.lpop('a') == b'3'
- assert r.lpop('a') is None
+ r.rpush("a", "1", "2", "3")
+ assert r.lpop("a") == b"1"
+ assert r.lpop("a") == b"2"
+ assert r.lpop("a") == b"3"
+ assert r.lpop("a") is None
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_lpop_count(self, r):
- r.rpush('a', '1', '2', '3')
- assert r.lpop('a', 2) == [b'1', b'2']
- assert r.lpop('a', 1) == [b'3']
- assert r.lpop('a') is None
- assert r.lpop('a', 3) is None
+ r.rpush("a", "1", "2", "3")
+ assert r.lpop("a", 2) == [b"1", b"2"]
+ assert r.lpop("a", 1) == [b"3"]
+ assert r.lpop("a") is None
+ assert r.lpop("a", 3) is None
def test_lpush(self, r):
- assert r.lpush('a', '1') == 1
- assert r.lpush('a', '2') == 2
- assert r.lpush('a', '3', '4') == 4
- assert r.lrange('a', 0, -1) == [b'4', b'3', b'2', b'1']
+ assert r.lpush("a", "1") == 1
+ assert r.lpush("a", "2") == 2
+ assert r.lpush("a", "3", "4") == 4
+ assert r.lrange("a", 0, -1) == [b"4", b"3", b"2", b"1"]
def test_lpushx(self, r):
- assert r.lpushx('a', '1') == 0
- assert r.lrange('a', 0, -1) == []
- r.rpush('a', '1', '2', '3')
- assert r.lpushx('a', '4') == 4
- assert r.lrange('a', 0, -1) == [b'4', b'1', b'2', b'3']
+ assert r.lpushx("a", "1") == 0
+ assert r.lrange("a", 0, -1) == []
+ r.rpush("a", "1", "2", "3")
+ assert r.lpushx("a", "4") == 4
+ assert r.lrange("a", 0, -1) == [b"4", b"1", b"2", b"3"]
- @skip_if_server_version_lt('4.0.0')
+ @skip_if_server_version_lt("4.0.0")
def test_lpushx_with_list(self, r):
# now with a list
- r.lpush('somekey', 'a')
- r.lpush('somekey', 'b')
- assert r.lpushx('somekey', 'foo', 'asdasd', 55, 'asdasdas') == 6
- res = r.lrange('somekey', 0, -1)
- assert res == [b'asdasdas', b'55', b'asdasd', b'foo', b'b', b'a']
+ r.lpush("somekey", "a")
+ r.lpush("somekey", "b")
+ assert r.lpushx("somekey", "foo", "asdasd", 55, "asdasdas") == 6
+ res = r.lrange("somekey", 0, -1)
+ assert res == [b"asdasdas", b"55", b"asdasd", b"foo", b"b", b"a"]
def test_lrange(self, r):
- r.rpush('a', '1', '2', '3', '4', '5')
- assert r.lrange('a', 0, 2) == [b'1', b'2', b'3']
- assert r.lrange('a', 2, 10) == [b'3', b'4', b'5']
- assert r.lrange('a', 0, -1) == [b'1', b'2', b'3', b'4', b'5']
+ r.rpush("a", "1", "2", "3", "4", "5")
+ assert r.lrange("a", 0, 2) == [b"1", b"2", b"3"]
+ assert r.lrange("a", 2, 10) == [b"3", b"4", b"5"]
+ assert r.lrange("a", 0, -1) == [b"1", b"2", b"3", b"4", b"5"]
def test_lrem(self, r):
- r.rpush('a', 'Z', 'b', 'Z', 'Z', 'c', 'Z', 'Z')
+ r.rpush("a", "Z", "b", "Z", "Z", "c", "Z", "Z")
# remove the first 'Z' item
- assert r.lrem('a', 1, 'Z') == 1
- assert r.lrange('a', 0, -1) == [b'b', b'Z', b'Z', b'c', b'Z', b'Z']
+ assert r.lrem("a", 1, "Z") == 1
+ assert r.lrange("a", 0, -1) == [b"b", b"Z", b"Z", b"c", b"Z", b"Z"]
# remove the last 2 'Z' items
- assert r.lrem('a', -2, 'Z') == 2
- assert r.lrange('a', 0, -1) == [b'b', b'Z', b'Z', b'c']
+ assert r.lrem("a", -2, "Z") == 2
+ assert r.lrange("a", 0, -1) == [b"b", b"Z", b"Z", b"c"]
# remove all 'Z' items
- assert r.lrem('a', 0, 'Z') == 2
- assert r.lrange('a', 0, -1) == [b'b', b'c']
+ assert r.lrem("a", 0, "Z") == 2
+ assert r.lrange("a", 0, -1) == [b"b", b"c"]
def test_lset(self, r):
- r.rpush('a', '1', '2', '3')
- assert r.lrange('a', 0, -1) == [b'1', b'2', b'3']
- assert r.lset('a', 1, '4')
- assert r.lrange('a', 0, 2) == [b'1', b'4', b'3']
+ r.rpush("a", "1", "2", "3")
+ assert r.lrange("a", 0, -1) == [b"1", b"2", b"3"]
+ assert r.lset("a", 1, "4")
+ assert r.lrange("a", 0, 2) == [b"1", b"4", b"3"]
def test_ltrim(self, r):
- r.rpush('a', '1', '2', '3')
- assert r.ltrim('a', 0, 1)
- assert r.lrange('a', 0, -1) == [b'1', b'2']
+ r.rpush("a", "1", "2", "3")
+ assert r.ltrim("a", 0, 1)
+ assert r.lrange("a", 0, -1) == [b"1", b"2"]
def test_rpop(self, r):
- r.rpush('a', '1', '2', '3')
- assert r.rpop('a') == b'3'
- assert r.rpop('a') == b'2'
- assert r.rpop('a') == b'1'
- assert r.rpop('a') is None
+ r.rpush("a", "1", "2", "3")
+ assert r.rpop("a") == b"3"
+ assert r.rpop("a") == b"2"
+ assert r.rpop("a") == b"1"
+ assert r.rpop("a") is None
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_rpop_count(self, r):
- r.rpush('a', '1', '2', '3')
- assert r.rpop('a', 2) == [b'3', b'2']
- assert r.rpop('a', 1) == [b'1']
- assert r.rpop('a') is None
- assert r.rpop('a', 3) is None
+ r.rpush("a", "1", "2", "3")
+ assert r.rpop("a", 2) == [b"3", b"2"]
+ assert r.rpop("a", 1) == [b"1"]
+ assert r.rpop("a") is None
+ assert r.rpop("a", 3) is None
@pytest.mark.onlynoncluster
def test_rpoplpush(self, r):
- r.rpush('a', 'a1', 'a2', 'a3')
- r.rpush('b', 'b1', 'b2', 'b3')
- assert r.rpoplpush('a', 'b') == b'a3'
- assert r.lrange('a', 0, -1) == [b'a1', b'a2']
- assert r.lrange('b', 0, -1) == [b'a3', b'b1', b'b2', b'b3']
+ r.rpush("a", "a1", "a2", "a3")
+ r.rpush("b", "b1", "b2", "b3")
+ assert r.rpoplpush("a", "b") == b"a3"
+ assert r.lrange("a", 0, -1) == [b"a1", b"a2"]
+ assert r.lrange("b", 0, -1) == [b"a3", b"b1", b"b2", b"b3"]
def test_rpush(self, r):
- assert r.rpush('a', '1') == 1
- assert r.rpush('a', '2') == 2
- assert r.rpush('a', '3', '4') == 4
- assert r.lrange('a', 0, -1) == [b'1', b'2', b'3', b'4']
+ assert r.rpush("a", "1") == 1
+ assert r.rpush("a", "2") == 2
+ assert r.rpush("a", "3", "4") == 4
+ assert r.lrange("a", 0, -1) == [b"1", b"2", b"3", b"4"]
- @skip_if_server_version_lt('6.0.6')
+ @skip_if_server_version_lt("6.0.6")
def test_lpos(self, r):
- assert r.rpush('a', 'a', 'b', 'c', '1', '2', '3', 'c', 'c') == 8
- assert r.lpos('a', 'a') == 0
- assert r.lpos('a', 'c') == 2
+ assert r.rpush("a", "a", "b", "c", "1", "2", "3", "c", "c") == 8
+ assert r.lpos("a", "a") == 0
+ assert r.lpos("a", "c") == 2
- assert r.lpos('a', 'c', rank=1) == 2
- assert r.lpos('a', 'c', rank=2) == 6
- assert r.lpos('a', 'c', rank=4) is None
- assert r.lpos('a', 'c', rank=-1) == 7
- assert r.lpos('a', 'c', rank=-2) == 6
+ assert r.lpos("a", "c", rank=1) == 2
+ assert r.lpos("a", "c", rank=2) == 6
+ assert r.lpos("a", "c", rank=4) is None
+ assert r.lpos("a", "c", rank=-1) == 7
+ assert r.lpos("a", "c", rank=-2) == 6
- assert r.lpos('a', 'c', count=0) == [2, 6, 7]
- assert r.lpos('a', 'c', count=1) == [2]
- assert r.lpos('a', 'c', count=2) == [2, 6]
- assert r.lpos('a', 'c', count=100) == [2, 6, 7]
+ assert r.lpos("a", "c", count=0) == [2, 6, 7]
+ assert r.lpos("a", "c", count=1) == [2]
+ assert r.lpos("a", "c", count=2) == [2, 6]
+ assert r.lpos("a", "c", count=100) == [2, 6, 7]
- assert r.lpos('a', 'c', count=0, rank=2) == [6, 7]
- assert r.lpos('a', 'c', count=2, rank=-1) == [7, 6]
+ assert r.lpos("a", "c", count=0, rank=2) == [6, 7]
+ assert r.lpos("a", "c", count=2, rank=-1) == [7, 6]
- assert r.lpos('axxx', 'c', count=0, rank=2) == []
- assert r.lpos('axxx', 'c') is None
+ assert r.lpos("axxx", "c", count=0, rank=2) == []
+ assert r.lpos("axxx", "c") is None
- assert r.lpos('a', 'x', count=2) == []
- assert r.lpos('a', 'x') is None
+ assert r.lpos("a", "x", count=2) == []
+ assert r.lpos("a", "x") is None
- assert r.lpos('a', 'a', count=0, maxlen=1) == [0]
- assert r.lpos('a', 'c', count=0, maxlen=1) == []
- assert r.lpos('a', 'c', count=0, maxlen=3) == [2]
- assert r.lpos('a', 'c', count=0, maxlen=3, rank=-1) == [7, 6]
- assert r.lpos('a', 'c', count=0, maxlen=7, rank=2) == [6]
+ assert r.lpos("a", "a", count=0, maxlen=1) == [0]
+ assert r.lpos("a", "c", count=0, maxlen=1) == []
+ assert r.lpos("a", "c", count=0, maxlen=3) == [2]
+ assert r.lpos("a", "c", count=0, maxlen=3, rank=-1) == [7, 6]
+ assert r.lpos("a", "c", count=0, maxlen=7, rank=2) == [6]
def test_rpushx(self, r):
- assert r.rpushx('a', 'b') == 0
- assert r.lrange('a', 0, -1) == []
- r.rpush('a', '1', '2', '3')
- assert r.rpushx('a', '4') == 4
- assert r.lrange('a', 0, -1) == [b'1', b'2', b'3', b'4']
+ assert r.rpushx("a", "b") == 0
+ assert r.lrange("a", 0, -1) == []
+ r.rpush("a", "1", "2", "3")
+ assert r.rpushx("a", "4") == 4
+ assert r.lrange("a", 0, -1) == [b"1", b"2", b"3", b"4"]
# SCAN COMMANDS
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('2.8.0')
+ @skip_if_server_version_lt("2.8.0")
def test_scan(self, r):
- r.set('a', 1)
- r.set('b', 2)
- r.set('c', 3)
+ r.set("a", 1)
+ r.set("b", 2)
+ r.set("c", 3)
cursor, keys = r.scan()
assert cursor == 0
- assert set(keys) == {b'a', b'b', b'c'}
- _, keys = r.scan(match='a')
- assert set(keys) == {b'a'}
+ assert set(keys) == {b"a", b"b", b"c"}
+ _, keys = r.scan(match="a")
+ assert set(keys) == {b"a"}
@pytest.mark.onlynoncluster
@skip_if_server_version_lt("6.0.0")
def test_scan_type(self, r):
- r.sadd('a-set', 1)
- r.hset('a-hash', 'foo', 2)
- r.lpush('a-list', 'aux', 3)
- _, keys = r.scan(match='a*', _type='SET')
- assert set(keys) == {b'a-set'}
+ r.sadd("a-set", 1)
+ r.hset("a-hash", "foo", 2)
+ r.lpush("a-list", "aux", 3)
+ _, keys = r.scan(match="a*", _type="SET")
+ assert set(keys) == {b"a-set"}
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('2.8.0')
+ @skip_if_server_version_lt("2.8.0")
def test_scan_iter(self, r):
- r.set('a', 1)
- r.set('b', 2)
- r.set('c', 3)
+ r.set("a", 1)
+ r.set("b", 2)
+ r.set("c", 3)
keys = list(r.scan_iter())
- assert set(keys) == {b'a', b'b', b'c'}
- keys = list(r.scan_iter(match='a'))
- assert set(keys) == {b'a'}
+ assert set(keys) == {b"a", b"b", b"c"}
+ keys = list(r.scan_iter(match="a"))
+ assert set(keys) == {b"a"}
- @skip_if_server_version_lt('2.8.0')
+ @skip_if_server_version_lt("2.8.0")
def test_sscan(self, r):
- r.sadd('a', 1, 2, 3)
- cursor, members = r.sscan('a')
+ r.sadd("a", 1, 2, 3)
+ cursor, members = r.sscan("a")
assert cursor == 0
- assert set(members) == {b'1', b'2', b'3'}
- _, members = r.sscan('a', match=b'1')
- assert set(members) == {b'1'}
+ assert set(members) == {b"1", b"2", b"3"}
+ _, members = r.sscan("a", match=b"1")
+ assert set(members) == {b"1"}
- @skip_if_server_version_lt('2.8.0')
+ @skip_if_server_version_lt("2.8.0")
def test_sscan_iter(self, r):
- r.sadd('a', 1, 2, 3)
- members = list(r.sscan_iter('a'))
- assert set(members) == {b'1', b'2', b'3'}
- members = list(r.sscan_iter('a', match=b'1'))
- assert set(members) == {b'1'}
+ r.sadd("a", 1, 2, 3)
+ members = list(r.sscan_iter("a"))
+ assert set(members) == {b"1", b"2", b"3"}
+ members = list(r.sscan_iter("a", match=b"1"))
+ assert set(members) == {b"1"}
- @skip_if_server_version_lt('2.8.0')
+ @skip_if_server_version_lt("2.8.0")
def test_hscan(self, r):
- r.hset('a', mapping={'a': 1, 'b': 2, 'c': 3})
- cursor, dic = r.hscan('a')
+ r.hset("a", mapping={"a": 1, "b": 2, "c": 3})
+ cursor, dic = r.hscan("a")
assert cursor == 0
- assert dic == {b'a': b'1', b'b': b'2', b'c': b'3'}
- _, dic = r.hscan('a', match='a')
- assert dic == {b'a': b'1'}
+ assert dic == {b"a": b"1", b"b": b"2", b"c": b"3"}
+ _, dic = r.hscan("a", match="a")
+ assert dic == {b"a": b"1"}
- @skip_if_server_version_lt('2.8.0')
+ @skip_if_server_version_lt("2.8.0")
def test_hscan_iter(self, r):
- r.hset('a', mapping={'a': 1, 'b': 2, 'c': 3})
- dic = dict(r.hscan_iter('a'))
- assert dic == {b'a': b'1', b'b': b'2', b'c': b'3'}
- dic = dict(r.hscan_iter('a', match='a'))
- assert dic == {b'a': b'1'}
+ r.hset("a", mapping={"a": 1, "b": 2, "c": 3})
+ dic = dict(r.hscan_iter("a"))
+ assert dic == {b"a": b"1", b"b": b"2", b"c": b"3"}
+ dic = dict(r.hscan_iter("a", match="a"))
+ assert dic == {b"a": b"1"}
- @skip_if_server_version_lt('2.8.0')
+ @skip_if_server_version_lt("2.8.0")
def test_zscan(self, r):
- r.zadd('a', {'a': 1, 'b': 2, 'c': 3})
- cursor, pairs = r.zscan('a')
+ r.zadd("a", {"a": 1, "b": 2, "c": 3})
+ cursor, pairs = r.zscan("a")
assert cursor == 0
- assert set(pairs) == {(b'a', 1), (b'b', 2), (b'c', 3)}
- _, pairs = r.zscan('a', match='a')
- assert set(pairs) == {(b'a', 1)}
+ assert set(pairs) == {(b"a", 1), (b"b", 2), (b"c", 3)}
+ _, pairs = r.zscan("a", match="a")
+ assert set(pairs) == {(b"a", 1)}
- @skip_if_server_version_lt('2.8.0')
+ @skip_if_server_version_lt("2.8.0")
def test_zscan_iter(self, r):
- r.zadd('a', {'a': 1, 'b': 2, 'c': 3})
- pairs = list(r.zscan_iter('a'))
- assert set(pairs) == {(b'a', 1), (b'b', 2), (b'c', 3)}
- pairs = list(r.zscan_iter('a', match='a'))
- assert set(pairs) == {(b'a', 1)}
+ r.zadd("a", {"a": 1, "b": 2, "c": 3})
+ pairs = list(r.zscan_iter("a"))
+ assert set(pairs) == {(b"a", 1), (b"b", 2), (b"c", 3)}
+ pairs = list(r.zscan_iter("a", match="a"))
+ assert set(pairs) == {(b"a", 1)}
# SET COMMANDS
def test_sadd(self, r):
- members = {b'1', b'2', b'3'}
- r.sadd('a', *members)
- assert r.smembers('a') == members
+ members = {b"1", b"2", b"3"}
+ r.sadd("a", *members)
+ assert r.smembers("a") == members
def test_scard(self, r):
- r.sadd('a', '1', '2', '3')
- assert r.scard('a') == 3
+ r.sadd("a", "1", "2", "3")
+ assert r.scard("a") == 3
@pytest.mark.onlynoncluster
def test_sdiff(self, r):
- r.sadd('a', '1', '2', '3')
- assert r.sdiff('a', 'b') == {b'1', b'2', b'3'}
- r.sadd('b', '2', '3')
- assert r.sdiff('a', 'b') == {b'1'}
+ r.sadd("a", "1", "2", "3")
+ assert r.sdiff("a", "b") == {b"1", b"2", b"3"}
+ r.sadd("b", "2", "3")
+ assert r.sdiff("a", "b") == {b"1"}
@pytest.mark.onlynoncluster
def test_sdiffstore(self, r):
- r.sadd('a', '1', '2', '3')
- assert r.sdiffstore('c', 'a', 'b') == 3
- assert r.smembers('c') == {b'1', b'2', b'3'}
- r.sadd('b', '2', '3')
- assert r.sdiffstore('c', 'a', 'b') == 1
- assert r.smembers('c') == {b'1'}
+ r.sadd("a", "1", "2", "3")
+ assert r.sdiffstore("c", "a", "b") == 3
+ assert r.smembers("c") == {b"1", b"2", b"3"}
+ r.sadd("b", "2", "3")
+ assert r.sdiffstore("c", "a", "b") == 1
+ assert r.smembers("c") == {b"1"}
@pytest.mark.onlynoncluster
def test_sinter(self, r):
- r.sadd('a', '1', '2', '3')
- assert r.sinter('a', 'b') == set()
- r.sadd('b', '2', '3')
- assert r.sinter('a', 'b') == {b'2', b'3'}
+ r.sadd("a", "1", "2", "3")
+ assert r.sinter("a", "b") == set()
+ r.sadd("b", "2", "3")
+ assert r.sinter("a", "b") == {b"2", b"3"}
@pytest.mark.onlynoncluster
def test_sinterstore(self, r):
- r.sadd('a', '1', '2', '3')
- assert r.sinterstore('c', 'a', 'b') == 0
- assert r.smembers('c') == set()
- r.sadd('b', '2', '3')
- assert r.sinterstore('c', 'a', 'b') == 2
- assert r.smembers('c') == {b'2', b'3'}
+ r.sadd("a", "1", "2", "3")
+ assert r.sinterstore("c", "a", "b") == 0
+ assert r.smembers("c") == set()
+ r.sadd("b", "2", "3")
+ assert r.sinterstore("c", "a", "b") == 2
+ assert r.smembers("c") == {b"2", b"3"}
def test_sismember(self, r):
- r.sadd('a', '1', '2', '3')
- assert r.sismember('a', '1')
- assert r.sismember('a', '2')
- assert r.sismember('a', '3')
- assert not r.sismember('a', '4')
+ r.sadd("a", "1", "2", "3")
+ assert r.sismember("a", "1")
+ assert r.sismember("a", "2")
+ assert r.sismember("a", "3")
+ assert not r.sismember("a", "4")
def test_smembers(self, r):
- r.sadd('a', '1', '2', '3')
- assert r.smembers('a') == {b'1', b'2', b'3'}
+ r.sadd("a", "1", "2", "3")
+ assert r.smembers("a") == {b"1", b"2", b"3"}
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_smismember(self, r):
- r.sadd('a', '1', '2', '3')
+ r.sadd("a", "1", "2", "3")
result_list = [True, False, True, True]
- assert r.smismember('a', '1', '4', '2', '3') == result_list
- assert r.smismember('a', ['1', '4', '2', '3']) == result_list
+ assert r.smismember("a", "1", "4", "2", "3") == result_list
+ assert r.smismember("a", ["1", "4", "2", "3"]) == result_list
@pytest.mark.onlynoncluster
def test_smove(self, r):
- r.sadd('a', 'a1', 'a2')
- r.sadd('b', 'b1', 'b2')
- assert r.smove('a', 'b', 'a1')
- assert r.smembers('a') == {b'a2'}
- assert r.smembers('b') == {b'b1', b'b2', b'a1'}
+ r.sadd("a", "a1", "a2")
+ r.sadd("b", "b1", "b2")
+ assert r.smove("a", "b", "a1")
+ assert r.smembers("a") == {b"a2"}
+ assert r.smembers("b") == {b"b1", b"b2", b"a1"}
def test_spop(self, r):
- s = [b'1', b'2', b'3']
- r.sadd('a', *s)
- value = r.spop('a')
+ s = [b"1", b"2", b"3"]
+ r.sadd("a", *s)
+ value = r.spop("a")
assert value in s
- assert r.smembers('a') == set(s) - {value}
+ assert r.smembers("a") == set(s) - {value}
- @skip_if_server_version_lt('3.2.0')
+ @skip_if_server_version_lt("3.2.0")
def test_spop_multi_value(self, r):
- s = [b'1', b'2', b'3']
- r.sadd('a', *s)
- values = r.spop('a', 2)
+ s = [b"1", b"2", b"3"]
+ r.sadd("a", *s)
+ values = r.spop("a", 2)
assert len(values) == 2
for value in values:
assert value in s
- assert r.spop('a', 1) == list(set(s) - set(values))
+ assert r.spop("a", 1) == list(set(s) - set(values))
def test_srandmember(self, r):
- s = [b'1', b'2', b'3']
- r.sadd('a', *s)
- assert r.srandmember('a') in s
+ s = [b"1", b"2", b"3"]
+ r.sadd("a", *s)
+ assert r.srandmember("a") in s
- @skip_if_server_version_lt('2.6.0')
+ @skip_if_server_version_lt("2.6.0")
def test_srandmember_multi_value(self, r):
- s = [b'1', b'2', b'3']
- r.sadd('a', *s)
- randoms = r.srandmember('a', number=2)
+ s = [b"1", b"2", b"3"]
+ r.sadd("a", *s)
+ randoms = r.srandmember("a", number=2)
assert len(randoms) == 2
assert set(randoms).intersection(s) == set(randoms)
def test_srem(self, r):
- r.sadd('a', '1', '2', '3', '4')
- assert r.srem('a', '5') == 0
- assert r.srem('a', '2', '4') == 2
- assert r.smembers('a') == {b'1', b'3'}
+ r.sadd("a", "1", "2", "3", "4")
+ assert r.srem("a", "5") == 0
+ assert r.srem("a", "2", "4") == 2
+ assert r.smembers("a") == {b"1", b"3"}
@pytest.mark.onlynoncluster
def test_sunion(self, r):
- r.sadd('a', '1', '2')
- r.sadd('b', '2', '3')
- assert r.sunion('a', 'b') == {b'1', b'2', b'3'}
+ r.sadd("a", "1", "2")
+ r.sadd("b", "2", "3")
+ assert r.sunion("a", "b") == {b"1", b"2", b"3"}
@pytest.mark.onlynoncluster
def test_sunionstore(self, r):
- r.sadd('a', '1', '2')
- r.sadd('b', '2', '3')
- assert r.sunionstore('c', 'a', 'b') == 3
- assert r.smembers('c') == {b'1', b'2', b'3'}
+ r.sadd("a", "1", "2")
+ r.sadd("b", "2", "3")
+ assert r.sunionstore("c", "a", "b") == 3
+ assert r.smembers("c") == {b"1", b"2", b"3"}
- @skip_if_server_version_lt('1.0.0')
+ @skip_if_server_version_lt("1.0.0")
def test_debug_segfault(self, r):
with pytest.raises(NotImplementedError):
r.debug_segfault()
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('3.2.0')
+ @skip_if_server_version_lt("3.2.0")
def test_script_debug(self, r):
with pytest.raises(NotImplementedError):
r.script_debug()
# SORTED SET COMMANDS
def test_zadd(self, r):
- mapping = {'a1': 1.0, 'a2': 2.0, 'a3': 3.0}
- r.zadd('a', mapping)
- assert r.zrange('a', 0, -1, withscores=True) == \
- [(b'a1', 1.0), (b'a2', 2.0), (b'a3', 3.0)]
+ mapping = {"a1": 1.0, "a2": 2.0, "a3": 3.0}
+ r.zadd("a", mapping)
+ assert r.zrange("a", 0, -1, withscores=True) == [
+ (b"a1", 1.0),
+ (b"a2", 2.0),
+ (b"a3", 3.0),
+ ]
# error cases
with pytest.raises(exceptions.DataError):
- r.zadd('a', {})
+ r.zadd("a", {})
# cannot use both nx and xx options
with pytest.raises(exceptions.DataError):
- r.zadd('a', mapping, nx=True, xx=True)
+ r.zadd("a", mapping, nx=True, xx=True)
# cannot use the incr options with more than one value
with pytest.raises(exceptions.DataError):
- r.zadd('a', mapping, incr=True)
+ r.zadd("a", mapping, incr=True)
def test_zadd_nx(self, r):
- assert r.zadd('a', {'a1': 1}) == 1
- assert r.zadd('a', {'a1': 99, 'a2': 2}, nx=True) == 1
- assert r.zrange('a', 0, -1, withscores=True) == \
- [(b'a1', 1.0), (b'a2', 2.0)]
+ assert r.zadd("a", {"a1": 1}) == 1
+ assert r.zadd("a", {"a1": 99, "a2": 2}, nx=True) == 1
+ assert r.zrange("a", 0, -1, withscores=True) == [(b"a1", 1.0), (b"a2", 2.0)]
def test_zadd_xx(self, r):
- assert r.zadd('a', {'a1': 1}) == 1
- assert r.zadd('a', {'a1': 99, 'a2': 2}, xx=True) == 0
- assert r.zrange('a', 0, -1, withscores=True) == \
- [(b'a1', 99.0)]
+ assert r.zadd("a", {"a1": 1}) == 1
+ assert r.zadd("a", {"a1": 99, "a2": 2}, xx=True) == 0
+ assert r.zrange("a", 0, -1, withscores=True) == [(b"a1", 99.0)]
def test_zadd_ch(self, r):
- assert r.zadd('a', {'a1': 1}) == 1
- assert r.zadd('a', {'a1': 99, 'a2': 2}, ch=True) == 2
- assert r.zrange('a', 0, -1, withscores=True) == \
- [(b'a2', 2.0), (b'a1', 99.0)]
+ assert r.zadd("a", {"a1": 1}) == 1
+ assert r.zadd("a", {"a1": 99, "a2": 2}, ch=True) == 2
+ assert r.zrange("a", 0, -1, withscores=True) == [(b"a2", 2.0), (b"a1", 99.0)]
def test_zadd_incr(self, r):
- assert r.zadd('a', {'a1': 1}) == 1
- assert r.zadd('a', {'a1': 4.5}, incr=True) == 5.5
+ assert r.zadd("a", {"a1": 1}) == 1
+ assert r.zadd("a", {"a1": 4.5}, incr=True) == 5.5
def test_zadd_incr_with_xx(self, r):
# this asks zadd to incr 'a1' only if it exists, but it clearly
# doesn't. Redis returns a null value in this case and so should
# redis-py
- assert r.zadd('a', {'a1': 1}, xx=True, incr=True) is None
+ assert r.zadd("a", {"a1": 1}, xx=True, incr=True) is None
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_zadd_gt_lt(self, r):
for i in range(1, 20):
- r.zadd('a', {f'a{i}': i})
- assert r.zadd('a', {'a20': 5}, gt=3) == 1
+ r.zadd("a", {f"a{i}": i})
+ assert r.zadd("a", {"a20": 5}, gt=3) == 1
for i in range(1, 20):
- r.zadd('a', {f'a{i}': i})
- assert r.zadd('a', {'a2': 5}, lt=1) == 0
+ r.zadd("a", {f"a{i}": i})
+ assert r.zadd("a", {"a2": 5}, lt=1) == 0
# cannot use both nx and xx options
with pytest.raises(exceptions.DataError):
- r.zadd('a', {'a15': 155}, nx=True, lt=True)
- r.zadd('a', {'a15': 155}, nx=True, gt=True)
- r.zadd('a', {'a15': 155}, lt=True, gt=True)
+ r.zadd("a", {"a15": 155}, nx=True, lt=True)
+ r.zadd("a", {"a15": 155}, nx=True, gt=True)
+ r.zadd("a", {"a15": 155}, lt=True, gt=True)
def test_zcard(self, r):
- r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3})
- assert r.zcard('a') == 3
+ r.zadd("a", {"a1": 1, "a2": 2, "a3": 3})
+ assert r.zcard("a") == 3
def test_zcount(self, r):
- r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3})
- assert r.zcount('a', '-inf', '+inf') == 3
- assert r.zcount('a', 1, 2) == 2
- assert r.zcount('a', '(' + str(1), 2) == 1
- assert r.zcount('a', 1, '(' + str(2)) == 1
- assert r.zcount('a', 10, 20) == 0
+ r.zadd("a", {"a1": 1, "a2": 2, "a3": 3})
+ assert r.zcount("a", "-inf", "+inf") == 3
+ assert r.zcount("a", 1, 2) == 2
+ assert r.zcount("a", "(" + str(1), 2) == 1
+ assert r.zcount("a", 1, "(" + str(2)) == 1
+ assert r.zcount("a", 10, 20) == 0
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_zdiff(self, r):
- r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3})
- r.zadd('b', {'a1': 1, 'a2': 2})
- assert r.zdiff(['a', 'b']) == [b'a3']
- assert r.zdiff(['a', 'b'], withscores=True) == [b'a3', b'3']
+ r.zadd("a", {"a1": 1, "a2": 2, "a3": 3})
+ r.zadd("b", {"a1": 1, "a2": 2})
+ assert r.zdiff(["a", "b"]) == [b"a3"]
+ assert r.zdiff(["a", "b"], withscores=True) == [b"a3", b"3"]
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_zdiffstore(self, r):
- r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3})
- r.zadd('b', {'a1': 1, 'a2': 2})
- assert r.zdiffstore("out", ['a', 'b'])
- assert r.zrange("out", 0, -1) == [b'a3']
- assert r.zrange("out", 0, -1, withscores=True) == [(b'a3', 3.0)]
+ r.zadd("a", {"a1": 1, "a2": 2, "a3": 3})
+ r.zadd("b", {"a1": 1, "a2": 2})
+ assert r.zdiffstore("out", ["a", "b"])
+ assert r.zrange("out", 0, -1) == [b"a3"]
+ assert r.zrange("out", 0, -1, withscores=True) == [(b"a3", 3.0)]
def test_zincrby(self, r):
- r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3})
- assert r.zincrby('a', 1, 'a2') == 3.0
- assert r.zincrby('a', 5, 'a3') == 8.0
- assert r.zscore('a', 'a2') == 3.0
- assert r.zscore('a', 'a3') == 8.0
+ r.zadd("a", {"a1": 1, "a2": 2, "a3": 3})
+ assert r.zincrby("a", 1, "a2") == 3.0
+ assert r.zincrby("a", 5, "a3") == 8.0
+ assert r.zscore("a", "a2") == 3.0
+ assert r.zscore("a", "a3") == 8.0
- @skip_if_server_version_lt('2.8.9')
+ @skip_if_server_version_lt("2.8.9")
def test_zlexcount(self, r):
- r.zadd('a', {'a': 0, 'b': 0, 'c': 0, 'd': 0, 'e': 0, 'f': 0, 'g': 0})
- assert r.zlexcount('a', '-', '+') == 7
- assert r.zlexcount('a', '[b', '[f') == 5
+ r.zadd("a", {"a": 0, "b": 0, "c": 0, "d": 0, "e": 0, "f": 0, "g": 0})
+ assert r.zlexcount("a", "-", "+") == 7
+ assert r.zlexcount("a", "[b", "[f") == 5
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_zinter(self, r):
- r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 1})
- r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2})
- r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4})
- assert r.zinter(['a', 'b', 'c']) == [b'a3', b'a1']
+ r.zadd("a", {"a1": 1, "a2": 2, "a3": 1})
+ r.zadd("b", {"a1": 2, "a2": 2, "a3": 2})
+ r.zadd("c", {"a1": 6, "a3": 5, "a4": 4})
+ assert r.zinter(["a", "b", "c"]) == [b"a3", b"a1"]
# invalid aggregation
with pytest.raises(exceptions.DataError):
- r.zinter(['a', 'b', 'c'], aggregate='foo', withscores=True)
+ r.zinter(["a", "b", "c"], aggregate="foo", withscores=True)
# aggregate with SUM
- assert r.zinter(['a', 'b', 'c'], withscores=True) \
- == [(b'a3', 8), (b'a1', 9)]
+ assert r.zinter(["a", "b", "c"], withscores=True) == [(b"a3", 8), (b"a1", 9)]
# aggregate with MAX
- assert r.zinter(['a', 'b', 'c'], aggregate='MAX', withscores=True) \
- == [(b'a3', 5), (b'a1', 6)]
+ assert r.zinter(["a", "b", "c"], aggregate="MAX", withscores=True) == [
+ (b"a3", 5),
+ (b"a1", 6),
+ ]
# aggregate with MIN
- assert r.zinter(['a', 'b', 'c'], aggregate='MIN', withscores=True) \
- == [(b'a1', 1), (b'a3', 1)]
+ assert r.zinter(["a", "b", "c"], aggregate="MIN", withscores=True) == [
+ (b"a1", 1),
+ (b"a3", 1),
+ ]
# with weights
- assert r.zinter({'a': 1, 'b': 2, 'c': 3}, withscores=True) \
- == [(b'a3', 20), (b'a1', 23)]
+ assert r.zinter({"a": 1, "b": 2, "c": 3}, withscores=True) == [
+ (b"a3", 20),
+ (b"a1", 23),
+ ]
@pytest.mark.onlynoncluster
def test_zinterstore_sum(self, r):
- r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1})
- r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2})
- r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4})
- assert r.zinterstore('d', ['a', 'b', 'c']) == 2
- assert r.zrange('d', 0, -1, withscores=True) == \
- [(b'a3', 8), (b'a1', 9)]
+ r.zadd("a", {"a1": 1, "a2": 1, "a3": 1})
+ r.zadd("b", {"a1": 2, "a2": 2, "a3": 2})
+ r.zadd("c", {"a1": 6, "a3": 5, "a4": 4})
+ assert r.zinterstore("d", ["a", "b", "c"]) == 2
+ assert r.zrange("d", 0, -1, withscores=True) == [(b"a3", 8), (b"a1", 9)]
@pytest.mark.onlynoncluster
def test_zinterstore_max(self, r):
- r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1})
- r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2})
- r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4})
- assert r.zinterstore('d', ['a', 'b', 'c'], aggregate='MAX') == 2
- assert r.zrange('d', 0, -1, withscores=True) == \
- [(b'a3', 5), (b'a1', 6)]
+ r.zadd("a", {"a1": 1, "a2": 1, "a3": 1})
+ r.zadd("b", {"a1": 2, "a2": 2, "a3": 2})
+ r.zadd("c", {"a1": 6, "a3": 5, "a4": 4})
+ assert r.zinterstore("d", ["a", "b", "c"], aggregate="MAX") == 2
+ assert r.zrange("d", 0, -1, withscores=True) == [(b"a3", 5), (b"a1", 6)]
@pytest.mark.onlynoncluster
def test_zinterstore_min(self, r):
- r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3})
- r.zadd('b', {'a1': 2, 'a2': 3, 'a3': 5})
- r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4})
- assert r.zinterstore('d', ['a', 'b', 'c'], aggregate='MIN') == 2
- assert r.zrange('d', 0, -1, withscores=True) == \
- [(b'a1', 1), (b'a3', 3)]
+ r.zadd("a", {"a1": 1, "a2": 2, "a3": 3})
+ r.zadd("b", {"a1": 2, "a2": 3, "a3": 5})
+ r.zadd("c", {"a1": 6, "a3": 5, "a4": 4})
+ assert r.zinterstore("d", ["a", "b", "c"], aggregate="MIN") == 2
+ assert r.zrange("d", 0, -1, withscores=True) == [(b"a1", 1), (b"a3", 3)]
@pytest.mark.onlynoncluster
def test_zinterstore_with_weight(self, r):
- r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1})
- r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2})
- r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4})
- assert r.zinterstore('d', {'a': 1, 'b': 2, 'c': 3}) == 2
- assert r.zrange('d', 0, -1, withscores=True) == \
- [(b'a3', 20), (b'a1', 23)]
-
- @skip_if_server_version_lt('4.9.0')
+ r.zadd("a", {"a1": 1, "a2": 1, "a3": 1})
+ r.zadd("b", {"a1": 2, "a2": 2, "a3": 2})
+ r.zadd("c", {"a1": 6, "a3": 5, "a4": 4})
+ assert r.zinterstore("d", {"a": 1, "b": 2, "c": 3}) == 2
+ assert r.zrange("d", 0, -1, withscores=True) == [(b"a3", 20), (b"a1", 23)]
+
+ @skip_if_server_version_lt("4.9.0")
def test_zpopmax(self, r):
- r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3})
- assert r.zpopmax('a') == [(b'a3', 3)]
+ r.zadd("a", {"a1": 1, "a2": 2, "a3": 3})
+ assert r.zpopmax("a") == [(b"a3", 3)]
# with count
- assert r.zpopmax('a', count=2) == \
- [(b'a2', 2), (b'a1', 1)]
+ assert r.zpopmax("a", count=2) == [(b"a2", 2), (b"a1", 1)]
- @skip_if_server_version_lt('4.9.0')
+ @skip_if_server_version_lt("4.9.0")
def test_zpopmin(self, r):
- r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3})
- assert r.zpopmin('a') == [(b'a1', 1)]
+ r.zadd("a", {"a1": 1, "a2": 2, "a3": 3})
+ assert r.zpopmin("a") == [(b"a1", 1)]
# with count
- assert r.zpopmin('a', count=2) == \
- [(b'a2', 2), (b'a3', 3)]
+ assert r.zpopmin("a", count=2) == [(b"a2", 2), (b"a3", 3)]
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_zrandemember(self, r):
- r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3, 'a4': 4, 'a5': 5})
- assert r.zrandmember('a') is not None
- assert len(r.zrandmember('a', 2)) == 2
+ r.zadd("a", {"a1": 1, "a2": 2, "a3": 3, "a4": 4, "a5": 5})
+ assert r.zrandmember("a") is not None
+ assert len(r.zrandmember("a", 2)) == 2
# with scores
- assert len(r.zrandmember('a', 2, True)) == 4
+ assert len(r.zrandmember("a", 2, True)) == 4
# without duplications
- assert len(r.zrandmember('a', 10)) == 5
+ assert len(r.zrandmember("a", 10)) == 5
# with duplications
- assert len(r.zrandmember('a', -10)) == 10
+ assert len(r.zrandmember("a", -10)) == 10
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('4.9.0')
+ @skip_if_server_version_lt("4.9.0")
def test_bzpopmax(self, r):
- r.zadd('a', {'a1': 1, 'a2': 2})
- r.zadd('b', {'b1': 10, 'b2': 20})
- assert r.bzpopmax(['b', 'a'], timeout=1) == (b'b', b'b2', 20)
- assert r.bzpopmax(['b', 'a'], timeout=1) == (b'b', b'b1', 10)
- assert r.bzpopmax(['b', 'a'], timeout=1) == (b'a', b'a2', 2)
- assert r.bzpopmax(['b', 'a'], timeout=1) == (b'a', b'a1', 1)
- assert r.bzpopmax(['b', 'a'], timeout=1) is None
- r.zadd('c', {'c1': 100})
- assert r.bzpopmax('c', timeout=1) == (b'c', b'c1', 100)
-
- @pytest.mark.onlynoncluster
- @skip_if_server_version_lt('4.9.0')
+ r.zadd("a", {"a1": 1, "a2": 2})
+ r.zadd("b", {"b1": 10, "b2": 20})
+ assert r.bzpopmax(["b", "a"], timeout=1) == (b"b", b"b2", 20)
+ assert r.bzpopmax(["b", "a"], timeout=1) == (b"b", b"b1", 10)
+ assert r.bzpopmax(["b", "a"], timeout=1) == (b"a", b"a2", 2)
+ assert r.bzpopmax(["b", "a"], timeout=1) == (b"a", b"a1", 1)
+ assert r.bzpopmax(["b", "a"], timeout=1) is None
+ r.zadd("c", {"c1": 100})
+ assert r.bzpopmax("c", timeout=1) == (b"c", b"c1", 100)
+
+ @pytest.mark.onlynoncluster
+ @skip_if_server_version_lt("4.9.0")
def test_bzpopmin(self, r):
- r.zadd('a', {'a1': 1, 'a2': 2})
- r.zadd('b', {'b1': 10, 'b2': 20})
- assert r.bzpopmin(['b', 'a'], timeout=1) == (b'b', b'b1', 10)
- assert r.bzpopmin(['b', 'a'], timeout=1) == (b'b', b'b2', 20)
- assert r.bzpopmin(['b', 'a'], timeout=1) == (b'a', b'a1', 1)
- assert r.bzpopmin(['b', 'a'], timeout=1) == (b'a', b'a2', 2)
- assert r.bzpopmin(['b', 'a'], timeout=1) is None
- r.zadd('c', {'c1': 100})
- assert r.bzpopmin('c', timeout=1) == (b'c', b'c1', 100)
+ r.zadd("a", {"a1": 1, "a2": 2})
+ r.zadd("b", {"b1": 10, "b2": 20})
+ assert r.bzpopmin(["b", "a"], timeout=1) == (b"b", b"b1", 10)
+ assert r.bzpopmin(["b", "a"], timeout=1) == (b"b", b"b2", 20)
+ assert r.bzpopmin(["b", "a"], timeout=1) == (b"a", b"a1", 1)
+ assert r.bzpopmin(["b", "a"], timeout=1) == (b"a", b"a2", 2)
+ assert r.bzpopmin(["b", "a"], timeout=1) is None
+ r.zadd("c", {"c1": 100})
+ assert r.bzpopmin("c", timeout=1) == (b"c", b"c1", 100)
def test_zrange(self, r):
- r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3})
- assert r.zrange('a', 0, 1) == [b'a1', b'a2']
- assert r.zrange('a', 1, 2) == [b'a2', b'a3']
- assert r.zrange('a', 0, 2) == [b'a1', b'a2', b'a3']
- assert r.zrange('a', 0, 2, desc=True) == [b'a3', b'a2', b'a1']
+ r.zadd("a", {"a1": 1, "a2": 2, "a3": 3})
+ assert r.zrange("a", 0, 1) == [b"a1", b"a2"]
+ assert r.zrange("a", 1, 2) == [b"a2", b"a3"]
+ assert r.zrange("a", 0, 2) == [b"a1", b"a2", b"a3"]
+ assert r.zrange("a", 0, 2, desc=True) == [b"a3", b"a2", b"a1"]
# withscores
- assert r.zrange('a', 0, 1, withscores=True) == \
- [(b'a1', 1.0), (b'a2', 2.0)]
- assert r.zrange('a', 1, 2, withscores=True) == \
- [(b'a2', 2.0), (b'a3', 3.0)]
+ assert r.zrange("a", 0, 1, withscores=True) == [(b"a1", 1.0), (b"a2", 2.0)]
+ assert r.zrange("a", 1, 2, withscores=True) == [(b"a2", 2.0), (b"a3", 3.0)]
# custom score function
- assert r.zrange('a', 0, 1, withscores=True, score_cast_func=int) == \
- [(b'a1', 1), (b'a2', 2)]
+ assert r.zrange("a", 0, 1, withscores=True, score_cast_func=int) == [
+ (b"a1", 1),
+ (b"a2", 2),
+ ]
def test_zrange_errors(self, r):
with pytest.raises(exceptions.DataError):
- r.zrange('a', 0, 1, byscore=True, bylex=True)
+ r.zrange("a", 0, 1, byscore=True, bylex=True)
with pytest.raises(exceptions.DataError):
- r.zrange('a', 0, 1, bylex=True, withscores=True)
+ r.zrange("a", 0, 1, bylex=True, withscores=True)
with pytest.raises(exceptions.DataError):
- r.zrange('a', 0, 1, byscore=True, withscores=True, offset=4)
+ r.zrange("a", 0, 1, byscore=True, withscores=True, offset=4)
with pytest.raises(exceptions.DataError):
- r.zrange('a', 0, 1, byscore=True, withscores=True, num=2)
+ r.zrange("a", 0, 1, byscore=True, withscores=True, num=2)
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_zrange_params(self, r):
# bylex
- r.zadd('a', {'a': 0, 'b': 0, 'c': 0, 'd': 0, 'e': 0, 'f': 0, 'g': 0})
- assert r.zrange('a', '[aaa', '(g', bylex=True) == \
- [b'b', b'c', b'd', b'e', b'f']
- assert r.zrange('a', '[f', '+', bylex=True) == [b'f', b'g']
- assert r.zrange('a', '+', '[f', desc=True, bylex=True) == [b'g', b'f']
- assert r.zrange('a', '-', '+', bylex=True, offset=3, num=2) == \
- [b'd', b'e']
- assert r.zrange('a', '+', '-', desc=True, bylex=True,
- offset=3, num=2) == \
- [b'd', b'c']
+ r.zadd("a", {"a": 0, "b": 0, "c": 0, "d": 0, "e": 0, "f": 0, "g": 0})
+ assert r.zrange("a", "[aaa", "(g", bylex=True) == [b"b", b"c", b"d", b"e", b"f"]
+ assert r.zrange("a", "[f", "+", bylex=True) == [b"f", b"g"]
+ assert r.zrange("a", "+", "[f", desc=True, bylex=True) == [b"g", b"f"]
+ assert r.zrange("a", "-", "+", bylex=True, offset=3, num=2) == [b"d", b"e"]
+ assert r.zrange("a", "+", "-", desc=True, bylex=True, offset=3, num=2) == [
+ b"d",
+ b"c",
+ ]
# byscore
- r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3, 'a4': 4, 'a5': 5})
- assert r.zrange('a', 2, 4, byscore=True, offset=1, num=2) == \
- [b'a3', b'a4']
- assert r.zrange('a', 4, 2, desc=True, byscore=True,
- offset=1, num=2) == \
- [b'a3', b'a2']
- assert r.zrange('a', 2, 4, byscore=True, withscores=True) == \
- [(b'a2', 2.0), (b'a3', 3.0), (b'a4', 4.0)]
- assert r.zrange('a', 4, 2, desc=True, byscore=True,
- withscores=True, score_cast_func=int) == \
- [(b'a4', 4), (b'a3', 3), (b'a2', 2)]
+ r.zadd("a", {"a1": 1, "a2": 2, "a3": 3, "a4": 4, "a5": 5})
+ assert r.zrange("a", 2, 4, byscore=True, offset=1, num=2) == [b"a3", b"a4"]
+ assert r.zrange("a", 4, 2, desc=True, byscore=True, offset=1, num=2) == [
+ b"a3",
+ b"a2",
+ ]
+ assert r.zrange("a", 2, 4, byscore=True, withscores=True) == [
+ (b"a2", 2.0),
+ (b"a3", 3.0),
+ (b"a4", 4.0),
+ ]
+ assert r.zrange(
+ "a", 4, 2, desc=True, byscore=True, withscores=True, score_cast_func=int
+ ) == [(b"a4", 4), (b"a3", 3), (b"a2", 2)]
# rev
- assert r.zrange('a', 0, 1, desc=True) == [b'a5', b'a4']
+ assert r.zrange("a", 0, 1, desc=True) == [b"a5", b"a4"]
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_zrangestore(self, r):
- r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3})
- assert r.zrangestore('b', 'a', 0, 1)
- assert r.zrange('b', 0, -1) == [b'a1', b'a2']
- assert r.zrangestore('b', 'a', 1, 2)
- assert r.zrange('b', 0, -1) == [b'a2', b'a3']
- assert r.zrange('b', 0, -1, withscores=True) == \
- [(b'a2', 2), (b'a3', 3)]
+ r.zadd("a", {"a1": 1, "a2": 2, "a3": 3})
+ assert r.zrangestore("b", "a", 0, 1)
+ assert r.zrange("b", 0, -1) == [b"a1", b"a2"]
+ assert r.zrangestore("b", "a", 1, 2)
+ assert r.zrange("b", 0, -1) == [b"a2", b"a3"]
+ assert r.zrange("b", 0, -1, withscores=True) == [(b"a2", 2), (b"a3", 3)]
# reversed order
- assert r.zrangestore('b', 'a', 1, 2, desc=True)
- assert r.zrange('b', 0, -1) == [b'a1', b'a2']
+ assert r.zrangestore("b", "a", 1, 2, desc=True)
+ assert r.zrange("b", 0, -1) == [b"a1", b"a2"]
# by score
- assert r.zrangestore('b', 'a', 2, 1, byscore=True,
- offset=0, num=1, desc=True)
- assert r.zrange('b', 0, -1) == [b'a2']
+ assert r.zrangestore("b", "a", 2, 1, byscore=True, offset=0, num=1, desc=True)
+ assert r.zrange("b", 0, -1) == [b"a2"]
# by lex
- assert r.zrangestore('b', 'a', '[a2', '(a3', bylex=True,
- offset=0, num=1)
- assert r.zrange('b', 0, -1) == [b'a2']
+ assert r.zrangestore("b", "a", "[a2", "(a3", bylex=True, offset=0, num=1)
+ assert r.zrange("b", 0, -1) == [b"a2"]
- @skip_if_server_version_lt('2.8.9')
+ @skip_if_server_version_lt("2.8.9")
def test_zrangebylex(self, r):
- r.zadd('a', {'a': 0, 'b': 0, 'c': 0, 'd': 0, 'e': 0, 'f': 0, 'g': 0})
- assert r.zrangebylex('a', '-', '[c') == [b'a', b'b', b'c']
- assert r.zrangebylex('a', '-', '(c') == [b'a', b'b']
- assert r.zrangebylex('a', '[aaa', '(g') == \
- [b'b', b'c', b'd', b'e', b'f']
- assert r.zrangebylex('a', '[f', '+') == [b'f', b'g']
- assert r.zrangebylex('a', '-', '+', start=3, num=2) == [b'd', b'e']
-
- @skip_if_server_version_lt('2.9.9')
+ r.zadd("a", {"a": 0, "b": 0, "c": 0, "d": 0, "e": 0, "f": 0, "g": 0})
+ assert r.zrangebylex("a", "-", "[c") == [b"a", b"b", b"c"]
+ assert r.zrangebylex("a", "-", "(c") == [b"a", b"b"]
+ assert r.zrangebylex("a", "[aaa", "(g") == [b"b", b"c", b"d", b"e", b"f"]
+ assert r.zrangebylex("a", "[f", "+") == [b"f", b"g"]
+ assert r.zrangebylex("a", "-", "+", start=3, num=2) == [b"d", b"e"]
+
+ @skip_if_server_version_lt("2.9.9")
def test_zrevrangebylex(self, r):
- r.zadd('a', {'a': 0, 'b': 0, 'c': 0, 'd': 0, 'e': 0, 'f': 0, 'g': 0})
- assert r.zrevrangebylex('a', '[c', '-') == [b'c', b'b', b'a']
- assert r.zrevrangebylex('a', '(c', '-') == [b'b', b'a']
- assert r.zrevrangebylex('a', '(g', '[aaa') == \
- [b'f', b'e', b'd', b'c', b'b']
- assert r.zrevrangebylex('a', '+', '[f') == [b'g', b'f']
- assert r.zrevrangebylex('a', '+', '-', start=3, num=2) == \
- [b'd', b'c']
+ r.zadd("a", {"a": 0, "b": 0, "c": 0, "d": 0, "e": 0, "f": 0, "g": 0})
+ assert r.zrevrangebylex("a", "[c", "-") == [b"c", b"b", b"a"]
+ assert r.zrevrangebylex("a", "(c", "-") == [b"b", b"a"]
+ assert r.zrevrangebylex("a", "(g", "[aaa") == [b"f", b"e", b"d", b"c", b"b"]
+ assert r.zrevrangebylex("a", "+", "[f") == [b"g", b"f"]
+ assert r.zrevrangebylex("a", "+", "-", start=3, num=2) == [b"d", b"c"]
def test_zrangebyscore(self, r):
- r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3, 'a4': 4, 'a5': 5})
- assert r.zrangebyscore('a', 2, 4) == [b'a2', b'a3', b'a4']
+ r.zadd("a", {"a1": 1, "a2": 2, "a3": 3, "a4": 4, "a5": 5})
+ assert r.zrangebyscore("a", 2, 4) == [b"a2", b"a3", b"a4"]
# slicing with start/num
- assert r.zrangebyscore('a', 2, 4, start=1, num=2) == \
- [b'a3', b'a4']
+ assert r.zrangebyscore("a", 2, 4, start=1, num=2) == [b"a3", b"a4"]
# withscores
- assert r.zrangebyscore('a', 2, 4, withscores=True) == \
- [(b'a2', 2.0), (b'a3', 3.0), (b'a4', 4.0)]
- assert r.zrangebyscore('a', 2, 4, withscores=True,
- score_cast_func=int) == \
- [(b'a2', 2), (b'a3', 3), (b'a4', 4)]
+ assert r.zrangebyscore("a", 2, 4, withscores=True) == [
+ (b"a2", 2.0),
+ (b"a3", 3.0),
+ (b"a4", 4.0),
+ ]
+ assert r.zrangebyscore("a", 2, 4, withscores=True, score_cast_func=int) == [
+ (b"a2", 2),
+ (b"a3", 3),
+ (b"a4", 4),
+ ]
def test_zrank(self, r):
- r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3, 'a4': 4, 'a5': 5})
- assert r.zrank('a', 'a1') == 0
- assert r.zrank('a', 'a2') == 1
- assert r.zrank('a', 'a6') is None
+ r.zadd("a", {"a1": 1, "a2": 2, "a3": 3, "a4": 4, "a5": 5})
+ assert r.zrank("a", "a1") == 0
+ assert r.zrank("a", "a2") == 1
+ assert r.zrank("a", "a6") is None
def test_zrem(self, r):
- r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3})
- assert r.zrem('a', 'a2') == 1
- assert r.zrange('a', 0, -1) == [b'a1', b'a3']
- assert r.zrem('a', 'b') == 0
- assert r.zrange('a', 0, -1) == [b'a1', b'a3']
+ r.zadd("a", {"a1": 1, "a2": 2, "a3": 3})
+ assert r.zrem("a", "a2") == 1
+ assert r.zrange("a", 0, -1) == [b"a1", b"a3"]
+ assert r.zrem("a", "b") == 0
+ assert r.zrange("a", 0, -1) == [b"a1", b"a3"]
def test_zrem_multiple_keys(self, r):
- r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3})
- assert r.zrem('a', 'a1', 'a2') == 2
- assert r.zrange('a', 0, 5) == [b'a3']
+ r.zadd("a", {"a1": 1, "a2": 2, "a3": 3})
+ assert r.zrem("a", "a1", "a2") == 2
+ assert r.zrange("a", 0, 5) == [b"a3"]
- @skip_if_server_version_lt('2.8.9')
+ @skip_if_server_version_lt("2.8.9")
def test_zremrangebylex(self, r):
- r.zadd('a', {'a': 0, 'b': 0, 'c': 0, 'd': 0, 'e': 0, 'f': 0, 'g': 0})
- assert r.zremrangebylex('a', '-', '[c') == 3
- assert r.zrange('a', 0, -1) == [b'd', b'e', b'f', b'g']
- assert r.zremrangebylex('a', '[f', '+') == 2
- assert r.zrange('a', 0, -1) == [b'd', b'e']
- assert r.zremrangebylex('a', '[h', '+') == 0
- assert r.zrange('a', 0, -1) == [b'd', b'e']
+ r.zadd("a", {"a": 0, "b": 0, "c": 0, "d": 0, "e": 0, "f": 0, "g": 0})
+ assert r.zremrangebylex("a", "-", "[c") == 3
+ assert r.zrange("a", 0, -1) == [b"d", b"e", b"f", b"g"]
+ assert r.zremrangebylex("a", "[f", "+") == 2
+ assert r.zrange("a", 0, -1) == [b"d", b"e"]
+ assert r.zremrangebylex("a", "[h", "+") == 0
+ assert r.zrange("a", 0, -1) == [b"d", b"e"]
def test_zremrangebyrank(self, r):
- r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3, 'a4': 4, 'a5': 5})
- assert r.zremrangebyrank('a', 1, 3) == 3
- assert r.zrange('a', 0, 5) == [b'a1', b'a5']
+ r.zadd("a", {"a1": 1, "a2": 2, "a3": 3, "a4": 4, "a5": 5})
+ assert r.zremrangebyrank("a", 1, 3) == 3
+ assert r.zrange("a", 0, 5) == [b"a1", b"a5"]
def test_zremrangebyscore(self, r):
- r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3, 'a4': 4, 'a5': 5})
- assert r.zremrangebyscore('a', 2, 4) == 3
- assert r.zrange('a', 0, -1) == [b'a1', b'a5']
- assert r.zremrangebyscore('a', 2, 4) == 0
- assert r.zrange('a', 0, -1) == [b'a1', b'a5']
+ r.zadd("a", {"a1": 1, "a2": 2, "a3": 3, "a4": 4, "a5": 5})
+ assert r.zremrangebyscore("a", 2, 4) == 3
+ assert r.zrange("a", 0, -1) == [b"a1", b"a5"]
+ assert r.zremrangebyscore("a", 2, 4) == 0
+ assert r.zrange("a", 0, -1) == [b"a1", b"a5"]
def test_zrevrange(self, r):
- r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3})
- assert r.zrevrange('a', 0, 1) == [b'a3', b'a2']
- assert r.zrevrange('a', 1, 2) == [b'a2', b'a1']
+ r.zadd("a", {"a1": 1, "a2": 2, "a3": 3})
+ assert r.zrevrange("a", 0, 1) == [b"a3", b"a2"]
+ assert r.zrevrange("a", 1, 2) == [b"a2", b"a1"]
# withscores
- assert r.zrevrange('a', 0, 1, withscores=True) == \
- [(b'a3', 3.0), (b'a2', 2.0)]
- assert r.zrevrange('a', 1, 2, withscores=True) == \
- [(b'a2', 2.0), (b'a1', 1.0)]
+ assert r.zrevrange("a", 0, 1, withscores=True) == [(b"a3", 3.0), (b"a2", 2.0)]
+ assert r.zrevrange("a", 1, 2, withscores=True) == [(b"a2", 2.0), (b"a1", 1.0)]
# custom score function
- assert r.zrevrange('a', 0, 1, withscores=True,
- score_cast_func=int) == \
- [(b'a3', 3.0), (b'a2', 2.0)]
+ assert r.zrevrange("a", 0, 1, withscores=True, score_cast_func=int) == [
+ (b"a3", 3.0),
+ (b"a2", 2.0),
+ ]
def test_zrevrangebyscore(self, r):
- r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3, 'a4': 4, 'a5': 5})
- assert r.zrevrangebyscore('a', 4, 2) == [b'a4', b'a3', b'a2']
+ r.zadd("a", {"a1": 1, "a2": 2, "a3": 3, "a4": 4, "a5": 5})
+ assert r.zrevrangebyscore("a", 4, 2) == [b"a4", b"a3", b"a2"]
# slicing with start/num
- assert r.zrevrangebyscore('a', 4, 2, start=1, num=2) == \
- [b'a3', b'a2']
+ assert r.zrevrangebyscore("a", 4, 2, start=1, num=2) == [b"a3", b"a2"]
# withscores
- assert r.zrevrangebyscore('a', 4, 2, withscores=True) == \
- [(b'a4', 4.0), (b'a3', 3.0), (b'a2', 2.0)]
+ assert r.zrevrangebyscore("a", 4, 2, withscores=True) == [
+ (b"a4", 4.0),
+ (b"a3", 3.0),
+ (b"a2", 2.0),
+ ]
# custom score function
- assert r.zrevrangebyscore('a', 4, 2, withscores=True,
- score_cast_func=int) == \
- [(b'a4', 4), (b'a3', 3), (b'a2', 2)]
+ assert r.zrevrangebyscore("a", 4, 2, withscores=True, score_cast_func=int) == [
+ (b"a4", 4),
+ (b"a3", 3),
+ (b"a2", 2),
+ ]
def test_zrevrank(self, r):
- r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3, 'a4': 4, 'a5': 5})
- assert r.zrevrank('a', 'a1') == 4
- assert r.zrevrank('a', 'a2') == 3
- assert r.zrevrank('a', 'a6') is None
+ r.zadd("a", {"a1": 1, "a2": 2, "a3": 3, "a4": 4, "a5": 5})
+ assert r.zrevrank("a", "a1") == 4
+ assert r.zrevrank("a", "a2") == 3
+ assert r.zrevrank("a", "a6") is None
def test_zscore(self, r):
- r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3})
- assert r.zscore('a', 'a1') == 1.0
- assert r.zscore('a', 'a2') == 2.0
- assert r.zscore('a', 'a4') is None
+ r.zadd("a", {"a1": 1, "a2": 2, "a3": 3})
+ assert r.zscore("a", "a1") == 1.0
+ assert r.zscore("a", "a2") == 2.0
+ assert r.zscore("a", "a4") is None
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_zunion(self, r):
- r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1})
- r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2})
- r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4})
+ r.zadd("a", {"a1": 1, "a2": 1, "a3": 1})
+ r.zadd("b", {"a1": 2, "a2": 2, "a3": 2})
+ r.zadd("c", {"a1": 6, "a3": 5, "a4": 4})
# sum
- assert r.zunion(['a', 'b', 'c']) == \
- [b'a2', b'a4', b'a3', b'a1']
- assert r.zunion(['a', 'b', 'c'], withscores=True) == \
- [(b'a2', 3), (b'a4', 4), (b'a3', 8), (b'a1', 9)]
+ assert r.zunion(["a", "b", "c"]) == [b"a2", b"a4", b"a3", b"a1"]
+ assert r.zunion(["a", "b", "c"], withscores=True) == [
+ (b"a2", 3),
+ (b"a4", 4),
+ (b"a3", 8),
+ (b"a1", 9),
+ ]
# max
- assert r.zunion(['a', 'b', 'c'], aggregate='MAX', withscores=True)\
- == [(b'a2', 2), (b'a4', 4), (b'a3', 5), (b'a1', 6)]
+ assert r.zunion(["a", "b", "c"], aggregate="MAX", withscores=True) == [
+ (b"a2", 2),
+ (b"a4", 4),
+ (b"a3", 5),
+ (b"a1", 6),
+ ]
# min
- assert r.zunion(['a', 'b', 'c'], aggregate='MIN', withscores=True)\
- == [(b'a1', 1), (b'a2', 1), (b'a3', 1), (b'a4', 4)]
+ assert r.zunion(["a", "b", "c"], aggregate="MIN", withscores=True) == [
+ (b"a1", 1),
+ (b"a2", 1),
+ (b"a3", 1),
+ (b"a4", 4),
+ ]
# with weight
- assert r.zunion({'a': 1, 'b': 2, 'c': 3}, withscores=True)\
- == [(b'a2', 5), (b'a4', 12), (b'a3', 20), (b'a1', 23)]
+ assert r.zunion({"a": 1, "b": 2, "c": 3}, withscores=True) == [
+ (b"a2", 5),
+ (b"a4", 12),
+ (b"a3", 20),
+ (b"a1", 23),
+ ]
@pytest.mark.onlynoncluster
def test_zunionstore_sum(self, r):
- r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1})
- r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2})
- r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4})
- assert r.zunionstore('d', ['a', 'b', 'c']) == 4
- assert r.zrange('d', 0, -1, withscores=True) == \
- [(b'a2', 3), (b'a4', 4), (b'a3', 8), (b'a1', 9)]
+ r.zadd("a", {"a1": 1, "a2": 1, "a3": 1})
+ r.zadd("b", {"a1": 2, "a2": 2, "a3": 2})
+ r.zadd("c", {"a1": 6, "a3": 5, "a4": 4})
+ assert r.zunionstore("d", ["a", "b", "c"]) == 4
+ assert r.zrange("d", 0, -1, withscores=True) == [
+ (b"a2", 3),
+ (b"a4", 4),
+ (b"a3", 8),
+ (b"a1", 9),
+ ]
@pytest.mark.onlynoncluster
def test_zunionstore_max(self, r):
- r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1})
- r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2})
- r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4})
- assert r.zunionstore('d', ['a', 'b', 'c'], aggregate='MAX') == 4
- assert r.zrange('d', 0, -1, withscores=True) == \
- [(b'a2', 2), (b'a4', 4), (b'a3', 5), (b'a1', 6)]
+ r.zadd("a", {"a1": 1, "a2": 1, "a3": 1})
+ r.zadd("b", {"a1": 2, "a2": 2, "a3": 2})
+ r.zadd("c", {"a1": 6, "a3": 5, "a4": 4})
+ assert r.zunionstore("d", ["a", "b", "c"], aggregate="MAX") == 4
+ assert r.zrange("d", 0, -1, withscores=True) == [
+ (b"a2", 2),
+ (b"a4", 4),
+ (b"a3", 5),
+ (b"a1", 6),
+ ]
@pytest.mark.onlynoncluster
def test_zunionstore_min(self, r):
- r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3})
- r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 4})
- r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4})
- assert r.zunionstore('d', ['a', 'b', 'c'], aggregate='MIN') == 4
- assert r.zrange('d', 0, -1, withscores=True) == \
- [(b'a1', 1), (b'a2', 2), (b'a3', 3), (b'a4', 4)]
+ r.zadd("a", {"a1": 1, "a2": 2, "a3": 3})
+ r.zadd("b", {"a1": 2, "a2": 2, "a3": 4})
+ r.zadd("c", {"a1": 6, "a3": 5, "a4": 4})
+ assert r.zunionstore("d", ["a", "b", "c"], aggregate="MIN") == 4
+ assert r.zrange("d", 0, -1, withscores=True) == [
+ (b"a1", 1),
+ (b"a2", 2),
+ (b"a3", 3),
+ (b"a4", 4),
+ ]
@pytest.mark.onlynoncluster
def test_zunionstore_with_weight(self, r):
- r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1})
- r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2})
- r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4})
- assert r.zunionstore('d', {'a': 1, 'b': 2, 'c': 3}) == 4
- assert r.zrange('d', 0, -1, withscores=True) == \
- [(b'a2', 5), (b'a4', 12), (b'a3', 20), (b'a1', 23)]
-
- @skip_if_server_version_lt('6.1.240')
+ r.zadd("a", {"a1": 1, "a2": 1, "a3": 1})
+ r.zadd("b", {"a1": 2, "a2": 2, "a3": 2})
+ r.zadd("c", {"a1": 6, "a3": 5, "a4": 4})
+ assert r.zunionstore("d", {"a": 1, "b": 2, "c": 3}) == 4
+ assert r.zrange("d", 0, -1, withscores=True) == [
+ (b"a2", 5),
+ (b"a4", 12),
+ (b"a3", 20),
+ (b"a1", 23),
+ ]
+
+ @skip_if_server_version_lt("6.1.240")
def test_zmscore(self, r):
with pytest.raises(exceptions.DataError):
- r.zmscore('invalid_key', [])
+ r.zmscore("invalid_key", [])
- assert r.zmscore('invalid_key', ['invalid_member']) == [None]
+ assert r.zmscore("invalid_key", ["invalid_member"]) == [None]
- r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3.5})
- assert r.zmscore('a', ['a1', 'a2', 'a3', 'a4']) == \
- [1.0, 2.0, 3.5, None]
+ r.zadd("a", {"a1": 1, "a2": 2, "a3": 3.5})
+ assert r.zmscore("a", ["a1", "a2", "a3", "a4"]) == [1.0, 2.0, 3.5, None]
# HYPERLOGLOG TESTS
- @skip_if_server_version_lt('2.8.9')
+ @skip_if_server_version_lt("2.8.9")
def test_pfadd(self, r):
- members = {b'1', b'2', b'3'}
- assert r.pfadd('a', *members) == 1
- assert r.pfadd('a', *members) == 0
- assert r.pfcount('a') == len(members)
+ members = {b"1", b"2", b"3"}
+ assert r.pfadd("a", *members) == 1
+ assert r.pfadd("a", *members) == 0
+ assert r.pfcount("a") == len(members)
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('2.8.9')
+ @skip_if_server_version_lt("2.8.9")
def test_pfcount(self, r):
- members = {b'1', b'2', b'3'}
- r.pfadd('a', *members)
- assert r.pfcount('a') == len(members)
- members_b = {b'2', b'3', b'4'}
- r.pfadd('b', *members_b)
- assert r.pfcount('b') == len(members_b)
- assert r.pfcount('a', 'b') == len(members_b.union(members))
+ members = {b"1", b"2", b"3"}
+ r.pfadd("a", *members)
+ assert r.pfcount("a") == len(members)
+ members_b = {b"2", b"3", b"4"}
+ r.pfadd("b", *members_b)
+ assert r.pfcount("b") == len(members_b)
+ assert r.pfcount("a", "b") == len(members_b.union(members))
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('2.8.9')
+ @skip_if_server_version_lt("2.8.9")
def test_pfmerge(self, r):
- mema = {b'1', b'2', b'3'}
- memb = {b'2', b'3', b'4'}
- memc = {b'5', b'6', b'7'}
- r.pfadd('a', *mema)
- r.pfadd('b', *memb)
- r.pfadd('c', *memc)
- r.pfmerge('d', 'c', 'a')
- assert r.pfcount('d') == 6
- r.pfmerge('d', 'b')
- assert r.pfcount('d') == 7
+ mema = {b"1", b"2", b"3"}
+ memb = {b"2", b"3", b"4"}
+ memc = {b"5", b"6", b"7"}
+ r.pfadd("a", *mema)
+ r.pfadd("b", *memb)
+ r.pfadd("c", *memc)
+ r.pfmerge("d", "c", "a")
+ assert r.pfcount("d") == 6
+ r.pfmerge("d", "b")
+ assert r.pfcount("d") == 7
# HASH COMMANDS
def test_hget_and_hset(self, r):
- r.hset('a', mapping={'1': 1, '2': 2, '3': 3})
- assert r.hget('a', '1') == b'1'
- assert r.hget('a', '2') == b'2'
- assert r.hget('a', '3') == b'3'
+ r.hset("a", mapping={"1": 1, "2": 2, "3": 3})
+ assert r.hget("a", "1") == b"1"
+ assert r.hget("a", "2") == b"2"
+ assert r.hget("a", "3") == b"3"
# field was updated, redis returns 0
- assert r.hset('a', '2', 5) == 0
- assert r.hget('a', '2') == b'5'
+ assert r.hset("a", "2", 5) == 0
+ assert r.hget("a", "2") == b"5"
# field is new, redis returns 1
- assert r.hset('a', '4', 4) == 1
- assert r.hget('a', '4') == b'4'
+ assert r.hset("a", "4", 4) == 1
+ assert r.hget("a", "4") == b"4"
# key inside of hash that doesn't exist returns null value
- assert r.hget('a', 'b') is None
+ assert r.hget("a", "b") is None
# keys with bool(key) == False
- assert r.hset('a', 0, 10) == 1
- assert r.hset('a', '', 10) == 1
+ assert r.hset("a", 0, 10) == 1
+ assert r.hset("a", "", 10) == 1
def test_hset_with_multi_key_values(self, r):
- r.hset('a', mapping={'1': 1, '2': 2, '3': 3})
- assert r.hget('a', '1') == b'1'
- assert r.hget('a', '2') == b'2'
- assert r.hget('a', '3') == b'3'
+ r.hset("a", mapping={"1": 1, "2": 2, "3": 3})
+ assert r.hget("a", "1") == b"1"
+ assert r.hget("a", "2") == b"2"
+ assert r.hget("a", "3") == b"3"
- r.hset('b', "foo", "bar", mapping={'1': 1, '2': 2})
- assert r.hget('b', '1') == b'1'
- assert r.hget('b', '2') == b'2'
- assert r.hget('b', 'foo') == b'bar'
+ r.hset("b", "foo", "bar", mapping={"1": 1, "2": 2})
+ assert r.hget("b", "1") == b"1"
+ assert r.hget("b", "2") == b"2"
+ assert r.hget("b", "foo") == b"bar"
def test_hset_without_data(self, r):
with pytest.raises(exceptions.DataError):
r.hset("x")
def test_hdel(self, r):
- r.hset('a', mapping={'1': 1, '2': 2, '3': 3})
- assert r.hdel('a', '2') == 1
- assert r.hget('a', '2') is None
- assert r.hdel('a', '1', '3') == 2
- assert r.hlen('a') == 0
+ r.hset("a", mapping={"1": 1, "2": 2, "3": 3})
+ assert r.hdel("a", "2") == 1
+ assert r.hget("a", "2") is None
+ assert r.hdel("a", "1", "3") == 2
+ assert r.hlen("a") == 0
def test_hexists(self, r):
- r.hset('a', mapping={'1': 1, '2': 2, '3': 3})
- assert r.hexists('a', '1')
- assert not r.hexists('a', '4')
+ r.hset("a", mapping={"1": 1, "2": 2, "3": 3})
+ assert r.hexists("a", "1")
+ assert not r.hexists("a", "4")
def test_hgetall(self, r):
- h = {b'a1': b'1', b'a2': b'2', b'a3': b'3'}
- r.hset('a', mapping=h)
- assert r.hgetall('a') == h
+ h = {b"a1": b"1", b"a2": b"2", b"a3": b"3"}
+ r.hset("a", mapping=h)
+ assert r.hgetall("a") == h
def test_hincrby(self, r):
- assert r.hincrby('a', '1') == 1
- assert r.hincrby('a', '1', amount=2) == 3
- assert r.hincrby('a', '1', amount=-2) == 1
+ assert r.hincrby("a", "1") == 1
+ assert r.hincrby("a", "1", amount=2) == 3
+ assert r.hincrby("a", "1", amount=-2) == 1
- @skip_if_server_version_lt('2.6.0')
+ @skip_if_server_version_lt("2.6.0")
def test_hincrbyfloat(self, r):
- assert r.hincrbyfloat('a', '1') == 1.0
- assert r.hincrbyfloat('a', '1') == 2.0
- assert r.hincrbyfloat('a', '1', 1.2) == 3.2
+ assert r.hincrbyfloat("a", "1") == 1.0
+ assert r.hincrbyfloat("a", "1") == 2.0
+ assert r.hincrbyfloat("a", "1", 1.2) == 3.2
def test_hkeys(self, r):
- h = {b'a1': b'1', b'a2': b'2', b'a3': b'3'}
- r.hset('a', mapping=h)
+ h = {b"a1": b"1", b"a2": b"2", b"a3": b"3"}
+ r.hset("a", mapping=h)
local_keys = list(h.keys())
- remote_keys = r.hkeys('a')
- assert (sorted(local_keys) == sorted(remote_keys))
+ remote_keys = r.hkeys("a")
+ assert sorted(local_keys) == sorted(remote_keys)
def test_hlen(self, r):
- r.hset('a', mapping={'1': 1, '2': 2, '3': 3})
- assert r.hlen('a') == 3
+ r.hset("a", mapping={"1": 1, "2": 2, "3": 3})
+ assert r.hlen("a") == 3
def test_hmget(self, r):
- assert r.hset('a', mapping={'a': 1, 'b': 2, 'c': 3})
- assert r.hmget('a', 'a', 'b', 'c') == [b'1', b'2', b'3']
+ assert r.hset("a", mapping={"a": 1, "b": 2, "c": 3})
+ assert r.hmget("a", "a", "b", "c") == [b"1", b"2", b"3"]
def test_hmset(self, r):
redis_class = type(r).__name__
- warning_message = (r'^{0}\.hmset\(\) is deprecated\. '
- r'Use {0}\.hset\(\) instead\.$'.format(redis_class))
- h = {b'a': b'1', b'b': b'2', b'c': b'3'}
+ warning_message = (
+ r"^{0}\.hmset\(\) is deprecated\. "
+ r"Use {0}\.hset\(\) instead\.$".format(redis_class)
+ )
+ h = {b"a": b"1", b"b": b"2", b"c": b"3"}
with pytest.warns(DeprecationWarning, match=warning_message):
- assert r.hmset('a', h)
- assert r.hgetall('a') == h
+ assert r.hmset("a", h)
+ assert r.hgetall("a") == h
def test_hsetnx(self, r):
# Initially set the hash field
- assert r.hsetnx('a', '1', 1)
- assert r.hget('a', '1') == b'1'
- assert not r.hsetnx('a', '1', 2)
- assert r.hget('a', '1') == b'1'
+ assert r.hsetnx("a", "1", 1)
+ assert r.hget("a", "1") == b"1"
+ assert not r.hsetnx("a", "1", 2)
+ assert r.hget("a", "1") == b"1"
def test_hvals(self, r):
- h = {b'a1': b'1', b'a2': b'2', b'a3': b'3'}
- r.hset('a', mapping=h)
+ h = {b"a1": b"1", b"a2": b"2", b"a3": b"3"}
+ r.hset("a", mapping=h)
local_vals = list(h.values())
- remote_vals = r.hvals('a')
+ remote_vals = r.hvals("a")
assert sorted(local_vals) == sorted(remote_vals)
- @skip_if_server_version_lt('3.2.0')
+ @skip_if_server_version_lt("3.2.0")
def test_hstrlen(self, r):
- r.hset('a', mapping={'1': '22', '2': '333'})
- assert r.hstrlen('a', '1') == 2
- assert r.hstrlen('a', '2') == 3
+ r.hset("a", mapping={"1": "22", "2": "333"})
+ assert r.hstrlen("a", "1") == 2
+ assert r.hstrlen("a", "2") == 3
# SORT
def test_sort_basic(self, r):
- r.rpush('a', '3', '2', '1', '4')
- assert r.sort('a') == [b'1', b'2', b'3', b'4']
+ r.rpush("a", "3", "2", "1", "4")
+ assert r.sort("a") == [b"1", b"2", b"3", b"4"]
def test_sort_limited(self, r):
- r.rpush('a', '3', '2', '1', '4')
- assert r.sort('a', start=1, num=2) == [b'2', b'3']
+ r.rpush("a", "3", "2", "1", "4")
+ assert r.sort("a", start=1, num=2) == [b"2", b"3"]
@pytest.mark.onlynoncluster
def test_sort_by(self, r):
- r['score:1'] = 8
- r['score:2'] = 3
- r['score:3'] = 5
- r.rpush('a', '3', '2', '1')
- assert r.sort('a', by='score:*') == [b'2', b'3', b'1']
+ r["score:1"] = 8
+ r["score:2"] = 3
+ r["score:3"] = 5
+ r.rpush("a", "3", "2", "1")
+ assert r.sort("a", by="score:*") == [b"2", b"3", b"1"]
@pytest.mark.onlynoncluster
def test_sort_get(self, r):
- r['user:1'] = 'u1'
- r['user:2'] = 'u2'
- r['user:3'] = 'u3'
- r.rpush('a', '2', '3', '1')
- assert r.sort('a', get='user:*') == [b'u1', b'u2', b'u3']
+ r["user:1"] = "u1"
+ r["user:2"] = "u2"
+ r["user:3"] = "u3"
+ r.rpush("a", "2", "3", "1")
+ assert r.sort("a", get="user:*") == [b"u1", b"u2", b"u3"]
@pytest.mark.onlynoncluster
def test_sort_get_multi(self, r):
- r['user:1'] = 'u1'
- r['user:2'] = 'u2'
- r['user:3'] = 'u3'
- r.rpush('a', '2', '3', '1')
- assert r.sort('a', get=('user:*', '#')) == \
- [b'u1', b'1', b'u2', b'2', b'u3', b'3']
+ r["user:1"] = "u1"
+ r["user:2"] = "u2"
+ r["user:3"] = "u3"
+ r.rpush("a", "2", "3", "1")
+ assert r.sort("a", get=("user:*", "#")) == [
+ b"u1",
+ b"1",
+ b"u2",
+ b"2",
+ b"u3",
+ b"3",
+ ]
@pytest.mark.onlynoncluster
def test_sort_get_groups_two(self, r):
- r['user:1'] = 'u1'
- r['user:2'] = 'u2'
- r['user:3'] = 'u3'
- r.rpush('a', '2', '3', '1')
- assert r.sort('a', get=('user:*', '#'), groups=True) == \
- [(b'u1', b'1'), (b'u2', b'2'), (b'u3', b'3')]
+ r["user:1"] = "u1"
+ r["user:2"] = "u2"
+ r["user:3"] = "u3"
+ r.rpush("a", "2", "3", "1")
+ assert r.sort("a", get=("user:*", "#"), groups=True) == [
+ (b"u1", b"1"),
+ (b"u2", b"2"),
+ (b"u3", b"3"),
+ ]
@pytest.mark.onlynoncluster
def test_sort_groups_string_get(self, r):
- r['user:1'] = 'u1'
- r['user:2'] = 'u2'
- r['user:3'] = 'u3'
- r.rpush('a', '2', '3', '1')
+ r["user:1"] = "u1"
+ r["user:2"] = "u2"
+ r["user:3"] = "u3"
+ r.rpush("a", "2", "3", "1")
with pytest.raises(exceptions.DataError):
- r.sort('a', get='user:*', groups=True)
+ r.sort("a", get="user:*", groups=True)
@pytest.mark.onlynoncluster
def test_sort_groups_just_one_get(self, r):
- r['user:1'] = 'u1'
- r['user:2'] = 'u2'
- r['user:3'] = 'u3'
- r.rpush('a', '2', '3', '1')
+ r["user:1"] = "u1"
+ r["user:2"] = "u2"
+ r["user:3"] = "u3"
+ r.rpush("a", "2", "3", "1")
with pytest.raises(exceptions.DataError):
- r.sort('a', get=['user:*'], groups=True)
+ r.sort("a", get=["user:*"], groups=True)
def test_sort_groups_no_get(self, r):
- r['user:1'] = 'u1'
- r['user:2'] = 'u2'
- r['user:3'] = 'u3'
- r.rpush('a', '2', '3', '1')
+ r["user:1"] = "u1"
+ r["user:2"] = "u2"
+ r["user:3"] = "u3"
+ r.rpush("a", "2", "3", "1")
with pytest.raises(exceptions.DataError):
- r.sort('a', groups=True)
+ r.sort("a", groups=True)
@pytest.mark.onlynoncluster
def test_sort_groups_three_gets(self, r):
- r['user:1'] = 'u1'
- r['user:2'] = 'u2'
- r['user:3'] = 'u3'
- r['door:1'] = 'd1'
- r['door:2'] = 'd2'
- r['door:3'] = 'd3'
- r.rpush('a', '2', '3', '1')
- assert r.sort('a', get=('user:*', 'door:*', '#'), groups=True) == \
- [
- (b'u1', b'd1', b'1'),
- (b'u2', b'd2', b'2'),
- (b'u3', b'd3', b'3')
- ]
+ r["user:1"] = "u1"
+ r["user:2"] = "u2"
+ r["user:3"] = "u3"
+ r["door:1"] = "d1"
+ r["door:2"] = "d2"
+ r["door:3"] = "d3"
+ r.rpush("a", "2", "3", "1")
+ assert r.sort("a", get=("user:*", "door:*", "#"), groups=True) == [
+ (b"u1", b"d1", b"1"),
+ (b"u2", b"d2", b"2"),
+ (b"u3", b"d3", b"3"),
+ ]
def test_sort_desc(self, r):
- r.rpush('a', '2', '3', '1')
- assert r.sort('a', desc=True) == [b'3', b'2', b'1']
+ r.rpush("a", "2", "3", "1")
+ assert r.sort("a", desc=True) == [b"3", b"2", b"1"]
def test_sort_alpha(self, r):
- r.rpush('a', 'e', 'c', 'b', 'd', 'a')
- assert r.sort('a', alpha=True) == \
- [b'a', b'b', b'c', b'd', b'e']
+ r.rpush("a", "e", "c", "b", "d", "a")
+ assert r.sort("a", alpha=True) == [b"a", b"b", b"c", b"d", b"e"]
@pytest.mark.onlynoncluster
def test_sort_store(self, r):
- r.rpush('a', '2', '3', '1')
- assert r.sort('a', store='sorted_values') == 3
- assert r.lrange('sorted_values', 0, -1) == [b'1', b'2', b'3']
+ r.rpush("a", "2", "3", "1")
+ assert r.sort("a", store="sorted_values") == 3
+ assert r.lrange("sorted_values", 0, -1) == [b"1", b"2", b"3"]
@pytest.mark.onlynoncluster
def test_sort_all_options(self, r):
- r['user:1:username'] = 'zeus'
- r['user:2:username'] = 'titan'
- r['user:3:username'] = 'hermes'
- r['user:4:username'] = 'hercules'
- r['user:5:username'] = 'apollo'
- r['user:6:username'] = 'athena'
- r['user:7:username'] = 'hades'
- r['user:8:username'] = 'dionysus'
-
- r['user:1:favorite_drink'] = 'yuengling'
- r['user:2:favorite_drink'] = 'rum'
- r['user:3:favorite_drink'] = 'vodka'
- r['user:4:favorite_drink'] = 'milk'
- r['user:5:favorite_drink'] = 'pinot noir'
- r['user:6:favorite_drink'] = 'water'
- r['user:7:favorite_drink'] = 'gin'
- r['user:8:favorite_drink'] = 'apple juice'
-
- r.rpush('gods', '5', '8', '3', '1', '2', '7', '6', '4')
- num = r.sort('gods', start=2, num=4, by='user:*:username',
- get='user:*:favorite_drink', desc=True, alpha=True,
- store='sorted')
+ r["user:1:username"] = "zeus"
+ r["user:2:username"] = "titan"
+ r["user:3:username"] = "hermes"
+ r["user:4:username"] = "hercules"
+ r["user:5:username"] = "apollo"
+ r["user:6:username"] = "athena"
+ r["user:7:username"] = "hades"
+ r["user:8:username"] = "dionysus"
+
+ r["user:1:favorite_drink"] = "yuengling"
+ r["user:2:favorite_drink"] = "rum"
+ r["user:3:favorite_drink"] = "vodka"
+ r["user:4:favorite_drink"] = "milk"
+ r["user:5:favorite_drink"] = "pinot noir"
+ r["user:6:favorite_drink"] = "water"
+ r["user:7:favorite_drink"] = "gin"
+ r["user:8:favorite_drink"] = "apple juice"
+
+ r.rpush("gods", "5", "8", "3", "1", "2", "7", "6", "4")
+ num = r.sort(
+ "gods",
+ start=2,
+ num=4,
+ by="user:*:username",
+ get="user:*:favorite_drink",
+ desc=True,
+ alpha=True,
+ store="sorted",
+ )
assert num == 4
- assert r.lrange('sorted', 0, 10) == \
- [b'vodka', b'milk', b'gin', b'apple juice']
+ assert r.lrange("sorted", 0, 10) == [b"vodka", b"milk", b"gin", b"apple juice"]
def test_sort_issue_924(self, r):
# Tests for issue https://github.com/andymccurdy/redis-py/issues/924
- r.execute_command('SADD', 'issue#924', 1)
- r.execute_command('SORT', 'issue#924')
+ r.execute_command("SADD", "issue#924", 1)
+ r.execute_command("SORT", "issue#924")
@pytest.mark.onlynoncluster
def test_cluster_addslots(self, mock_cluster_resp_ok):
- assert mock_cluster_resp_ok.cluster('ADDSLOTS', 1) is True
+ assert mock_cluster_resp_ok.cluster("ADDSLOTS", 1) is True
@pytest.mark.onlynoncluster
def test_cluster_count_failure_reports(self, mock_cluster_resp_int):
- assert isinstance(mock_cluster_resp_int.cluster(
- 'COUNT-FAILURE-REPORTS', 'node'), int)
+ assert isinstance(
+ mock_cluster_resp_int.cluster("COUNT-FAILURE-REPORTS", "node"), int
+ )
@pytest.mark.onlynoncluster
def test_cluster_countkeysinslot(self, mock_cluster_resp_int):
- assert isinstance(mock_cluster_resp_int.cluster(
- 'COUNTKEYSINSLOT', 2), int)
+ assert isinstance(mock_cluster_resp_int.cluster("COUNTKEYSINSLOT", 2), int)
@pytest.mark.onlynoncluster
def test_cluster_delslots(self, mock_cluster_resp_ok):
- assert mock_cluster_resp_ok.cluster('DELSLOTS', 1) is True
+ assert mock_cluster_resp_ok.cluster("DELSLOTS", 1) is True
@pytest.mark.onlynoncluster
def test_cluster_failover(self, mock_cluster_resp_ok):
- assert mock_cluster_resp_ok.cluster('FAILOVER', 1) is True
+ assert mock_cluster_resp_ok.cluster("FAILOVER", 1) is True
@pytest.mark.onlynoncluster
def test_cluster_forget(self, mock_cluster_resp_ok):
- assert mock_cluster_resp_ok.cluster('FORGET', 1) is True
+ assert mock_cluster_resp_ok.cluster("FORGET", 1) is True
@pytest.mark.onlynoncluster
def test_cluster_info(self, mock_cluster_resp_info):
- assert isinstance(mock_cluster_resp_info.cluster('info'), dict)
+ assert isinstance(mock_cluster_resp_info.cluster("info"), dict)
@pytest.mark.onlynoncluster
def test_cluster_keyslot(self, mock_cluster_resp_int):
- assert isinstance(mock_cluster_resp_int.cluster(
- 'keyslot', 'asdf'), int)
+ assert isinstance(mock_cluster_resp_int.cluster("keyslot", "asdf"), int)
@pytest.mark.onlynoncluster
def test_cluster_meet(self, mock_cluster_resp_ok):
- assert mock_cluster_resp_ok.cluster('meet', 'ip', 'port', 1) is True
+ assert mock_cluster_resp_ok.cluster("meet", "ip", "port", 1) is True
@pytest.mark.onlynoncluster
def test_cluster_nodes(self, mock_cluster_resp_nodes):
- assert isinstance(mock_cluster_resp_nodes.cluster('nodes'), dict)
+ assert isinstance(mock_cluster_resp_nodes.cluster("nodes"), dict)
@pytest.mark.onlynoncluster
def test_cluster_replicate(self, mock_cluster_resp_ok):
- assert mock_cluster_resp_ok.cluster('replicate', 'nodeid') is True
+ assert mock_cluster_resp_ok.cluster("replicate", "nodeid") is True
@pytest.mark.onlynoncluster
def test_cluster_reset(self, mock_cluster_resp_ok):
- assert mock_cluster_resp_ok.cluster('reset', 'hard') is True
+ assert mock_cluster_resp_ok.cluster("reset", "hard") is True
@pytest.mark.onlynoncluster
def test_cluster_saveconfig(self, mock_cluster_resp_ok):
- assert mock_cluster_resp_ok.cluster('saveconfig') is True
+ assert mock_cluster_resp_ok.cluster("saveconfig") is True
@pytest.mark.onlynoncluster
def test_cluster_setslot(self, mock_cluster_resp_ok):
- assert mock_cluster_resp_ok.cluster('setslot', 1,
- 'IMPORTING', 'nodeid') is True
+ assert mock_cluster_resp_ok.cluster("setslot", 1, "IMPORTING", "nodeid") is True
@pytest.mark.onlynoncluster
def test_cluster_slaves(self, mock_cluster_resp_slaves):
- assert isinstance(mock_cluster_resp_slaves.cluster(
- 'slaves', 'nodeid'), dict)
+ assert isinstance(mock_cluster_resp_slaves.cluster("slaves", "nodeid"), dict)
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('3.0.0')
+ @skip_if_server_version_lt("3.0.0")
@skip_if_redis_enterprise
def test_readwrite(self, r):
assert r.readwrite()
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('3.0.0')
+ @skip_if_server_version_lt("3.0.0")
def test_readonly_invalid_cluster_state(self, r):
with pytest.raises(exceptions.RedisError):
r.readonly()
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('3.0.0')
+ @skip_if_server_version_lt("3.0.0")
def test_readonly(self, mock_cluster_resp_ok):
assert mock_cluster_resp_ok.readonly() is True
# GEO COMMANDS
- @skip_if_server_version_lt('3.2.0')
+ @skip_if_server_version_lt("3.2.0")
def test_geoadd(self, r):
- values = (2.1909389952632, 41.433791470673, 'place1') + \
- (2.1873744593677, 41.406342043777, 'place2')
- assert r.geoadd('barcelona', values) == 2
- assert r.zcard('barcelona') == 2
+ values = (2.1909389952632, 41.433791470673, "place1") + (
+ 2.1873744593677,
+ 41.406342043777,
+ "place2",
+ )
+ assert r.geoadd("barcelona", values) == 2
+ assert r.zcard("barcelona") == 2
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_geoadd_nx(self, r):
- values = (2.1909389952632, 41.433791470673, 'place1') + \
- (2.1873744593677, 41.406342043777, 'place2')
- assert r.geoadd('a', values) == 2
- values = (2.1909389952632, 41.433791470673, 'place1') + \
- (2.1873744593677, 41.406342043777, 'place2') + \
- (2.1804738294738, 41.405647879212, 'place3')
- assert r.geoadd('a', values, nx=True) == 1
- assert r.zrange('a', 0, -1) == [b'place3', b'place2', b'place1']
-
- @skip_if_server_version_lt('6.2.0')
+ values = (2.1909389952632, 41.433791470673, "place1") + (
+ 2.1873744593677,
+ 41.406342043777,
+ "place2",
+ )
+ assert r.geoadd("a", values) == 2
+ values = (
+ (2.1909389952632, 41.433791470673, "place1")
+ + (2.1873744593677, 41.406342043777, "place2")
+ + (2.1804738294738, 41.405647879212, "place3")
+ )
+ assert r.geoadd("a", values, nx=True) == 1
+ assert r.zrange("a", 0, -1) == [b"place3", b"place2", b"place1"]
+
+ @skip_if_server_version_lt("6.2.0")
def test_geoadd_xx(self, r):
- values = (2.1909389952632, 41.433791470673, 'place1')
- assert r.geoadd('a', values) == 1
- values = (2.1909389952632, 41.433791470673, 'place1') + \
- (2.1873744593677, 41.406342043777, 'place2')
- assert r.geoadd('a', values, xx=True) == 0
- assert r.zrange('a', 0, -1) == \
- [b'place1']
-
- @skip_if_server_version_lt('6.2.0')
+ values = (2.1909389952632, 41.433791470673, "place1")
+ assert r.geoadd("a", values) == 1
+ values = (2.1909389952632, 41.433791470673, "place1") + (
+ 2.1873744593677,
+ 41.406342043777,
+ "place2",
+ )
+ assert r.geoadd("a", values, xx=True) == 0
+ assert r.zrange("a", 0, -1) == [b"place1"]
+
+ @skip_if_server_version_lt("6.2.0")
def test_geoadd_ch(self, r):
- values = (2.1909389952632, 41.433791470673, 'place1')
- assert r.geoadd('a', values) == 1
- values = (2.1909389952632, 31.433791470673, 'place1') + \
- (2.1873744593677, 41.406342043777, 'place2')
- assert r.geoadd('a', values, ch=True) == 2
- assert r.zrange('a', 0, -1) == \
- [b'place1', b'place2']
-
- @skip_if_server_version_lt('3.2.0')
+ values = (2.1909389952632, 41.433791470673, "place1")
+ assert r.geoadd("a", values) == 1
+ values = (2.1909389952632, 31.433791470673, "place1") + (
+ 2.1873744593677,
+ 41.406342043777,
+ "place2",
+ )
+ assert r.geoadd("a", values, ch=True) == 2
+ assert r.zrange("a", 0, -1) == [b"place1", b"place2"]
+
+ @skip_if_server_version_lt("3.2.0")
def test_geoadd_invalid_params(self, r):
with pytest.raises(exceptions.RedisError):
- r.geoadd('barcelona', (1, 2))
+ r.geoadd("barcelona", (1, 2))
- @skip_if_server_version_lt('3.2.0')
+ @skip_if_server_version_lt("3.2.0")
def test_geodist(self, r):
- values = (2.1909389952632, 41.433791470673, 'place1') + \
- (2.1873744593677, 41.406342043777, 'place2')
- assert r.geoadd('barcelona', values) == 2
- assert r.geodist('barcelona', 'place1', 'place2') == 3067.4157
+ values = (2.1909389952632, 41.433791470673, "place1") + (
+ 2.1873744593677,
+ 41.406342043777,
+ "place2",
+ )
+ assert r.geoadd("barcelona", values) == 2
+ assert r.geodist("barcelona", "place1", "place2") == 3067.4157
- @skip_if_server_version_lt('3.2.0')
+ @skip_if_server_version_lt("3.2.0")
def test_geodist_units(self, r):
- values = (2.1909389952632, 41.433791470673, 'place1') + \
- (2.1873744593677, 41.406342043777, 'place2')
- r.geoadd('barcelona', values)
- assert r.geodist('barcelona', 'place1', 'place2', 'km') == 3.0674
+ values = (2.1909389952632, 41.433791470673, "place1") + (
+ 2.1873744593677,
+ 41.406342043777,
+ "place2",
+ )
+ r.geoadd("barcelona", values)
+ assert r.geodist("barcelona", "place1", "place2", "km") == 3.0674
- @skip_if_server_version_lt('3.2.0')
+ @skip_if_server_version_lt("3.2.0")
def test_geodist_missing_one_member(self, r):
- values = (2.1909389952632, 41.433791470673, 'place1')
- r.geoadd('barcelona', values)
- assert r.geodist('barcelona', 'place1', 'missing_member', 'km') is None
+ values = (2.1909389952632, 41.433791470673, "place1")
+ r.geoadd("barcelona", values)
+ assert r.geodist("barcelona", "place1", "missing_member", "km") is None
- @skip_if_server_version_lt('3.2.0')
+ @skip_if_server_version_lt("3.2.0")
def test_geodist_invalid_units(self, r):
with pytest.raises(exceptions.RedisError):
- assert r.geodist('x', 'y', 'z', 'inches')
+ assert r.geodist("x", "y", "z", "inches")
- @skip_if_server_version_lt('3.2.0')
+ @skip_if_server_version_lt("3.2.0")
def test_geohash(self, r):
- values = (2.1909389952632, 41.433791470673, 'place1') + \
- (2.1873744593677, 41.406342043777, 'place2')
- r.geoadd('barcelona', values)
- assert r.geohash('barcelona', 'place1', 'place2', 'place3') == \
- ['sp3e9yg3kd0', 'sp3e9cbc3t0', None]
+ values = (2.1909389952632, 41.433791470673, "place1") + (
+ 2.1873744593677,
+ 41.406342043777,
+ "place2",
+ )
+ r.geoadd("barcelona", values)
+ assert r.geohash("barcelona", "place1", "place2", "place3") == [
+ "sp3e9yg3kd0",
+ "sp3e9cbc3t0",
+ None,
+ ]
@skip_unless_arch_bits(64)
- @skip_if_server_version_lt('3.2.0')
+ @skip_if_server_version_lt("3.2.0")
def test_geopos(self, r):
- values = (2.1909389952632, 41.433791470673, 'place1') + \
- (2.1873744593677, 41.406342043777, 'place2')
- r.geoadd('barcelona', values)
+ values = (2.1909389952632, 41.433791470673, "place1") + (
+ 2.1873744593677,
+ 41.406342043777,
+ "place2",
+ )
+ r.geoadd("barcelona", values)
+        # redis uses 52 bits precision, so small errors may be introduced.
- assert r.geopos('barcelona', 'place1', 'place2') == \
- [(2.19093829393386841, 41.43379028184083523),
- (2.18737632036209106, 41.40634178640635099)]
+ assert r.geopos("barcelona", "place1", "place2") == [
+ (2.19093829393386841, 41.43379028184083523),
+ (2.18737632036209106, 41.40634178640635099),
+ ]
- @skip_if_server_version_lt('4.0.0')
+ @skip_if_server_version_lt("4.0.0")
def test_geopos_no_value(self, r):
- assert r.geopos('barcelona', 'place1', 'place2') == [None, None]
+ assert r.geopos("barcelona", "place1", "place2") == [None, None]
- @skip_if_server_version_lt('3.2.0')
- @skip_if_server_version_gte('4.0.0')
+ @skip_if_server_version_lt("3.2.0")
+ @skip_if_server_version_gte("4.0.0")
def test_old_geopos_no_value(self, r):
- assert r.geopos('barcelona', 'place1', 'place2') == []
+ assert r.geopos("barcelona", "place1", "place2") == []
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_geosearch(self, r):
- values = (2.1909389952632, 41.433791470673, 'place1') + \
- (2.1873744593677, 41.406342043777, b'\x80place2') + \
- (2.583333, 41.316667, 'place3')
- r.geoadd('barcelona', values)
- assert r.geosearch('barcelona', longitude=2.191,
- latitude=41.433, radius=1000) == [b'place1']
- assert r.geosearch('barcelona', longitude=2.187,
- latitude=41.406, radius=1000) == [b'\x80place2']
- assert r.geosearch('barcelona', longitude=2.191, latitude=41.433,
- height=1000, width=1000) == [b'place1']
- assert r.geosearch('barcelona', member='place3', radius=100,
- unit='km') == [b'\x80place2', b'place1', b'place3']
+ values = (
+ (2.1909389952632, 41.433791470673, "place1")
+ + (2.1873744593677, 41.406342043777, b"\x80place2")
+ + (2.583333, 41.316667, "place3")
+ )
+ r.geoadd("barcelona", values)
+ assert r.geosearch(
+ "barcelona", longitude=2.191, latitude=41.433, radius=1000
+ ) == [b"place1"]
+ assert r.geosearch(
+ "barcelona", longitude=2.187, latitude=41.406, radius=1000
+ ) == [b"\x80place2"]
+ assert r.geosearch(
+ "barcelona", longitude=2.191, latitude=41.433, height=1000, width=1000
+ ) == [b"place1"]
+ assert r.geosearch("barcelona", member="place3", radius=100, unit="km") == [
+ b"\x80place2",
+ b"place1",
+ b"place3",
+ ]
# test count
- assert r.geosearch('barcelona', member='place3', radius=100,
- unit='km', count=2) == [b'place3', b'\x80place2']
- assert r.geosearch('barcelona', member='place3', radius=100,
- unit='km', count=1, any=1)[0] \
- in [b'place1', b'place3', b'\x80place2']
+ assert r.geosearch(
+ "barcelona", member="place3", radius=100, unit="km", count=2
+ ) == [b"place3", b"\x80place2"]
+ assert r.geosearch(
+ "barcelona", member="place3", radius=100, unit="km", count=1, any=1
+ )[0] in [b"place1", b"place3", b"\x80place2"]
@skip_unless_arch_bits(64)
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_geosearch_member(self, r):
- values = (2.1909389952632, 41.433791470673, 'place1') + \
- (2.1873744593677, 41.406342043777, b'\x80place2')
-
- r.geoadd('barcelona', values)
- assert r.geosearch('barcelona', member='place1', radius=4000) == \
- [b'\x80place2', b'place1']
- assert r.geosearch('barcelona', member='place1', radius=10) == \
- [b'place1']
-
- assert r.geosearch('barcelona', member='place1', radius=4000,
- withdist=True,
- withcoord=True,
- withhash=True) == \
- [[b'\x80place2', 3067.4157, 3471609625421029,
- (2.187376320362091, 41.40634178640635)],
- [b'place1', 0.0, 3471609698139488,
- (2.1909382939338684, 41.433790281840835)]]
-
- @skip_if_server_version_lt('6.2.0')
+ values = (2.1909389952632, 41.433791470673, "place1") + (
+ 2.1873744593677,
+ 41.406342043777,
+ b"\x80place2",
+ )
+
+ r.geoadd("barcelona", values)
+ assert r.geosearch("barcelona", member="place1", radius=4000) == [
+ b"\x80place2",
+ b"place1",
+ ]
+ assert r.geosearch("barcelona", member="place1", radius=10) == [b"place1"]
+
+ assert r.geosearch(
+ "barcelona",
+ member="place1",
+ radius=4000,
+ withdist=True,
+ withcoord=True,
+ withhash=True,
+ ) == [
+ [
+ b"\x80place2",
+ 3067.4157,
+ 3471609625421029,
+ (2.187376320362091, 41.40634178640635),
+ ],
+ [
+ b"place1",
+ 0.0,
+ 3471609698139488,
+ (2.1909382939338684, 41.433790281840835),
+ ],
+ ]
+
+ @skip_if_server_version_lt("6.2.0")
def test_geosearch_sort(self, r):
- values = (2.1909389952632, 41.433791470673, 'place1') + \
- (2.1873744593677, 41.406342043777, 'place2')
- r.geoadd('barcelona', values)
- assert r.geosearch('barcelona', longitude=2.191,
- latitude=41.433, radius=3000, sort='ASC') == \
- [b'place1', b'place2']
- assert r.geosearch('barcelona', longitude=2.191,
- latitude=41.433, radius=3000, sort='DESC') == \
- [b'place2', b'place1']
+ values = (2.1909389952632, 41.433791470673, "place1") + (
+ 2.1873744593677,
+ 41.406342043777,
+ "place2",
+ )
+ r.geoadd("barcelona", values)
+ assert r.geosearch(
+ "barcelona", longitude=2.191, latitude=41.433, radius=3000, sort="ASC"
+ ) == [b"place1", b"place2"]
+ assert r.geosearch(
+ "barcelona", longitude=2.191, latitude=41.433, radius=3000, sort="DESC"
+ ) == [b"place2", b"place1"]
@skip_unless_arch_bits(64)
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_geosearch_with(self, r):
- values = (2.1909389952632, 41.433791470673, 'place1') + \
- (2.1873744593677, 41.406342043777, 'place2')
- r.geoadd('barcelona', values)
+ values = (2.1909389952632, 41.433791470673, "place1") + (
+ 2.1873744593677,
+ 41.406342043777,
+ "place2",
+ )
+ r.geoadd("barcelona", values)
# test a bunch of combinations to test the parse response
# function.
- assert r.geosearch('barcelona', longitude=2.191, latitude=41.433,
- radius=1, unit='km', withdist=True,
- withcoord=True, withhash=True) == \
- [[b'place1', 0.0881, 3471609698139488,
- (2.19093829393386841, 41.43379028184083523)]]
- assert r.geosearch('barcelona', longitude=2.191, latitude=41.433,
- radius=1, unit='km',
- withdist=True, withcoord=True) == \
- [[b'place1', 0.0881,
- (2.19093829393386841, 41.43379028184083523)]]
- assert r.geosearch('barcelona', longitude=2.191, latitude=41.433,
- radius=1, unit='km',
- withhash=True, withcoord=True) == \
- [[b'place1', 3471609698139488,
- (2.19093829393386841, 41.43379028184083523)]]
+ assert r.geosearch(
+ "barcelona",
+ longitude=2.191,
+ latitude=41.433,
+ radius=1,
+ unit="km",
+ withdist=True,
+ withcoord=True,
+ withhash=True,
+ ) == [
+ [
+ b"place1",
+ 0.0881,
+ 3471609698139488,
+ (2.19093829393386841, 41.43379028184083523),
+ ]
+ ]
+ assert (
+ r.geosearch(
+ "barcelona",
+ longitude=2.191,
+ latitude=41.433,
+ radius=1,
+ unit="km",
+ withdist=True,
+ withcoord=True,
+ )
+ == [[b"place1", 0.0881, (2.19093829393386841, 41.43379028184083523)]]
+ )
+ assert r.geosearch(
+ "barcelona",
+ longitude=2.191,
+ latitude=41.433,
+ radius=1,
+ unit="km",
+ withhash=True,
+ withcoord=True,
+ ) == [
+ [b"place1", 3471609698139488, (2.19093829393386841, 41.43379028184083523)]
+ ]
# test no values.
- assert r.geosearch('barcelona', longitude=2, latitude=1,
- radius=1, unit='km', withdist=True,
- withcoord=True, withhash=True) == []
+ assert (
+ r.geosearch(
+ "barcelona",
+ longitude=2,
+ latitude=1,
+ radius=1,
+ unit="km",
+ withdist=True,
+ withcoord=True,
+ withhash=True,
+ )
+ == []
+ )
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_geosearch_negative(self, r):
# not specifying member nor longitude and latitude
with pytest.raises(exceptions.DataError):
- assert r.geosearch('barcelona')
+ assert r.geosearch("barcelona")
# specifying member and longitude and latitude
with pytest.raises(exceptions.DataError):
- assert r.geosearch('barcelona',
- member="Paris", longitude=2, latitude=1)
+ assert r.geosearch("barcelona", member="Paris", longitude=2, latitude=1)
# specifying one of longitude and latitude
with pytest.raises(exceptions.DataError):
- assert r.geosearch('barcelona', longitude=2)
+ assert r.geosearch("barcelona", longitude=2)
with pytest.raises(exceptions.DataError):
- assert r.geosearch('barcelona', latitude=2)
+ assert r.geosearch("barcelona", latitude=2)
# not specifying radius nor width and height
with pytest.raises(exceptions.DataError):
- assert r.geosearch('barcelona', member="Paris")
+ assert r.geosearch("barcelona", member="Paris")
# specifying radius and width and height
with pytest.raises(exceptions.DataError):
- assert r.geosearch('barcelona', member="Paris",
- radius=3, width=2, height=1)
+ assert r.geosearch("barcelona", member="Paris", radius=3, width=2, height=1)
# specifying one of width and height
with pytest.raises(exceptions.DataError):
- assert r.geosearch('barcelona', member="Paris", width=2)
+ assert r.geosearch("barcelona", member="Paris", width=2)
with pytest.raises(exceptions.DataError):
- assert r.geosearch('barcelona', member="Paris", height=2)
+ assert r.geosearch("barcelona", member="Paris", height=2)
# invalid sort
with pytest.raises(exceptions.DataError):
- assert r.geosearch('barcelona',
- member="Paris", width=2, height=2, sort="wrong")
+ assert r.geosearch(
+ "barcelona", member="Paris", width=2, height=2, sort="wrong"
+ )
# invalid unit
with pytest.raises(exceptions.DataError):
- assert r.geosearch('barcelona',
- member="Paris", width=2, height=2, unit="miles")
+ assert r.geosearch(
+ "barcelona", member="Paris", width=2, height=2, unit="miles"
+ )
# use any without count
with pytest.raises(exceptions.DataError):
- assert r.geosearch('barcelona', member='place3', radius=100, any=1)
+ assert r.geosearch("barcelona", member="place3", radius=100, any=1)
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_geosearchstore(self, r):
- values = (2.1909389952632, 41.433791470673, 'place1') + \
- (2.1873744593677, 41.406342043777, 'place2')
+ values = (2.1909389952632, 41.433791470673, "place1") + (
+ 2.1873744593677,
+ 41.406342043777,
+ "place2",
+ )
- r.geoadd('barcelona', values)
- r.geosearchstore('places_barcelona', 'barcelona',
- longitude=2.191, latitude=41.433, radius=1000)
- assert r.zrange('places_barcelona', 0, -1) == [b'place1']
+ r.geoadd("barcelona", values)
+ r.geosearchstore(
+ "places_barcelona",
+ "barcelona",
+ longitude=2.191,
+ latitude=41.433,
+ radius=1000,
+ )
+ assert r.zrange("places_barcelona", 0, -1) == [b"place1"]
@pytest.mark.onlynoncluster
@skip_unless_arch_bits(64)
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_geosearchstore_dist(self, r):
- values = (2.1909389952632, 41.433791470673, 'place1') + \
- (2.1873744593677, 41.406342043777, 'place2')
+ values = (2.1909389952632, 41.433791470673, "place1") + (
+ 2.1873744593677,
+ 41.406342043777,
+ "place2",
+ )
- r.geoadd('barcelona', values)
- r.geosearchstore('places_barcelona', 'barcelona',
- longitude=2.191, latitude=41.433,
- radius=1000, storedist=True)
+ r.geoadd("barcelona", values)
+ r.geosearchstore(
+ "places_barcelona",
+ "barcelona",
+ longitude=2.191,
+ latitude=41.433,
+ radius=1000,
+ storedist=True,
+ )
# instead of save the geo score, the distance is saved.
- assert r.zscore('places_barcelona', 'place1') == 88.05060698409301
+ assert r.zscore("places_barcelona", "place1") == 88.05060698409301
- @skip_if_server_version_lt('3.2.0')
+ @skip_if_server_version_lt("3.2.0")
def test_georadius(self, r):
- values = (2.1909389952632, 41.433791470673, 'place1') + \
- (2.1873744593677, 41.406342043777, b'\x80place2')
+ values = (2.1909389952632, 41.433791470673, "place1") + (
+ 2.1873744593677,
+ 41.406342043777,
+ b"\x80place2",
+ )
- r.geoadd('barcelona', values)
- assert r.georadius('barcelona', 2.191, 41.433, 1000) == [b'place1']
- assert r.georadius('barcelona', 2.187, 41.406, 1000) == [b'\x80place2']
+ r.geoadd("barcelona", values)
+ assert r.georadius("barcelona", 2.191, 41.433, 1000) == [b"place1"]
+ assert r.georadius("barcelona", 2.187, 41.406, 1000) == [b"\x80place2"]
- @skip_if_server_version_lt('3.2.0')
+ @skip_if_server_version_lt("3.2.0")
def test_georadius_no_values(self, r):
- values = (2.1909389952632, 41.433791470673, 'place1') + \
- (2.1873744593677, 41.406342043777, 'place2')
+ values = (2.1909389952632, 41.433791470673, "place1") + (
+ 2.1873744593677,
+ 41.406342043777,
+ "place2",
+ )
- r.geoadd('barcelona', values)
- assert r.georadius('barcelona', 1, 2, 1000) == []
+ r.geoadd("barcelona", values)
+ assert r.georadius("barcelona", 1, 2, 1000) == []
- @skip_if_server_version_lt('3.2.0')
+ @skip_if_server_version_lt("3.2.0")
def test_georadius_units(self, r):
- values = (2.1909389952632, 41.433791470673, 'place1') + \
- (2.1873744593677, 41.406342043777, 'place2')
+ values = (2.1909389952632, 41.433791470673, "place1") + (
+ 2.1873744593677,
+ 41.406342043777,
+ "place2",
+ )
- r.geoadd('barcelona', values)
- assert r.georadius('barcelona', 2.191, 41.433, 1, unit='km') == \
- [b'place1']
+ r.geoadd("barcelona", values)
+ assert r.georadius("barcelona", 2.191, 41.433, 1, unit="km") == [b"place1"]
@skip_unless_arch_bits(64)
- @skip_if_server_version_lt('3.2.0')
+ @skip_if_server_version_lt("3.2.0")
def test_georadius_with(self, r):
- values = (2.1909389952632, 41.433791470673, 'place1') + \
- (2.1873744593677, 41.406342043777, 'place2')
+ values = (2.1909389952632, 41.433791470673, "place1") + (
+ 2.1873744593677,
+ 41.406342043777,
+ "place2",
+ )
- r.geoadd('barcelona', values)
+ r.geoadd("barcelona", values)
# test a bunch of combinations to test the parse response
# function.
- assert r.georadius('barcelona', 2.191, 41.433, 1, unit='km',
- withdist=True, withcoord=True, withhash=True) == \
- [[b'place1', 0.0881, 3471609698139488,
- (2.19093829393386841, 41.43379028184083523)]]
+ assert r.georadius(
+ "barcelona",
+ 2.191,
+ 41.433,
+ 1,
+ unit="km",
+ withdist=True,
+ withcoord=True,
+ withhash=True,
+ ) == [
+ [
+ b"place1",
+ 0.0881,
+ 3471609698139488,
+ (2.19093829393386841, 41.43379028184083523),
+ ]
+ ]
- assert r.georadius('barcelona', 2.191, 41.433, 1, unit='km',
- withdist=True, withcoord=True) == \
- [[b'place1', 0.0881,
- (2.19093829393386841, 41.43379028184083523)]]
+ assert r.georadius(
+ "barcelona", 2.191, 41.433, 1, unit="km", withdist=True, withcoord=True
+ ) == [[b"place1", 0.0881, (2.19093829393386841, 41.43379028184083523)]]
- assert r.georadius('barcelona', 2.191, 41.433, 1, unit='km',
- withhash=True, withcoord=True) == \
- [[b'place1', 3471609698139488,
- (2.19093829393386841, 41.43379028184083523)]]
+ assert r.georadius(
+ "barcelona", 2.191, 41.433, 1, unit="km", withhash=True, withcoord=True
+ ) == [
+ [b"place1", 3471609698139488, (2.19093829393386841, 41.43379028184083523)]
+ ]
# test no values.
- assert r.georadius('barcelona', 2, 1, 1, unit='km',
- withdist=True, withcoord=True, withhash=True) == []
+ assert (
+ r.georadius(
+ "barcelona",
+ 2,
+ 1,
+ 1,
+ unit="km",
+ withdist=True,
+ withcoord=True,
+ withhash=True,
+ )
+ == []
+ )
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_georadius_count(self, r):
- values = (2.1909389952632, 41.433791470673, 'place1') + \
- (2.1873744593677, 41.406342043777, 'place2')
+ values = (2.1909389952632, 41.433791470673, "place1") + (
+ 2.1873744593677,
+ 41.406342043777,
+ "place2",
+ )
- r.geoadd('barcelona', values)
- assert r.georadius('barcelona', 2.191, 41.433, 3000, count=1) == \
- [b'place1']
- assert r.georadius('barcelona', 2.191, 41.433, 3000,
- count=1, any=True) == \
- [b'place2']
+ r.geoadd("barcelona", values)
+ assert r.georadius("barcelona", 2.191, 41.433, 3000, count=1) == [b"place1"]
+ assert r.georadius("barcelona", 2.191, 41.433, 3000, count=1, any=True) == [
+ b"place2"
+ ]
- @skip_if_server_version_lt('3.2.0')
+ @skip_if_server_version_lt("3.2.0")
def test_georadius_sort(self, r):
- values = (2.1909389952632, 41.433791470673, 'place1') + \
- (2.1873744593677, 41.406342043777, 'place2')
+ values = (2.1909389952632, 41.433791470673, "place1") + (
+ 2.1873744593677,
+ 41.406342043777,
+ "place2",
+ )
- r.geoadd('barcelona', values)
- assert r.georadius('barcelona', 2.191, 41.433, 3000, sort='ASC') == \
- [b'place1', b'place2']
- assert r.georadius('barcelona', 2.191, 41.433, 3000, sort='DESC') == \
- [b'place2', b'place1']
+ r.geoadd("barcelona", values)
+ assert r.georadius("barcelona", 2.191, 41.433, 3000, sort="ASC") == [
+ b"place1",
+ b"place2",
+ ]
+ assert r.georadius("barcelona", 2.191, 41.433, 3000, sort="DESC") == [
+ b"place2",
+ b"place1",
+ ]
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('3.2.0')
+ @skip_if_server_version_lt("3.2.0")
def test_georadius_store(self, r):
- values = (2.1909389952632, 41.433791470673, 'place1') + \
- (2.1873744593677, 41.406342043777, 'place2')
+ values = (2.1909389952632, 41.433791470673, "place1") + (
+ 2.1873744593677,
+ 41.406342043777,
+ "place2",
+ )
- r.geoadd('barcelona', values)
- r.georadius('barcelona', 2.191, 41.433, 1000, store='places_barcelona')
- assert r.zrange('places_barcelona', 0, -1) == [b'place1']
+ r.geoadd("barcelona", values)
+ r.georadius("barcelona", 2.191, 41.433, 1000, store="places_barcelona")
+ assert r.zrange("places_barcelona", 0, -1) == [b"place1"]
@pytest.mark.onlynoncluster
@skip_unless_arch_bits(64)
- @skip_if_server_version_lt('3.2.0')
+ @skip_if_server_version_lt("3.2.0")
def test_georadius_store_dist(self, r):
- values = (2.1909389952632, 41.433791470673, 'place1') + \
- (2.1873744593677, 41.406342043777, 'place2')
+ values = (2.1909389952632, 41.433791470673, "place1") + (
+ 2.1873744593677,
+ 41.406342043777,
+ "place2",
+ )
- r.geoadd('barcelona', values)
- r.georadius('barcelona', 2.191, 41.433, 1000,
- store_dist='places_barcelona')
+ r.geoadd("barcelona", values)
+ r.georadius("barcelona", 2.191, 41.433, 1000, store_dist="places_barcelona")
# instead of save the geo score, the distance is saved.
- assert r.zscore('places_barcelona', 'place1') == 88.05060698409301
+ assert r.zscore("places_barcelona", "place1") == 88.05060698409301
@skip_unless_arch_bits(64)
- @skip_if_server_version_lt('3.2.0')
+ @skip_if_server_version_lt("3.2.0")
def test_georadiusmember(self, r):
- values = (2.1909389952632, 41.433791470673, 'place1') + \
- (2.1873744593677, 41.406342043777, b'\x80place2')
-
- r.geoadd('barcelona', values)
- assert r.georadiusbymember('barcelona', 'place1', 4000) == \
- [b'\x80place2', b'place1']
- assert r.georadiusbymember('barcelona', 'place1', 10) == [b'place1']
-
- assert r.georadiusbymember('barcelona', 'place1', 4000,
- withdist=True, withcoord=True,
- withhash=True) == \
- [[b'\x80place2', 3067.4157, 3471609625421029,
- (2.187376320362091, 41.40634178640635)],
- [b'place1', 0.0, 3471609698139488,
- (2.1909382939338684, 41.433790281840835)]]
-
- @skip_if_server_version_lt('6.2.0')
+ values = (2.1909389952632, 41.433791470673, "place1") + (
+ 2.1873744593677,
+ 41.406342043777,
+ b"\x80place2",
+ )
+
+ r.geoadd("barcelona", values)
+ assert r.georadiusbymember("barcelona", "place1", 4000) == [
+ b"\x80place2",
+ b"place1",
+ ]
+ assert r.georadiusbymember("barcelona", "place1", 10) == [b"place1"]
+
+ assert r.georadiusbymember(
+ "barcelona", "place1", 4000, withdist=True, withcoord=True, withhash=True
+ ) == [
+ [
+ b"\x80place2",
+ 3067.4157,
+ 3471609625421029,
+ (2.187376320362091, 41.40634178640635),
+ ],
+ [
+ b"place1",
+ 0.0,
+ 3471609698139488,
+ (2.1909382939338684, 41.433790281840835),
+ ],
+ ]
+
+ @skip_if_server_version_lt("6.2.0")
def test_georadiusmember_count(self, r):
- values = (2.1909389952632, 41.433791470673, 'place1') + \
- (2.1873744593677, 41.406342043777, b'\x80place2')
- r.geoadd('barcelona', values)
- assert r.georadiusbymember('barcelona', 'place1', 4000,
- count=1, any=True) == \
- [b'\x80place2']
-
- @skip_if_server_version_lt('5.0.0')
+ values = (2.1909389952632, 41.433791470673, "place1") + (
+ 2.1873744593677,
+ 41.406342043777,
+ b"\x80place2",
+ )
+ r.geoadd("barcelona", values)
+ assert r.georadiusbymember("barcelona", "place1", 4000, count=1, any=True) == [
+ b"\x80place2"
+ ]
+
+ @skip_if_server_version_lt("5.0.0")
def test_xack(self, r):
- stream = 'stream'
- group = 'group'
- consumer = 'consumer'
+ stream = "stream"
+ group = "group"
+ consumer = "consumer"
# xack on a stream that doesn't exist
- assert r.xack(stream, group, '0-0') == 0
+ assert r.xack(stream, group, "0-0") == 0
- m1 = r.xadd(stream, {'one': 'one'})
- m2 = r.xadd(stream, {'two': 'two'})
- m3 = r.xadd(stream, {'three': 'three'})
+ m1 = r.xadd(stream, {"one": "one"})
+ m2 = r.xadd(stream, {"two": "two"})
+ m3 = r.xadd(stream, {"three": "three"})
# xack on a group that doesn't exist
assert r.xack(stream, group, m1) == 0
r.xgroup_create(stream, group, 0)
- r.xreadgroup(group, consumer, streams={stream: '>'})
+ r.xreadgroup(group, consumer, streams={stream: ">"})
# xack returns the number of ack'd elements
assert r.xack(stream, group, m1) == 1
assert r.xack(stream, group, m2, m3) == 2
- @skip_if_server_version_lt('5.0.0')
+ @skip_if_server_version_lt("5.0.0")
def test_xadd(self, r):
- stream = 'stream'
- message_id = r.xadd(stream, {'foo': 'bar'})
- assert re.match(br'[0-9]+\-[0-9]+', message_id)
+ stream = "stream"
+ message_id = r.xadd(stream, {"foo": "bar"})
+ assert re.match(br"[0-9]+\-[0-9]+", message_id)
# explicit message id
- message_id = b'9999999999999999999-0'
- assert message_id == r.xadd(stream, {'foo': 'bar'}, id=message_id)
+ message_id = b"9999999999999999999-0"
+ assert message_id == r.xadd(stream, {"foo": "bar"}, id=message_id)
# with maxlen, the list evicts the first message
- r.xadd(stream, {'foo': 'bar'}, maxlen=2, approximate=False)
+ r.xadd(stream, {"foo": "bar"}, maxlen=2, approximate=False)
assert r.xlen(stream) == 2
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_xadd_nomkstream(self, r):
# nomkstream option
- stream = 'stream'
- r.xadd(stream, {'foo': 'bar'})
- r.xadd(stream, {'some': 'other'}, nomkstream=False)
+ stream = "stream"
+ r.xadd(stream, {"foo": "bar"})
+ r.xadd(stream, {"some": "other"}, nomkstream=False)
assert r.xlen(stream) == 2
- r.xadd(stream, {'some': 'other'}, nomkstream=True)
+ r.xadd(stream, {"some": "other"}, nomkstream=True)
assert r.xlen(stream) == 3
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_xadd_minlen_and_limit(self, r):
- stream = 'stream'
+ stream = "stream"
- r.xadd(stream, {'foo': 'bar'})
- r.xadd(stream, {'foo': 'bar'})
- r.xadd(stream, {'foo': 'bar'})
- r.xadd(stream, {'foo': 'bar'})
+ r.xadd(stream, {"foo": "bar"})
+ r.xadd(stream, {"foo": "bar"})
+ r.xadd(stream, {"foo": "bar"})
+ r.xadd(stream, {"foo": "bar"})
# Future self: No limits without approximate, according to the api
with pytest.raises(redis.ResponseError):
- assert r.xadd(stream, {'foo': 'bar'}, maxlen=3,
- approximate=False, limit=2)
+ assert r.xadd(stream, {"foo": "bar"}, maxlen=3, approximate=False, limit=2)
# limit can not be provided without maxlen or minid
with pytest.raises(redis.ResponseError):
- assert r.xadd(stream, {'foo': 'bar'}, limit=2)
+ assert r.xadd(stream, {"foo": "bar"}, limit=2)
# maxlen with a limit
- assert r.xadd(stream, {'foo': 'bar'}, maxlen=3,
- approximate=True, limit=2)
+ assert r.xadd(stream, {"foo": "bar"}, maxlen=3, approximate=True, limit=2)
r.delete(stream)
# maxlen and minid can not be provided together
with pytest.raises(redis.DataError):
- assert r.xadd(stream, {'foo': 'bar'}, maxlen=3,
- minid="sometestvalue")
+ assert r.xadd(stream, {"foo": "bar"}, maxlen=3, minid="sometestvalue")
# minid with a limit
- m1 = r.xadd(stream, {'foo': 'bar'})
- r.xadd(stream, {'foo': 'bar'})
- r.xadd(stream, {'foo': 'bar'})
- r.xadd(stream, {'foo': 'bar'})
- assert r.xadd(stream, {'foo': 'bar'}, approximate=True,
- minid=m1, limit=3)
+ m1 = r.xadd(stream, {"foo": "bar"})
+ r.xadd(stream, {"foo": "bar"})
+ r.xadd(stream, {"foo": "bar"})
+ r.xadd(stream, {"foo": "bar"})
+ assert r.xadd(stream, {"foo": "bar"}, approximate=True, minid=m1, limit=3)
# pure minid
- r.xadd(stream, {'foo': 'bar'})
- r.xadd(stream, {'foo': 'bar'})
- r.xadd(stream, {'foo': 'bar'})
- m4 = r.xadd(stream, {'foo': 'bar'})
- assert r.xadd(stream, {'foo': 'bar'}, approximate=False, minid=m4)
+ r.xadd(stream, {"foo": "bar"})
+ r.xadd(stream, {"foo": "bar"})
+ r.xadd(stream, {"foo": "bar"})
+ m4 = r.xadd(stream, {"foo": "bar"})
+ assert r.xadd(stream, {"foo": "bar"}, approximate=False, minid=m4)
# minid approximate
- r.xadd(stream, {'foo': 'bar'})
- r.xadd(stream, {'foo': 'bar'})
- m3 = r.xadd(stream, {'foo': 'bar'})
- r.xadd(stream, {'foo': 'bar'})
- assert r.xadd(stream, {'foo': 'bar'}, approximate=True, minid=m3)
+ r.xadd(stream, {"foo": "bar"})
+ r.xadd(stream, {"foo": "bar"})
+ m3 = r.xadd(stream, {"foo": "bar"})
+ r.xadd(stream, {"foo": "bar"})
+ assert r.xadd(stream, {"foo": "bar"}, approximate=True, minid=m3)
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_xautoclaim(self, r):
- stream = 'stream'
- group = 'group'
- consumer1 = 'consumer1'
- consumer2 = 'consumer2'
+ stream = "stream"
+ group = "group"
+ consumer1 = "consumer1"
+ consumer2 = "consumer2"
- message_id1 = r.xadd(stream, {'john': 'wick'})
- message_id2 = r.xadd(stream, {'johny': 'deff'})
+ message_id1 = r.xadd(stream, {"john": "wick"})
+ message_id2 = r.xadd(stream, {"johny": "deff"})
message = get_stream_message(r, stream, message_id1)
r.xgroup_create(stream, group, 0)
@@ -3084,70 +3348,78 @@ class TestRedisCommands:
assert response == []
# read the group as consumer1 to initially claim the messages
- r.xreadgroup(group, consumer1, streams={stream: '>'})
+ r.xreadgroup(group, consumer1, streams={stream: ">"})
# claim one message as consumer2
- response = r.xautoclaim(stream, group, consumer2,
- min_idle_time=0, count=1)
+ response = r.xautoclaim(stream, group, consumer2, min_idle_time=0, count=1)
assert response == [message]
# reclaim the messages as consumer1, but use the justid argument
# which only returns message ids
- assert r.xautoclaim(stream, group, consumer1, min_idle_time=0,
- start_id=0, justid=True) == \
- [message_id1, message_id2]
- assert r.xautoclaim(stream, group, consumer1, min_idle_time=0,
- start_id=message_id2, justid=True) == \
- [message_id2]
-
- @skip_if_server_version_lt('6.2.0')
+ assert r.xautoclaim(
+ stream, group, consumer1, min_idle_time=0, start_id=0, justid=True
+ ) == [message_id1, message_id2]
+ assert r.xautoclaim(
+ stream, group, consumer1, min_idle_time=0, start_id=message_id2, justid=True
+ ) == [message_id2]
+
+ @skip_if_server_version_lt("6.2.0")
def test_xautoclaim_negative(self, r):
- stream = 'stream'
- group = 'group'
- consumer = 'consumer'
+ stream = "stream"
+ group = "group"
+ consumer = "consumer"
with pytest.raises(redis.DataError):
r.xautoclaim(stream, group, consumer, min_idle_time=-1)
with pytest.raises(ValueError):
r.xautoclaim(stream, group, consumer, min_idle_time="wrong")
with pytest.raises(redis.DataError):
- r.xautoclaim(stream, group, consumer, min_idle_time=0,
- count=-1)
+ r.xautoclaim(stream, group, consumer, min_idle_time=0, count=-1)
- @skip_if_server_version_lt('5.0.0')
+ @skip_if_server_version_lt("5.0.0")
def test_xclaim(self, r):
- stream = 'stream'
- group = 'group'
- consumer1 = 'consumer1'
- consumer2 = 'consumer2'
- message_id = r.xadd(stream, {'john': 'wick'})
+ stream = "stream"
+ group = "group"
+ consumer1 = "consumer1"
+ consumer2 = "consumer2"
+ message_id = r.xadd(stream, {"john": "wick"})
message = get_stream_message(r, stream, message_id)
r.xgroup_create(stream, group, 0)
# trying to claim a message that isn't already pending doesn't
# do anything
- response = r.xclaim(stream, group, consumer2,
- min_idle_time=0, message_ids=(message_id,))
+ response = r.xclaim(
+ stream, group, consumer2, min_idle_time=0, message_ids=(message_id,)
+ )
assert response == []
# read the group as consumer1 to initially claim the messages
- r.xreadgroup(group, consumer1, streams={stream: '>'})
+ r.xreadgroup(group, consumer1, streams={stream: ">"})
# claim the message as consumer2
- response = r.xclaim(stream, group, consumer2,
- min_idle_time=0, message_ids=(message_id,))
+ response = r.xclaim(
+ stream, group, consumer2, min_idle_time=0, message_ids=(message_id,)
+ )
assert response[0] == message
# reclaim the message as consumer1, but use the justid argument
# which only returns message ids
- assert r.xclaim(stream, group, consumer1,
- min_idle_time=0, message_ids=(message_id,),
- justid=True) == [message_id]
+ assert (
+ r.xclaim(
+ stream,
+ group,
+ consumer1,
+ min_idle_time=0,
+ message_ids=(message_id,),
+ justid=True,
+ )
+ == [message_id]
+ )
- @skip_if_server_version_lt('5.0.0')
+ @skip_if_server_version_lt("5.0.0")
def test_xclaim_trimmed(self, r):
# xclaim should not raise an exception if the item is not there
- stream = 'stream'
- group = 'group'
+ stream = "stream"
+ group = "group"
r.xgroup_create(stream, group, id="$", mkstream=True)
@@ -3156,57 +3428,59 @@ class TestRedisCommands:
sid2 = r.xadd(stream, {"item": 0})
# read them from consumer1
- r.xreadgroup(group, 'consumer1', {stream: ">"})
+ r.xreadgroup(group, "consumer1", {stream: ">"})
# add a 3rd and trim the stream down to 2 items
r.xadd(stream, {"item": 3}, maxlen=2, approximate=False)
# xclaim them from consumer2
# the item that is still in the stream should be returned
- item = r.xclaim(stream, group, 'consumer2', 0, [sid1, sid2])
+ item = r.xclaim(stream, group, "consumer2", 0, [sid1, sid2])
assert len(item) == 2
assert item[0] == (None, None)
assert item[1][0] == sid2
- @skip_if_server_version_lt('5.0.0')
+ @skip_if_server_version_lt("5.0.0")
def test_xdel(self, r):
- stream = 'stream'
+ stream = "stream"
# deleting from an empty stream doesn't do anything
assert r.xdel(stream, 1) == 0
- m1 = r.xadd(stream, {'foo': 'bar'})
- m2 = r.xadd(stream, {'foo': 'bar'})
- m3 = r.xadd(stream, {'foo': 'bar'})
+ m1 = r.xadd(stream, {"foo": "bar"})
+ m2 = r.xadd(stream, {"foo": "bar"})
+ m3 = r.xadd(stream, {"foo": "bar"})
# xdel returns the number of deleted elements
assert r.xdel(stream, m1) == 1
assert r.xdel(stream, m2, m3) == 2
- @skip_if_server_version_lt('5.0.0')
+ @skip_if_server_version_lt("5.0.0")
def test_xgroup_create(self, r):
# tests xgroup_create and xinfo_groups
- stream = 'stream'
- group = 'group'
- r.xadd(stream, {'foo': 'bar'})
+ stream = "stream"
+ group = "group"
+ r.xadd(stream, {"foo": "bar"})
# no group is setup yet, no info to obtain
assert r.xinfo_groups(stream) == []
assert r.xgroup_create(stream, group, 0)
- expected = [{
- 'name': group.encode(),
- 'consumers': 0,
- 'pending': 0,
- 'last-delivered-id': b'0-0'
- }]
+ expected = [
+ {
+ "name": group.encode(),
+ "consumers": 0,
+ "pending": 0,
+ "last-delivered-id": b"0-0",
+ }
+ ]
assert r.xinfo_groups(stream) == expected
- @skip_if_server_version_lt('5.0.0')
+ @skip_if_server_version_lt("5.0.0")
def test_xgroup_create_mkstream(self, r):
# tests xgroup_create and xinfo_groups
- stream = 'stream'
- group = 'group'
+ stream = "stream"
+ group = "group"
# an error is raised if a group is created on a stream that
# doesn't already exist
@@ -3216,53 +3490,55 @@ class TestRedisCommands:
# however, with mkstream=True, the underlying stream is created
# automatically
assert r.xgroup_create(stream, group, 0, mkstream=True)
- expected = [{
- 'name': group.encode(),
- 'consumers': 0,
- 'pending': 0,
- 'last-delivered-id': b'0-0'
- }]
+ expected = [
+ {
+ "name": group.encode(),
+ "consumers": 0,
+ "pending": 0,
+ "last-delivered-id": b"0-0",
+ }
+ ]
assert r.xinfo_groups(stream) == expected
- @skip_if_server_version_lt('5.0.0')
+ @skip_if_server_version_lt("5.0.0")
def test_xgroup_delconsumer(self, r):
- stream = 'stream'
- group = 'group'
- consumer = 'consumer'
- r.xadd(stream, {'foo': 'bar'})
- r.xadd(stream, {'foo': 'bar'})
+ stream = "stream"
+ group = "group"
+ consumer = "consumer"
+ r.xadd(stream, {"foo": "bar"})
+ r.xadd(stream, {"foo": "bar"})
r.xgroup_create(stream, group, 0)
# a consumer that hasn't yet read any messages doesn't do anything
assert r.xgroup_delconsumer(stream, group, consumer) == 0
# read all messages from the group
- r.xreadgroup(group, consumer, streams={stream: '>'})
+ r.xreadgroup(group, consumer, streams={stream: ">"})
# deleting the consumer should return 2 pending messages
assert r.xgroup_delconsumer(stream, group, consumer) == 2
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_xgroup_createconsumer(self, r):
- stream = 'stream'
- group = 'group'
- consumer = 'consumer'
- r.xadd(stream, {'foo': 'bar'})
- r.xadd(stream, {'foo': 'bar'})
+ stream = "stream"
+ group = "group"
+ consumer = "consumer"
+ r.xadd(stream, {"foo": "bar"})
+ r.xadd(stream, {"foo": "bar"})
r.xgroup_create(stream, group, 0)
assert r.xgroup_createconsumer(stream, group, consumer) == 1
# read all messages from the group
- r.xreadgroup(group, consumer, streams={stream: '>'})
+ r.xreadgroup(group, consumer, streams={stream: ">"})
# deleting the consumer should return 2 pending messages
assert r.xgroup_delconsumer(stream, group, consumer) == 2
- @skip_if_server_version_lt('5.0.0')
+ @skip_if_server_version_lt("5.0.0")
def test_xgroup_destroy(self, r):
- stream = 'stream'
- group = 'group'
- r.xadd(stream, {'foo': 'bar'})
+ stream = "stream"
+ group = "group"
+ r.xadd(stream, {"foo": "bar"})
# destroying a nonexistent group returns False
assert not r.xgroup_destroy(stream, group)
@@ -3270,198 +3546,189 @@ class TestRedisCommands:
r.xgroup_create(stream, group, 0)
assert r.xgroup_destroy(stream, group)
- @skip_if_server_version_lt('5.0.0')
+ @skip_if_server_version_lt("5.0.0")
def test_xgroup_setid(self, r):
- stream = 'stream'
- group = 'group'
- message_id = r.xadd(stream, {'foo': 'bar'})
+ stream = "stream"
+ group = "group"
+ message_id = r.xadd(stream, {"foo": "bar"})
r.xgroup_create(stream, group, 0)
# advance the last_delivered_id to the message_id
r.xgroup_setid(stream, group, message_id)
- expected = [{
- 'name': group.encode(),
- 'consumers': 0,
- 'pending': 0,
- 'last-delivered-id': message_id
- }]
+ expected = [
+ {
+ "name": group.encode(),
+ "consumers": 0,
+ "pending": 0,
+ "last-delivered-id": message_id,
+ }
+ ]
assert r.xinfo_groups(stream) == expected
- @skip_if_server_version_lt('5.0.0')
+ @skip_if_server_version_lt("5.0.0")
def test_xinfo_consumers(self, r):
- stream = 'stream'
- group = 'group'
- consumer1 = 'consumer1'
- consumer2 = 'consumer2'
- r.xadd(stream, {'foo': 'bar'})
- r.xadd(stream, {'foo': 'bar'})
- r.xadd(stream, {'foo': 'bar'})
+ stream = "stream"
+ group = "group"
+ consumer1 = "consumer1"
+ consumer2 = "consumer2"
+ r.xadd(stream, {"foo": "bar"})
+ r.xadd(stream, {"foo": "bar"})
+ r.xadd(stream, {"foo": "bar"})
r.xgroup_create(stream, group, 0)
- r.xreadgroup(group, consumer1, streams={stream: '>'}, count=1)
- r.xreadgroup(group, consumer2, streams={stream: '>'})
+ r.xreadgroup(group, consumer1, streams={stream: ">"}, count=1)
+ r.xreadgroup(group, consumer2, streams={stream: ">"})
info = r.xinfo_consumers(stream, group)
assert len(info) == 2
expected = [
- {'name': consumer1.encode(), 'pending': 1},
- {'name': consumer2.encode(), 'pending': 2},
+ {"name": consumer1.encode(), "pending": 1},
+ {"name": consumer2.encode(), "pending": 2},
]
# we can't determine the idle time, so just make sure it's an int
- assert isinstance(info[0].pop('idle'), int)
- assert isinstance(info[1].pop('idle'), int)
+ assert isinstance(info[0].pop("idle"), int)
+ assert isinstance(info[1].pop("idle"), int)
assert info == expected
- @skip_if_server_version_lt('5.0.0')
+ @skip_if_server_version_lt("5.0.0")
def test_xinfo_stream(self, r):
- stream = 'stream'
- m1 = r.xadd(stream, {'foo': 'bar'})
- m2 = r.xadd(stream, {'foo': 'bar'})
+ stream = "stream"
+ m1 = r.xadd(stream, {"foo": "bar"})
+ m2 = r.xadd(stream, {"foo": "bar"})
info = r.xinfo_stream(stream)
- assert info['length'] == 2
- assert info['first-entry'] == get_stream_message(r, stream, m1)
- assert info['last-entry'] == get_stream_message(r, stream, m2)
+ assert info["length"] == 2
+ assert info["first-entry"] == get_stream_message(r, stream, m1)
+ assert info["last-entry"] == get_stream_message(r, stream, m2)
- @skip_if_server_version_lt('6.0.0')
+ @skip_if_server_version_lt("6.0.0")
def test_xinfo_stream_full(self, r):
- stream = 'stream'
- group = 'group'
- m1 = r.xadd(stream, {'foo': 'bar'})
+ stream = "stream"
+ group = "group"
+ m1 = r.xadd(stream, {"foo": "bar"})
r.xgroup_create(stream, group, 0)
info = r.xinfo_stream(stream, full=True)
- assert info['length'] == 1
- assert m1 in info['entries']
- assert len(info['groups']) == 1
+ assert info["length"] == 1
+ assert m1 in info["entries"]
+ assert len(info["groups"]) == 1
- @skip_if_server_version_lt('5.0.0')
+ @skip_if_server_version_lt("5.0.0")
def test_xlen(self, r):
- stream = 'stream'
+ stream = "stream"
assert r.xlen(stream) == 0
- r.xadd(stream, {'foo': 'bar'})
- r.xadd(stream, {'foo': 'bar'})
+ r.xadd(stream, {"foo": "bar"})
+ r.xadd(stream, {"foo": "bar"})
assert r.xlen(stream) == 2
- @skip_if_server_version_lt('5.0.0')
+ @skip_if_server_version_lt("5.0.0")
def test_xpending(self, r):
- stream = 'stream'
- group = 'group'
- consumer1 = 'consumer1'
- consumer2 = 'consumer2'
- m1 = r.xadd(stream, {'foo': 'bar'})
- m2 = r.xadd(stream, {'foo': 'bar'})
+ stream = "stream"
+ group = "group"
+ consumer1 = "consumer1"
+ consumer2 = "consumer2"
+ m1 = r.xadd(stream, {"foo": "bar"})
+ m2 = r.xadd(stream, {"foo": "bar"})
r.xgroup_create(stream, group, 0)
# xpending on a group that has no consumers yet
- expected = {
- 'pending': 0,
- 'min': None,
- 'max': None,
- 'consumers': []
- }
+ expected = {"pending": 0, "min": None, "max": None, "consumers": []}
assert r.xpending(stream, group) == expected
# read 1 message from the group with each consumer
- r.xreadgroup(group, consumer1, streams={stream: '>'}, count=1)
- r.xreadgroup(group, consumer2, streams={stream: '>'}, count=1)
+ r.xreadgroup(group, consumer1, streams={stream: ">"}, count=1)
+ r.xreadgroup(group, consumer2, streams={stream: ">"}, count=1)
expected = {
- 'pending': 2,
- 'min': m1,
- 'max': m2,
- 'consumers': [
- {'name': consumer1.encode(), 'pending': 1},
- {'name': consumer2.encode(), 'pending': 1},
- ]
+ "pending": 2,
+ "min": m1,
+ "max": m2,
+ "consumers": [
+ {"name": consumer1.encode(), "pending": 1},
+ {"name": consumer2.encode(), "pending": 1},
+ ],
}
assert r.xpending(stream, group) == expected
- @skip_if_server_version_lt('5.0.0')
+ @skip_if_server_version_lt("5.0.0")
def test_xpending_range(self, r):
- stream = 'stream'
- group = 'group'
- consumer1 = 'consumer1'
- consumer2 = 'consumer2'
- m1 = r.xadd(stream, {'foo': 'bar'})
- m2 = r.xadd(stream, {'foo': 'bar'})
+ stream = "stream"
+ group = "group"
+ consumer1 = "consumer1"
+ consumer2 = "consumer2"
+ m1 = r.xadd(stream, {"foo": "bar"})
+ m2 = r.xadd(stream, {"foo": "bar"})
r.xgroup_create(stream, group, 0)
# xpending range on a group that has no consumers yet
- assert r.xpending_range(stream, group, min='-', max='+', count=5) == []
+ assert r.xpending_range(stream, group, min="-", max="+", count=5) == []
# read 1 message from the group with each consumer
- r.xreadgroup(group, consumer1, streams={stream: '>'}, count=1)
- r.xreadgroup(group, consumer2, streams={stream: '>'}, count=1)
+ r.xreadgroup(group, consumer1, streams={stream: ">"}, count=1)
+ r.xreadgroup(group, consumer2, streams={stream: ">"}, count=1)
- response = r.xpending_range(stream, group,
- min='-', max='+', count=5)
+ response = r.xpending_range(stream, group, min="-", max="+", count=5)
assert len(response) == 2
- assert response[0]['message_id'] == m1
- assert response[0]['consumer'] == consumer1.encode()
- assert response[1]['message_id'] == m2
- assert response[1]['consumer'] == consumer2.encode()
+ assert response[0]["message_id"] == m1
+ assert response[0]["consumer"] == consumer1.encode()
+ assert response[1]["message_id"] == m2
+ assert response[1]["consumer"] == consumer2.encode()
# test with consumer name
- response = r.xpending_range(stream, group,
- min='-', max='+', count=5,
- consumername=consumer1)
- assert response[0]['message_id'] == m1
- assert response[0]['consumer'] == consumer1.encode()
+ response = r.xpending_range(
+ stream, group, min="-", max="+", count=5, consumername=consumer1
+ )
+ assert response[0]["message_id"] == m1
+ assert response[0]["consumer"] == consumer1.encode()
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_xpending_range_idle(self, r):
- stream = 'stream'
- group = 'group'
- consumer1 = 'consumer1'
- consumer2 = 'consumer2'
- r.xadd(stream, {'foo': 'bar'})
- r.xadd(stream, {'foo': 'bar'})
+ stream = "stream"
+ group = "group"
+ consumer1 = "consumer1"
+ consumer2 = "consumer2"
+ r.xadd(stream, {"foo": "bar"})
+ r.xadd(stream, {"foo": "bar"})
r.xgroup_create(stream, group, 0)
# read 1 message from the group with each consumer
- r.xreadgroup(group, consumer1, streams={stream: '>'}, count=1)
- r.xreadgroup(group, consumer2, streams={stream: '>'}, count=1)
+ r.xreadgroup(group, consumer1, streams={stream: ">"}, count=1)
+ r.xreadgroup(group, consumer2, streams={stream: ">"}, count=1)
- response = r.xpending_range(stream, group,
- min='-', max='+', count=5)
+ response = r.xpending_range(stream, group, min="-", max="+", count=5)
assert len(response) == 2
- response = r.xpending_range(stream, group,
- min='-', max='+', count=5, idle=1000)
+ response = r.xpending_range(stream, group, min="-", max="+", count=5, idle=1000)
assert len(response) == 0
def test_xpending_range_negative(self, r):
- stream = 'stream'
- group = 'group'
+ stream = "stream"
+ group = "group"
with pytest.raises(redis.DataError):
- r.xpending_range(stream, group, min='-', max='+', count=None)
+ r.xpending_range(stream, group, min="-", max="+", count=None)
with pytest.raises(ValueError):
- r.xpending_range(stream, group, min='-', max='+', count="one")
+ r.xpending_range(stream, group, min="-", max="+", count="one")
with pytest.raises(redis.DataError):
- r.xpending_range(stream, group, min='-', max='+', count=-1)
+ r.xpending_range(stream, group, min="-", max="+", count=-1)
with pytest.raises(ValueError):
- r.xpending_range(stream, group, min='-', max='+', count=5,
- idle="one")
+ r.xpending_range(stream, group, min="-", max="+", count=5, idle="one")
with pytest.raises(redis.exceptions.ResponseError):
- r.xpending_range(stream, group, min='-', max='+', count=5,
- idle=1.5)
+ r.xpending_range(stream, group, min="-", max="+", count=5, idle=1.5)
with pytest.raises(redis.DataError):
- r.xpending_range(stream, group, min='-', max='+', count=5,
- idle=-1)
+ r.xpending_range(stream, group, min="-", max="+", count=5, idle=-1)
with pytest.raises(redis.DataError):
- r.xpending_range(stream, group, min=None, max=None, count=None,
- idle=0)
+ r.xpending_range(stream, group, min=None, max=None, count=None, idle=0)
with pytest.raises(redis.DataError):
- r.xpending_range(stream, group, min=None, max=None, count=None,
- consumername=0)
+ r.xpending_range(
+ stream, group, min=None, max=None, count=None, consumername=0
+ )
- @skip_if_server_version_lt('5.0.0')
+ @skip_if_server_version_lt("5.0.0")
def test_xrange(self, r):
- stream = 'stream'
- m1 = r.xadd(stream, {'foo': 'bar'})
- m2 = r.xadd(stream, {'foo': 'bar'})
- m3 = r.xadd(stream, {'foo': 'bar'})
- m4 = r.xadd(stream, {'foo': 'bar'})
+ stream = "stream"
+ m1 = r.xadd(stream, {"foo": "bar"})
+ m2 = r.xadd(stream, {"foo": "bar"})
+ m3 = r.xadd(stream, {"foo": "bar"})
+ m4 = r.xadd(stream, {"foo": "bar"})
def get_ids(results):
return [result[0] for result in results]
@@ -3478,11 +3745,11 @@ class TestRedisCommands:
results = r.xrange(stream, max=m2, count=1)
assert get_ids(results) == [m1]
- @skip_if_server_version_lt('5.0.0')
+ @skip_if_server_version_lt("5.0.0")
def test_xread(self, r):
- stream = 'stream'
- m1 = r.xadd(stream, {'foo': 'bar'})
- m2 = r.xadd(stream, {'bing': 'baz'})
+ stream = "stream"
+ m1 = r.xadd(stream, {"foo": "bar"})
+ m2 = r.xadd(stream, {"bing": "baz"})
expected = [
[
@@ -3490,7 +3757,7 @@ class TestRedisCommands:
[
get_stream_message(r, stream, m1),
get_stream_message(r, stream, m2),
- ]
+ ],
]
]
# xread starting at 0 returns both messages
@@ -3501,7 +3768,7 @@ class TestRedisCommands:
stream.encode(),
[
get_stream_message(r, stream, m1),
- ]
+ ],
]
]
# xread starting at 0 and count=1 returns only the first message
@@ -3512,7 +3779,7 @@ class TestRedisCommands:
stream.encode(),
[
get_stream_message(r, stream, m2),
- ]
+ ],
]
]
# xread starting at m1 returns only the second message
@@ -3521,13 +3788,13 @@ class TestRedisCommands:
# xread starting at the last message returns an empty list
assert r.xread(streams={stream: m2}) == []
- @skip_if_server_version_lt('5.0.0')
+ @skip_if_server_version_lt("5.0.0")
def test_xreadgroup(self, r):
- stream = 'stream'
- group = 'group'
- consumer = 'consumer'
- m1 = r.xadd(stream, {'foo': 'bar'})
- m2 = r.xadd(stream, {'bing': 'baz'})
+ stream = "stream"
+ group = "group"
+ consumer = "consumer"
+ m1 = r.xadd(stream, {"foo": "bar"})
+ m2 = r.xadd(stream, {"bing": "baz"})
r.xgroup_create(stream, group, 0)
expected = [
@@ -3536,11 +3803,11 @@ class TestRedisCommands:
[
get_stream_message(r, stream, m1),
get_stream_message(r, stream, m2),
- ]
+ ],
]
]
# xread starting at 0 returns both messages
- assert r.xreadgroup(group, consumer, streams={stream: '>'}) == expected
+ assert r.xreadgroup(group, consumer, streams={stream: ">"}) == expected
r.xgroup_destroy(stream, group)
r.xgroup_create(stream, group, 0)
@@ -3550,34 +3817,34 @@ class TestRedisCommands:
stream.encode(),
[
get_stream_message(r, stream, m1),
- ]
+ ],
]
]
# xread with count=1 returns only the first message
- assert r.xreadgroup(group, consumer,
- streams={stream: '>'}, count=1) == expected
+ assert r.xreadgroup(group, consumer, streams={stream: ">"}, count=1) == expected
r.xgroup_destroy(stream, group)
# create the group using $ as the last id meaning subsequent reads
# will only find messages added after this
- r.xgroup_create(stream, group, '$')
+ r.xgroup_create(stream, group, "$")
expected = []
# xread starting after the last message returns an empty message list
- assert r.xreadgroup(group, consumer, streams={stream: '>'}) == expected
+ assert r.xreadgroup(group, consumer, streams={stream: ">"}) == expected
# xreadgroup with noack does not have any items in the PEL
r.xgroup_destroy(stream, group)
- r.xgroup_create(stream, group, '0')
- assert len(r.xreadgroup(group, consumer, streams={stream: '>'},
- noack=True)[0][1]) == 2
+ r.xgroup_create(stream, group, "0")
+ assert (
+ len(r.xreadgroup(group, consumer, streams={stream: ">"}, noack=True)[0][1])
+ == 2
+ )
# now there should be nothing pending
- assert len(r.xreadgroup(group, consumer,
- streams={stream: '0'})[0][1]) == 0
+ assert len(r.xreadgroup(group, consumer, streams={stream: "0"})[0][1]) == 0
r.xgroup_destroy(stream, group)
- r.xgroup_create(stream, group, '0')
+ r.xgroup_create(stream, group, "0")
# delete all the messages in the stream
expected = [
[
@@ -3585,20 +3852,20 @@ class TestRedisCommands:
[
(m1, {}),
(m2, {}),
- ]
+ ],
]
]
- r.xreadgroup(group, consumer, streams={stream: '>'})
+ r.xreadgroup(group, consumer, streams={stream: ">"})
r.xtrim(stream, 0)
- assert r.xreadgroup(group, consumer, streams={stream: '0'}) == expected
+ assert r.xreadgroup(group, consumer, streams={stream: "0"}) == expected
- @skip_if_server_version_lt('5.0.0')
+ @skip_if_server_version_lt("5.0.0")
def test_xrevrange(self, r):
- stream = 'stream'
- m1 = r.xadd(stream, {'foo': 'bar'})
- m2 = r.xadd(stream, {'foo': 'bar'})
- m3 = r.xadd(stream, {'foo': 'bar'})
- m4 = r.xadd(stream, {'foo': 'bar'})
+ stream = "stream"
+ m1 = r.xadd(stream, {"foo": "bar"})
+ m2 = r.xadd(stream, {"foo": "bar"})
+ m3 = r.xadd(stream, {"foo": "bar"})
+ m4 = r.xadd(stream, {"foo": "bar"})
def get_ids(results):
return [result[0] for result in results]
@@ -3615,17 +3882,17 @@ class TestRedisCommands:
results = r.xrevrange(stream, min=m2, count=1)
assert get_ids(results) == [m4]
- @skip_if_server_version_lt('5.0.0')
+ @skip_if_server_version_lt("5.0.0")
def test_xtrim(self, r):
- stream = 'stream'
+ stream = "stream"
# trimming an empty key doesn't do anything
assert r.xtrim(stream, 1000) == 0
- r.xadd(stream, {'foo': 'bar'})
- r.xadd(stream, {'foo': 'bar'})
- r.xadd(stream, {'foo': 'bar'})
- r.xadd(stream, {'foo': 'bar'})
+ r.xadd(stream, {"foo": "bar"})
+ r.xadd(stream, {"foo": "bar"})
+ r.xadd(stream, {"foo": "bar"})
+ r.xadd(stream, {"foo": "bar"})
# trimming an amount large than the number of messages
# doesn't do anything
@@ -3634,14 +3901,14 @@ class TestRedisCommands:
# 1 message is trimmed
assert r.xtrim(stream, 3, approximate=False) == 1
- @skip_if_server_version_lt('6.2.4')
+ @skip_if_server_version_lt("6.2.4")
def test_xtrim_minlen_and_length_args(self, r):
- stream = 'stream'
+ stream = "stream"
- r.xadd(stream, {'foo': 'bar'})
- r.xadd(stream, {'foo': 'bar'})
- r.xadd(stream, {'foo': 'bar'})
- r.xadd(stream, {'foo': 'bar'})
+ r.xadd(stream, {"foo": "bar"})
+ r.xadd(stream, {"foo": "bar"})
+ r.xadd(stream, {"foo": "bar"})
+ r.xadd(stream, {"foo": "bar"})
# Future self: No limits without approximate, according to the api
with pytest.raises(redis.ResponseError):
@@ -3655,99 +3922,105 @@ class TestRedisCommands:
assert r.xtrim(stream, maxlen=3, minid="sometestvalue")
# minid with a limit
- m1 = r.xadd(stream, {'foo': 'bar'})
- r.xadd(stream, {'foo': 'bar'})
- r.xadd(stream, {'foo': 'bar'})
- r.xadd(stream, {'foo': 'bar'})
+ m1 = r.xadd(stream, {"foo": "bar"})
+ r.xadd(stream, {"foo": "bar"})
+ r.xadd(stream, {"foo": "bar"})
+ r.xadd(stream, {"foo": "bar"})
assert r.xtrim(stream, None, approximate=True, minid=m1, limit=3) == 0
# pure minid
- r.xadd(stream, {'foo': 'bar'})
- r.xadd(stream, {'foo': 'bar'})
- r.xadd(stream, {'foo': 'bar'})
- m4 = r.xadd(stream, {'foo': 'bar'})
+ r.xadd(stream, {"foo": "bar"})
+ r.xadd(stream, {"foo": "bar"})
+ r.xadd(stream, {"foo": "bar"})
+ m4 = r.xadd(stream, {"foo": "bar"})
assert r.xtrim(stream, None, approximate=False, minid=m4) == 7
# minid approximate
- r.xadd(stream, {'foo': 'bar'})
- r.xadd(stream, {'foo': 'bar'})
- m3 = r.xadd(stream, {'foo': 'bar'})
- r.xadd(stream, {'foo': 'bar'})
+ r.xadd(stream, {"foo": "bar"})
+ r.xadd(stream, {"foo": "bar"})
+ m3 = r.xadd(stream, {"foo": "bar"})
+ r.xadd(stream, {"foo": "bar"})
assert r.xtrim(stream, None, approximate=True, minid=m3) == 0
def test_bitfield_operations(self, r):
# comments show affected bits
- bf = r.bitfield('a')
- resp = (bf
- .set('u8', 8, 255) # 00000000 11111111
- .get('u8', 0) # 00000000
- .get('u4', 8) # 1111
- .get('u4', 12) # 1111
- .get('u4', 13) # 111 0
- .execute())
+ bf = r.bitfield("a")
+ resp = (
+ bf.set("u8", 8, 255) # 00000000 11111111
+ .get("u8", 0) # 00000000
+ .get("u4", 8) # 1111
+ .get("u4", 12) # 1111
+ .get("u4", 13) # 111 0
+ .execute()
+ )
assert resp == [0, 0, 15, 15, 14]
# .set() returns the previous value...
- resp = (bf
- .set('u8', 4, 1) # 0000 0001
- .get('u16', 0) # 00000000 00011111
- .set('u16', 0, 0) # 00000000 00000000
- .execute())
+ resp = (
+ bf.set("u8", 4, 1) # 0000 0001
+ .get("u16", 0) # 00000000 00011111
+ .set("u16", 0, 0) # 00000000 00000000
+ .execute()
+ )
assert resp == [15, 31, 31]
# incrby adds to the value
- resp = (bf
- .incrby('u8', 8, 254) # 00000000 11111110
- .incrby('u8', 8, 1) # 00000000 11111111
- .get('u16', 0) # 00000000 11111111
- .execute())
+ resp = (
+ bf.incrby("u8", 8, 254) # 00000000 11111110
+ .incrby("u8", 8, 1) # 00000000 11111111
+ .get("u16", 0) # 00000000 11111111
+ .execute()
+ )
assert resp == [254, 255, 255]
# Verify overflow protection works as a method:
- r.delete('a')
- resp = (bf
- .set('u8', 8, 254) # 00000000 11111110
- .overflow('fail')
- .incrby('u8', 8, 2) # incrby 2 would overflow, None returned
- .incrby('u8', 8, 1) # 00000000 11111111
- .incrby('u8', 8, 1) # incrby 1 would overflow, None returned
- .get('u16', 0) # 00000000 11111111
- .execute())
+ r.delete("a")
+ resp = (
+ bf.set("u8", 8, 254) # 00000000 11111110
+ .overflow("fail")
+ .incrby("u8", 8, 2) # incrby 2 would overflow, None returned
+ .incrby("u8", 8, 1) # 00000000 11111111
+ .incrby("u8", 8, 1) # incrby 1 would overflow, None returned
+ .get("u16", 0) # 00000000 11111111
+ .execute()
+ )
assert resp == [0, None, 255, None, 255]
# Verify overflow protection works as arg to incrby:
- r.delete('a')
- resp = (bf
- .set('u8', 8, 255) # 00000000 11111111
- .incrby('u8', 8, 1) # 00000000 00000000 wrap default
- .set('u8', 8, 255) # 00000000 11111111
- .incrby('u8', 8, 1, 'FAIL') # 00000000 11111111 fail
- .incrby('u8', 8, 1) # 00000000 11111111 still fail
- .get('u16', 0) # 00000000 11111111
- .execute())
+ r.delete("a")
+ resp = (
+ bf.set("u8", 8, 255) # 00000000 11111111
+ .incrby("u8", 8, 1) # 00000000 00000000 wrap default
+ .set("u8", 8, 255) # 00000000 11111111
+ .incrby("u8", 8, 1, "FAIL") # 00000000 11111111 fail
+ .incrby("u8", 8, 1) # 00000000 11111111 still fail
+ .get("u16", 0) # 00000000 11111111
+ .execute()
+ )
assert resp == [0, 0, 0, None, None, 255]
# test default default_overflow
- r.delete('a')
- bf = r.bitfield('a', default_overflow='FAIL')
- resp = (bf
- .set('u8', 8, 255) # 00000000 11111111
- .incrby('u8', 8, 1) # 00000000 11111111 fail default
- .get('u16', 0) # 00000000 11111111
- .execute())
+ r.delete("a")
+ bf = r.bitfield("a", default_overflow="FAIL")
+ resp = (
+ bf.set("u8", 8, 255) # 00000000 11111111
+ .incrby("u8", 8, 1) # 00000000 11111111 fail default
+ .get("u16", 0) # 00000000 11111111
+ .execute()
+ )
assert resp == [0, None, 255]
- @skip_if_server_version_lt('4.0.0')
+ @skip_if_server_version_lt("4.0.0")
def test_memory_help(self, r):
with pytest.raises(NotImplementedError):
r.memory_help()
- @skip_if_server_version_lt('4.0.0')
+ @skip_if_server_version_lt("4.0.0")
def test_memory_doctor(self, r):
with pytest.raises(NotImplementedError):
r.memory_doctor()
- @skip_if_server_version_lt('4.0.0')
+ @skip_if_server_version_lt("4.0.0")
def test_memory_malloc_stats(self, r):
if skip_if_redis_enterprise(None).args[0] is True:
with pytest.raises(redis.exceptions.ResponseError):
@@ -3756,11 +4029,11 @@ class TestRedisCommands:
assert r.memory_malloc_stats()
- @skip_if_server_version_lt('4.0.0')
+ @skip_if_server_version_lt("4.0.0")
def test_memory_stats(self, r):
# put a key into the current db to make sure that "db.<current-db>"
# has data
- r.set('foo', 'bar')
+ r.set("foo", "bar")
if skip_if_redis_enterprise(None).args[0] is True:
with pytest.raises(redis.exceptions.ResponseError):
@@ -3770,104 +4043,113 @@ class TestRedisCommands:
stats = r.memory_stats()
assert isinstance(stats, dict)
for key, value in stats.items():
- if key.startswith('db.'):
+ if key.startswith("db."):
assert isinstance(value, dict)
- @skip_if_server_version_lt('4.0.0')
+ @skip_if_server_version_lt("4.0.0")
def test_memory_usage(self, r):
- r.set('foo', 'bar')
- assert isinstance(r.memory_usage('foo'), int)
+ r.set("foo", "bar")
+ assert isinstance(r.memory_usage("foo"), int)
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('4.0.0')
+ @skip_if_server_version_lt("4.0.0")
@skip_if_redis_enterprise
def test_module_list(self, r):
assert isinstance(r.module_list(), list)
for x in r.module_list():
assert isinstance(x, dict)
- @skip_if_server_version_lt('2.8.13')
+ @skip_if_server_version_lt("2.8.13")
def test_command_count(self, r):
res = r.command_count()
assert isinstance(res, int)
assert res >= 100
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('2.8.13')
+ @skip_if_server_version_lt("2.8.13")
def test_command_getkeys(self, r):
- res = r.command_getkeys('MSET', 'a', 'b', 'c', 'd', 'e', 'f')
- assert res == ['a', 'c', 'e']
- res = r.command_getkeys('EVAL', '"not consulted"',
- '3', 'key1', 'key2', 'key3',
- 'arg1', 'arg2', 'arg3', 'argN')
- assert res == ['key1', 'key2', 'key3']
-
- @skip_if_server_version_lt('2.8.13')
+ res = r.command_getkeys("MSET", "a", "b", "c", "d", "e", "f")
+ assert res == ["a", "c", "e"]
+ res = r.command_getkeys(
+ "EVAL",
+ '"not consulted"',
+ "3",
+ "key1",
+ "key2",
+ "key3",
+ "arg1",
+ "arg2",
+ "arg3",
+ "argN",
+ )
+ assert res == ["key1", "key2", "key3"]
+
+ @skip_if_server_version_lt("2.8.13")
def test_command(self, r):
res = r.command()
assert len(res) >= 100
cmds = list(res.keys())
- assert 'set' in cmds
- assert 'get' in cmds
+ assert "set" in cmds
+ assert "get" in cmds
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('4.0.0')
+ @skip_if_server_version_lt("4.0.0")
@skip_if_redis_enterprise
def test_module(self, r):
with pytest.raises(redis.exceptions.ModuleError) as excinfo:
- r.module_load('/some/fake/path')
+ r.module_load("/some/fake/path")
assert "Error loading the extension." in str(excinfo.value)
with pytest.raises(redis.exceptions.ModuleError) as excinfo:
- r.module_load('/some/fake/path', 'arg1', 'arg2', 'arg3', 'arg4')
+ r.module_load("/some/fake/path", "arg1", "arg2", "arg3", "arg4")
assert "Error loading the extension." in str(excinfo.value)
- @skip_if_server_version_lt('2.6.0')
+ @skip_if_server_version_lt("2.6.0")
def test_restore(self, r):
# standard restore
- key = 'foo'
- r.set(key, 'bar')
+ key = "foo"
+ r.set(key, "bar")
dumpdata = r.dump(key)
r.delete(key)
assert r.restore(key, 0, dumpdata)
- assert r.get(key) == b'bar'
+ assert r.get(key) == b"bar"
# overwrite restore
with pytest.raises(redis.exceptions.ResponseError):
assert r.restore(key, 0, dumpdata)
- r.set(key, 'a new value!')
+ r.set(key, "a new value!")
assert r.restore(key, 0, dumpdata, replace=True)
- assert r.get(key) == b'bar'
+ assert r.get(key) == b"bar"
# ttl check
- key2 = 'another'
- r.set(key2, 'blee!')
+ key2 = "another"
+ r.set(key2, "blee!")
dumpdata = r.dump(key2)
r.delete(key2)
assert r.restore(key2, 0, dumpdata)
assert r.ttl(key2) == -1
- @skip_if_server_version_lt('5.0.0')
+ @skip_if_server_version_lt("5.0.0")
def test_restore_idletime(self, r):
- key = 'yayakey'
- r.set(key, 'blee!')
+ key = "yayakey"
+ r.set(key, "blee!")
dumpdata = r.dump(key)
r.delete(key)
assert r.restore(key, 0, dumpdata, idletime=5)
- assert r.get(key) == b'blee!'
+ assert r.get(key) == b"blee!"
- @skip_if_server_version_lt('5.0.0')
+ @skip_if_server_version_lt("5.0.0")
def test_restore_frequency(self, r):
- key = 'yayakey'
- r.set(key, 'blee!')
+ key = "yayakey"
+ r.set(key, "blee!")
dumpdata = r.dump(key)
r.delete(key)
assert r.restore(key, 0, dumpdata, frequency=5)
- assert r.get(key) == b'blee!'
+ assert r.get(key) == b"blee!"
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('5.0.0')
+ @skip_if_server_version_lt("5.0.0")
@skip_if_redis_enterprise
def test_replicaof(self, r):
with pytest.raises(redis.ResponseError):
@@ -3877,36 +4159,38 @@ class TestRedisCommands:
@pytest.mark.onlynoncluster
class TestBinarySave:
-
def test_binary_get_set(self, r):
- assert r.set(' foo bar ', '123')
- assert r.get(' foo bar ') == b'123'
+ assert r.set(" foo bar ", "123")
+ assert r.get(" foo bar ") == b"123"
- assert r.set(' foo\r\nbar\r\n ', '456')
- assert r.get(' foo\r\nbar\r\n ') == b'456'
+ assert r.set(" foo\r\nbar\r\n ", "456")
+ assert r.get(" foo\r\nbar\r\n ") == b"456"
- assert r.set(' \r\n\t\x07\x13 ', '789')
- assert r.get(' \r\n\t\x07\x13 ') == b'789'
+ assert r.set(" \r\n\t\x07\x13 ", "789")
+ assert r.get(" \r\n\t\x07\x13 ") == b"789"
- assert sorted(r.keys('*')) == \
- [b' \r\n\t\x07\x13 ', b' foo\r\nbar\r\n ', b' foo bar ']
+ assert sorted(r.keys("*")) == [
+ b" \r\n\t\x07\x13 ",
+ b" foo\r\nbar\r\n ",
+ b" foo bar ",
+ ]
- assert r.delete(' foo bar ')
- assert r.delete(' foo\r\nbar\r\n ')
- assert r.delete(' \r\n\t\x07\x13 ')
+ assert r.delete(" foo bar ")
+ assert r.delete(" foo\r\nbar\r\n ")
+ assert r.delete(" \r\n\t\x07\x13 ")
def test_binary_lists(self, r):
mapping = {
- b'foo bar': [b'1', b'2', b'3'],
- b'foo\r\nbar\r\n': [b'4', b'5', b'6'],
- b'foo\tbar\x07': [b'7', b'8', b'9'],
+ b"foo bar": [b"1", b"2", b"3"],
+ b"foo\r\nbar\r\n": [b"4", b"5", b"6"],
+ b"foo\tbar\x07": [b"7", b"8", b"9"],
}
# fill in lists
for key, value in mapping.items():
r.rpush(key, *value)
# check that KEYS returns all the keys as they are
- assert sorted(r.keys('*')) == sorted(mapping.keys())
+ assert sorted(r.keys("*")) == sorted(mapping.keys())
# check that it is possible to get list content by key name
for key, value in mapping.items():
@@ -3917,42 +4201,44 @@ class TestBinarySave:
Older Redis versions contained 'allocation_stats' in INFO that
was the cause of a number of bugs when parsing.
"""
- info = "allocation_stats:6=1,7=1,8=7141,9=180,10=92,11=116,12=5330," \
- "13=123,14=3091,15=11048,16=225842,17=1784,18=814,19=12020," \
- "20=2530,21=645,22=15113,23=8695,24=142860,25=318,26=3303," \
- "27=20561,28=54042,29=37390,30=1884,31=18071,32=31367,33=160," \
- "34=169,35=201,36=10155,37=1045,38=15078,39=22985,40=12523," \
- "41=15588,42=265,43=1287,44=142,45=382,46=945,47=426,48=171," \
- "49=56,50=516,51=43,52=41,53=46,54=54,55=75,56=647,57=332," \
- "58=32,59=39,60=48,61=35,62=62,63=32,64=221,65=26,66=30," \
- "67=36,68=41,69=44,70=26,71=144,72=169,73=24,74=37,75=25," \
- "76=42,77=21,78=126,79=374,80=27,81=40,82=43,83=47,84=46," \
- "85=114,86=34,87=37,88=7240,89=34,90=38,91=18,92=99,93=20," \
- "94=18,95=17,96=15,97=22,98=18,99=69,100=17,101=22,102=15," \
- "103=29,104=39,105=30,106=70,107=22,108=21,109=26,110=52," \
- "111=45,112=33,113=67,114=41,115=44,116=48,117=53,118=54," \
- "119=51,120=75,121=44,122=57,123=44,124=66,125=56,126=52," \
- "127=81,128=108,129=70,130=50,131=51,132=53,133=45,134=62," \
- "135=12,136=13,137=7,138=15,139=21,140=11,141=20,142=6,143=7," \
- "144=11,145=6,146=16,147=19,148=1112,149=1,151=83,154=1," \
- "155=1,156=1,157=1,160=1,161=1,162=2,166=1,169=1,170=1,171=2," \
- "172=1,174=1,176=2,177=9,178=34,179=73,180=30,181=1,185=3," \
- "187=1,188=1,189=1,192=1,196=1,198=1,200=1,201=1,204=1,205=1," \
- "207=1,208=1,209=1,214=2,215=31,216=78,217=28,218=5,219=2," \
- "220=1,222=1,225=1,227=1,234=1,242=1,250=1,252=1,253=1," \
- ">=256=203"
+ info = (
+ "allocation_stats:6=1,7=1,8=7141,9=180,10=92,11=116,12=5330,"
+ "13=123,14=3091,15=11048,16=225842,17=1784,18=814,19=12020,"
+ "20=2530,21=645,22=15113,23=8695,24=142860,25=318,26=3303,"
+ "27=20561,28=54042,29=37390,30=1884,31=18071,32=31367,33=160,"
+ "34=169,35=201,36=10155,37=1045,38=15078,39=22985,40=12523,"
+ "41=15588,42=265,43=1287,44=142,45=382,46=945,47=426,48=171,"
+ "49=56,50=516,51=43,52=41,53=46,54=54,55=75,56=647,57=332,"
+ "58=32,59=39,60=48,61=35,62=62,63=32,64=221,65=26,66=30,"
+ "67=36,68=41,69=44,70=26,71=144,72=169,73=24,74=37,75=25,"
+ "76=42,77=21,78=126,79=374,80=27,81=40,82=43,83=47,84=46,"
+ "85=114,86=34,87=37,88=7240,89=34,90=38,91=18,92=99,93=20,"
+ "94=18,95=17,96=15,97=22,98=18,99=69,100=17,101=22,102=15,"
+ "103=29,104=39,105=30,106=70,107=22,108=21,109=26,110=52,"
+ "111=45,112=33,113=67,114=41,115=44,116=48,117=53,118=54,"
+ "119=51,120=75,121=44,122=57,123=44,124=66,125=56,126=52,"
+ "127=81,128=108,129=70,130=50,131=51,132=53,133=45,134=62,"
+ "135=12,136=13,137=7,138=15,139=21,140=11,141=20,142=6,143=7,"
+ "144=11,145=6,146=16,147=19,148=1112,149=1,151=83,154=1,"
+ "155=1,156=1,157=1,160=1,161=1,162=2,166=1,169=1,170=1,171=2,"
+ "172=1,174=1,176=2,177=9,178=34,179=73,180=30,181=1,185=3,"
+ "187=1,188=1,189=1,192=1,196=1,198=1,200=1,201=1,204=1,205=1,"
+ "207=1,208=1,209=1,214=2,215=31,216=78,217=28,218=5,219=2,"
+ "220=1,222=1,225=1,227=1,234=1,242=1,250=1,252=1,253=1,"
+ ">=256=203"
+ )
parsed = parse_info(info)
- assert 'allocation_stats' in parsed
- assert '6' in parsed['allocation_stats']
- assert '>=256' in parsed['allocation_stats']
+ assert "allocation_stats" in parsed
+ assert "6" in parsed["allocation_stats"]
+ assert ">=256" in parsed["allocation_stats"]
@skip_if_redis_enterprise
def test_large_responses(self, r):
"The PythonParser has some special cases for return values > 1MB"
# load up 5MB of data into a key
- data = ''.join([ascii_letters] * (5000000 // len(ascii_letters)))
- r['a'] = data
- assert r['a'] == data.encode()
+ data = "".join([ascii_letters] * (5000000 // len(ascii_letters)))
+ r["a"] = data
+ assert r["a"] == data.encode()
def test_floating_point_encoding(self, r):
"""
@@ -3960,5 +4246,5 @@ class TestBinarySave:
precision.
"""
timestamp = 1349673917.939762
- r.zadd('a', {'a1': timestamp})
- assert r.zscore('a', 'a1') == timestamp
+ r.zadd("a", {"a1": timestamp})
+ assert r.zscore("a", "a1") == timestamp
diff --git a/tests/test_connection.py b/tests/test_connection.py
index 0071aca..22f1b71 100644
--- a/tests/test_connection.py
+++ b/tests/test_connection.py
@@ -1,37 +1,40 @@
-from unittest import mock
import types
+from unittest import mock
+
import pytest
from redis.exceptions import InvalidResponse
from redis.utils import HIREDIS_AVAILABLE
+
from .conftest import skip_if_server_version_lt
-@pytest.mark.skipif(HIREDIS_AVAILABLE, reason='PythonParser only')
+@pytest.mark.skipif(HIREDIS_AVAILABLE, reason="PythonParser only")
@pytest.mark.onlynoncluster
def test_invalid_response(r):
- raw = b'x'
+ raw = b"x"
parser = r.connection._parser
- with mock.patch.object(parser._buffer, 'readline', return_value=raw):
+ with mock.patch.object(parser._buffer, "readline", return_value=raw):
with pytest.raises(InvalidResponse) as cm:
parser.read_response()
- assert str(cm.value) == f'Protocol Error: {raw!r}'
+ assert str(cm.value) == f"Protocol Error: {raw!r}"
-@skip_if_server_version_lt('4.0.0')
+@skip_if_server_version_lt("4.0.0")
@pytest.mark.redismod
def test_loading_external_modules(modclient):
def inner():
pass
- modclient.load_external_module('myfuncname', inner)
- assert getattr(modclient, 'myfuncname') == inner
- assert isinstance(getattr(modclient, 'myfuncname'), types.FunctionType)
+ modclient.load_external_module("myfuncname", inner)
+ assert getattr(modclient, "myfuncname") == inner
+ assert isinstance(getattr(modclient, "myfuncname"), types.FunctionType)
# and call it
from redis.commands import RedisModuleCommands
+
j = RedisModuleCommands.json
- modclient.load_external_module('sometestfuncname', j)
+ modclient.load_external_module("sometestfuncname", j)
# d = {'hello': 'world!'}
# mod = j(modclient)
diff --git a/tests/test_connection_pool.py b/tests/test_connection_pool.py
index 288d43d..2602af8 100644
--- a/tests/test_connection_pool.py
+++ b/tests/test_connection_pool.py
@@ -1,17 +1,15 @@
import os
-import pytest
import re
-import redis
import time
+from threading import Thread
from unittest import mock
-from threading import Thread
+import pytest
+
+import redis
from redis.connection import ssl_available, to_bool
-from .conftest import (
- skip_if_server_version_lt,
- skip_if_redis_enterprise,
- _get_client
-)
+
+from .conftest import _get_client, skip_if_redis_enterprise, skip_if_server_version_lt
from .test_pubsub import wait_for_message
@@ -30,107 +28,122 @@ class DummyConnection:
class TestConnectionPool:
- def get_pool(self, connection_kwargs=None, max_connections=None,
- connection_class=redis.Connection):
+ def get_pool(
+ self,
+ connection_kwargs=None,
+ max_connections=None,
+ connection_class=redis.Connection,
+ ):
connection_kwargs = connection_kwargs or {}
pool = redis.ConnectionPool(
connection_class=connection_class,
max_connections=max_connections,
- **connection_kwargs)
+ **connection_kwargs,
+ )
return pool
def test_connection_creation(self):
- connection_kwargs = {'foo': 'bar', 'biz': 'baz'}
- pool = self.get_pool(connection_kwargs=connection_kwargs,
- connection_class=DummyConnection)
- connection = pool.get_connection('_')
+ connection_kwargs = {"foo": "bar", "biz": "baz"}
+ pool = self.get_pool(
+ connection_kwargs=connection_kwargs, connection_class=DummyConnection
+ )
+ connection = pool.get_connection("_")
assert isinstance(connection, DummyConnection)
assert connection.kwargs == connection_kwargs
def test_multiple_connections(self, master_host):
- connection_kwargs = {'host': master_host[0], 'port': master_host[1]}
+ connection_kwargs = {"host": master_host[0], "port": master_host[1]}
pool = self.get_pool(connection_kwargs=connection_kwargs)
- c1 = pool.get_connection('_')
- c2 = pool.get_connection('_')
+ c1 = pool.get_connection("_")
+ c2 = pool.get_connection("_")
assert c1 != c2
def test_max_connections(self, master_host):
- connection_kwargs = {'host': master_host[0], 'port': master_host[1]}
- pool = self.get_pool(max_connections=2,
- connection_kwargs=connection_kwargs)
- pool.get_connection('_')
- pool.get_connection('_')
+ connection_kwargs = {"host": master_host[0], "port": master_host[1]}
+ pool = self.get_pool(max_connections=2, connection_kwargs=connection_kwargs)
+ pool.get_connection("_")
+ pool.get_connection("_")
with pytest.raises(redis.ConnectionError):
- pool.get_connection('_')
+ pool.get_connection("_")
def test_reuse_previously_released_connection(self, master_host):
- connection_kwargs = {'host': master_host[0], 'port': master_host[1]}
+ connection_kwargs = {"host": master_host[0], "port": master_host[1]}
pool = self.get_pool(connection_kwargs=connection_kwargs)
- c1 = pool.get_connection('_')
+ c1 = pool.get_connection("_")
pool.release(c1)
- c2 = pool.get_connection('_')
+ c2 = pool.get_connection("_")
assert c1 == c2
def test_repr_contains_db_info_tcp(self):
connection_kwargs = {
- 'host': 'localhost',
- 'port': 6379,
- 'db': 1,
- 'client_name': 'test-client'
+ "host": "localhost",
+ "port": 6379,
+ "db": 1,
+ "client_name": "test-client",
}
- pool = self.get_pool(connection_kwargs=connection_kwargs,
- connection_class=redis.Connection)
- expected = ('ConnectionPool<Connection<'
- 'host=localhost,port=6379,db=1,client_name=test-client>>')
+ pool = self.get_pool(
+ connection_kwargs=connection_kwargs, connection_class=redis.Connection
+ )
+ expected = (
+ "ConnectionPool<Connection<"
+ "host=localhost,port=6379,db=1,client_name=test-client>>"
+ )
assert repr(pool) == expected
def test_repr_contains_db_info_unix(self):
- connection_kwargs = {
- 'path': '/abc',
- 'db': 1,
- 'client_name': 'test-client'
- }
- pool = self.get_pool(connection_kwargs=connection_kwargs,
- connection_class=redis.UnixDomainSocketConnection)
- expected = ('ConnectionPool<UnixDomainSocketConnection<'
- 'path=/abc,db=1,client_name=test-client>>')
+ connection_kwargs = {"path": "/abc", "db": 1, "client_name": "test-client"}
+ pool = self.get_pool(
+ connection_kwargs=connection_kwargs,
+ connection_class=redis.UnixDomainSocketConnection,
+ )
+ expected = (
+ "ConnectionPool<UnixDomainSocketConnection<"
+ "path=/abc,db=1,client_name=test-client>>"
+ )
assert repr(pool) == expected
class TestBlockingConnectionPool:
def get_pool(self, connection_kwargs=None, max_connections=10, timeout=20):
connection_kwargs = connection_kwargs or {}
- pool = redis.BlockingConnectionPool(connection_class=DummyConnection,
- max_connections=max_connections,
- timeout=timeout,
- **connection_kwargs)
+ pool = redis.BlockingConnectionPool(
+ connection_class=DummyConnection,
+ max_connections=max_connections,
+ timeout=timeout,
+ **connection_kwargs,
+ )
return pool
def test_connection_creation(self, master_host):
- connection_kwargs = {'foo': 'bar', 'biz': 'baz',
- 'host': master_host[0], 'port': master_host[1]}
+ connection_kwargs = {
+ "foo": "bar",
+ "biz": "baz",
+ "host": master_host[0],
+ "port": master_host[1],
+ }
pool = self.get_pool(connection_kwargs=connection_kwargs)
- connection = pool.get_connection('_')
+ connection = pool.get_connection("_")
assert isinstance(connection, DummyConnection)
assert connection.kwargs == connection_kwargs
def test_multiple_connections(self, master_host):
- connection_kwargs = {'host': master_host[0], 'port': master_host[1]}
+ connection_kwargs = {"host": master_host[0], "port": master_host[1]}
pool = self.get_pool(connection_kwargs=connection_kwargs)
- c1 = pool.get_connection('_')
- c2 = pool.get_connection('_')
+ c1 = pool.get_connection("_")
+ c2 = pool.get_connection("_")
assert c1 != c2
def test_connection_pool_blocks_until_timeout(self, master_host):
"When out of connections, block for timeout seconds, then raise"
- connection_kwargs = {'host': master_host[0], 'port': master_host[1]}
- pool = self.get_pool(max_connections=1, timeout=0.1,
- connection_kwargs=connection_kwargs)
- pool.get_connection('_')
+ connection_kwargs = {"host": master_host[0], "port": master_host[1]}
+ pool = self.get_pool(
+ max_connections=1, timeout=0.1, connection_kwargs=connection_kwargs
+ )
+ pool.get_connection("_")
start = time.time()
with pytest.raises(redis.ConnectionError):
- pool.get_connection('_')
+ pool.get_connection("_")
# we should have waited at least 0.1 seconds
assert time.time() - start >= 0.1
@@ -139,10 +152,11 @@ class TestBlockingConnectionPool:
When out of connections, block until another connection is released
to the pool
"""
- connection_kwargs = {'host': master_host[0], 'port': master_host[1]}
- pool = self.get_pool(max_connections=1, timeout=2,
- connection_kwargs=connection_kwargs)
- c1 = pool.get_connection('_')
+ connection_kwargs = {"host": master_host[0], "port": master_host[1]}
+ pool = self.get_pool(
+ max_connections=1, timeout=2, connection_kwargs=connection_kwargs
+ )
+ c1 = pool.get_connection("_")
def target():
time.sleep(0.1)
@@ -150,294 +164,295 @@ class TestBlockingConnectionPool:
start = time.time()
Thread(target=target).start()
- pool.get_connection('_')
+ pool.get_connection("_")
assert time.time() - start >= 0.1
def test_reuse_previously_released_connection(self, master_host):
- connection_kwargs = {'host': master_host[0], 'port': master_host[1]}
+ connection_kwargs = {"host": master_host[0], "port": master_host[1]}
pool = self.get_pool(connection_kwargs=connection_kwargs)
- c1 = pool.get_connection('_')
+ c1 = pool.get_connection("_")
pool.release(c1)
- c2 = pool.get_connection('_')
+ c2 = pool.get_connection("_")
assert c1 == c2
def test_repr_contains_db_info_tcp(self):
pool = redis.ConnectionPool(
- host='localhost',
- port=6379,
- client_name='test-client'
+ host="localhost", port=6379, client_name="test-client"
+ )
+ expected = (
+ "ConnectionPool<Connection<"
+ "host=localhost,port=6379,db=0,client_name=test-client>>"
)
- expected = ('ConnectionPool<Connection<'
- 'host=localhost,port=6379,db=0,client_name=test-client>>')
assert repr(pool) == expected
def test_repr_contains_db_info_unix(self):
pool = redis.ConnectionPool(
connection_class=redis.UnixDomainSocketConnection,
- path='abc',
- client_name='test-client'
+ path="abc",
+ client_name="test-client",
+ )
+ expected = (
+ "ConnectionPool<UnixDomainSocketConnection<"
+ "path=abc,db=0,client_name=test-client>>"
)
- expected = ('ConnectionPool<UnixDomainSocketConnection<'
- 'path=abc,db=0,client_name=test-client>>')
assert repr(pool) == expected
class TestConnectionPoolURLParsing:
def test_hostname(self):
- pool = redis.ConnectionPool.from_url('redis://my.host')
+ pool = redis.ConnectionPool.from_url("redis://my.host")
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
- 'host': 'my.host',
+ "host": "my.host",
}
def test_quoted_hostname(self):
- pool = redis.ConnectionPool.from_url('redis://my %2F host %2B%3D+')
+ pool = redis.ConnectionPool.from_url("redis://my %2F host %2B%3D+")
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
- 'host': 'my / host +=+',
+ "host": "my / host +=+",
}
def test_port(self):
- pool = redis.ConnectionPool.from_url('redis://localhost:6380')
+ pool = redis.ConnectionPool.from_url("redis://localhost:6380")
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
- 'host': 'localhost',
- 'port': 6380,
+ "host": "localhost",
+ "port": 6380,
}
@skip_if_server_version_lt("6.0.0")
def test_username(self):
- pool = redis.ConnectionPool.from_url('redis://myuser:@localhost')
+ pool = redis.ConnectionPool.from_url("redis://myuser:@localhost")
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
- 'host': 'localhost',
- 'username': 'myuser',
+ "host": "localhost",
+ "username": "myuser",
}
@skip_if_server_version_lt("6.0.0")
def test_quoted_username(self):
pool = redis.ConnectionPool.from_url(
- 'redis://%2Fmyuser%2F%2B name%3D%24+:@localhost')
+ "redis://%2Fmyuser%2F%2B name%3D%24+:@localhost"
+ )
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
- 'host': 'localhost',
- 'username': '/myuser/+ name=$+',
+ "host": "localhost",
+ "username": "/myuser/+ name=$+",
}
def test_password(self):
- pool = redis.ConnectionPool.from_url('redis://:mypassword@localhost')
+ pool = redis.ConnectionPool.from_url("redis://:mypassword@localhost")
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
- 'host': 'localhost',
- 'password': 'mypassword',
+ "host": "localhost",
+ "password": "mypassword",
}
def test_quoted_password(self):
pool = redis.ConnectionPool.from_url(
- 'redis://:%2Fmypass%2F%2B word%3D%24+@localhost')
+ "redis://:%2Fmypass%2F%2B word%3D%24+@localhost"
+ )
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
- 'host': 'localhost',
- 'password': '/mypass/+ word=$+',
+ "host": "localhost",
+ "password": "/mypass/+ word=$+",
}
@skip_if_server_version_lt("6.0.0")
def test_username_and_password(self):
- pool = redis.ConnectionPool.from_url('redis://myuser:mypass@localhost')
+ pool = redis.ConnectionPool.from_url("redis://myuser:mypass@localhost")
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
- 'host': 'localhost',
- 'username': 'myuser',
- 'password': 'mypass',
+ "host": "localhost",
+ "username": "myuser",
+ "password": "mypass",
}
def test_db_as_argument(self):
- pool = redis.ConnectionPool.from_url('redis://localhost', db=1)
+ pool = redis.ConnectionPool.from_url("redis://localhost", db=1)
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
- 'host': 'localhost',
- 'db': 1,
+ "host": "localhost",
+ "db": 1,
}
def test_db_in_path(self):
- pool = redis.ConnectionPool.from_url('redis://localhost/2', db=1)
+ pool = redis.ConnectionPool.from_url("redis://localhost/2", db=1)
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
- 'host': 'localhost',
- 'db': 2,
+ "host": "localhost",
+ "db": 2,
}
def test_db_in_querystring(self):
- pool = redis.ConnectionPool.from_url('redis://localhost/2?db=3',
- db=1)
+ pool = redis.ConnectionPool.from_url("redis://localhost/2?db=3", db=1)
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
- 'host': 'localhost',
- 'db': 3,
+ "host": "localhost",
+ "db": 3,
}
def test_extra_typed_querystring_options(self):
pool = redis.ConnectionPool.from_url(
- 'redis://localhost/2?socket_timeout=20&socket_connect_timeout=10'
- '&socket_keepalive=&retry_on_timeout=Yes&max_connections=10'
+ "redis://localhost/2?socket_timeout=20&socket_connect_timeout=10"
+ "&socket_keepalive=&retry_on_timeout=Yes&max_connections=10"
)
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
- 'host': 'localhost',
- 'db': 2,
- 'socket_timeout': 20.0,
- 'socket_connect_timeout': 10.0,
- 'retry_on_timeout': True,
+ "host": "localhost",
+ "db": 2,
+ "socket_timeout": 20.0,
+ "socket_connect_timeout": 10.0,
+ "retry_on_timeout": True,
}
assert pool.max_connections == 10
def test_boolean_parsing(self):
for expected, value in (
- (None, None),
- (None, ''),
- (False, 0), (False, '0'),
- (False, 'f'), (False, 'F'), (False, 'False'),
- (False, 'n'), (False, 'N'), (False, 'No'),
- (True, 1), (True, '1'),
- (True, 'y'), (True, 'Y'), (True, 'Yes'),
+ (None, None),
+ (None, ""),
+ (False, 0),
+ (False, "0"),
+ (False, "f"),
+ (False, "F"),
+ (False, "False"),
+ (False, "n"),
+ (False, "N"),
+ (False, "No"),
+ (True, 1),
+ (True, "1"),
+ (True, "y"),
+ (True, "Y"),
+ (True, "Yes"),
):
assert expected is to_bool(value)
def test_client_name_in_querystring(self):
- pool = redis.ConnectionPool.from_url(
- 'redis://location?client_name=test-client'
- )
- assert pool.connection_kwargs['client_name'] == 'test-client'
+ pool = redis.ConnectionPool.from_url("redis://location?client_name=test-client")
+ assert pool.connection_kwargs["client_name"] == "test-client"
def test_invalid_extra_typed_querystring_options(self):
with pytest.raises(ValueError):
redis.ConnectionPool.from_url(
- 'redis://localhost/2?socket_timeout=_&'
- 'socket_connect_timeout=abc'
+                "redis://localhost/2?socket_timeout=_&socket_connect_timeout=abc"
)
def test_extra_querystring_options(self):
- pool = redis.ConnectionPool.from_url('redis://localhost?a=1&b=2')
+ pool = redis.ConnectionPool.from_url("redis://localhost?a=1&b=2")
assert pool.connection_class == redis.Connection
- assert pool.connection_kwargs == {
- 'host': 'localhost',
- 'a': '1',
- 'b': '2'
- }
+ assert pool.connection_kwargs == {"host": "localhost", "a": "1", "b": "2"}
def test_calling_from_subclass_returns_correct_instance(self):
- pool = redis.BlockingConnectionPool.from_url('redis://localhost')
+ pool = redis.BlockingConnectionPool.from_url("redis://localhost")
assert isinstance(pool, redis.BlockingConnectionPool)
def test_client_creates_connection_pool(self):
- r = redis.Redis.from_url('redis://myhost')
+ r = redis.Redis.from_url("redis://myhost")
assert r.connection_pool.connection_class == redis.Connection
assert r.connection_pool.connection_kwargs == {
- 'host': 'myhost',
+ "host": "myhost",
}
def test_invalid_scheme_raises_error(self):
with pytest.raises(ValueError) as cm:
- redis.ConnectionPool.from_url('localhost')
+ redis.ConnectionPool.from_url("localhost")
assert str(cm.value) == (
- 'Redis URL must specify one of the following schemes '
- '(redis://, rediss://, unix://)'
+ "Redis URL must specify one of the following schemes "
+ "(redis://, rediss://, unix://)"
)
class TestConnectionPoolUnixSocketURLParsing:
def test_defaults(self):
- pool = redis.ConnectionPool.from_url('unix:///socket')
+ pool = redis.ConnectionPool.from_url("unix:///socket")
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
- 'path': '/socket',
+ "path": "/socket",
}
@skip_if_server_version_lt("6.0.0")
def test_username(self):
- pool = redis.ConnectionPool.from_url('unix://myuser:@/socket')
+ pool = redis.ConnectionPool.from_url("unix://myuser:@/socket")
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
- 'path': '/socket',
- 'username': 'myuser',
+ "path": "/socket",
+ "username": "myuser",
}
@skip_if_server_version_lt("6.0.0")
def test_quoted_username(self):
pool = redis.ConnectionPool.from_url(
- 'unix://%2Fmyuser%2F%2B name%3D%24+:@/socket')
+ "unix://%2Fmyuser%2F%2B name%3D%24+:@/socket"
+ )
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
- 'path': '/socket',
- 'username': '/myuser/+ name=$+',
+ "path": "/socket",
+ "username": "/myuser/+ name=$+",
}
def test_password(self):
- pool = redis.ConnectionPool.from_url('unix://:mypassword@/socket')
+ pool = redis.ConnectionPool.from_url("unix://:mypassword@/socket")
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
- 'path': '/socket',
- 'password': 'mypassword',
+ "path": "/socket",
+ "password": "mypassword",
}
def test_quoted_password(self):
pool = redis.ConnectionPool.from_url(
- 'unix://:%2Fmypass%2F%2B word%3D%24+@/socket')
+ "unix://:%2Fmypass%2F%2B word%3D%24+@/socket"
+ )
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
- 'path': '/socket',
- 'password': '/mypass/+ word=$+',
+ "path": "/socket",
+ "password": "/mypass/+ word=$+",
}
def test_quoted_path(self):
pool = redis.ConnectionPool.from_url(
- 'unix://:mypassword@/my%2Fpath%2Fto%2F..%2F+_%2B%3D%24ocket')
+ "unix://:mypassword@/my%2Fpath%2Fto%2F..%2F+_%2B%3D%24ocket"
+ )
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
- 'path': '/my/path/to/../+_+=$ocket',
- 'password': 'mypassword',
+ "path": "/my/path/to/../+_+=$ocket",
+ "password": "mypassword",
}
def test_db_as_argument(self):
- pool = redis.ConnectionPool.from_url('unix:///socket', db=1)
+ pool = redis.ConnectionPool.from_url("unix:///socket", db=1)
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
- 'path': '/socket',
- 'db': 1,
+ "path": "/socket",
+ "db": 1,
}
def test_db_in_querystring(self):
- pool = redis.ConnectionPool.from_url('unix:///socket?db=2', db=1)
+ pool = redis.ConnectionPool.from_url("unix:///socket?db=2", db=1)
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
- 'path': '/socket',
- 'db': 2,
+ "path": "/socket",
+ "db": 2,
}
def test_client_name_in_querystring(self):
- pool = redis.ConnectionPool.from_url(
- 'redis://location?client_name=test-client'
- )
- assert pool.connection_kwargs['client_name'] == 'test-client'
+ pool = redis.ConnectionPool.from_url("redis://location?client_name=test-client")
+ assert pool.connection_kwargs["client_name"] == "test-client"
def test_extra_querystring_options(self):
- pool = redis.ConnectionPool.from_url('unix:///socket?a=1&b=2')
+ pool = redis.ConnectionPool.from_url("unix:///socket?a=1&b=2")
assert pool.connection_class == redis.UnixDomainSocketConnection
- assert pool.connection_kwargs == {
- 'path': '/socket',
- 'a': '1',
- 'b': '2'
- }
+ assert pool.connection_kwargs == {"path": "/socket", "a": "1", "b": "2"}
@pytest.mark.skipif(not ssl_available, reason="SSL not installed")
class TestSSLConnectionURLParsing:
def test_host(self):
- pool = redis.ConnectionPool.from_url('rediss://my.host')
+ pool = redis.ConnectionPool.from_url("rediss://my.host")
assert pool.connection_class == redis.SSLConnection
assert pool.connection_kwargs == {
- 'host': 'my.host',
+ "host": "my.host",
}
def test_cert_reqs_options(self):
@@ -447,25 +462,20 @@ class TestSSLConnectionURLParsing:
def get_connection(self, *args, **kwargs):
return self.make_connection()
- pool = DummyConnectionPool.from_url(
- 'rediss://?ssl_cert_reqs=none')
- assert pool.get_connection('_').cert_reqs == ssl.CERT_NONE
+ pool = DummyConnectionPool.from_url("rediss://?ssl_cert_reqs=none")
+ assert pool.get_connection("_").cert_reqs == ssl.CERT_NONE
- pool = DummyConnectionPool.from_url(
- 'rediss://?ssl_cert_reqs=optional')
- assert pool.get_connection('_').cert_reqs == ssl.CERT_OPTIONAL
+ pool = DummyConnectionPool.from_url("rediss://?ssl_cert_reqs=optional")
+ assert pool.get_connection("_").cert_reqs == ssl.CERT_OPTIONAL
- pool = DummyConnectionPool.from_url(
- 'rediss://?ssl_cert_reqs=required')
- assert pool.get_connection('_').cert_reqs == ssl.CERT_REQUIRED
+ pool = DummyConnectionPool.from_url("rediss://?ssl_cert_reqs=required")
+ assert pool.get_connection("_").cert_reqs == ssl.CERT_REQUIRED
- pool = DummyConnectionPool.from_url(
- 'rediss://?ssl_check_hostname=False')
- assert pool.get_connection('_').check_hostname is False
+ pool = DummyConnectionPool.from_url("rediss://?ssl_check_hostname=False")
+ assert pool.get_connection("_").check_hostname is False
- pool = DummyConnectionPool.from_url(
- 'rediss://?ssl_check_hostname=True')
- assert pool.get_connection('_').check_hostname is True
+ pool = DummyConnectionPool.from_url("rediss://?ssl_check_hostname=True")
+ assert pool.get_connection("_").check_hostname is True
class TestConnection:
@@ -485,7 +495,7 @@ class TestConnection:
assert not pool._available_connections[0]._sock
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('2.8.8')
+ @skip_if_server_version_lt("2.8.8")
@skip_if_redis_enterprise
def test_busy_loading_disconnects_socket(self, r):
"""
@@ -493,11 +503,11 @@ class TestConnection:
disconnected and a BusyLoadingError raised
"""
with pytest.raises(redis.BusyLoadingError):
- r.execute_command('DEBUG', 'ERROR', 'LOADING fake message')
+ r.execute_command("DEBUG", "ERROR", "LOADING fake message")
assert not r.connection._sock
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('2.8.8')
+ @skip_if_server_version_lt("2.8.8")
@skip_if_redis_enterprise
def test_busy_loading_from_pipeline_immediate_command(self, r):
"""
@@ -506,15 +516,14 @@ class TestConnection:
"""
pipe = r.pipeline()
with pytest.raises(redis.BusyLoadingError):
- pipe.immediate_execute_command('DEBUG', 'ERROR',
- 'LOADING fake message')
+ pipe.immediate_execute_command("DEBUG", "ERROR", "LOADING fake message")
pool = r.connection_pool
assert not pipe.connection
assert len(pool._available_connections) == 1
assert not pool._available_connections[0]._sock
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('2.8.8')
+ @skip_if_server_version_lt("2.8.8")
@skip_if_redis_enterprise
def test_busy_loading_from_pipeline(self, r):
"""
@@ -522,7 +531,7 @@ class TestConnection:
regardless of the raise_on_error flag.
"""
pipe = r.pipeline()
- pipe.execute_command('DEBUG', 'ERROR', 'LOADING fake message')
+ pipe.execute_command("DEBUG", "ERROR", "LOADING fake message")
with pytest.raises(redis.BusyLoadingError):
pipe.execute()
pool = r.connection_pool
@@ -530,31 +539,31 @@ class TestConnection:
assert len(pool._available_connections) == 1
assert not pool._available_connections[0]._sock
- @skip_if_server_version_lt('2.8.8')
+ @skip_if_server_version_lt("2.8.8")
@skip_if_redis_enterprise
def test_read_only_error(self, r):
"READONLY errors get turned in ReadOnlyError exceptions"
with pytest.raises(redis.ReadOnlyError):
- r.execute_command('DEBUG', 'ERROR', 'READONLY blah blah')
+ r.execute_command("DEBUG", "ERROR", "READONLY blah blah")
def test_connect_from_url_tcp(self):
- connection = redis.Redis.from_url('redis://localhost')
+ connection = redis.Redis.from_url("redis://localhost")
pool = connection.connection_pool
- assert re.match('(.*)<(.*)<(.*)>>', repr(pool)).groups() == (
- 'ConnectionPool',
- 'Connection',
- 'host=localhost,port=6379,db=0',
+ assert re.match("(.*)<(.*)<(.*)>>", repr(pool)).groups() == (
+ "ConnectionPool",
+ "Connection",
+ "host=localhost,port=6379,db=0",
)
def test_connect_from_url_unix(self):
- connection = redis.Redis.from_url('unix:///path/to/socket')
+ connection = redis.Redis.from_url("unix:///path/to/socket")
pool = connection.connection_pool
- assert re.match('(.*)<(.*)<(.*)>>', repr(pool)).groups() == (
- 'ConnectionPool',
- 'UnixDomainSocketConnection',
- 'path=/path/to/socket,db=0',
+ assert re.match("(.*)<(.*)<(.*)>>", repr(pool)).groups() == (
+ "ConnectionPool",
+ "UnixDomainSocketConnection",
+ "path=/path/to/socket,db=0",
)
@skip_if_redis_enterprise
@@ -564,28 +573,27 @@ class TestConnection:
password but one isn't supplied.
"""
with pytest.raises(redis.AuthenticationError):
- r.execute_command('DEBUG', 'ERROR',
- 'ERR Client sent AUTH, but no password is set')
+ r.execute_command(
+ "DEBUG", "ERROR", "ERR Client sent AUTH, but no password is set"
+ )
@skip_if_redis_enterprise
def test_connect_invalid_password_supplied(self, r):
"AuthenticationError should be raised when sending the wrong password"
with pytest.raises(redis.AuthenticationError):
- r.execute_command('DEBUG', 'ERROR', 'ERR invalid password')
+ r.execute_command("DEBUG", "ERROR", "ERR invalid password")
@pytest.mark.onlynoncluster
class TestMultiConnectionClient:
@pytest.fixture()
def r(self, request):
- return _get_client(redis.Redis,
- request,
- single_connection_client=False)
+ return _get_client(redis.Redis, request, single_connection_client=False)
def test_multi_connection_command(self, r):
assert not r.connection
- assert r.set('a', '123')
- assert r.get('a') == b'123'
+ assert r.set("a", "123")
+ assert r.get("a") == b"123"
@pytest.mark.onlynoncluster
@@ -594,8 +602,7 @@ class TestHealthCheck:
@pytest.fixture()
def r(self, request):
- return _get_client(redis.Redis, request,
- health_check_interval=self.interval)
+ return _get_client(redis.Redis, request, health_check_interval=self.interval)
def assert_interval_advanced(self, connection):
diff = connection.next_health_check - time.time()
@@ -608,61 +615,66 @@ class TestHealthCheck:
def test_arbitrary_command_invokes_health_check(self, r):
# invoke a command to make sure the connection is entirely setup
- r.get('foo')
+ r.get("foo")
r.connection.next_health_check = time.time()
- with mock.patch.object(r.connection, 'send_command',
- wraps=r.connection.send_command) as m:
- r.get('foo')
- m.assert_called_with('PING', check_health=False)
+ with mock.patch.object(
+ r.connection, "send_command", wraps=r.connection.send_command
+ ) as m:
+ r.get("foo")
+ m.assert_called_with("PING", check_health=False)
self.assert_interval_advanced(r.connection)
def test_arbitrary_command_advances_next_health_check(self, r):
- r.get('foo')
+ r.get("foo")
next_health_check = r.connection.next_health_check
- r.get('foo')
+ r.get("foo")
assert next_health_check < r.connection.next_health_check
def test_health_check_not_invoked_within_interval(self, r):
- r.get('foo')
- with mock.patch.object(r.connection, 'send_command',
- wraps=r.connection.send_command) as m:
- r.get('foo')
- ping_call_spec = (('PING',), {'check_health': False})
+ r.get("foo")
+ with mock.patch.object(
+ r.connection, "send_command", wraps=r.connection.send_command
+ ) as m:
+ r.get("foo")
+ ping_call_spec = (("PING",), {"check_health": False})
assert ping_call_spec not in m.call_args_list
def test_health_check_in_pipeline(self, r):
with r.pipeline(transaction=False) as pipe:
- pipe.connection = pipe.connection_pool.get_connection('_')
+ pipe.connection = pipe.connection_pool.get_connection("_")
pipe.connection.next_health_check = 0
- with mock.patch.object(pipe.connection, 'send_command',
- wraps=pipe.connection.send_command) as m:
- responses = pipe.set('foo', 'bar').get('foo').execute()
- m.assert_any_call('PING', check_health=False)
- assert responses == [True, b'bar']
+ with mock.patch.object(
+ pipe.connection, "send_command", wraps=pipe.connection.send_command
+ ) as m:
+ responses = pipe.set("foo", "bar").get("foo").execute()
+ m.assert_any_call("PING", check_health=False)
+ assert responses == [True, b"bar"]
def test_health_check_in_transaction(self, r):
with r.pipeline(transaction=True) as pipe:
- pipe.connection = pipe.connection_pool.get_connection('_')
+ pipe.connection = pipe.connection_pool.get_connection("_")
pipe.connection.next_health_check = 0
- with mock.patch.object(pipe.connection, 'send_command',
- wraps=pipe.connection.send_command) as m:
- responses = pipe.set('foo', 'bar').get('foo').execute()
- m.assert_any_call('PING', check_health=False)
- assert responses == [True, b'bar']
+ with mock.patch.object(
+ pipe.connection, "send_command", wraps=pipe.connection.send_command
+ ) as m:
+ responses = pipe.set("foo", "bar").get("foo").execute()
+ m.assert_any_call("PING", check_health=False)
+ assert responses == [True, b"bar"]
def test_health_check_in_watched_pipeline(self, r):
- r.set('foo', 'bar')
+ r.set("foo", "bar")
with r.pipeline(transaction=False) as pipe:
- pipe.connection = pipe.connection_pool.get_connection('_')
+ pipe.connection = pipe.connection_pool.get_connection("_")
pipe.connection.next_health_check = 0
- with mock.patch.object(pipe.connection, 'send_command',
- wraps=pipe.connection.send_command) as m:
- pipe.watch('foo')
+ with mock.patch.object(
+ pipe.connection, "send_command", wraps=pipe.connection.send_command
+ ) as m:
+ pipe.watch("foo")
# the health check should be called when watching
- m.assert_called_with('PING', check_health=False)
+ m.assert_called_with("PING", check_health=False)
self.assert_interval_advanced(pipe.connection)
- assert pipe.get('foo') == b'bar'
+ assert pipe.get("foo") == b"bar"
# reset the mock to clear the call list and schedule another
# health check
@@ -670,27 +682,28 @@ class TestHealthCheck:
pipe.connection.next_health_check = 0
pipe.multi()
- responses = pipe.set('foo', 'not-bar').get('foo').execute()
- assert responses == [True, b'not-bar']
- m.assert_any_call('PING', check_health=False)
+ responses = pipe.set("foo", "not-bar").get("foo").execute()
+ assert responses == [True, b"not-bar"]
+ m.assert_any_call("PING", check_health=False)
def test_health_check_in_pubsub_before_subscribe(self, r):
"A health check happens before the first [p]subscribe"
p = r.pubsub()
- p.connection = p.connection_pool.get_connection('_')
+ p.connection = p.connection_pool.get_connection("_")
p.connection.next_health_check = 0
- with mock.patch.object(p.connection, 'send_command',
- wraps=p.connection.send_command) as m:
+ with mock.patch.object(
+ p.connection, "send_command", wraps=p.connection.send_command
+ ) as m:
assert not p.subscribed
- p.subscribe('foo')
+ p.subscribe("foo")
# the connection is not yet in pubsub mode, so the normal
# ping/pong within connection.send_command should check
# the health of the connection
- m.assert_any_call('PING', check_health=False)
+ m.assert_any_call("PING", check_health=False)
self.assert_interval_advanced(p.connection)
subscribe_message = wait_for_message(p)
- assert subscribe_message['type'] == 'subscribe'
+ assert subscribe_message["type"] == "subscribe"
def test_health_check_in_pubsub_after_subscribed(self, r):
"""
@@ -698,38 +711,38 @@ class TestHealthCheck:
connection health
"""
p = r.pubsub()
- p.connection = p.connection_pool.get_connection('_')
+ p.connection = p.connection_pool.get_connection("_")
p.connection.next_health_check = 0
- with mock.patch.object(p.connection, 'send_command',
- wraps=p.connection.send_command) as m:
- p.subscribe('foo')
+ with mock.patch.object(
+ p.connection, "send_command", wraps=p.connection.send_command
+ ) as m:
+ p.subscribe("foo")
subscribe_message = wait_for_message(p)
- assert subscribe_message['type'] == 'subscribe'
+ assert subscribe_message["type"] == "subscribe"
self.assert_interval_advanced(p.connection)
# because we weren't subscribed when sending the subscribe
# message to 'foo', the connection's standard check_health ran
# prior to subscribing.
- m.assert_any_call('PING', check_health=False)
+ m.assert_any_call("PING", check_health=False)
p.connection.next_health_check = 0
m.reset_mock()
- p.subscribe('bar')
+ p.subscribe("bar")
# the second subscribe issues exactly only command (the subscribe)
# and the health check is not invoked
- m.assert_called_once_with('SUBSCRIBE', 'bar', check_health=False)
+ m.assert_called_once_with("SUBSCRIBE", "bar", check_health=False)
# since no message has been read since the health check was
# reset, it should still be 0
assert p.connection.next_health_check == 0
subscribe_message = wait_for_message(p)
- assert subscribe_message['type'] == 'subscribe'
+ assert subscribe_message["type"] == "subscribe"
assert wait_for_message(p) is None
# now that the connection is subscribed, the pubsub health
# check should have taken over and include the HEALTH_CHECK_MESSAGE
- m.assert_any_call('PING', p.HEALTH_CHECK_MESSAGE,
- check_health=False)
+ m.assert_any_call("PING", p.HEALTH_CHECK_MESSAGE, check_health=False)
self.assert_interval_advanced(p.connection)
def test_health_check_in_pubsub_poll(self, r):
@@ -738,12 +751,13 @@ class TestHealthCheck:
check the connection's health.
"""
p = r.pubsub()
- p.connection = p.connection_pool.get_connection('_')
- with mock.patch.object(p.connection, 'send_command',
- wraps=p.connection.send_command) as m:
- p.subscribe('foo')
+ p.connection = p.connection_pool.get_connection("_")
+ with mock.patch.object(
+ p.connection, "send_command", wraps=p.connection.send_command
+ ) as m:
+ p.subscribe("foo")
subscribe_message = wait_for_message(p)
- assert subscribe_message['type'] == 'subscribe'
+ assert subscribe_message["type"] == "subscribe"
self.assert_interval_advanced(p.connection)
# polling the connection before the health check interval
@@ -759,6 +773,5 @@ class TestHealthCheck:
# should be advanced
p.connection.next_health_check = 0
assert wait_for_message(p) is None
- m.assert_called_with('PING', p.HEALTH_CHECK_MESSAGE,
- check_health=False)
+ m.assert_called_with("PING", p.HEALTH_CHECK_MESSAGE, check_health=False)
self.assert_interval_advanced(p.connection)
diff --git a/tests/test_encoding.py b/tests/test_encoding.py
index 706654f..bd0f09f 100644
--- a/tests/test_encoding.py
+++ b/tests/test_encoding.py
@@ -1,7 +1,8 @@
import pytest
-import redis
+import redis
from redis.connection import Connection
+
from .conftest import _get_client
@@ -19,62 +20,70 @@ class TestEncoding:
)
def test_simple_encoding(self, r_no_decode):
- unicode_string = chr(3456) + 'abcd' + chr(3421)
- r_no_decode['unicode-string'] = unicode_string.encode('utf-8')
- cached_val = r_no_decode['unicode-string']
+ unicode_string = chr(3456) + "abcd" + chr(3421)
+ r_no_decode["unicode-string"] = unicode_string.encode("utf-8")
+ cached_val = r_no_decode["unicode-string"]
assert isinstance(cached_val, bytes)
- assert unicode_string == cached_val.decode('utf-8')
+ assert unicode_string == cached_val.decode("utf-8")
def test_simple_encoding_and_decoding(self, r):
- unicode_string = chr(3456) + 'abcd' + chr(3421)
- r['unicode-string'] = unicode_string
- cached_val = r['unicode-string']
+ unicode_string = chr(3456) + "abcd" + chr(3421)
+ r["unicode-string"] = unicode_string
+ cached_val = r["unicode-string"]
assert isinstance(cached_val, str)
assert unicode_string == cached_val
def test_memoryview_encoding(self, r_no_decode):
- unicode_string = chr(3456) + 'abcd' + chr(3421)
- unicode_string_view = memoryview(unicode_string.encode('utf-8'))
- r_no_decode['unicode-string-memoryview'] = unicode_string_view
- cached_val = r_no_decode['unicode-string-memoryview']
+ unicode_string = chr(3456) + "abcd" + chr(3421)
+ unicode_string_view = memoryview(unicode_string.encode("utf-8"))
+ r_no_decode["unicode-string-memoryview"] = unicode_string_view
+ cached_val = r_no_decode["unicode-string-memoryview"]
# The cached value won't be a memoryview because it's a copy from Redis
assert isinstance(cached_val, bytes)
- assert unicode_string == cached_val.decode('utf-8')
+ assert unicode_string == cached_val.decode("utf-8")
def test_memoryview_encoding_and_decoding(self, r):
- unicode_string = chr(3456) + 'abcd' + chr(3421)
- unicode_string_view = memoryview(unicode_string.encode('utf-8'))
- r['unicode-string-memoryview'] = unicode_string_view
- cached_val = r['unicode-string-memoryview']
+ unicode_string = chr(3456) + "abcd" + chr(3421)
+ unicode_string_view = memoryview(unicode_string.encode("utf-8"))
+ r["unicode-string-memoryview"] = unicode_string_view
+ cached_val = r["unicode-string-memoryview"]
assert isinstance(cached_val, str)
assert unicode_string == cached_val
def test_list_encoding(self, r):
- unicode_string = chr(3456) + 'abcd' + chr(3421)
+ unicode_string = chr(3456) + "abcd" + chr(3421)
result = [unicode_string, unicode_string, unicode_string]
- r.rpush('a', *result)
- assert r.lrange('a', 0, -1) == result
+ r.rpush("a", *result)
+ assert r.lrange("a", 0, -1) == result
class TestEncodingErrors:
def test_ignore(self, request):
- r = _get_client(redis.Redis, request=request, decode_responses=True,
- encoding_errors='ignore')
- r.set('a', b'foo\xff')
- assert r.get('a') == 'foo'
+ r = _get_client(
+ redis.Redis,
+ request=request,
+ decode_responses=True,
+ encoding_errors="ignore",
+ )
+ r.set("a", b"foo\xff")
+ assert r.get("a") == "foo"
def test_replace(self, request):
- r = _get_client(redis.Redis, request=request, decode_responses=True,
- encoding_errors='replace')
- r.set('a', b'foo\xff')
- assert r.get('a') == 'foo\ufffd'
+ r = _get_client(
+ redis.Redis,
+ request=request,
+ decode_responses=True,
+ encoding_errors="replace",
+ )
+ r.set("a", b"foo\xff")
+ assert r.get("a") == "foo\ufffd"
class TestMemoryviewsAreNotPacked:
def test_memoryviews_are_not_packed(self):
c = Connection()
- arg = memoryview(b'some_arg')
- arg_list = ['SOME_COMMAND', arg]
+ arg = memoryview(b"some_arg")
+ arg_list = ["SOME_COMMAND", arg]
cmd = c.pack_command(*arg_list)
assert cmd[1] is arg
cmds = c.pack_commands([arg_list, arg_list])
@@ -85,25 +94,25 @@ class TestMemoryviewsAreNotPacked:
class TestCommandsAreNotEncoded:
@pytest.fixture()
def r(self, request):
- return _get_client(redis.Redis, request=request, encoding='utf-16')
+ return _get_client(redis.Redis, request=request, encoding="utf-16")
def test_basic_command(self, r):
- r.set('hello', 'world')
+ r.set("hello", "world")
class TestInvalidUserInput:
def test_boolean_fails(self, r):
with pytest.raises(redis.DataError):
- r.set('a', True)
+ r.set("a", True)
def test_none_fails(self, r):
with pytest.raises(redis.DataError):
- r.set('a', None)
+ r.set("a", None)
def test_user_type_fails(self, r):
class Foo:
def __str__(self):
- return 'Foo'
+ return "Foo"
with pytest.raises(redis.DataError):
- r.set('a', Foo())
+ r.set("a", Foo())
diff --git a/tests/test_helpers.py b/tests/test_helpers.py
index 402eccf..3595829 100644
--- a/tests/test_helpers.py
+++ b/tests/test_helpers.py
@@ -1,19 +1,20 @@
import string
+
from redis.commands.helpers import (
delist,
list_or_args,
nativestr,
+ parse_to_dict,
parse_to_list,
quote_string,
random_string,
- parse_to_dict
)
def test_list_or_args():
k = ["hello, world"]
a = ["some", "argument", "list"]
- assert list_or_args(k, a) == k+a
+ assert list_or_args(k, a) == k + a
for i in ["banana", b"banana"]:
assert list_or_args(i, a) == [i] + a
@@ -22,42 +23,50 @@ def test_list_or_args():
def test_parse_to_list():
assert parse_to_list(None) == []
r = ["hello", b"my name", "45", "555.55", "is simon!", None]
- assert parse_to_list(r) == \
- ["hello", "my name", 45, 555.55, "is simon!", None]
+ assert parse_to_list(r) == ["hello", "my name", 45, 555.55, "is simon!", None]
def test_parse_to_dict():
assert parse_to_dict(None) == {}
- r = [['Some number', '1.0345'],
- ['Some string', 'hello'],
- ['Child iterators',
- ['Time', '0.2089', 'Counter', 3, 'Child iterators',
- ['Type', 'bar', 'Time', '0.0729', 'Counter', 3],
- ['Type', 'barbar', 'Time', '0.058', 'Counter', 3]]]]
+ r = [
+ ["Some number", "1.0345"],
+ ["Some string", "hello"],
+ [
+ "Child iterators",
+ [
+ "Time",
+ "0.2089",
+ "Counter",
+ 3,
+ "Child iterators",
+ ["Type", "bar", "Time", "0.0729", "Counter", 3],
+ ["Type", "barbar", "Time", "0.058", "Counter", 3],
+ ],
+ ],
+ ]
assert parse_to_dict(r) == {
- 'Child iterators': {
- 'Child iterators': [
- {'Counter': 3.0, 'Time': 0.0729, 'Type': 'bar'},
- {'Counter': 3.0, 'Time': 0.058, 'Type': 'barbar'}
+ "Child iterators": {
+ "Child iterators": [
+ {"Counter": 3.0, "Time": 0.0729, "Type": "bar"},
+ {"Counter": 3.0, "Time": 0.058, "Type": "barbar"},
],
- 'Counter': 3.0,
- 'Time': 0.2089
+ "Counter": 3.0,
+ "Time": 0.2089,
},
- 'Some number': 1.0345,
- 'Some string': 'hello'
+ "Some number": 1.0345,
+ "Some string": "hello",
}
def test_nativestr():
- assert nativestr('teststr') == 'teststr'
- assert nativestr(b'teststr') == 'teststr'
- assert nativestr('null') is None
+ assert nativestr("teststr") == "teststr"
+ assert nativestr(b"teststr") == "teststr"
+ assert nativestr("null") is None
def test_delist():
assert delist(None) is None
- assert delist([b'hello', 'world', b'banana']) == \
- ['hello', 'world', 'banana']
+ assert delist([b"hello", "world", b"banana"]) == ["hello", "world", "banana"]
def test_random_string():
@@ -69,5 +78,5 @@ def test_random_string():
def test_quote_string():
assert quote_string("hello world!") == '"hello world!"'
- assert quote_string('') == '""'
- assert quote_string('hello world!') == '"hello world!"'
+ assert quote_string("") == '""'
+ assert quote_string("hello world!") == '"hello world!"'
diff --git a/tests/test_json.py b/tests/test_json.py
index 187bfe2..1686f9d 100644
--- a/tests/test_json.py
+++ b/tests/test_json.py
@@ -1,8 +1,10 @@
import pytest
+
import redis
-from redis.commands.json.path import Path
from redis import exceptions
-from redis.commands.json.decoders import unstring, decode_list
+from redis.commands.json.decoders import decode_list, unstring
+from redis.commands.json.path import Path
+
from .conftest import skip_ifmodversion_lt
@@ -48,9 +50,7 @@ def test_json_get_jset(client):
@pytest.mark.redismod
def test_nonascii_setgetdelete(client):
assert client.json().set("notascii", Path.rootPath(), "hyvää-élève")
- assert "hyvää-élève" == client.json().get(
- "notascii",
- no_escape=True)
+ assert "hyvää-élève" == client.json().get("notascii", no_escape=True)
assert 1 == client.json().delete("notascii")
assert client.exists("notascii") == 0
@@ -179,7 +179,7 @@ def test_arrinsert(client):
1,
2,
3,
- ]
+ ],
)
assert [0, 1, 2, 3, 4] == client.json().get("arr")
@@ -307,8 +307,7 @@ def test_json_delete_with_dollar(client):
r = client.json().get("doc1", "$")
assert r == [{"nested": {"b": 3}}]
- doc2 = {"a": {"a": 2, "b": 3}, "b": [
- "a", "b"], "nested": {"b": [True, "a", "b"]}}
+ doc2 = {"a": {"a": 2, "b": 3}, "b": ["a", "b"], "nested": {"b": [True, "a", "b"]}}
assert client.json().set("doc2", "$", doc2)
assert client.json().delete("doc2", "$..a") == 1
res = client.json().get("doc2", "$")
@@ -361,8 +360,7 @@ def test_json_forget_with_dollar(client):
r = client.json().get("doc1", "$")
assert r == [{"nested": {"b": 3}}]
- doc2 = {"a": {"a": 2, "b": 3}, "b": [
- "a", "b"], "nested": {"b": [True, "a", "b"]}}
+ doc2 = {"a": {"a": 2, "b": 3}, "b": ["a", "b"], "nested": {"b": [True, "a", "b"]}}
assert client.json().set("doc2", "$", doc2)
assert client.json().forget("doc2", "$..a") == 1
res = client.json().get("doc2", "$")
@@ -413,16 +411,12 @@ def test_json_mget_dollar(client):
client.json().set(
"doc1",
"$",
- {"a": 1,
- "b": 2,
- "nested": {"a": 3},
- "c": None, "nested2": {"a": None}},
+ {"a": 1, "b": 2, "nested": {"a": 3}, "c": None, "nested2": {"a": None}},
)
client.json().set(
"doc2",
"$",
- {"a": 4, "b": 5, "nested": {"a": 6},
- "c": None, "nested2": {"a": [None]}},
+ {"a": 4, "b": 5, "nested": {"a": 6}, "c": None, "nested2": {"a": [None]}},
)
# Compare also to single JSON.GET
assert client.json().get("doc1", "$..a") == [1, 3, None]
@@ -431,8 +425,7 @@ def test_json_mget_dollar(client):
# Test mget with single path
client.json().mget("doc1", "$..a") == [1, 3, None]
# Test mget with multi path
- client.json().mget(["doc1", "doc2"], "$..a") == [
- [1, 3, None], [4, 6, [None]]]
+ client.json().mget(["doc1", "doc2"], "$..a") == [[1, 3, None], [4, 6, [None]]]
# Test missing key
client.json().mget(["doc1", "missing_doc"], "$..a") == [[1, 3, None], None]
@@ -444,15 +437,11 @@ def test_json_mget_dollar(client):
def test_numby_commands_dollar(client):
# Test NUMINCRBY
- client.json().set(
- "doc1",
- "$", {"a": "b", "b": [{"a": 2}, {"a": 5.0}, {"a": "c"}]})
+ client.json().set("doc1", "$", {"a": "b", "b": [{"a": 2}, {"a": 5.0}, {"a": "c"}]})
# Test multi
- assert client.json().numincrby("doc1", "$..a", 2) == \
- [None, 4, 7.0, None]
+ assert client.json().numincrby("doc1", "$..a", 2) == [None, 4, 7.0, None]
- assert client.json().numincrby("doc1", "$..a", 2.5) == \
- [None, 6.5, 9.5, None]
+ assert client.json().numincrby("doc1", "$..a", 2.5) == [None, 6.5, 9.5, None]
# Test single
assert client.json().numincrby("doc1", "$.b[1].a", 2) == [11.5]
@@ -460,15 +449,12 @@ def test_numby_commands_dollar(client):
assert client.json().numincrby("doc1", "$.b[1].a", 3.5) == [15.0]
# Test NUMMULTBY
- client.json().set("doc1", "$", {"a": "b", "b": [
- {"a": 2}, {"a": 5.0}, {"a": "c"}]})
+ client.json().set("doc1", "$", {"a": "b", "b": [{"a": 2}, {"a": 5.0}, {"a": "c"}]})
# test list
with pytest.deprecated_call():
- assert client.json().nummultby("doc1", "$..a", 2) == \
- [None, 4, 10, None]
- assert client.json().nummultby("doc1", "$..a", 2.5) == \
- [None, 10.0, 25.0, None]
+ assert client.json().nummultby("doc1", "$..a", 2) == [None, 4, 10, None]
+ assert client.json().nummultby("doc1", "$..a", 2.5) == [None, 10.0, 25.0, None]
# Test single
with pytest.deprecated_call():
@@ -482,13 +468,11 @@ def test_numby_commands_dollar(client):
client.json().nummultby("non_existing_doc", "$..a", 2)
# Test legacy NUMINCRBY
- client.json().set("doc1", "$", {"a": "b", "b": [
- {"a": 2}, {"a": 5.0}, {"a": "c"}]})
+ client.json().set("doc1", "$", {"a": "b", "b": [{"a": 2}, {"a": 5.0}, {"a": "c"}]})
client.json().numincrby("doc1", ".b[0].a", 3) == 5
# Test legacy NUMMULTBY
- client.json().set("doc1", "$", {"a": "b", "b": [
- {"a": 2}, {"a": 5.0}, {"a": "c"}]})
+ client.json().set("doc1", "$", {"a": "b", "b": [{"a": 2}, {"a": 5.0}, {"a": "c"}]})
with pytest.deprecated_call():
client.json().nummultby("doc1", ".b[0].a", 3) == 6
@@ -498,8 +482,7 @@ def test_numby_commands_dollar(client):
def test_strappend_dollar(client):
client.json().set(
- "doc1", "$", {"a": "foo", "nested1": {
- "a": "hello"}, "nested2": {"a": 31}}
+ "doc1", "$", {"a": "foo", "nested1": {"a": "hello"}, "nested2": {"a": 31}}
)
# Test multi
client.json().strappend("doc1", "bar", "$..a") == [6, 8, None]
@@ -534,8 +517,7 @@ def test_strlen_dollar(client):
# Test multi
client.json().set(
- "doc1", "$", {"a": "foo", "nested1": {
- "a": "hello"}, "nested2": {"a": 31}}
+ "doc1", "$", {"a": "foo", "nested1": {"a": "hello"}, "nested2": {"a": 31}}
)
assert client.json().strlen("doc1", "$..a") == [3, 5, None]
@@ -634,8 +616,7 @@ def test_arrinsert_dollar(client):
},
)
# Test multi
- assert client.json().arrinsert("doc1", "$..a", "1",
- "bar", "racuda") == [3, 5, None]
+ assert client.json().arrinsert("doc1", "$..a", "1", "bar", "racuda") == [3, 5, None]
assert client.json().get("doc1", "$") == [
{
@@ -674,8 +655,11 @@ def test_arrlen_dollar(client):
# Test multi
assert client.json().arrlen("doc1", "$..a") == [1, 3, None]
- assert client.json().arrappend("doc1", "$..a", "non", "abba", "stanza") \
- == [4, 6, None]
+ assert client.json().arrappend("doc1", "$..a", "non", "abba", "stanza") == [
+ 4,
+ 6,
+ None,
+ ]
client.json().clear("doc1", "$.a")
assert client.json().arrlen("doc1", "$..a") == [0, 6, None]
@@ -924,8 +908,7 @@ def test_clear_dollar(client):
assert client.json().clear("doc1", "$..a") == 3
assert client.json().get("doc1", "$") == [
- {"nested1": {"a": {}}, "a": [], "nested2": {
- "a": "claro"}, "nested3": {"a": {}}}
+ {"nested1": {"a": {}}, "a": [], "nested2": {"a": "claro"}, "nested3": {"a": {}}}
]
# Test single
@@ -994,8 +977,7 @@ def test_debug_dollar(client):
client.json().set("doc1", "$", jdata)
# Test multi
- assert client.json().debug("MEMORY", "doc1", "$..a") == [
- 72, 24, 24, 16, 16, 1, 0]
+ assert client.json().debug("MEMORY", "doc1", "$..a") == [72, 24, 24, 16, 16, 1, 0]
# Test single
assert client.json().debug("MEMORY", "doc1", "$.nested2.a") == [24]
@@ -1234,12 +1216,10 @@ def test_arrindex_dollar(client):
[],
]
- assert client.json().arrindex("test_num", "$..arr", 3) == [
- 3, 2, -1, None, -1]
+ assert client.json().arrindex("test_num", "$..arr", 3) == [3, 2, -1, None, -1]
# Test index of double scalar in multi values
- assert client.json().arrindex("test_num", "$..arr", 3.0) == [
- 2, 8, -1, None, -1]
+ assert client.json().arrindex("test_num", "$..arr", 3.0) == [2, 8, -1, None, -1]
# Test index of string scalar in multi values
client.json().set(
@@ -1249,10 +1229,7 @@ def test_arrindex_dollar(client):
{"arr": ["bazzz", "bar", 2, "baz", 2, "ba", "baz", 3]},
{
"nested1_found": {
- "arr": [
- None,
- "baz2",
- "buzz", 2, 1, 0, 1, "2", "baz", 2, 4, 5]
+ "arr": [None, "baz2", "buzz", 2, 1, 0, 1, "2", "baz", 2, 4, 5]
}
},
{"nested2_not_found": {"arr": ["baz2", 4, 6]}},
@@ -1344,11 +1321,7 @@ def test_arrindex_dollar(client):
{"arr": ["bazzz", "None", 2, None, 2, "ba", "baz", 3]},
{
"nested1_found": {
- "arr": [
- "zaz",
- "baz2",
- "buzz",
- 2, 1, 0, 1, "2", None, 2, 4, 5]
+ "arr": ["zaz", "baz2", "buzz", 2, 1, 0, 1, "2", None, 2, 4, 5]
}
},
{"nested2_not_found": {"arr": ["None", 4, 6]}},
@@ -1369,8 +1342,7 @@ def test_arrindex_dollar(client):
# Fail with none-scalar value
with pytest.raises(exceptions.ResponseError):
- client.json().arrindex(
- "test_None", "$..nested42_empty_arr.arr", {"arr": []})
+ client.json().arrindex("test_None", "$..nested42_empty_arr.arr", {"arr": []})
# Do not fail with none-scalar value in legacy mode
assert (
@@ -1392,10 +1364,7 @@ def test_arrindex_dollar(client):
assert client.json().arrindex("test_string", ".[0].arr", "faz") == -1
# Test index of None scalar in single value
assert client.json().arrindex("test_None", ".[0].arr", "None") == 1
- assert client.json().arrindex(
- "test_None",
- "..nested2_not_found.arr",
- "None") == 0
+ assert client.json().arrindex("test_None", "..nested2_not_found.arr", "None") == 0
@pytest.mark.redismod
@@ -1406,14 +1375,15 @@ def test_decoders_and_unstring():
assert decode_list(b"45.55") == 45.55
assert decode_list("45.55") == 45.55
- assert decode_list(['hello', b'world']) == ['hello', 'world']
+ assert decode_list(["hello", b"world"]) == ["hello", "world"]
@pytest.mark.redismod
def test_custom_decoder(client):
- import ujson
import json
+ import ujson
+
cj = client.json(encoder=ujson, decoder=ujson)
assert cj.set("foo", Path.rootPath(), "bar")
assert "bar" == cj.get("foo")
diff --git a/tests/test_lock.py b/tests/test_lock.py
index 66148ed..02cca1b 100644
--- a/tests/test_lock.py
+++ b/tests/test_lock.py
@@ -1,9 +1,11 @@
-import pytest
import time
-from redis.exceptions import LockError, LockNotOwnedError
+import pytest
+
from redis.client import Redis
+from redis.exceptions import LockError, LockNotOwnedError
from redis.lock import Lock
+
from .conftest import _get_client
@@ -14,36 +16,36 @@ class TestLock:
return _get_client(Redis, request=request, decode_responses=True)
def get_lock(self, redis, *args, **kwargs):
- kwargs['lock_class'] = Lock
+ kwargs["lock_class"] = Lock
return redis.lock(*args, **kwargs)
def test_lock(self, r):
- lock = self.get_lock(r, 'foo')
+ lock = self.get_lock(r, "foo")
assert lock.acquire(blocking=False)
- assert r.get('foo') == lock.local.token
- assert r.ttl('foo') == -1
+ assert r.get("foo") == lock.local.token
+ assert r.ttl("foo") == -1
lock.release()
- assert r.get('foo') is None
+ assert r.get("foo") is None
def test_lock_token(self, r):
- lock = self.get_lock(r, 'foo')
+ lock = self.get_lock(r, "foo")
self._test_lock_token(r, lock)
def test_lock_token_thread_local_false(self, r):
- lock = self.get_lock(r, 'foo', thread_local=False)
+ lock = self.get_lock(r, "foo", thread_local=False)
self._test_lock_token(r, lock)
def _test_lock_token(self, r, lock):
- assert lock.acquire(blocking=False, token='test')
- assert r.get('foo') == b'test'
- assert lock.local.token == b'test'
- assert r.ttl('foo') == -1
+ assert lock.acquire(blocking=False, token="test")
+ assert r.get("foo") == b"test"
+ assert lock.local.token == b"test"
+ assert r.ttl("foo") == -1
lock.release()
- assert r.get('foo') is None
+ assert r.get("foo") is None
assert lock.local.token is None
def test_locked(self, r):
- lock = self.get_lock(r, 'foo')
+ lock = self.get_lock(r, "foo")
assert lock.locked() is False
lock.acquire(blocking=False)
assert lock.locked() is True
@@ -51,14 +53,14 @@ class TestLock:
assert lock.locked() is False
def _test_owned(self, client):
- lock = self.get_lock(client, 'foo')
+ lock = self.get_lock(client, "foo")
assert lock.owned() is False
lock.acquire(blocking=False)
assert lock.owned() is True
lock.release()
assert lock.owned() is False
- lock2 = self.get_lock(client, 'foo')
+ lock2 = self.get_lock(client, "foo")
assert lock.owned() is False
assert lock2.owned() is False
lock2.acquire(blocking=False)
@@ -75,8 +77,8 @@ class TestLock:
self._test_owned(r_decoded)
def test_competing_locks(self, r):
- lock1 = self.get_lock(r, 'foo')
- lock2 = self.get_lock(r, 'foo')
+ lock1 = self.get_lock(r, "foo")
+ lock2 = self.get_lock(r, "foo")
assert lock1.acquire(blocking=False)
assert not lock2.acquire(blocking=False)
lock1.release()
@@ -85,23 +87,23 @@ class TestLock:
lock2.release()
def test_timeout(self, r):
- lock = self.get_lock(r, 'foo', timeout=10)
+ lock = self.get_lock(r, "foo", timeout=10)
assert lock.acquire(blocking=False)
- assert 8 < r.ttl('foo') <= 10
+ assert 8 < r.ttl("foo") <= 10
lock.release()
def test_float_timeout(self, r):
- lock = self.get_lock(r, 'foo', timeout=9.5)
+ lock = self.get_lock(r, "foo", timeout=9.5)
assert lock.acquire(blocking=False)
- assert 8 < r.pttl('foo') <= 9500
+ assert 8 < r.pttl("foo") <= 9500
lock.release()
def test_blocking_timeout(self, r):
- lock1 = self.get_lock(r, 'foo')
+ lock1 = self.get_lock(r, "foo")
assert lock1.acquire(blocking=False)
bt = 0.2
sleep = 0.05
- lock2 = self.get_lock(r, 'foo', sleep=sleep, blocking_timeout=bt)
+ lock2 = self.get_lock(r, "foo", sleep=sleep, blocking_timeout=bt)
start = time.monotonic()
assert not lock2.acquire()
# The elapsed duration should be less than the total blocking_timeout
@@ -111,22 +113,22 @@ class TestLock:
def test_context_manager(self, r):
# blocking_timeout prevents a deadlock if the lock can't be acquired
# for some reason
- with self.get_lock(r, 'foo', blocking_timeout=0.2) as lock:
- assert r.get('foo') == lock.local.token
- assert r.get('foo') is None
+ with self.get_lock(r, "foo", blocking_timeout=0.2) as lock:
+ assert r.get("foo") == lock.local.token
+ assert r.get("foo") is None
def test_context_manager_raises_when_locked_not_acquired(self, r):
- r.set('foo', 'bar')
+ r.set("foo", "bar")
with pytest.raises(LockError):
- with self.get_lock(r, 'foo', blocking_timeout=0.1):
+ with self.get_lock(r, "foo", blocking_timeout=0.1):
pass
def test_high_sleep_small_blocking_timeout(self, r):
- lock1 = self.get_lock(r, 'foo')
+ lock1 = self.get_lock(r, "foo")
assert lock1.acquire(blocking=False)
sleep = 60
bt = 1
- lock2 = self.get_lock(r, 'foo', sleep=sleep, blocking_timeout=bt)
+ lock2 = self.get_lock(r, "foo", sleep=sleep, blocking_timeout=bt)
start = time.monotonic()
assert not lock2.acquire()
# the elapsed timed is less than the blocking_timeout as the lock is
@@ -135,88 +137,88 @@ class TestLock:
lock1.release()
def test_releasing_unlocked_lock_raises_error(self, r):
- lock = self.get_lock(r, 'foo')
+ lock = self.get_lock(r, "foo")
with pytest.raises(LockError):
lock.release()
def test_releasing_lock_no_longer_owned_raises_error(self, r):
- lock = self.get_lock(r, 'foo')
+ lock = self.get_lock(r, "foo")
lock.acquire(blocking=False)
# manually change the token
- r.set('foo', 'a')
+ r.set("foo", "a")
with pytest.raises(LockNotOwnedError):
lock.release()
# even though we errored, the token is still cleared
assert lock.local.token is None
def test_extend_lock(self, r):
- lock = self.get_lock(r, 'foo', timeout=10)
+ lock = self.get_lock(r, "foo", timeout=10)
assert lock.acquire(blocking=False)
- assert 8000 < r.pttl('foo') <= 10000
+ assert 8000 < r.pttl("foo") <= 10000
assert lock.extend(10)
- assert 16000 < r.pttl('foo') <= 20000
+ assert 16000 < r.pttl("foo") <= 20000
lock.release()
def test_extend_lock_replace_ttl(self, r):
- lock = self.get_lock(r, 'foo', timeout=10)
+ lock = self.get_lock(r, "foo", timeout=10)
assert lock.acquire(blocking=False)
- assert 8000 < r.pttl('foo') <= 10000
+ assert 8000 < r.pttl("foo") <= 10000
assert lock.extend(10, replace_ttl=True)
- assert 8000 < r.pttl('foo') <= 10000
+ assert 8000 < r.pttl("foo") <= 10000
lock.release()
def test_extend_lock_float(self, r):
- lock = self.get_lock(r, 'foo', timeout=10.0)
+ lock = self.get_lock(r, "foo", timeout=10.0)
assert lock.acquire(blocking=False)
- assert 8000 < r.pttl('foo') <= 10000
+ assert 8000 < r.pttl("foo") <= 10000
assert lock.extend(10.0)
- assert 16000 < r.pttl('foo') <= 20000
+ assert 16000 < r.pttl("foo") <= 20000
lock.release()
def test_extending_unlocked_lock_raises_error(self, r):
- lock = self.get_lock(r, 'foo', timeout=10)
+ lock = self.get_lock(r, "foo", timeout=10)
with pytest.raises(LockError):
lock.extend(10)
def test_extending_lock_with_no_timeout_raises_error(self, r):
- lock = self.get_lock(r, 'foo')
+ lock = self.get_lock(r, "foo")
assert lock.acquire(blocking=False)
with pytest.raises(LockError):
lock.extend(10)
lock.release()
def test_extending_lock_no_longer_owned_raises_error(self, r):
- lock = self.get_lock(r, 'foo', timeout=10)
+ lock = self.get_lock(r, "foo", timeout=10)
assert lock.acquire(blocking=False)
- r.set('foo', 'a')
+ r.set("foo", "a")
with pytest.raises(LockNotOwnedError):
lock.extend(10)
def test_reacquire_lock(self, r):
- lock = self.get_lock(r, 'foo', timeout=10)
+ lock = self.get_lock(r, "foo", timeout=10)
assert lock.acquire(blocking=False)
- assert r.pexpire('foo', 5000)
- assert r.pttl('foo') <= 5000
+ assert r.pexpire("foo", 5000)
+ assert r.pttl("foo") <= 5000
assert lock.reacquire()
- assert 8000 < r.pttl('foo') <= 10000
+ assert 8000 < r.pttl("foo") <= 10000
lock.release()
def test_reacquiring_unlocked_lock_raises_error(self, r):
- lock = self.get_lock(r, 'foo', timeout=10)
+ lock = self.get_lock(r, "foo", timeout=10)
with pytest.raises(LockError):
lock.reacquire()
def test_reacquiring_lock_with_no_timeout_raises_error(self, r):
- lock = self.get_lock(r, 'foo')
+ lock = self.get_lock(r, "foo")
assert lock.acquire(blocking=False)
with pytest.raises(LockError):
lock.reacquire()
lock.release()
def test_reacquiring_lock_no_longer_owned_raises_error(self, r):
- lock = self.get_lock(r, 'foo', timeout=10)
+ lock = self.get_lock(r, "foo", timeout=10)
assert lock.acquire(blocking=False)
- r.set('foo', 'a')
+ r.set("foo", "a")
with pytest.raises(LockNotOwnedError):
lock.reacquire()
@@ -228,5 +230,6 @@ class TestLockClassSelection:
def __init__(self, *args, **kwargs):
pass
- lock = r.lock('foo', lock_class=MyLock)
+
+ lock = r.lock("foo", lock_class=MyLock)
assert type(lock) == MyLock
diff --git a/tests/test_monitor.py b/tests/test_monitor.py
index 6c3ea33..40d9e43 100644
--- a/tests/test_monitor.py
+++ b/tests/test_monitor.py
@@ -1,8 +1,9 @@
import pytest
+
from .conftest import (
skip_if_redis_enterprise,
skip_ifnot_redis_enterprise,
- wait_for_command
+ wait_for_command,
)
@@ -11,56 +12,56 @@ class TestMonitor:
def test_wait_command_not_found(self, r):
"Make sure the wait_for_command func works when command is not found"
with r.monitor() as m:
- response = wait_for_command(r, m, 'nothing')
+ response = wait_for_command(r, m, "nothing")
assert response is None
def test_response_values(self, r):
- db = r.connection_pool.connection_kwargs.get('db', 0)
+ db = r.connection_pool.connection_kwargs.get("db", 0)
with r.monitor() as m:
r.ping()
- response = wait_for_command(r, m, 'PING')
- assert isinstance(response['time'], float)
- assert response['db'] == db
- assert response['client_type'] in ('tcp', 'unix')
- assert isinstance(response['client_address'], str)
- assert isinstance(response['client_port'], str)
- assert response['command'] == 'PING'
+ response = wait_for_command(r, m, "PING")
+ assert isinstance(response["time"], float)
+ assert response["db"] == db
+ assert response["client_type"] in ("tcp", "unix")
+ assert isinstance(response["client_address"], str)
+ assert isinstance(response["client_port"], str)
+ assert response["command"] == "PING"
def test_command_with_quoted_key(self, r):
with r.monitor() as m:
r.get('foo"bar')
response = wait_for_command(r, m, 'GET foo"bar')
- assert response['command'] == 'GET foo"bar'
+ assert response["command"] == 'GET foo"bar'
def test_command_with_binary_data(self, r):
with r.monitor() as m:
- byte_string = b'foo\x92'
+ byte_string = b"foo\x92"
r.get(byte_string)
- response = wait_for_command(r, m, 'GET foo\\x92')
- assert response['command'] == 'GET foo\\x92'
+ response = wait_for_command(r, m, "GET foo\\x92")
+ assert response["command"] == "GET foo\\x92"
def test_command_with_escaped_data(self, r):
with r.monitor() as m:
- byte_string = b'foo\\x92'
+ byte_string = b"foo\\x92"
r.get(byte_string)
- response = wait_for_command(r, m, 'GET foo\\\\x92')
- assert response['command'] == 'GET foo\\\\x92'
+ response = wait_for_command(r, m, "GET foo\\\\x92")
+ assert response["command"] == "GET foo\\\\x92"
@skip_if_redis_enterprise
def test_lua_script(self, r):
with r.monitor() as m:
script = 'return redis.call("GET", "foo")'
assert r.eval(script, 0) is None
- response = wait_for_command(r, m, 'GET foo')
- assert response['command'] == 'GET foo'
- assert response['client_type'] == 'lua'
- assert response['client_address'] == 'lua'
- assert response['client_port'] == ''
+ response = wait_for_command(r, m, "GET foo")
+ assert response["command"] == "GET foo"
+ assert response["client_type"] == "lua"
+ assert response["client_address"] == "lua"
+ assert response["client_port"] == ""
@skip_ifnot_redis_enterprise
def test_lua_script_in_enterprise(self, r):
with r.monitor() as m:
script = 'return redis.call("GET", "foo")'
assert r.eval(script, 0) is None
- response = wait_for_command(r, m, 'GET foo')
+ response = wait_for_command(r, m, "GET foo")
assert response is None
diff --git a/tests/test_multiprocessing.py b/tests/test_multiprocessing.py
index 5968b2b..32f5e23 100644
--- a/tests/test_multiprocessing.py
+++ b/tests/test_multiprocessing.py
@@ -1,6 +1,7 @@
-import pytest
-import multiprocessing
import contextlib
+import multiprocessing
+
+import pytest
import redis
from redis.connection import Connection, ConnectionPool
@@ -25,10 +26,7 @@ class TestMultiprocessing:
# actually fork/process-safe
@pytest.fixture()
def r(self, request):
- return _get_client(
- redis.Redis,
- request=request,
- single_connection_client=False)
+ return _get_client(redis.Redis, request=request, single_connection_client=False)
def test_close_connection_in_child(self, master_host):
"""
@@ -36,12 +34,12 @@ class TestMultiprocessing:
destroy the file descriptors so a parent can still use it.
"""
conn = Connection(host=master_host[0], port=master_host[1])
- conn.send_command('ping')
- assert conn.read_response() == b'PONG'
+ conn.send_command("ping")
+ assert conn.read_response() == b"PONG"
def target(conn):
- conn.send_command('ping')
- assert conn.read_response() == b'PONG'
+ conn.send_command("ping")
+ assert conn.read_response() == b"PONG"
conn.disconnect()
proc = multiprocessing.Process(target=target, args=(conn,))
@@ -53,8 +51,8 @@ class TestMultiprocessing:
# child. The child called socket.close() but did not call
# socket.shutdown() because it wasn't the "owning" process.
# Therefore the connection still works in the parent.
- conn.send_command('ping')
- assert conn.read_response() == b'PONG'
+ conn.send_command("ping")
+ assert conn.read_response() == b"PONG"
def test_close_connection_in_parent(self, master_host):
"""
@@ -62,8 +60,8 @@ class TestMultiprocessing:
(the owning process) closes the connection.
"""
conn = Connection(host=master_host[0], port=master_host[1])
- conn.send_command('ping')
- assert conn.read_response() == b'PONG'
+ conn.send_command("ping")
+ assert conn.read_response() == b"PONG"
def target(conn, ev):
ev.wait()
@@ -71,7 +69,7 @@ class TestMultiprocessing:
# connection, the connection is shutdown and the child
# cannot use it.
with pytest.raises(ConnectionError):
- conn.send_command('ping')
+ conn.send_command("ping")
ev = multiprocessing.Event()
proc = multiprocessing.Process(target=target, args=(conn, ev))
@@ -83,28 +81,30 @@ class TestMultiprocessing:
proc.join(3)
assert proc.exitcode == 0
- @pytest.mark.parametrize('max_connections', [1, 2, None])
+ @pytest.mark.parametrize("max_connections", [1, 2, None])
def test_pool(self, max_connections, master_host):
"""
A child will create its own connections when using a pool created
by a parent.
"""
- pool = ConnectionPool.from_url(f'redis://{master_host[0]}:{master_host[1]}',
- max_connections=max_connections)
+ pool = ConnectionPool.from_url(
+ f"redis://{master_host[0]}:{master_host[1]}",
+ max_connections=max_connections,
+ )
- conn = pool.get_connection('ping')
+ conn = pool.get_connection("ping")
main_conn_pid = conn.pid
with exit_callback(pool.release, conn):
- conn.send_command('ping')
- assert conn.read_response() == b'PONG'
+ conn.send_command("ping")
+ assert conn.read_response() == b"PONG"
def target(pool):
with exit_callback(pool.disconnect):
- conn = pool.get_connection('ping')
+ conn = pool.get_connection("ping")
assert conn.pid != main_conn_pid
with exit_callback(pool.release, conn):
- assert conn.send_command('ping') is None
- assert conn.read_response() == b'PONG'
+ assert conn.send_command("ping") is None
+ assert conn.read_response() == b"PONG"
proc = multiprocessing.Process(target=target, args=(pool,))
proc.start()
@@ -113,32 +113,34 @@ class TestMultiprocessing:
# Check that connection is still alive after fork process has exited
# and disconnected the connections in its pool
- conn = pool.get_connection('ping')
+ conn = pool.get_connection("ping")
with exit_callback(pool.release, conn):
- assert conn.send_command('ping') is None
- assert conn.read_response() == b'PONG'
+ assert conn.send_command("ping") is None
+ assert conn.read_response() == b"PONG"
- @pytest.mark.parametrize('max_connections', [1, 2, None])
+ @pytest.mark.parametrize("max_connections", [1, 2, None])
def test_close_pool_in_main(self, max_connections, master_host):
"""
A child process that uses the same pool as its parent isn't affected
when the parent disconnects all connections within the pool.
"""
- pool = ConnectionPool.from_url(f'redis://{master_host[0]}:{master_host[1]}',
- max_connections=max_connections)
+ pool = ConnectionPool.from_url(
+ f"redis://{master_host[0]}:{master_host[1]}",
+ max_connections=max_connections,
+ )
- conn = pool.get_connection('ping')
- assert conn.send_command('ping') is None
- assert conn.read_response() == b'PONG'
+ conn = pool.get_connection("ping")
+ assert conn.send_command("ping") is None
+ assert conn.read_response() == b"PONG"
def target(pool, disconnect_event):
- conn = pool.get_connection('ping')
+ conn = pool.get_connection("ping")
with exit_callback(pool.release, conn):
- assert conn.send_command('ping') is None
- assert conn.read_response() == b'PONG'
+ assert conn.send_command("ping") is None
+ assert conn.read_response() == b"PONG"
disconnect_event.wait()
- assert conn.send_command('ping') is None
- assert conn.read_response() == b'PONG'
+ assert conn.send_command("ping") is None
+ assert conn.read_response() == b"PONG"
ev = multiprocessing.Event()
diff --git a/tests/test_pipeline.py b/tests/test_pipeline.py
index a87ed71..0518893 100644
--- a/tests/test_pipeline.py
+++ b/tests/test_pipeline.py
@@ -1,7 +1,8 @@
import pytest
import redis
-from .conftest import wait_for_command, skip_if_server_version_lt
+
+from .conftest import skip_if_server_version_lt, wait_for_command
class TestPipeline:
@@ -12,31 +13,30 @@ class TestPipeline:
def test_pipeline(self, r):
with r.pipeline() as pipe:
- (pipe.set('a', 'a1')
- .get('a')
- .zadd('z', {'z1': 1})
- .zadd('z', {'z2': 4})
- .zincrby('z', 1, 'z1')
- .zrange('z', 0, 5, withscores=True))
- assert pipe.execute() == \
- [
- True,
- b'a1',
- True,
- True,
- 2.0,
- [(b'z1', 2.0), (b'z2', 4)],
- ]
+ (
+ pipe.set("a", "a1")
+ .get("a")
+ .zadd("z", {"z1": 1})
+ .zadd("z", {"z2": 4})
+ .zincrby("z", 1, "z1")
+ .zrange("z", 0, 5, withscores=True)
+ )
+ assert pipe.execute() == [
+ True,
+ b"a1",
+ True,
+ True,
+ 2.0,
+ [(b"z1", 2.0), (b"z2", 4)],
+ ]
def test_pipeline_memoryview(self, r):
with r.pipeline() as pipe:
- (pipe.set('a', memoryview(b'a1'))
- .get('a'))
- assert pipe.execute() == \
- [
- True,
- b'a1',
- ]
+ (pipe.set("a", memoryview(b"a1")).get("a"))
+ assert pipe.execute() == [
+ True,
+ b"a1",
+ ]
def test_pipeline_length(self, r):
with r.pipeline() as pipe:
@@ -44,7 +44,7 @@ class TestPipeline:
assert len(pipe) == 0
# Fill 'er up!
- pipe.set('a', 'a1').set('b', 'b1').set('c', 'c1')
+ pipe.set("a", "a1").set("b", "b1").set("c", "c1")
assert len(pipe) == 3
# Execute calls reset(), so empty once again.
@@ -53,83 +53,84 @@ class TestPipeline:
def test_pipeline_no_transaction(self, r):
with r.pipeline(transaction=False) as pipe:
- pipe.set('a', 'a1').set('b', 'b1').set('c', 'c1')
+ pipe.set("a", "a1").set("b", "b1").set("c", "c1")
assert pipe.execute() == [True, True, True]
- assert r['a'] == b'a1'
- assert r['b'] == b'b1'
- assert r['c'] == b'c1'
+ assert r["a"] == b"a1"
+ assert r["b"] == b"b1"
+ assert r["c"] == b"c1"
@pytest.mark.onlynoncluster
def test_pipeline_no_transaction_watch(self, r):
- r['a'] = 0
+ r["a"] = 0
with r.pipeline(transaction=False) as pipe:
- pipe.watch('a')
- a = pipe.get('a')
+ pipe.watch("a")
+ a = pipe.get("a")
pipe.multi()
- pipe.set('a', int(a) + 1)
+ pipe.set("a", int(a) + 1)
assert pipe.execute() == [True]
@pytest.mark.onlynoncluster
def test_pipeline_no_transaction_watch_failure(self, r):
- r['a'] = 0
+ r["a"] = 0
with r.pipeline(transaction=False) as pipe:
- pipe.watch('a')
- a = pipe.get('a')
+ pipe.watch("a")
+ a = pipe.get("a")
- r['a'] = 'bad'
+ r["a"] = "bad"
pipe.multi()
- pipe.set('a', int(a) + 1)
+ pipe.set("a", int(a) + 1)
with pytest.raises(redis.WatchError):
pipe.execute()
- assert r['a'] == b'bad'
+ assert r["a"] == b"bad"
def test_exec_error_in_response(self, r):
"""
an invalid pipeline command at exec time adds the exception instance
to the list of returned values
"""
- r['c'] = 'a'
+ r["c"] = "a"
with r.pipeline() as pipe:
- pipe.set('a', 1).set('b', 2).lpush('c', 3).set('d', 4)
+ pipe.set("a", 1).set("b", 2).lpush("c", 3).set("d", 4)
result = pipe.execute(raise_on_error=False)
assert result[0]
- assert r['a'] == b'1'
+ assert r["a"] == b"1"
assert result[1]
- assert r['b'] == b'2'
+ assert r["b"] == b"2"
# we can't lpush to a key that's a string value, so this should
# be a ResponseError exception
assert isinstance(result[2], redis.ResponseError)
- assert r['c'] == b'a'
+ assert r["c"] == b"a"
# since this isn't a transaction, the other commands after the
# error are still executed
assert result[3]
- assert r['d'] == b'4'
+ assert r["d"] == b"4"
# make sure the pipe was restored to a working state
- assert pipe.set('z', 'zzz').execute() == [True]
- assert r['z'] == b'zzz'
+ assert pipe.set("z", "zzz").execute() == [True]
+ assert r["z"] == b"zzz"
def test_exec_error_raised(self, r):
- r['c'] = 'a'
+ r["c"] = "a"
with r.pipeline() as pipe:
- pipe.set('a', 1).set('b', 2).lpush('c', 3).set('d', 4)
+ pipe.set("a", 1).set("b", 2).lpush("c", 3).set("d", 4)
with pytest.raises(redis.ResponseError) as ex:
pipe.execute()
- assert str(ex.value).startswith('Command # 3 (LPUSH c 3) of '
- 'pipeline caused error: ')
+ assert str(ex.value).startswith(
+ "Command # 3 (LPUSH c 3) of " "pipeline caused error: "
+ )
# make sure the pipe was restored to a working state
- assert pipe.set('z', 'zzz').execute() == [True]
- assert r['z'] == b'zzz'
+ assert pipe.set("z", "zzz").execute() == [True]
+ assert r["z"] == b"zzz"
@pytest.mark.onlynoncluster
def test_transaction_with_empty_error_command(self, r):
@@ -139,7 +140,7 @@ class TestPipeline:
"""
for error_switch in (True, False):
with r.pipeline() as pipe:
- pipe.set('a', 1).mget([]).set('c', 3)
+ pipe.set("a", 1).mget([]).set("c", 3)
result = pipe.execute(raise_on_error=error_switch)
assert result[0]
@@ -154,7 +155,7 @@ class TestPipeline:
"""
for error_switch in (True, False):
with r.pipeline(transaction=False) as pipe:
- pipe.set('a', 1).mget([]).set('c', 3)
+ pipe.set("a", 1).mget([]).set("c", 3)
result = pipe.execute(raise_on_error=error_switch)
assert result[0]
@@ -164,61 +165,63 @@ class TestPipeline:
def test_parse_error_raised(self, r):
with r.pipeline() as pipe:
# the zrem is invalid because we don't pass any keys to it
- pipe.set('a', 1).zrem('b').set('b', 2)
+ pipe.set("a", 1).zrem("b").set("b", 2)
with pytest.raises(redis.ResponseError) as ex:
pipe.execute()
- assert str(ex.value).startswith('Command # 2 (ZREM b) of '
- 'pipeline caused error: ')
+ assert str(ex.value).startswith(
+ "Command # 2 (ZREM b) of " "pipeline caused error: "
+ )
# make sure the pipe was restored to a working state
- assert pipe.set('z', 'zzz').execute() == [True]
- assert r['z'] == b'zzz'
+ assert pipe.set("z", "zzz").execute() == [True]
+ assert r["z"] == b"zzz"
@pytest.mark.onlynoncluster
def test_parse_error_raised_transaction(self, r):
with r.pipeline() as pipe:
pipe.multi()
# the zrem is invalid because we don't pass any keys to it
- pipe.set('a', 1).zrem('b').set('b', 2)
+ pipe.set("a", 1).zrem("b").set("b", 2)
with pytest.raises(redis.ResponseError) as ex:
pipe.execute()
- assert str(ex.value).startswith('Command # 2 (ZREM b) of '
- 'pipeline caused error: ')
+ assert str(ex.value).startswith(
+ "Command # 2 (ZREM b) of " "pipeline caused error: "
+ )
# make sure the pipe was restored to a working state
- assert pipe.set('z', 'zzz').execute() == [True]
- assert r['z'] == b'zzz'
+ assert pipe.set("z", "zzz").execute() == [True]
+ assert r["z"] == b"zzz"
@pytest.mark.onlynoncluster
def test_watch_succeed(self, r):
- r['a'] = 1
- r['b'] = 2
+ r["a"] = 1
+ r["b"] = 2
with r.pipeline() as pipe:
- pipe.watch('a', 'b')
+ pipe.watch("a", "b")
assert pipe.watching
- a_value = pipe.get('a')
- b_value = pipe.get('b')
- assert a_value == b'1'
- assert b_value == b'2'
+ a_value = pipe.get("a")
+ b_value = pipe.get("b")
+ assert a_value == b"1"
+ assert b_value == b"2"
pipe.multi()
- pipe.set('c', 3)
+ pipe.set("c", 3)
assert pipe.execute() == [True]
assert not pipe.watching
@pytest.mark.onlynoncluster
def test_watch_failure(self, r):
- r['a'] = 1
- r['b'] = 2
+ r["a"] = 1
+ r["b"] = 2
with r.pipeline() as pipe:
- pipe.watch('a', 'b')
- r['b'] = 3
+ pipe.watch("a", "b")
+ r["b"] = 3
pipe.multi()
- pipe.get('a')
+ pipe.get("a")
with pytest.raises(redis.WatchError):
pipe.execute()
@@ -226,12 +229,12 @@ class TestPipeline:
@pytest.mark.onlynoncluster
def test_watch_failure_in_empty_transaction(self, r):
- r['a'] = 1
- r['b'] = 2
+ r["a"] = 1
+ r["b"] = 2
with r.pipeline() as pipe:
- pipe.watch('a', 'b')
- r['b'] = 3
+ pipe.watch("a", "b")
+ r["b"] = 3
pipe.multi()
with pytest.raises(redis.WatchError):
pipe.execute()
@@ -240,103 +243,104 @@ class TestPipeline:
@pytest.mark.onlynoncluster
def test_unwatch(self, r):
- r['a'] = 1
- r['b'] = 2
+ r["a"] = 1
+ r["b"] = 2
with r.pipeline() as pipe:
- pipe.watch('a', 'b')
- r['b'] = 3
+ pipe.watch("a", "b")
+ r["b"] = 3
pipe.unwatch()
assert not pipe.watching
- pipe.get('a')
- assert pipe.execute() == [b'1']
+ pipe.get("a")
+ assert pipe.execute() == [b"1"]
@pytest.mark.onlynoncluster
def test_watch_exec_no_unwatch(self, r):
- r['a'] = 1
- r['b'] = 2
+ r["a"] = 1
+ r["b"] = 2
with r.monitor() as m:
with r.pipeline() as pipe:
- pipe.watch('a', 'b')
+ pipe.watch("a", "b")
assert pipe.watching
- a_value = pipe.get('a')
- b_value = pipe.get('b')
- assert a_value == b'1'
- assert b_value == b'2'
+ a_value = pipe.get("a")
+ b_value = pipe.get("b")
+ assert a_value == b"1"
+ assert b_value == b"2"
pipe.multi()
- pipe.set('c', 3)
+ pipe.set("c", 3)
assert pipe.execute() == [True]
assert not pipe.watching
- unwatch_command = wait_for_command(r, m, 'UNWATCH')
+ unwatch_command = wait_for_command(r, m, "UNWATCH")
assert unwatch_command is None, "should not send UNWATCH"
@pytest.mark.onlynoncluster
def test_watch_reset_unwatch(self, r):
- r['a'] = 1
+ r["a"] = 1
with r.monitor() as m:
with r.pipeline() as pipe:
- pipe.watch('a')
+ pipe.watch("a")
assert pipe.watching
pipe.reset()
assert not pipe.watching
- unwatch_command = wait_for_command(r, m, 'UNWATCH')
+ unwatch_command = wait_for_command(r, m, "UNWATCH")
assert unwatch_command is not None
- assert unwatch_command['command'] == 'UNWATCH'
+ assert unwatch_command["command"] == "UNWATCH"
@pytest.mark.onlynoncluster
def test_transaction_callable(self, r):
- r['a'] = 1
- r['b'] = 2
+ r["a"] = 1
+ r["b"] = 2
has_run = []
def my_transaction(pipe):
- a_value = pipe.get('a')
- assert a_value in (b'1', b'2')
- b_value = pipe.get('b')
- assert b_value == b'2'
+ a_value = pipe.get("a")
+ assert a_value in (b"1", b"2")
+ b_value = pipe.get("b")
+ assert b_value == b"2"
# silly run-once code... incr's "a" so WatchError should be raised
# forcing this all to run again. this should incr "a" once to "2"
if not has_run:
- r.incr('a')
- has_run.append('it has')
+ r.incr("a")
+ has_run.append("it has")
pipe.multi()
- pipe.set('c', int(a_value) + int(b_value))
+ pipe.set("c", int(a_value) + int(b_value))
- result = r.transaction(my_transaction, 'a', 'b')
+ result = r.transaction(my_transaction, "a", "b")
assert result == [True]
- assert r['c'] == b'4'
+ assert r["c"] == b"4"
@pytest.mark.onlynoncluster
def test_transaction_callable_returns_value_from_callable(self, r):
def callback(pipe):
# No need to do anything here since we only want the return value
- return 'a'
+ return "a"
- res = r.transaction(callback, 'my-key', value_from_callable=True)
- assert res == 'a'
+ res = r.transaction(callback, "my-key", value_from_callable=True)
+ assert res == "a"
def test_exec_error_in_no_transaction_pipeline(self, r):
- r['a'] = 1
+ r["a"] = 1
with r.pipeline(transaction=False) as pipe:
- pipe.llen('a')
- pipe.expire('a', 100)
+ pipe.llen("a")
+ pipe.expire("a", 100)
with pytest.raises(redis.ResponseError) as ex:
pipe.execute()
- assert str(ex.value).startswith('Command # 1 (LLEN a) of '
- 'pipeline caused error: ')
+ assert str(ex.value).startswith(
+ "Command # 1 (LLEN a) of " "pipeline caused error: "
+ )
- assert r['a'] == b'1'
+ assert r["a"] == b"1"
def test_exec_error_in_no_transaction_pipeline_unicode_command(self, r):
- key = chr(3456) + 'abcd' + chr(3421)
+ key = chr(3456) + "abcd" + chr(3421)
r[key] = 1
with r.pipeline(transaction=False) as pipe:
pipe.llen(key)
@@ -345,51 +349,52 @@ class TestPipeline:
with pytest.raises(redis.ResponseError) as ex:
pipe.execute()
- expected = f'Command # 1 (LLEN {key}) of pipeline caused error: '
+ expected = f"Command # 1 (LLEN {key}) of pipeline caused error: "
assert str(ex.value).startswith(expected)
- assert r[key] == b'1'
+ assert r[key] == b"1"
def test_pipeline_with_bitfield(self, r):
with r.pipeline() as pipe:
- pipe.set('a', '1')
- bf = pipe.bitfield('b')
- pipe2 = (bf
- .set('u8', 8, 255)
- .get('u8', 0)
- .get('u4', 8) # 1111
- .get('u4', 12) # 1111
- .get('u4', 13) # 1110
- .execute())
- pipe.get('a')
+ pipe.set("a", "1")
+ bf = pipe.bitfield("b")
+ pipe2 = (
+ bf.set("u8", 8, 255)
+ .get("u8", 0)
+ .get("u4", 8) # 1111
+ .get("u4", 12) # 1111
+ .get("u4", 13) # 1110
+ .execute()
+ )
+ pipe.get("a")
response = pipe.execute()
assert pipe == pipe2
- assert response == [True, [0, 0, 15, 15, 14], b'1']
+ assert response == [True, [0, 0, 15, 15, 14], b"1"]
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('2.0.0')
+ @skip_if_server_version_lt("2.0.0")
def test_pipeline_discard(self, r):
# empty pipeline should raise an error
with r.pipeline() as pipe:
- pipe.set('key', 'someval')
+ pipe.set("key", "someval")
pipe.discard()
with pytest.raises(redis.exceptions.ResponseError):
pipe.execute()
# setting a pipeline and discarding should do the same
with r.pipeline() as pipe:
- pipe.set('key', 'someval')
- pipe.set('someotherkey', 'val')
+ pipe.set("key", "someval")
+ pipe.set("someotherkey", "val")
response = pipe.execute()
- pipe.set('key', 'another value!')
+ pipe.set("key", "another value!")
pipe.discard()
- pipe.set('key', 'another vae!')
+ pipe.set("key", "another vae!")
with pytest.raises(redis.exceptions.ResponseError):
pipe.execute()
- pipe.set('foo', 'bar')
+ pipe.set("foo", "bar")
response = pipe.execute()
assert response[0]
- assert r.get('foo') == b'bar'
+ assert r.get("foo") == b"bar"
diff --git a/tests/test_pubsub.py b/tests/test_pubsub.py
index b019bae..6df0faf 100644
--- a/tests/test_pubsub.py
+++ b/tests/test_pubsub.py
@@ -1,17 +1,14 @@
+import platform
import threading
import time
from unittest import mock
-import platform
import pytest
+
import redis
from redis.exceptions import ConnectionError
-from .conftest import (
- _get_client,
- skip_if_redis_enterprise,
- skip_if_server_version_lt
-)
+from .conftest import _get_client, skip_if_redis_enterprise, skip_if_server_version_lt
def wait_for_message(pubsub, timeout=0.1, ignore_subscribe_messages=False):
@@ -19,7 +16,8 @@ def wait_for_message(pubsub, timeout=0.1, ignore_subscribe_messages=False):
timeout = now + timeout
while now < timeout:
message = pubsub.get_message(
- ignore_subscribe_messages=ignore_subscribe_messages)
+ ignore_subscribe_messages=ignore_subscribe_messages
+ )
if message is not None:
return message
time.sleep(0.01)
@@ -29,39 +27,39 @@ def wait_for_message(pubsub, timeout=0.1, ignore_subscribe_messages=False):
def make_message(type, channel, data, pattern=None):
return {
- 'type': type,
- 'pattern': pattern and pattern.encode('utf-8') or None,
- 'channel': channel and channel.encode('utf-8') or None,
- 'data': data.encode('utf-8') if isinstance(data, str) else data
+ "type": type,
+ "pattern": pattern and pattern.encode("utf-8") or None,
+ "channel": channel and channel.encode("utf-8") or None,
+ "data": data.encode("utf-8") if isinstance(data, str) else data,
}
def make_subscribe_test_data(pubsub, type):
- if type == 'channel':
+ if type == "channel":
return {
- 'p': pubsub,
- 'sub_type': 'subscribe',
- 'unsub_type': 'unsubscribe',
- 'sub_func': pubsub.subscribe,
- 'unsub_func': pubsub.unsubscribe,
- 'keys': ['foo', 'bar', 'uni' + chr(4456) + 'code']
+ "p": pubsub,
+ "sub_type": "subscribe",
+ "unsub_type": "unsubscribe",
+ "sub_func": pubsub.subscribe,
+ "unsub_func": pubsub.unsubscribe,
+ "keys": ["foo", "bar", "uni" + chr(4456) + "code"],
}
- elif type == 'pattern':
+ elif type == "pattern":
return {
- 'p': pubsub,
- 'sub_type': 'psubscribe',
- 'unsub_type': 'punsubscribe',
- 'sub_func': pubsub.psubscribe,
- 'unsub_func': pubsub.punsubscribe,
- 'keys': ['f*', 'b*', 'uni' + chr(4456) + '*']
+ "p": pubsub,
+ "sub_type": "psubscribe",
+ "unsub_type": "punsubscribe",
+ "sub_func": pubsub.psubscribe,
+ "unsub_func": pubsub.punsubscribe,
+ "keys": ["f*", "b*", "uni" + chr(4456) + "*"],
}
- assert False, f'invalid subscribe type: {type}'
+ assert False, f"invalid subscribe type: {type}"
class TestPubSubSubscribeUnsubscribe:
-
- def _test_subscribe_unsubscribe(self, p, sub_type, unsub_type, sub_func,
- unsub_func, keys):
+ def _test_subscribe_unsubscribe(
+ self, p, sub_type, unsub_type, sub_func, unsub_func, keys
+ ):
for key in keys:
assert sub_func(key) is None
@@ -79,15 +77,16 @@ class TestPubSubSubscribeUnsubscribe:
assert wait_for_message(p) == make_message(unsub_type, key, i)
def test_channel_subscribe_unsubscribe(self, r):
- kwargs = make_subscribe_test_data(r.pubsub(), 'channel')
+ kwargs = make_subscribe_test_data(r.pubsub(), "channel")
self._test_subscribe_unsubscribe(**kwargs)
def test_pattern_subscribe_unsubscribe(self, r):
- kwargs = make_subscribe_test_data(r.pubsub(), 'pattern')
+ kwargs = make_subscribe_test_data(r.pubsub(), "pattern")
self._test_subscribe_unsubscribe(**kwargs)
- def _test_resubscribe_on_reconnection(self, p, sub_type, unsub_type,
- sub_func, unsub_func, keys):
+ def _test_resubscribe_on_reconnection(
+ self, p, sub_type, unsub_type, sub_func, unsub_func, keys
+ ):
for key in keys:
assert sub_func(key) is None
@@ -109,10 +108,10 @@ class TestPubSubSubscribeUnsubscribe:
unique_channels = set()
assert len(messages) == len(keys)
for i, message in enumerate(messages):
- assert message['type'] == sub_type
- assert message['data'] == i + 1
- assert isinstance(message['channel'], bytes)
- channel = message['channel'].decode('utf-8')
+ assert message["type"] == sub_type
+ assert message["data"] == i + 1
+ assert isinstance(message["channel"], bytes)
+ channel = message["channel"].decode("utf-8")
unique_channels.add(channel)
assert len(unique_channels) == len(keys)
@@ -120,16 +119,17 @@ class TestPubSubSubscribeUnsubscribe:
assert channel in keys
def test_resubscribe_to_channels_on_reconnection(self, r):
- kwargs = make_subscribe_test_data(r.pubsub(), 'channel')
+ kwargs = make_subscribe_test_data(r.pubsub(), "channel")
self._test_resubscribe_on_reconnection(**kwargs)
@pytest.mark.onlynoncluster
def test_resubscribe_to_patterns_on_reconnection(self, r):
- kwargs = make_subscribe_test_data(r.pubsub(), 'pattern')
+ kwargs = make_subscribe_test_data(r.pubsub(), "pattern")
self._test_resubscribe_on_reconnection(**kwargs)
- def _test_subscribed_property(self, p, sub_type, unsub_type, sub_func,
- unsub_func, keys):
+ def _test_subscribed_property(
+ self, p, sub_type, unsub_type, sub_func, unsub_func, keys
+ ):
assert p.subscribed is False
sub_func(keys[0])
@@ -175,22 +175,22 @@ class TestPubSubSubscribeUnsubscribe:
assert p.subscribed is False
def test_subscribe_property_with_channels(self, r):
- kwargs = make_subscribe_test_data(r.pubsub(), 'channel')
+ kwargs = make_subscribe_test_data(r.pubsub(), "channel")
self._test_subscribed_property(**kwargs)
@pytest.mark.onlynoncluster
def test_subscribe_property_with_patterns(self, r):
- kwargs = make_subscribe_test_data(r.pubsub(), 'pattern')
+ kwargs = make_subscribe_test_data(r.pubsub(), "pattern")
self._test_subscribed_property(**kwargs)
def test_ignore_all_subscribe_messages(self, r):
p = r.pubsub(ignore_subscribe_messages=True)
checks = (
- (p.subscribe, 'foo'),
- (p.unsubscribe, 'foo'),
- (p.psubscribe, 'f*'),
- (p.punsubscribe, 'f*'),
+ (p.subscribe, "foo"),
+ (p.unsubscribe, "foo"),
+ (p.psubscribe, "f*"),
+ (p.punsubscribe, "f*"),
)
assert p.subscribed is False
@@ -204,10 +204,10 @@ class TestPubSubSubscribeUnsubscribe:
p = r.pubsub()
checks = (
- (p.subscribe, 'foo'),
- (p.unsubscribe, 'foo'),
- (p.psubscribe, 'f*'),
- (p.punsubscribe, 'f*'),
+ (p.subscribe, "foo"),
+ (p.unsubscribe, "foo"),
+ (p.psubscribe, "f*"),
+ (p.punsubscribe, "f*"),
)
assert p.subscribed is False
@@ -219,16 +219,17 @@ class TestPubSubSubscribeUnsubscribe:
assert p.subscribed is False
def test_sub_unsub_resub_channels(self, r):
- kwargs = make_subscribe_test_data(r.pubsub(), 'channel')
+ kwargs = make_subscribe_test_data(r.pubsub(), "channel")
self._test_sub_unsub_resub(**kwargs)
@pytest.mark.onlynoncluster
def test_sub_unsub_resub_patterns(self, r):
- kwargs = make_subscribe_test_data(r.pubsub(), 'pattern')
+ kwargs = make_subscribe_test_data(r.pubsub(), "pattern")
self._test_sub_unsub_resub(**kwargs)
- def _test_sub_unsub_resub(self, p, sub_type, unsub_type, sub_func,
- unsub_func, keys):
+ def _test_sub_unsub_resub(
+ self, p, sub_type, unsub_type, sub_func, unsub_func, keys
+ ):
# https://github.com/andymccurdy/redis-py/issues/764
key = keys[0]
sub_func(key)
@@ -241,15 +242,16 @@ class TestPubSubSubscribeUnsubscribe:
assert p.subscribed is True
def test_sub_unsub_all_resub_channels(self, r):
- kwargs = make_subscribe_test_data(r.pubsub(), 'channel')
+ kwargs = make_subscribe_test_data(r.pubsub(), "channel")
self._test_sub_unsub_all_resub(**kwargs)
def test_sub_unsub_all_resub_patterns(self, r):
- kwargs = make_subscribe_test_data(r.pubsub(), 'pattern')
+ kwargs = make_subscribe_test_data(r.pubsub(), "pattern")
self._test_sub_unsub_all_resub(**kwargs)
- def _test_sub_unsub_all_resub(self, p, sub_type, unsub_type, sub_func,
- unsub_func, keys):
+ def _test_sub_unsub_all_resub(
+ self, p, sub_type, unsub_type, sub_func, unsub_func, keys
+ ):
# https://github.com/andymccurdy/redis-py/issues/764
key = keys[0]
sub_func(key)
@@ -271,22 +273,22 @@ class TestPubSubMessages:
def test_published_message_to_channel(self, r):
p = r.pubsub()
- p.subscribe('foo')
- assert wait_for_message(p) == make_message('subscribe', 'foo', 1)
- assert r.publish('foo', 'test message') == 1
+ p.subscribe("foo")
+ assert wait_for_message(p) == make_message("subscribe", "foo", 1)
+ assert r.publish("foo", "test message") == 1
message = wait_for_message(p)
assert isinstance(message, dict)
- assert message == make_message('message', 'foo', 'test message')
+ assert message == make_message("message", "foo", "test message")
def test_published_message_to_pattern(self, r):
p = r.pubsub()
- p.subscribe('foo')
- p.psubscribe('f*')
- assert wait_for_message(p) == make_message('subscribe', 'foo', 1)
- assert wait_for_message(p) == make_message('psubscribe', 'f*', 2)
+ p.subscribe("foo")
+ p.psubscribe("f*")
+ assert wait_for_message(p) == make_message("subscribe", "foo", 1)
+ assert wait_for_message(p) == make_message("psubscribe", "f*", 2)
# 1 to pattern, 1 to channel
- assert r.publish('foo', 'test message') == 2
+ assert r.publish("foo", "test message") == 2
message1 = wait_for_message(p)
message2 = wait_for_message(p)
@@ -294,8 +296,8 @@ class TestPubSubMessages:
assert isinstance(message2, dict)
expected = [
- make_message('message', 'foo', 'test message'),
- make_message('pmessage', 'foo', 'test message', pattern='f*')
+ make_message("message", "foo", "test message"),
+ make_message("pmessage", "foo", "test message", pattern="f*"),
]
assert message1 in expected
@@ -306,67 +308,65 @@ class TestPubSubMessages:
p = r.pubsub(ignore_subscribe_messages=True)
p.subscribe(foo=self.message_handler)
assert wait_for_message(p) is None
- assert r.publish('foo', 'test message') == 1
+ assert r.publish("foo", "test message") == 1
assert wait_for_message(p) is None
- assert self.message == make_message('message', 'foo', 'test message')
+ assert self.message == make_message("message", "foo", "test message")
@pytest.mark.onlynoncluster
def test_pattern_message_handler(self, r):
p = r.pubsub(ignore_subscribe_messages=True)
- p.psubscribe(**{'f*': self.message_handler})
+ p.psubscribe(**{"f*": self.message_handler})
assert wait_for_message(p) is None
- assert r.publish('foo', 'test message') == 1
+ assert r.publish("foo", "test message") == 1
assert wait_for_message(p) is None
- assert self.message == make_message('pmessage', 'foo', 'test message',
- pattern='f*')
+ assert self.message == make_message(
+ "pmessage", "foo", "test message", pattern="f*"
+ )
def test_unicode_channel_message_handler(self, r):
p = r.pubsub(ignore_subscribe_messages=True)
- channel = 'uni' + chr(4456) + 'code'
+ channel = "uni" + chr(4456) + "code"
channels = {channel: self.message_handler}
p.subscribe(**channels)
assert wait_for_message(p) is None
- assert r.publish(channel, 'test message') == 1
+ assert r.publish(channel, "test message") == 1
assert wait_for_message(p) is None
- assert self.message == make_message('message', channel, 'test message')
+ assert self.message == make_message("message", channel, "test message")
@pytest.mark.onlynoncluster
# see: https://redis-py-cluster.readthedocs.io/en/stable/pubsub.html
# #known-limitations-with-pubsub
def test_unicode_pattern_message_handler(self, r):
p = r.pubsub(ignore_subscribe_messages=True)
- pattern = 'uni' + chr(4456) + '*'
- channel = 'uni' + chr(4456) + 'code'
+ pattern = "uni" + chr(4456) + "*"
+ channel = "uni" + chr(4456) + "code"
p.psubscribe(**{pattern: self.message_handler})
assert wait_for_message(p) is None
- assert r.publish(channel, 'test message') == 1
+ assert r.publish(channel, "test message") == 1
assert wait_for_message(p) is None
- assert self.message == make_message('pmessage', channel,
- 'test message', pattern=pattern)
+ assert self.message == make_message(
+ "pmessage", channel, "test message", pattern=pattern
+ )
def test_get_message_without_subscribe(self, r):
p = r.pubsub()
with pytest.raises(RuntimeError) as info:
p.get_message()
- expect = ('connection not set: '
- 'did you forget to call subscribe() or psubscribe()?')
+ expect = (
+ "connection not set: " "did you forget to call subscribe() or psubscribe()?"
+ )
assert expect in info.exconly()
class TestPubSubAutoDecoding:
"These tests only validate that we get unicode values back"
- channel = 'uni' + chr(4456) + 'code'
- pattern = 'uni' + chr(4456) + '*'
- data = 'abc' + chr(4458) + '123'
+ channel = "uni" + chr(4456) + "code"
+ pattern = "uni" + chr(4456) + "*"
+ data = "abc" + chr(4458) + "123"
def make_message(self, type, channel, data, pattern=None):
- return {
- 'type': type,
- 'channel': channel,
- 'pattern': pattern,
- 'data': data
- }
+ return {"type": type, "channel": channel, "pattern": pattern, "data": data}
def setup_method(self, method):
self.message = None
@@ -381,44 +381,37 @@ class TestPubSubAutoDecoding:
def test_channel_subscribe_unsubscribe(self, r):
p = r.pubsub()
p.subscribe(self.channel)
- assert wait_for_message(p) == self.make_message('subscribe',
- self.channel, 1)
+ assert wait_for_message(p) == self.make_message("subscribe", self.channel, 1)
p.unsubscribe(self.channel)
- assert wait_for_message(p) == self.make_message('unsubscribe',
- self.channel, 0)
+ assert wait_for_message(p) == self.make_message("unsubscribe", self.channel, 0)
def test_pattern_subscribe_unsubscribe(self, r):
p = r.pubsub()
p.psubscribe(self.pattern)
- assert wait_for_message(p) == self.make_message('psubscribe',
- self.pattern, 1)
+ assert wait_for_message(p) == self.make_message("psubscribe", self.pattern, 1)
p.punsubscribe(self.pattern)
- assert wait_for_message(p) == self.make_message('punsubscribe',
- self.pattern, 0)
+ assert wait_for_message(p) == self.make_message("punsubscribe", self.pattern, 0)
def test_channel_publish(self, r):
p = r.pubsub()
p.subscribe(self.channel)
- assert wait_for_message(p) == self.make_message('subscribe',
- self.channel, 1)
+ assert wait_for_message(p) == self.make_message("subscribe", self.channel, 1)
r.publish(self.channel, self.data)
- assert wait_for_message(p) == self.make_message('message',
- self.channel,
- self.data)
+ assert wait_for_message(p) == self.make_message(
+ "message", self.channel, self.data
+ )
@pytest.mark.onlynoncluster
def test_pattern_publish(self, r):
p = r.pubsub()
p.psubscribe(self.pattern)
- assert wait_for_message(p) == self.make_message('psubscribe',
- self.pattern, 1)
+ assert wait_for_message(p) == self.make_message("psubscribe", self.pattern, 1)
r.publish(self.channel, self.data)
- assert wait_for_message(p) == self.make_message('pmessage',
- self.channel,
- self.data,
- pattern=self.pattern)
+ assert wait_for_message(p) == self.make_message(
+ "pmessage", self.channel, self.data, pattern=self.pattern
+ )
def test_channel_message_handler(self, r):
p = r.pubsub(ignore_subscribe_messages=True)
@@ -426,18 +419,16 @@ class TestPubSubAutoDecoding:
assert wait_for_message(p) is None
r.publish(self.channel, self.data)
assert wait_for_message(p) is None
- assert self.message == self.make_message('message', self.channel,
- self.data)
+ assert self.message == self.make_message("message", self.channel, self.data)
# test that we reconnected to the correct channel
self.message = None
p.connection.disconnect()
assert wait_for_message(p) is None # should reconnect
- new_data = self.data + 'new data'
+ new_data = self.data + "new data"
r.publish(self.channel, new_data)
assert wait_for_message(p) is None
- assert self.message == self.make_message('message', self.channel,
- new_data)
+ assert self.message == self.make_message("message", self.channel, new_data)
def test_pattern_message_handler(self, r):
p = r.pubsub(ignore_subscribe_messages=True)
@@ -445,24 +436,24 @@ class TestPubSubAutoDecoding:
assert wait_for_message(p) is None
r.publish(self.channel, self.data)
assert wait_for_message(p) is None
- assert self.message == self.make_message('pmessage', self.channel,
- self.data,
- pattern=self.pattern)
+ assert self.message == self.make_message(
+ "pmessage", self.channel, self.data, pattern=self.pattern
+ )
# test that we reconnected to the correct pattern
self.message = None
p.connection.disconnect()
assert wait_for_message(p) is None # should reconnect
- new_data = self.data + 'new data'
+ new_data = self.data + "new data"
r.publish(self.channel, new_data)
assert wait_for_message(p) is None
- assert self.message == self.make_message('pmessage', self.channel,
- new_data,
- pattern=self.pattern)
+ assert self.message == self.make_message(
+ "pmessage", self.channel, new_data, pattern=self.pattern
+ )
def test_context_manager(self, r):
with r.pubsub() as pubsub:
- pubsub.subscribe('foo')
+ pubsub.subscribe("foo")
assert pubsub.connection is not None
assert pubsub.connection is None
@@ -471,86 +462,82 @@ class TestPubSubAutoDecoding:
class TestPubSubRedisDown:
-
def test_channel_subscribe(self, r):
- r = redis.Redis(host='localhost', port=6390)
+ r = redis.Redis(host="localhost", port=6390)
p = r.pubsub()
with pytest.raises(ConnectionError):
- p.subscribe('foo')
+ p.subscribe("foo")
class TestPubSubSubcommands:
-
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('2.8.0')
+ @skip_if_server_version_lt("2.8.0")
def test_pubsub_channels(self, r):
p = r.pubsub()
- p.subscribe('foo', 'bar', 'baz', 'quux')
+ p.subscribe("foo", "bar", "baz", "quux")
for i in range(4):
- assert wait_for_message(p)['type'] == 'subscribe'
- expected = [b'bar', b'baz', b'foo', b'quux']
+ assert wait_for_message(p)["type"] == "subscribe"
+ expected = [b"bar", b"baz", b"foo", b"quux"]
assert all([channel in r.pubsub_channels() for channel in expected])
@pytest.mark.onlynoncluster
- @skip_if_server_version_lt('2.8.0')
+ @skip_if_server_version_lt("2.8.0")
def test_pubsub_numsub(self, r):
p1 = r.pubsub()
- p1.subscribe('foo', 'bar', 'baz')
+ p1.subscribe("foo", "bar", "baz")
for i in range(3):
- assert wait_for_message(p1)['type'] == 'subscribe'
+ assert wait_for_message(p1)["type"] == "subscribe"
p2 = r.pubsub()
- p2.subscribe('bar', 'baz')
+ p2.subscribe("bar", "baz")
for i in range(2):
- assert wait_for_message(p2)['type'] == 'subscribe'
+ assert wait_for_message(p2)["type"] == "subscribe"
p3 = r.pubsub()
- p3.subscribe('baz')
- assert wait_for_message(p3)['type'] == 'subscribe'
+ p3.subscribe("baz")
+ assert wait_for_message(p3)["type"] == "subscribe"
- channels = [(b'foo', 1), (b'bar', 2), (b'baz', 3)]
- assert r.pubsub_numsub('foo', 'bar', 'baz') == channels
+ channels = [(b"foo", 1), (b"bar", 2), (b"baz", 3)]
+ assert r.pubsub_numsub("foo", "bar", "baz") == channels
- @skip_if_server_version_lt('2.8.0')
+ @skip_if_server_version_lt("2.8.0")
def test_pubsub_numpat(self, r):
p = r.pubsub()
- p.psubscribe('*oo', '*ar', 'b*z')
+ p.psubscribe("*oo", "*ar", "b*z")
for i in range(3):
- assert wait_for_message(p)['type'] == 'psubscribe'
+ assert wait_for_message(p)["type"] == "psubscribe"
assert r.pubsub_numpat() == 3
class TestPubSubPings:
-
- @skip_if_server_version_lt('3.0.0')
+ @skip_if_server_version_lt("3.0.0")
def test_send_pubsub_ping(self, r):
p = r.pubsub(ignore_subscribe_messages=True)
- p.subscribe('foo')
+ p.subscribe("foo")
p.ping()
- assert wait_for_message(p) == make_message(type='pong', channel=None,
- data='',
- pattern=None)
+ assert wait_for_message(p) == make_message(
+ type="pong", channel=None, data="", pattern=None
+ )
- @skip_if_server_version_lt('3.0.0')
+ @skip_if_server_version_lt("3.0.0")
def test_send_pubsub_ping_message(self, r):
p = r.pubsub(ignore_subscribe_messages=True)
- p.subscribe('foo')
- p.ping(message='hello world')
- assert wait_for_message(p) == make_message(type='pong', channel=None,
- data='hello world',
- pattern=None)
+ p.subscribe("foo")
+ p.ping(message="hello world")
+ assert wait_for_message(p) == make_message(
+ type="pong", channel=None, data="hello world", pattern=None
+ )
@pytest.mark.onlynoncluster
class TestPubSubConnectionKilled:
-
- @skip_if_server_version_lt('3.0.0')
+ @skip_if_server_version_lt("3.0.0")
@skip_if_redis_enterprise
def test_connection_error_raised_when_connection_dies(self, r):
p = r.pubsub()
- p.subscribe('foo')
- assert wait_for_message(p) == make_message('subscribe', 'foo', 1)
+ p.subscribe("foo")
+ assert wait_for_message(p) == make_message("subscribe", "foo", 1)
for client in r.client_list():
- if client['cmd'] == 'subscribe':
- r.client_kill_filter(_id=client['id'])
+ if client["cmd"] == "subscribe":
+ r.client_kill_filter(_id=client["id"])
with pytest.raises(ConnectionError):
wait_for_message(p)
@@ -558,15 +545,15 @@ class TestPubSubConnectionKilled:
class TestPubSubTimeouts:
def test_get_message_with_timeout_returns_none(self, r):
p = r.pubsub()
- p.subscribe('foo')
- assert wait_for_message(p) == make_message('subscribe', 'foo', 1)
+ p.subscribe("foo")
+ assert wait_for_message(p) == make_message("subscribe", "foo", 1)
assert p.get_message(timeout=0.01) is None
class TestPubSubWorkerThread:
-
- @pytest.mark.skipif(platform.python_implementation() == 'PyPy',
- reason="Pypy threading issue")
+ @pytest.mark.skipif(
+ platform.python_implementation() == "PyPy", reason="Pypy threading issue"
+ )
def test_pubsub_worker_thread_exception_handler(self, r):
event = threading.Event()
@@ -575,12 +562,10 @@ class TestPubSubWorkerThread:
event.set()
p = r.pubsub()
- p.subscribe(**{'foo': lambda m: m})
- with mock.patch.object(p, 'get_message',
- side_effect=Exception('error')):
+ p.subscribe(**{"foo": lambda m: m})
+ with mock.patch.object(p, "get_message", side_effect=Exception("error")):
pubsub_thread = p.run_in_thread(
- daemon=True,
- exception_handler=exception_handler
+ daemon=True, exception_handler=exception_handler
)
assert event.wait(timeout=1.0)
@@ -589,10 +574,9 @@ class TestPubSubWorkerThread:
class TestPubSubDeadlock:
- @pytest.mark.timeout(30, method='thread')
+ @pytest.mark.timeout(30, method="thread")
def test_pubsub_deadlock(self, master_host):
- pool = redis.ConnectionPool(host=master_host[0],
- port=master_host[1])
+ pool = redis.ConnectionPool(host=master_host[0], port=master_host[1])
r = redis.Redis(connection_pool=pool)
for i in range(60):
diff --git a/tests/test_retry.py b/tests/test_retry.py
index 535485a..c4650bc 100644
--- a/tests/test_retry.py
+++ b/tests/test_retry.py
@@ -1,8 +1,8 @@
-from redis.backoff import NoBackoff
import pytest
-from redis.exceptions import ConnectionError
+from redis.backoff import NoBackoff
from redis.connection import Connection, UnixDomainSocketConnection
+from redis.exceptions import ConnectionError
from redis.retry import Retry
@@ -34,8 +34,7 @@ class TestConnectionConstructorWithRetry:
@pytest.mark.parametrize("Class", [Connection, UnixDomainSocketConnection])
def test_retry_on_timeout_retry(self, Class, retries):
retry_on_timeout = retries > 0
- c = Class(retry_on_timeout=retry_on_timeout,
- retry=Retry(NoBackoff(), retries))
+ c = Class(retry_on_timeout=retry_on_timeout, retry=Retry(NoBackoff(), retries))
assert c.retry_on_timeout == retry_on_timeout
assert isinstance(c.retry, Retry)
assert c.retry._retries == retries
diff --git a/tests/test_scripting.py b/tests/test_scripting.py
index 7614b12..9f4f820 100644
--- a/tests/test_scripting.py
+++ b/tests/test_scripting.py
@@ -1,10 +1,8 @@
import pytest
from redis import exceptions
-
from tests.conftest import skip_if_server_version_lt
-
multiply_script = """
local value = redis.call('GET', KEYS[1])
value = tonumber(value)
@@ -29,52 +27,52 @@ class TestScripting:
r.script_flush()
def test_eval(self, r):
- r.set('a', 2)
+ r.set("a", 2)
# 2 * 3 == 6
- assert r.eval(multiply_script, 1, 'a', 3) == 6
+ assert r.eval(multiply_script, 1, "a", 3) == 6
- @skip_if_server_version_lt('6.2.0')
+ @skip_if_server_version_lt("6.2.0")
def test_script_flush_620(self, r):
- r.set('a', 2)
+ r.set("a", 2)
r.script_load(multiply_script)
- r.script_flush('ASYNC')
+ r.script_flush("ASYNC")
- r.set('a', 2)
+ r.set("a", 2)
r.script_load(multiply_script)
- r.script_flush('SYNC')
+ r.script_flush("SYNC")
- r.set('a', 2)
+ r.set("a", 2)
r.script_load(multiply_script)
r.script_flush()
with pytest.raises(exceptions.DataError):
- r.set('a', 2)
+ r.set("a", 2)
r.script_load(multiply_script)
r.script_flush("NOTREAL")
def test_script_flush(self, r):
- r.set('a', 2)
+ r.set("a", 2)
r.script_load(multiply_script)
r.script_flush(None)
with pytest.raises(exceptions.DataError):
- r.set('a', 2)
+ r.set("a", 2)
r.script_load(multiply_script)
r.script_flush("NOTREAL")
def test_evalsha(self, r):
- r.set('a', 2)
+ r.set("a", 2)
sha = r.script_load(multiply_script)
# 2 * 3 == 6
- assert r.evalsha(sha, 1, 'a', 3) == 6
+ assert r.evalsha(sha, 1, "a", 3) == 6
def test_evalsha_script_not_loaded(self, r):
- r.set('a', 2)
+ r.set("a", 2)
sha = r.script_load(multiply_script)
# remove the script from Redis's cache
r.script_flush()
with pytest.raises(exceptions.NoScriptError):
- r.evalsha(sha, 1, 'a', 3)
+ r.evalsha(sha, 1, "a", 3)
def test_script_loading(self, r):
# get the sha, then clear the cache
@@ -85,31 +83,31 @@ class TestScripting:
assert r.script_exists(sha) == [True]
def test_script_object(self, r):
- r.set('a', 2)
+ r.set("a", 2)
multiply = r.register_script(multiply_script)
precalculated_sha = multiply.sha
assert precalculated_sha
assert r.script_exists(multiply.sha) == [False]
# Test second evalsha block (after NoScriptError)
- assert multiply(keys=['a'], args=[3]) == 6
+ assert multiply(keys=["a"], args=[3]) == 6
# At this point, the script should be loaded
assert r.script_exists(multiply.sha) == [True]
# Test that the precalculated sha matches the one from redis
assert multiply.sha == precalculated_sha
# Test first evalsha block
- assert multiply(keys=['a'], args=[3]) == 6
+ assert multiply(keys=["a"], args=[3]) == 6
def test_script_object_in_pipeline(self, r):
multiply = r.register_script(multiply_script)
precalculated_sha = multiply.sha
assert precalculated_sha
pipe = r.pipeline()
- pipe.set('a', 2)
- pipe.get('a')
- multiply(keys=['a'], args=[3], client=pipe)
+ pipe.set("a", 2)
+ pipe.get("a")
+ multiply(keys=["a"], args=[3], client=pipe)
assert r.script_exists(multiply.sha) == [False]
# [SET worked, GET 'a', result of multiple script]
- assert pipe.execute() == [True, b'2', 6]
+ assert pipe.execute() == [True, b"2", 6]
# The script should have been loaded by pipe.execute()
assert r.script_exists(multiply.sha) == [True]
# The precalculated sha should have been the correct one
@@ -119,12 +117,12 @@ class TestScripting:
# the multiply script should be reloaded by pipe.execute()
r.script_flush()
pipe = r.pipeline()
- pipe.set('a', 2)
- pipe.get('a')
- multiply(keys=['a'], args=[3], client=pipe)
+ pipe.set("a", 2)
+ pipe.get("a")
+ multiply(keys=["a"], args=[3], client=pipe)
assert r.script_exists(multiply.sha) == [False]
# [SET worked, GET 'a', result of multiple script]
- assert pipe.execute() == [True, b'2', 6]
+ assert pipe.execute() == [True, b"2", 6]
assert r.script_exists(multiply.sha) == [True]
def test_eval_msgpack_pipeline_error_in_lua(self, r):
@@ -135,12 +133,12 @@ class TestScripting:
# avoiding a dependency to msgpack, this is the output of
# msgpack.dumps({"name": "joe"})
- msgpack_message_1 = b'\x81\xa4name\xa3Joe'
+ msgpack_message_1 = b"\x81\xa4name\xa3Joe"
msgpack_hello(args=[msgpack_message_1], client=pipe)
assert r.script_exists(msgpack_hello.sha) == [False]
- assert pipe.execute()[0] == b'hello Joe'
+ assert pipe.execute()[0] == b"hello Joe"
assert r.script_exists(msgpack_hello.sha) == [True]
msgpack_hello_broken = r.register_script(msgpack_hello_script_broken)
diff --git a/tests/test_search.py b/tests/test_search.py
index c7b570c..5b6a660 100644
--- a/tests/test_search.py
+++ b/tests/test_search.py
@@ -1,52 +1,32 @@
-import pytest
-import redis
import bz2
import csv
-import time
import os
-
+import time
from io import TextIOWrapper
-from .conftest import skip_ifmodversion_lt, default_redismod_url
-from redis import Redis
+import pytest
+
+import redis
import redis.commands.search
+import redis.commands.search.aggregation as aggregations
+import redis.commands.search.reducers as reducers
+from redis import Redis
from redis.commands.json.path import Path
from redis.commands.search import Search
-from redis.commands.search.field import (
- GeoField,
- NumericField,
- TagField,
- TextField
-)
-from redis.commands.search.query import (
- GeoFilter,
- NumericFilter,
- Query
-)
-from redis.commands.search.result import Result
+from redis.commands.search.field import GeoField, NumericField, TagField, TextField
from redis.commands.search.indexDefinition import IndexDefinition, IndexType
+from redis.commands.search.query import GeoFilter, NumericFilter, Query
+from redis.commands.search.result import Result
from redis.commands.search.suggestion import Suggestion
-import redis.commands.search.aggregation as aggregations
-import redis.commands.search.reducers as reducers
-WILL_PLAY_TEXT = (
- os.path.abspath(
- os.path.join(
- os.path.dirname(__file__),
- "testdata",
- "will_play_text.csv.bz2"
- )
- )
+from .conftest import default_redismod_url, skip_ifmodversion_lt
+
+WILL_PLAY_TEXT = os.path.abspath(
+ os.path.join(os.path.dirname(__file__), "testdata", "will_play_text.csv.bz2")
)
-TITLES_CSV = (
- os.path.abspath(
- os.path.join(
- os.path.dirname(__file__),
- "testdata",
- "titles.csv"
- )
- )
+TITLES_CSV = os.path.abspath(
+ os.path.join(os.path.dirname(__file__), "testdata", "titles.csv")
)
@@ -81,9 +61,7 @@ def getClient():
def createIndex(client, num_docs=100, definition=None):
try:
client.create_index(
- (TextField("play", weight=5.0),
- TextField("txt"),
- NumericField("chapter")),
+ (TextField("play", weight=5.0), TextField("txt"), NumericField("chapter")),
definition=definition,
)
except redis.ResponseError:
@@ -96,8 +74,7 @@ def createIndex(client, num_docs=100, definition=None):
r = csv.reader(bzfp, delimiter=";")
for n, line in enumerate(r):
- play, chapter, _, text = \
- line[1], line[2], line[4], line[5]
+ play, chapter, _, text = line[1], line[2], line[4], line[5]
key = f"{play}:{chapter}".lower()
d = chapters.setdefault(key, {})
@@ -183,12 +160,10 @@ def test_client(client):
# test in fields
txt_total = (
- client.ft().search(
- Query("henry").no_content().limit_fields("txt")).total
+ client.ft().search(Query("henry").no_content().limit_fields("txt")).total
)
play_total = (
- client.ft().search(
- Query("henry").no_content().limit_fields("play")).total
+ client.ft().search(Query("henry").no_content().limit_fields("play")).total
)
both_total = (
client.ft()
@@ -217,10 +192,8 @@ def test_client(client):
# test slop and in order
assert 193 == client.ft().search(Query("henry king")).total
- assert 3 == client.ft().search(
- Query("henry king").slop(0).in_order()).total
- assert 52 == client.ft().search(
- Query("king henry").slop(0).in_order()).total
+ assert 3 == client.ft().search(Query("henry king").slop(0).in_order()).total
+ assert 52 == client.ft().search(Query("king henry").slop(0).in_order()).total
assert 53 == client.ft().search(Query("henry king").slop(0)).total
assert 167 == client.ft().search(Query("henry king").slop(100)).total
@@ -284,11 +257,7 @@ def test_replace(client):
res = client.ft().search("foo bar")
assert 2 == res.total
- client.ft().add_document(
- "doc1",
- replace=True,
- txt="this is a replaced doc"
- )
+ client.ft().add_document("doc1", replace=True, txt="this is a replaced doc")
res = client.ft().search("foo bar")
assert 1 == res.total
@@ -301,10 +270,7 @@ def test_replace(client):
@pytest.mark.redismod
def test_stopwords(client):
- client.ft().create_index(
- (TextField("txt"),),
- stopwords=["foo", "bar", "baz"]
- )
+ client.ft().create_index((TextField("txt"),), stopwords=["foo", "bar", "baz"])
client.ft().add_document("doc1", txt="foo bar")
client.ft().add_document("doc2", txt="hello world")
waitForIndex(client, "idx")
@@ -318,17 +284,8 @@ def test_stopwords(client):
@pytest.mark.redismod
def test_filters(client):
- client.ft().create_index(
- (TextField("txt"),
- NumericField("num"),
- GeoField("loc"))
- )
- client.ft().add_document(
- "doc1",
- txt="foo bar",
- num=3.141,
- loc="-0.441,51.458"
- )
+ client.ft().create_index((TextField("txt"), NumericField("num"), GeoField("loc")))
+ client.ft().add_document("doc1", txt="foo bar", num=3.141, loc="-0.441,51.458")
client.ft().add_document("doc2", txt="foo baz", num=2, loc="-0.1,51.2")
waitForIndex(client, "idx")
@@ -336,8 +293,7 @@ def test_filters(client):
q1 = Query("foo").add_filter(NumericFilter("num", 0, 2)).no_content()
q2 = (
Query("foo")
- .add_filter(
- NumericFilter("num", 2, NumericFilter.INF, minExclusive=True))
+ .add_filter(NumericFilter("num", 2, NumericFilter.INF, minExclusive=True))
.no_content()
)
res1, res2 = client.ft().search(q1), client.ft().search(q2)
@@ -348,10 +304,8 @@ def test_filters(client):
assert "doc1" == res2.docs[0].id
# Test geo filter
- q1 = Query("foo").add_filter(
- GeoFilter("loc", -0.44, 51.45, 10)).no_content()
- q2 = Query("foo").add_filter(
- GeoFilter("loc", -0.44, 51.45, 100)).no_content()
+ q1 = Query("foo").add_filter(GeoFilter("loc", -0.44, 51.45, 10)).no_content()
+ q2 = Query("foo").add_filter(GeoFilter("loc", -0.44, 51.45, 100)).no_content()
res1, res2 = client.ft().search(q1), client.ft().search(q2)
assert 1 == res1.total
@@ -377,10 +331,7 @@ def test_payloads_with_no_content(client):
@pytest.mark.redismod
def test_sort_by(client):
- client.ft().create_index(
- (TextField("txt"),
- NumericField("num", sortable=True))
- )
+ client.ft().create_index((TextField("txt"), NumericField("num", sortable=True)))
client.ft().add_document("doc1", txt="foo bar", num=1)
client.ft().add_document("doc2", txt="foo baz", num=2)
client.ft().add_document("doc3", txt="foo qux", num=3)
@@ -422,10 +373,7 @@ def test_drop_index():
@pytest.mark.redismod
def test_example(client):
# Creating the index definition and schema
- client.ft().create_index(
- (TextField("title", weight=5.0),
- TextField("body"))
- )
+ client.ft().create_index((TextField("title", weight=5.0), TextField("body")))
# Indexing a document
client.ft().add_document(
@@ -483,12 +431,7 @@ def test_auto_complete(client):
client.ft().sugadd("ac", Suggestion("pay2", payload="pl2"))
client.ft().sugadd("ac", Suggestion("pay3", payload="pl3"))
- sugs = client.ft().sugget(
- "ac",
- "pay",
- with_payloads=True,
- with_scores=True
- )
+ sugs = client.ft().sugget("ac", "pay", with_payloads=True, with_scores=True)
assert 3 == len(sugs)
for sug in sugs:
assert sug.payload
@@ -550,11 +493,7 @@ def test_no_index(client):
@pytest.mark.redismod
def test_partial(client):
- client.ft().create_index(
- (TextField("f1"),
- TextField("f2"),
- TextField("f3"))
- )
+ client.ft().create_index((TextField("f1"), TextField("f2"), TextField("f3")))
client.ft().add_document("doc1", f1="f1_val", f2="f2_val")
client.ft().add_document("doc2", f1="f1_val", f2="f2_val")
client.ft().add_document("doc1", f3="f3_val", partial=True)
@@ -572,11 +511,7 @@ def test_partial(client):
@pytest.mark.redismod
def test_no_create(client):
- client.ft().create_index(
- (TextField("f1"),
- TextField("f2"),
- TextField("f3"))
- )
+ client.ft().create_index((TextField("f1"), TextField("f2"), TextField("f3")))
client.ft().add_document("doc1", f1="f1_val", f2="f2_val")
client.ft().add_document("doc2", f1="f1_val", f2="f2_val")
client.ft().add_document("doc1", f3="f3_val", no_create=True)
@@ -592,21 +527,12 @@ def test_no_create(client):
assert 1 == res.total
with pytest.raises(redis.ResponseError):
- client.ft().add_document(
- "doc3",
- f2="f2_val",
- f3="f3_val",
- no_create=True
- )
+ client.ft().add_document("doc3", f2="f2_val", f3="f3_val", no_create=True)
@pytest.mark.redismod
def test_explain(client):
- client.ft().create_index(
- (TextField("f1"),
- TextField("f2"),
- TextField("f3"))
- )
+ client.ft().create_index((TextField("f1"), TextField("f2"), TextField("f3")))
res = client.ft().explain("@f3:f3_val @f2:f2_val @f1:f1_val")
assert res
@@ -629,8 +555,8 @@ def test_summarize(client):
doc = sorted(client.ft().search(q).docs)[0]
assert "<b>Henry</b> IV" == doc.play
assert (
- "ACT I SCENE I. London. The palace. Enter <b>KING</b> <b>HENRY</b>, LORD JOHN OF LANCASTER, the EARL of WESTMORELAND, SIR... " # noqa
- == doc.txt
+ "ACT I SCENE I. London. The palace. Enter <b>KING</b> <b>HENRY</b>, LORD JOHN OF LANCASTER, the EARL of WESTMORELAND, SIR... " # noqa
+ == doc.txt
)
q = Query("king henry").paging(0, 1).summarize().highlight()
@@ -638,8 +564,8 @@ def test_summarize(client):
doc = sorted(client.ft().search(q).docs)[0]
assert "<b>Henry</b> ... " == doc.play
assert (
- "ACT I SCENE I. London. The palace. Enter <b>KING</b> <b>HENRY</b>, LORD JOHN OF LANCASTER, the EARL of WESTMORELAND, SIR... " # noqa
- == doc.txt
+ "ACT I SCENE I. London. The palace. Enter <b>KING</b> <b>HENRY</b>, LORD JOHN OF LANCASTER, the EARL of WESTMORELAND, SIR... " # noqa
+ == doc.txt
)
@@ -786,11 +712,7 @@ def test_alter_schema_add(client):
def test_spell_check(client):
client.ft().create_index((TextField("f1"), TextField("f2")))
- client.ft().add_document(
- "doc1",
- f1="some valid content",
- f2="this is sample text"
- )
+ client.ft().add_document("doc1", f1="some valid content", f2="this is sample text")
client.ft().add_document("doc2", f1="very important", f2="lorem ipsum")
waitForIndex(client, "idx")
@@ -812,10 +734,10 @@ def test_spell_check(client):
res = client.ft().spellcheck("lorm", include="dict")
assert len(res["lorm"]) == 3
assert (
- res["lorm"][0]["suggestion"],
- res["lorm"][1]["suggestion"],
- res["lorm"][2]["suggestion"],
- ) == ("lorem", "lore", "lorm")
+ res["lorm"][0]["suggestion"],
+ res["lorm"][1]["suggestion"],
+ res["lorm"][2]["suggestion"],
+ ) == ("lorem", "lore", "lorm")
assert (res["lorm"][0]["score"], res["lorm"][1]["score"]) == ("0.5", "0")
# test spellcheck exclude
@@ -873,7 +795,7 @@ def test_scorer(client):
)
client.ft().add_document(
"doc2",
- description="Quick alice was beginning to get very tired of sitting by her quick sister on the bank, and of having nothing to do.", # noqa
+ description="Quick alice was beginning to get very tired of sitting by her quick sister on the bank, and of having nothing to do.", # noqa
)
# default scorer is TFIDF
@@ -881,8 +803,7 @@ def test_scorer(client):
assert 1.0 == res.docs[0].score
res = client.ft().search(Query("quick").scorer("TFIDF").with_scores())
assert 1.0 == res.docs[0].score
- res = client.ft().search(
- Query("quick").scorer("TFIDF.DOCNORM").with_scores())
+ res = client.ft().search(Query("quick").scorer("TFIDF.DOCNORM").with_scores())
assert 0.1111111111111111 == res.docs[0].score
res = client.ft().search(Query("quick").scorer("BM25").with_scores())
assert 0.17699114465425977 == res.docs[0].score
@@ -1060,7 +981,7 @@ def test_aggregations_groupby(client):
)
res = client.ft().aggregate(req).rows[0]
- assert res == ['parent', 'redis', 'first', 'RediSearch']
+ assert res == ["parent", "redis", "first", "RediSearch"]
req = aggregations.AggregateRequest("redis").group_by(
"@parent",
@@ -1083,35 +1004,33 @@ def test_aggregations_sort_by_and_limit(client):
)
)
- client.ft().client.hset("doc1", mapping={'t1': 'a', 't2': 'b'})
- client.ft().client.hset("doc2", mapping={'t1': 'b', 't2': 'a'})
+ client.ft().client.hset("doc1", mapping={"t1": "a", "t2": "b"})
+ client.ft().client.hset("doc2", mapping={"t1": "b", "t2": "a"})
# test sort_by using SortDirection
- req = aggregations.AggregateRequest("*") \
- .sort_by(aggregations.Asc("@t2"), aggregations.Desc("@t1"))
+ req = aggregations.AggregateRequest("*").sort_by(
+ aggregations.Asc("@t2"), aggregations.Desc("@t1")
+ )
res = client.ft().aggregate(req)
- assert res.rows[0] == ['t2', 'a', 't1', 'b']
- assert res.rows[1] == ['t2', 'b', 't1', 'a']
+ assert res.rows[0] == ["t2", "a", "t1", "b"]
+ assert res.rows[1] == ["t2", "b", "t1", "a"]
# test sort_by without SortDirection
- req = aggregations.AggregateRequest("*") \
- .sort_by("@t1")
+ req = aggregations.AggregateRequest("*").sort_by("@t1")
res = client.ft().aggregate(req)
- assert res.rows[0] == ['t1', 'a']
- assert res.rows[1] == ['t1', 'b']
+ assert res.rows[0] == ["t1", "a"]
+ assert res.rows[1] == ["t1", "b"]
# test sort_by with max
- req = aggregations.AggregateRequest("*") \
- .sort_by("@t1", max=1)
+ req = aggregations.AggregateRequest("*").sort_by("@t1", max=1)
res = client.ft().aggregate(req)
assert len(res.rows) == 1
# test limit
- req = aggregations.AggregateRequest("*") \
- .sort_by("@t1").limit(1, 1)
+ req = aggregations.AggregateRequest("*").sort_by("@t1").limit(1, 1)
res = client.ft().aggregate(req)
assert len(res.rows) == 1
- assert res.rows[0] == ['t1', 'b']
+ assert res.rows[0] == ["t1", "b"]
@pytest.mark.redismod
@@ -1123,17 +1042,17 @@ def test_aggregations_load(client):
)
)
- client.ft().client.hset("doc1", mapping={'t1': 'hello', 't2': 'world'})
+ client.ft().client.hset("doc1", mapping={"t1": "hello", "t2": "world"})
# load t1
req = aggregations.AggregateRequest("*").load("t1")
res = client.ft().aggregate(req)
- assert res.rows[0] == ['t1', 'hello']
+ assert res.rows[0] == ["t1", "hello"]
# load t2
req = aggregations.AggregateRequest("*").load("t2")
res = client.ft().aggregate(req)
- assert res.rows[0] == ['t2', 'world']
+ assert res.rows[0] == ["t2", "world"]
@pytest.mark.redismod
@@ -1147,24 +1066,19 @@ def test_aggregations_apply(client):
client.ft().client.hset(
"doc1",
- mapping={
- 'PrimaryKey': '9::362330',
- 'CreatedDateTimeUTC': '637387878524969984'
- }
+ mapping={"PrimaryKey": "9::362330", "CreatedDateTimeUTC": "637387878524969984"},
)
client.ft().client.hset(
"doc2",
- mapping={
- 'PrimaryKey': '9::362329',
- 'CreatedDateTimeUTC': '637387875859270016'
- }
+ mapping={"PrimaryKey": "9::362329", "CreatedDateTimeUTC": "637387875859270016"},
)
- req = aggregations.AggregateRequest("*") \
- .apply(CreatedDateTimeUTC='@CreatedDateTimeUTC * 10')
+ req = aggregations.AggregateRequest("*").apply(
+ CreatedDateTimeUTC="@CreatedDateTimeUTC * 10"
+ )
res = client.ft().aggregate(req)
- assert res.rows[0] == ['CreatedDateTimeUTC', '6373878785249699840']
- assert res.rows[1] == ['CreatedDateTimeUTC', '6373878758592700416']
+ assert res.rows[0] == ["CreatedDateTimeUTC", "6373878785249699840"]
+ assert res.rows[1] == ["CreatedDateTimeUTC", "6373878758592700416"]
@pytest.mark.redismod
@@ -1176,33 +1090,19 @@ def test_aggregations_filter(client):
)
)
- client.ft().client.hset(
- "doc1",
- mapping={
- 'name': 'bar',
- 'age': '25'
- }
- )
- client.ft().client.hset(
- "doc2",
- mapping={
- 'name': 'foo',
- 'age': '19'
- }
- )
+ client.ft().client.hset("doc1", mapping={"name": "bar", "age": "25"})
+ client.ft().client.hset("doc2", mapping={"name": "foo", "age": "19"})
- req = aggregations.AggregateRequest("*") \
- .filter("@name=='foo' && @age < 20")
+ req = aggregations.AggregateRequest("*").filter("@name=='foo' && @age < 20")
res = client.ft().aggregate(req)
assert len(res.rows) == 1
- assert res.rows[0] == ['name', 'foo', 'age', '19']
+ assert res.rows[0] == ["name", "foo", "age", "19"]
- req = aggregations.AggregateRequest("*") \
- .filter("@age > 15").sort_by("@age")
+ req = aggregations.AggregateRequest("*").filter("@age > 15").sort_by("@age")
res = client.ft().aggregate(req)
assert len(res.rows) == 2
- assert res.rows[0] == ['age', '19']
- assert res.rows[1] == ['age', '25']
+ assert res.rows[0] == ["age", "19"]
+ assert res.rows[1] == ["age", "25"]
@pytest.mark.redismod
@@ -1226,25 +1126,25 @@ def test_index_definition(client):
)
assert [
- "ON",
- "JSON",
- "PREFIX",
- 2,
- "hset:",
- "henry",
- "FILTER",
- "@f1==32",
- "LANGUAGE_FIELD",
- "play",
- "LANGUAGE",
- "English",
- "SCORE_FIELD",
- "chapter",
- "SCORE",
- 0.5,
- "PAYLOAD_FIELD",
- "txt",
- ] == definition.args
+ "ON",
+ "JSON",
+ "PREFIX",
+ 2,
+ "hset:",
+ "henry",
+ "FILTER",
+ "@f1==32",
+ "LANGUAGE_FIELD",
+ "play",
+ "LANGUAGE",
+ "English",
+ "SCORE_FIELD",
+ "chapter",
+ "SCORE",
+ 0.5,
+ "PAYLOAD_FIELD",
+ "txt",
+ ] == definition.args
createIndex(client.ft(), num_docs=500, definition=definition)
@@ -1274,10 +1174,7 @@ def test_create_client_definition_hash(client):
Create definition with IndexType.HASH as index type (ON HASH),
and use hset to test the client definition.
"""
- definition = IndexDefinition(
- prefix=["hset:", "henry"],
- index_type=IndexType.HASH
- )
+ definition = IndexDefinition(prefix=["hset:", "henry"], index_type=IndexType.HASH)
createIndex(client.ft(), num_docs=500, definition=definition)
info = client.ft().info()
@@ -1320,15 +1217,10 @@ def test_fields_as_name(client):
client.ft().create_index(SCHEMA, definition=definition)
# insert json data
- res = client.json().set(
- "doc:1",
- Path.rootPath(),
- {"name": "Jon", "age": 25}
- )
+ res = client.json().set("doc:1", Path.rootPath(), {"name": "Jon", "age": 25})
assert res
- total = client.ft().search(
- Query("Jon").return_fields("name", "just_a_number")).docs
+ total = client.ft().search(Query("Jon").return_fields("name", "just_a_number")).docs
assert 1 == len(total)
assert "doc:1" == total[0].id
assert "Jon" == total[0].name
@@ -1354,14 +1246,12 @@ def test_search_return_fields(client):
client.ft().create_index(SCHEMA, definition=definition)
waitForIndex(client, "idx")
- total = client.ft().search(
- Query("*").return_field("$.t", as_field="txt")).docs
+ total = client.ft().search(Query("*").return_field("$.t", as_field="txt")).docs
assert 1 == len(total)
assert "doc:1" == total[0].id
assert "riceratops" == total[0].txt
- total = client.ft().search(
- Query("*").return_field("$.t2", as_field="txt")).docs
+ total = client.ft().search(Query("*").return_field("$.t2", as_field="txt")).docs
assert 1 == len(total)
assert "doc:1" == total[0].id
assert "telmatosaurus" == total[0].txt
@@ -1379,17 +1269,10 @@ def test_synupdate(client):
)
client.ft().synupdate("id1", True, "boy", "child", "offspring")
- client.ft().add_document(
- "doc1",
- title="he is a baby",
- body="this is a test")
+ client.ft().add_document("doc1", title="he is a baby", body="this is a test")
client.ft().synupdate("id1", True, "baby")
- client.ft().add_document(
- "doc2",
- title="he is another baby",
- body="another test"
- )
+ client.ft().add_document("doc2", title="he is another baby", body="another test")
res = client.ft().search(Query("child").expander("SYNONYM"))
assert res.docs[0].id == "doc2"
@@ -1431,15 +1314,12 @@ def test_create_json_with_alias(client):
"""
definition = IndexDefinition(prefix=["king:"], index_type=IndexType.JSON)
client.ft().create_index(
- (TextField("$.name", as_name="name"),
- NumericField("$.num", as_name="num")),
- definition=definition
+ (TextField("$.name", as_name="name"), NumericField("$.num", as_name="num")),
+ definition=definition,
)
- client.json().set("king:1", Path.rootPath(), {"name": "henry",
- "num": 42})
- client.json().set("king:2", Path.rootPath(), {"name": "james",
- "num": 3.14})
+ client.json().set("king:1", Path.rootPath(), {"name": "henry", "num": 42})
+ client.json().set("king:2", Path.rootPath(), {"name": "james", "num": 3.14})
res = client.ft().search("@name:henry")
assert res.docs[0].id == "king:1"
@@ -1466,12 +1346,12 @@ def test_json_with_multipath(client):
"""
definition = IndexDefinition(prefix=["king:"], index_type=IndexType.JSON)
client.ft().create_index(
- (TagField("$..name", as_name="name")),
- definition=definition
+ (TagField("$..name", as_name="name")), definition=definition
)
- client.json().set("king:1", Path.rootPath(),
- {"name": "henry", "country": {"name": "england"}})
+ client.json().set(
+ "king:1", Path.rootPath(), {"name": "henry", "country": {"name": "england"}}
+ )
res = client.ft().search("@name:{henry}")
assert res.docs[0].id == "king:1"
@@ -1489,9 +1369,11 @@ def test_json_with_multipath(client):
def test_json_with_jsonpath(client):
definition = IndexDefinition(index_type=IndexType.JSON)
client.ft().create_index(
- (TextField('$["prod:name"]', as_name="name"),
- TextField('$.prod:name', as_name="name_unsupported")),
- definition=definition
+ (
+ TextField('$["prod:name"]', as_name="name"),
+ TextField("$.prod:name", as_name="name_unsupported"),
+ ),
+ definition=definition,
)
client.json().set("doc:1", Path.rootPath(), {"prod:name": "RediSearch"})
@@ -1510,11 +1392,10 @@ def test_json_with_jsonpath(client):
res = client.ft().search(Query("@name:RediSearch").return_field("name"))
assert res.total == 1
assert res.docs[0].id == "doc:1"
- assert res.docs[0].name == 'RediSearch'
+ assert res.docs[0].name == "RediSearch"
# return of an unsupported field fails
- res = client.ft().search(Query("@name:RediSearch")
- .return_field("name_unsupported"))
+ res = client.ft().search(Query("@name:RediSearch").return_field("name_unsupported"))
assert res.total == 1
assert res.docs[0].id == "doc:1"
with pytest.raises(Exception):
@@ -1523,42 +1404,49 @@ def test_json_with_jsonpath(client):
@pytest.mark.redismod
def test_profile(client):
- client.ft().create_index((TextField('t'),))
- client.ft().client.hset('1', 't', 'hello')
- client.ft().client.hset('2', 't', 'world')
+ client.ft().create_index((TextField("t"),))
+ client.ft().client.hset("1", "t", "hello")
+ client.ft().client.hset("2", "t", "world")
# check using Query
- q = Query('hello|world').no_content()
+ q = Query("hello|world").no_content()
res, det = client.ft().profile(q)
- assert det['Iterators profile']['Counter'] == 2.0
- assert len(det['Iterators profile']['Child iterators']) == 2
- assert det['Iterators profile']['Type'] == 'UNION'
- assert det['Parsing time'] < 0.3
+ assert det["Iterators profile"]["Counter"] == 2.0
+ assert len(det["Iterators profile"]["Child iterators"]) == 2
+ assert det["Iterators profile"]["Type"] == "UNION"
+ assert det["Parsing time"] < 0.3
assert len(res.docs) == 2 # check also the search result
# check using AggregateRequest
- req = aggregations.AggregateRequest("*").load("t")\
+ req = (
+ aggregations.AggregateRequest("*")
+ .load("t")
.apply(prefix="startswith(@t, 'hel')")
+ )
res, det = client.ft().profile(req)
- assert det['Iterators profile']['Counter'] == 2.0
- assert det['Iterators profile']['Type'] == 'WILDCARD'
- assert det['Parsing time'] < 0.3
+ assert det["Iterators profile"]["Counter"] == 2.0
+ assert det["Iterators profile"]["Type"] == "WILDCARD"
+ assert det["Parsing time"] < 0.3
assert len(res.rows) == 2 # check also the search result
@pytest.mark.redismod
def test_profile_limited(client):
- client.ft().create_index((TextField('t'),))
- client.ft().client.hset('1', 't', 'hello')
- client.ft().client.hset('2', 't', 'hell')
- client.ft().client.hset('3', 't', 'help')
- client.ft().client.hset('4', 't', 'helowa')
+ client.ft().create_index((TextField("t"),))
+ client.ft().client.hset("1", "t", "hello")
+ client.ft().client.hset("2", "t", "hell")
+ client.ft().client.hset("3", "t", "help")
+ client.ft().client.hset("4", "t", "helowa")
- q = Query('%hell% hel*')
+ q = Query("%hell% hel*")
res, det = client.ft().profile(q, limited=True)
- assert det['Iterators profile']['Child iterators'][0]['Child iterators'] \
- == 'The number of iterators in the union is 3'
- assert det['Iterators profile']['Child iterators'][1]['Child iterators'] \
- == 'The number of iterators in the union is 4'
- assert det['Iterators profile']['Type'] == 'INTERSECT'
+ assert (
+ det["Iterators profile"]["Child iterators"][0]["Child iterators"]
+ == "The number of iterators in the union is 3"
+ )
+ assert (
+ det["Iterators profile"]["Child iterators"][1]["Child iterators"]
+ == "The number of iterators in the union is 4"
+ )
+ assert det["Iterators profile"]["Type"] == "INTERSECT"
assert len(res.docs) == 3 # check also the search result
diff --git a/tests/test_sentinel.py b/tests/test_sentinel.py
index 9377d5b..0357443 100644
--- a/tests/test_sentinel.py
+++ b/tests/test_sentinel.py
@@ -2,10 +2,14 @@ import socket
import pytest
-from redis import exceptions
-from redis.sentinel import (Sentinel, SentinelConnectionPool,
- MasterNotFoundError, SlaveNotFoundError)
import redis.sentinel
+from redis import exceptions
+from redis.sentinel import (
+ MasterNotFoundError,
+ Sentinel,
+ SentinelConnectionPool,
+ SlaveNotFoundError,
+)
@pytest.fixture(scope="module")
@@ -33,20 +37,20 @@ class SentinelTestClient:
def execute_command(self, *args, **kwargs):
# wrapper purely to validate the calls don't explode
from redis.client import bool_ok
+
return bool_ok
class SentinelTestCluster:
- def __init__(self, servisentinel_ce_name='mymaster', ip='127.0.0.1',
- port=6379):
+ def __init__(self, servisentinel_ce_name="mymaster", ip="127.0.0.1", port=6379):
self.clients = {}
self.master = {
- 'ip': ip,
- 'port': port,
- 'is_master': True,
- 'is_sdown': False,
- 'is_odown': False,
- 'num-other-sentinels': 0,
+ "ip": ip,
+ "port": port,
+ "is_master": True,
+ "is_sdown": False,
+ "is_odown": False,
+ "num-other-sentinels": 0,
}
self.service_name = servisentinel_ce_name
self.slaves = []
@@ -69,6 +73,7 @@ class SentinelTestCluster:
def cluster(request, master_ip):
def teardown():
redis.sentinel.Redis = saved_Redis
+
cluster = SentinelTestCluster(ip=master_ip)
saved_Redis = redis.sentinel.Redis
redis.sentinel.Redis = cluster.client
@@ -78,126 +83,121 @@ def cluster(request, master_ip):
@pytest.fixture()
def sentinel(request, cluster):
- return Sentinel([('foo', 26379), ('bar', 26379)])
+ return Sentinel([("foo", 26379), ("bar", 26379)])
@pytest.mark.onlynoncluster
def test_discover_master(sentinel, master_ip):
- address = sentinel.discover_master('mymaster')
+ address = sentinel.discover_master("mymaster")
assert address == (master_ip, 6379)
@pytest.mark.onlynoncluster
def test_discover_master_error(sentinel):
with pytest.raises(MasterNotFoundError):
- sentinel.discover_master('xxx')
+ sentinel.discover_master("xxx")
@pytest.mark.onlynoncluster
def test_discover_master_sentinel_down(cluster, sentinel, master_ip):
# Put first sentinel 'foo' down
- cluster.nodes_down.add(('foo', 26379))
- address = sentinel.discover_master('mymaster')
+ cluster.nodes_down.add(("foo", 26379))
+ address = sentinel.discover_master("mymaster")
assert address == (master_ip, 6379)
# 'bar' is now first sentinel
- assert sentinel.sentinels[0].id == ('bar', 26379)
+ assert sentinel.sentinels[0].id == ("bar", 26379)
@pytest.mark.onlynoncluster
def test_discover_master_sentinel_timeout(cluster, sentinel, master_ip):
# Put first sentinel 'foo' down
- cluster.nodes_timeout.add(('foo', 26379))
- address = sentinel.discover_master('mymaster')
+ cluster.nodes_timeout.add(("foo", 26379))
+ address = sentinel.discover_master("mymaster")
assert address == (master_ip, 6379)
# 'bar' is now first sentinel
- assert sentinel.sentinels[0].id == ('bar', 26379)
+ assert sentinel.sentinels[0].id == ("bar", 26379)
@pytest.mark.onlynoncluster
def test_master_min_other_sentinels(cluster, master_ip):
- sentinel = Sentinel([('foo', 26379)], min_other_sentinels=1)
+ sentinel = Sentinel([("foo", 26379)], min_other_sentinels=1)
# min_other_sentinels
with pytest.raises(MasterNotFoundError):
- sentinel.discover_master('mymaster')
- cluster.master['num-other-sentinels'] = 2
- address = sentinel.discover_master('mymaster')
+ sentinel.discover_master("mymaster")
+ cluster.master["num-other-sentinels"] = 2
+ address = sentinel.discover_master("mymaster")
assert address == (master_ip, 6379)
@pytest.mark.onlynoncluster
def test_master_odown(cluster, sentinel):
- cluster.master['is_odown'] = True
+ cluster.master["is_odown"] = True
with pytest.raises(MasterNotFoundError):
- sentinel.discover_master('mymaster')
+ sentinel.discover_master("mymaster")
@pytest.mark.onlynoncluster
def test_master_sdown(cluster, sentinel):
- cluster.master['is_sdown'] = True
+ cluster.master["is_sdown"] = True
with pytest.raises(MasterNotFoundError):
- sentinel.discover_master('mymaster')
+ sentinel.discover_master("mymaster")
@pytest.mark.onlynoncluster
def test_discover_slaves(cluster, sentinel):
- assert sentinel.discover_slaves('mymaster') == []
+ assert sentinel.discover_slaves("mymaster") == []
cluster.slaves = [
- {'ip': 'slave0', 'port': 1234, 'is_odown': False, 'is_sdown': False},
- {'ip': 'slave1', 'port': 1234, 'is_odown': False, 'is_sdown': False},
+ {"ip": "slave0", "port": 1234, "is_odown": False, "is_sdown": False},
+ {"ip": "slave1", "port": 1234, "is_odown": False, "is_sdown": False},
]
- assert sentinel.discover_slaves('mymaster') == [
- ('slave0', 1234), ('slave1', 1234)]
+ assert sentinel.discover_slaves("mymaster") == [("slave0", 1234), ("slave1", 1234)]
# slave0 -> ODOWN
- cluster.slaves[0]['is_odown'] = True
- assert sentinel.discover_slaves('mymaster') == [
- ('slave1', 1234)]
+ cluster.slaves[0]["is_odown"] = True
+ assert sentinel.discover_slaves("mymaster") == [("slave1", 1234)]
# slave1 -> SDOWN
- cluster.slaves[1]['is_sdown'] = True
- assert sentinel.discover_slaves('mymaster') == []
+ cluster.slaves[1]["is_sdown"] = True
+ assert sentinel.discover_slaves("mymaster") == []
- cluster.slaves[0]['is_odown'] = False
- cluster.slaves[1]['is_sdown'] = False
+ cluster.slaves[0]["is_odown"] = False
+ cluster.slaves[1]["is_sdown"] = False
# node0 -> DOWN
- cluster.nodes_down.add(('foo', 26379))
- assert sentinel.discover_slaves('mymaster') == [
- ('slave0', 1234), ('slave1', 1234)]
+ cluster.nodes_down.add(("foo", 26379))
+ assert sentinel.discover_slaves("mymaster") == [("slave0", 1234), ("slave1", 1234)]
cluster.nodes_down.clear()
# node0 -> TIMEOUT
- cluster.nodes_timeout.add(('foo', 26379))
- assert sentinel.discover_slaves('mymaster') == [
- ('slave0', 1234), ('slave1', 1234)]
+ cluster.nodes_timeout.add(("foo", 26379))
+ assert sentinel.discover_slaves("mymaster") == [("slave0", 1234), ("slave1", 1234)]
@pytest.mark.onlynoncluster
def test_master_for(cluster, sentinel, master_ip):
- master = sentinel.master_for('mymaster', db=9)
+ master = sentinel.master_for("mymaster", db=9)
assert master.ping()
assert master.connection_pool.master_address == (master_ip, 6379)
# Use internal connection check
- master = sentinel.master_for('mymaster', db=9, check_connection=True)
+ master = sentinel.master_for("mymaster", db=9, check_connection=True)
assert master.ping()
@pytest.mark.onlynoncluster
def test_slave_for(cluster, sentinel):
cluster.slaves = [
- {'ip': '127.0.0.1', 'port': 6379,
- 'is_odown': False, 'is_sdown': False},
+ {"ip": "127.0.0.1", "port": 6379, "is_odown": False, "is_sdown": False},
]
- slave = sentinel.slave_for('mymaster', db=9)
+ slave = sentinel.slave_for("mymaster", db=9)
assert slave.ping()
@pytest.mark.onlynoncluster
def test_slave_for_slave_not_found_error(cluster, sentinel):
- cluster.master['is_odown'] = True
- slave = sentinel.slave_for('mymaster', db=9)
+ cluster.master["is_odown"] = True
+ slave = sentinel.slave_for("mymaster", db=9)
with pytest.raises(SlaveNotFoundError):
slave.ping()
@@ -205,13 +205,13 @@ def test_slave_for_slave_not_found_error(cluster, sentinel):
@pytest.mark.onlynoncluster
def test_slave_round_robin(cluster, sentinel, master_ip):
cluster.slaves = [
- {'ip': 'slave0', 'port': 6379, 'is_odown': False, 'is_sdown': False},
- {'ip': 'slave1', 'port': 6379, 'is_odown': False, 'is_sdown': False},
+ {"ip": "slave0", "port": 6379, "is_odown": False, "is_sdown": False},
+ {"ip": "slave1", "port": 6379, "is_odown": False, "is_sdown": False},
]
- pool = SentinelConnectionPool('mymaster', sentinel)
+ pool = SentinelConnectionPool("mymaster", sentinel)
rotator = pool.rotate_slaves()
- assert next(rotator) in (('slave0', 6379), ('slave1', 6379))
- assert next(rotator) in (('slave0', 6379), ('slave1', 6379))
+ assert next(rotator) in (("slave0", 6379), ("slave1", 6379))
+ assert next(rotator) in (("slave0", 6379), ("slave1", 6379))
# Fallback to master
assert next(rotator) == (master_ip, 6379)
with pytest.raises(SlaveNotFoundError):
@@ -230,5 +230,5 @@ def test_flushconfig(cluster, sentinel):
@pytest.mark.onlynoncluster
def test_reset(cluster, sentinel):
- cluster.master['is_odown'] = True
- assert sentinel.sentinel_reset('mymaster')
+ cluster.master["is_odown"] = True
+ assert sentinel.sentinel_reset("mymaster")
diff --git a/tests/test_timeseries.py b/tests/test_timeseries.py
index 0743357..8c97ab8 100644
--- a/tests/test_timeseries.py
+++ b/tests/test_timeseries.py
@@ -1,6 +1,8 @@
-import pytest
import time
from time import sleep
+
+import pytest
+
from .conftest import skip_ifmodversion_lt
@@ -68,8 +70,7 @@ def test_add(client):
assert 4 == client.ts().add(
4, 4, 2, retention_msecs=10, labels={"Redis": "Labs", "Time": "Series"}
)
- assert round(time.time()) == \
- round(float(client.ts().add(5, "*", 1)) / 1000)
+ assert round(time.time()) == round(float(client.ts().add(5, "*", 1)) / 1000)
info = client.ts().info(4)
assert 10 == info.retention_msecs
@@ -88,12 +89,7 @@ def test_add_duplicate_policy(client):
# Test for duplicate policy BLOCK
assert 1 == client.ts().add("time-serie-add-ooo-block", 1, 5.0)
with pytest.raises(Exception):
- client.ts().add(
- "time-serie-add-ooo-block",
- 1,
- 5.0,
- duplicate_policy="block"
- )
+ client.ts().add("time-serie-add-ooo-block", 1, 5.0, duplicate_policy="block")
# Test for duplicate policy LAST
assert 1 == client.ts().add("time-serie-add-ooo-last", 1, 5.0)
@@ -127,8 +123,7 @@ def test_add_duplicate_policy(client):
@pytest.mark.redismod
def test_madd(client):
client.ts().create("a")
- assert [1, 2, 3] == \
- client.ts().madd([("a", 1, 5), ("a", 2, 10), ("a", 3, 15)])
+ assert [1, 2, 3] == client.ts().madd([("a", 1, 5), ("a", 2, 10), ("a", 3, 15)])
@pytest.mark.redismod
@@ -206,13 +201,7 @@ def test_range(client):
assert 200 == len(client.ts().range(1, 0, 500))
# last sample isn't returned
assert 20 == len(
- client.ts().range(
- 1,
- 0,
- 500,
- aggregation_type="avg",
- bucket_size_msec=10
- )
+ client.ts().range(1, 0, 500, aggregation_type="avg", bucket_size_msec=10)
)
assert 10 == len(client.ts().range(1, 0, 500, count=10))
@@ -253,13 +242,7 @@ def test_rev_range(client):
assert 200 == len(client.ts().range(1, 0, 500))
# first sample isn't returned
assert 20 == len(
- client.ts().revrange(
- 1,
- 0,
- 500,
- aggregation_type="avg",
- bucket_size_msec=10
- )
+ client.ts().revrange(1, 0, 500, aggregation_type="avg", bucket_size_msec=10)
)
assert 10 == len(client.ts().revrange(1, 0, 500, count=10))
assert 2 == len(
@@ -283,10 +266,7 @@ def test_rev_range(client):
@pytest.mark.redismod
def testMultiRange(client):
client.ts().create(1, labels={"Test": "This", "team": "ny"})
- client.ts().create(
- 2,
- labels={"Test": "This", "Taste": "That", "team": "sf"}
- )
+ client.ts().create(2, labels={"Test": "This", "Taste": "That", "team": "sf"})
for i in range(100):
client.ts().add(1, i, i % 7)
client.ts().add(2, i, i % 11)
@@ -301,11 +281,7 @@ def testMultiRange(client):
for i in range(100):
client.ts().add(1, i + 200, i % 7)
res = client.ts().mrange(
- 0,
- 500,
- filters=["Test=This"],
- aggregation_type="avg",
- bucket_size_msec=10
+ 0, 500, filters=["Test=This"], aggregation_type="avg", bucket_size_msec=10
)
assert 2 == len(res)
assert 20 == len(res[0]["1"][1])
@@ -320,21 +296,13 @@ def testMultiRange(client):
@skip_ifmodversion_lt("99.99.99", "timeseries")
def test_multi_range_advanced(client):
client.ts().create(1, labels={"Test": "This", "team": "ny"})
- client.ts().create(
- 2,
- labels={"Test": "This", "Taste": "That", "team": "sf"}
- )
+ client.ts().create(2, labels={"Test": "This", "Taste": "That", "team": "sf"})
for i in range(100):
client.ts().add(1, i, i % 7)
client.ts().add(2, i, i % 11)
# test with selected labels
- res = client.ts().mrange(
- 0,
- 200,
- filters=["Test=This"],
- select_labels=["team"]
- )
+ res = client.ts().mrange(0, 200, filters=["Test=This"], select_labels=["team"])
assert {"team": "ny"} == res[0]["1"][0]
assert {"team": "sf"} == res[1]["2"][0]
@@ -350,28 +318,11 @@ def test_multi_range_advanced(client):
assert [(15, 1.0), (16, 2.0)] == res[0]["1"][1]
# test groupby
- res = client.ts().mrange(
- 0,
- 3,
- filters=["Test=This"],
- groupby="Test",
- reduce="sum"
- )
+ res = client.ts().mrange(0, 3, filters=["Test=This"], groupby="Test", reduce="sum")
assert [(0, 0.0), (1, 2.0), (2, 4.0), (3, 6.0)] == res[0]["Test=This"][1]
- res = client.ts().mrange(
- 0,
- 3,
- filters=["Test=This"],
- groupby="Test",
- reduce="max"
- )
+ res = client.ts().mrange(0, 3, filters=["Test=This"], groupby="Test", reduce="max")
assert [(0, 0.0), (1, 1.0), (2, 2.0), (3, 3.0)] == res[0]["Test=This"][1]
- res = client.ts().mrange(
- 0,
- 3,
- filters=["Test=This"],
- groupby="team",
- reduce="min")
+ res = client.ts().mrange(0, 3, filters=["Test=This"], groupby="team", reduce="min")
assert 2 == len(res)
assert [(0, 0.0), (1, 1.0), (2, 2.0), (3, 3.0)] == res[0]["team=ny"][1]
assert [(0, 0.0), (1, 1.0), (2, 2.0), (3, 3.0)] == res[1]["team=sf"][1]
@@ -401,10 +352,7 @@ def test_multi_range_advanced(client):
@skip_ifmodversion_lt("99.99.99", "timeseries")
def test_multi_reverse_range(client):
client.ts().create(1, labels={"Test": "This", "team": "ny"})
- client.ts().create(
- 2,
- labels={"Test": "This", "Taste": "That", "team": "sf"}
- )
+ client.ts().create(2, labels={"Test": "This", "Taste": "That", "team": "sf"})
for i in range(100):
client.ts().add(1, i, i % 7)
client.ts().add(2, i, i % 11)
@@ -419,31 +367,18 @@ def test_multi_reverse_range(client):
for i in range(100):
client.ts().add(1, i + 200, i % 7)
res = client.ts().mrevrange(
- 0,
- 500,
- filters=["Test=This"],
- aggregation_type="avg",
- bucket_size_msec=10
+ 0, 500, filters=["Test=This"], aggregation_type="avg", bucket_size_msec=10
)
assert 2 == len(res)
assert 20 == len(res[0]["1"][1])
assert {} == res[0]["1"][0]
# test withlabels
- res = client.ts().mrevrange(
- 0,
- 200,
- filters=["Test=This"],
- with_labels=True
- )
+ res = client.ts().mrevrange(0, 200, filters=["Test=This"], with_labels=True)
assert {"Test": "This", "team": "ny"} == res[0]["1"][0]
# test with selected labels
- res = client.ts().mrevrange(
- 0,
- 200,
- filters=["Test=This"], select_labels=["team"]
- )
+ res = client.ts().mrevrange(0, 200, filters=["Test=This"], select_labels=["team"])
assert {"team": "ny"} == res[0]["1"][0]
assert {"team": "sf"} == res[1]["2"][0]
@@ -529,11 +464,7 @@ def test_mget(client):
@pytest.mark.redismod
def test_info(client):
- client.ts().create(
- 1,
- retention_msecs=5,
- labels={"currentLabel": "currentData"}
- )
+ client.ts().create(1, retention_msecs=5, labels={"currentLabel": "currentData"})
info = client.ts().info(1)
assert 5 == info.retention_msecs
assert info.labels["currentLabel"] == "currentData"
@@ -542,11 +473,7 @@ def test_info(client):
@pytest.mark.redismod
@skip_ifmodversion_lt("1.4.0", "timeseries")
def testInfoDuplicatePolicy(client):
- client.ts().create(
- 1,
- retention_msecs=5,
- labels={"currentLabel": "currentData"}
- )
+ client.ts().create(1, retention_msecs=5, labels={"currentLabel": "currentData"})
info = client.ts().info(1)
assert info.duplicate_policy is None