author    Chayim <chayim@users.noreply.github.com>  2021-11-30 17:23:03 +0200
committer GitHub <noreply@github.com>               2021-11-30 17:23:03 +0200
commit    c0b38584dc48f821606150d7965dca88c402192b (patch)
tree      efb58c68c48a969c4b9495e2c2b419a4813db158
parent    a552c4717f5bf383de2a443dcebc7ca7ee5bd802 (diff)
parent    d5247091464f91f06d8ca71bb785b448a0d4cc3e (diff)
download  redis-py-c0b38584dc48f821606150d7965dca88c402192b.tar.gz
Merge branch 'master' into ROLE
-rw-r--r--  .github/PULL_REQUEST_TEMPLATE.md  1
-rwxr-xr-x  .github/workflows/install_and_test.sh  6
-rw-r--r--  .github/workflows/pypi-publish.yaml  4
-rw-r--r--  .gitignore  2
-rw-r--r--  CONTRIBUTING.md  8
-rw-r--r--  README.md  270
-rw-r--r--  benchmarks/base.py  6
-rw-r--r--  benchmarks/basic_operations.py  23
-rw-r--r--  benchmarks/command_packer_benchmark.py  11
-rw-r--r--  dev_requirements.txt  1
-rw-r--r--  docker/base/Dockerfile.cluster  8
-rw-r--r--  docker/base/create_cluster.sh  26
-rw-r--r--  docker/cluster/redis.conf  3
-rw-r--r--  docs/backoff.rst  5
-rw-r--r--  docs/conf.py  22
-rw-r--r--  docs/connections.rst  12
-rw-r--r--  docs/exceptions.rst  7
-rw-r--r--  docs/genindex.rst  2
-rw-r--r--  docs/index.rst  78
-rw-r--r--  docs/lock.rst  5
-rw-r--r--  docs/redis_core_commands.rst  14
-rw-r--r--  docs/redismodules.rst  19
-rw-r--r--  docs/requirements.txt  1
-rw-r--r--  docs/retry.rst  5
-rw-r--r--  docs/sentinel_commands.rst  20
-rw-r--r--  examples/README.md  3
-rw-r--r--  redis/__init__.py  4
-rwxr-xr-x  redis/client.py  101
-rw-r--r--  redis/cluster.py  2060
-rw-r--r--  redis/commands/__init__.py  10
-rw-r--r--  redis/commands/cluster.py  922
-rw-r--r--  redis/commands/core.py  204
-rw-r--r--  redis/commands/helpers.py  43
-rw-r--r--  redis/commands/json/commands.py  86
-rw-r--r--  redis/commands/json/path.py  2
-rw-r--r--  redis/commands/parser.py  119
-rw-r--r--  redis/commands/search/__init__.py  2
-rw-r--r--  redis/commands/search/aggregation.py  33
-rw-r--r--  redis/commands/search/commands.py  173
-rw-r--r--  redis/commands/search/document.py  4
-rw-r--r--  redis/commands/search/field.py  2
-rw-r--r--  redis/commands/search/indexDefinition.py  5
-rw-r--r--  redis/commands/search/query.py  10
-rw-r--r--  redis/commands/search/querystring.py  22
-rw-r--r--  redis/commands/search/reducers.py  24
-rw-r--r--  redis/commands/search/result.py  4
-rw-r--r--  redis/commands/search/suggestion.py  4
-rw-r--r--  redis/commands/timeseries/commands.py  94
-rw-r--r--  redis/commands/timeseries/info.py  2
-rwxr-xr-x  redis/connection.py  119
-rw-r--r--  redis/crc.py  24
-rw-r--r--  redis/exceptions.py  102
-rw-r--r--  redis/sentinel.py  32
-rw-r--r--  redis/utils.py  36
-rw-r--r--  requirements.txt  1
-rw-r--r--  setup.py  3
-rw-r--r--  tasks.py  29
-rw-r--r--  tests/conftest.py  110
-rw-r--r--  tests/test_cluster.py  2477
-rw-r--r--  tests/test_command_parser.py  62
-rw-r--r--  tests/test_commands.py  179
-rw-r--r--  tests/test_connection.py  3
-rw-r--r--  tests/test_connection_pool.py  5
-rw-r--r--  tests/test_helpers.py  26
-rw-r--r--  tests/test_json.py  54
-rw-r--r--  tests/test_lock.py  2
-rw-r--r--  tests/test_monitor.py  2
-rw-r--r--  tests/test_multiprocessing.py  7
-rw-r--r--  tests/test_pipeline.py  16
-rw-r--r--  tests/test_pubsub.py  15
-rw-r--r--  tests/test_scripting.py  1
-rw-r--r--  tests/test_search.py  369
-rw-r--r--  tests/test_sentinel.py  15
-rw-r--r--  tests/test_timeseries.py  3
-rw-r--r--  tox.ini  38
75 files changed, 7664 insertions, 558 deletions
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 2bbc804..58062a1 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -6,6 +6,7 @@ _Please make sure to review and check all of these items:_
- [ ] Do the CI tests pass with this change (enable it first in your forked repo and wait for the github action build to finish)?
- [ ] Is the new or changed code fully tested?
- [ ] Is a documentation update included (if this change modifies existing APIs, or introduces new ones)?
+- [ ] Is there an example added to the examples folder (if applicable)?
_NOTE: these things are not required to open a PR and can be done
afterwards / while the PR is open._
diff --git a/.github/workflows/install_and_test.sh b/.github/workflows/install_and_test.sh
index 330102e..7a8cd67 100755
--- a/.github/workflows/install_and_test.sh
+++ b/.github/workflows/install_and_test.sh
@@ -38,4 +38,8 @@ cd ${TESTDIR}
# install, run tests
pip install ${PKG}
-pytest
+# Redis tests
+pytest -m 'not onlycluster'
+# RedisCluster tests
+CLUSTER_URL="redis://localhost:16379/0"
+pytest -m 'not onlynoncluster and not redismod' --redis-url=${CLUSTER_URL}
diff --git a/.github/workflows/pypi-publish.yaml b/.github/workflows/pypi-publish.yaml
index b842c36..3cccb06 100644
--- a/.github/workflows/pypi-publish.yaml
+++ b/.github/workflows/pypi-publish.yaml
@@ -13,7 +13,7 @@ jobs:
- name: install python
uses: actions/setup-python@v2
with:
- python-version: 3.0
+ python-version: 3.9
- name: Install dev tools
run: |
pip install -r dev_requirements.txt
@@ -22,7 +22,7 @@ jobs:
- name: Build package
run: |
python setup.py build
- python setup.py dist bdist_wheel
+ python setup.py sdist bdist_wheel
- name: Publish to Pypi
uses: pypa/gh-action-pypi-publish@release/v1
diff --git a/.gitignore b/.gitignore
index 05c3846..08138d7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,3 +15,5 @@ env
venv
coverage.xml
.venv
+*.xml
+.coverage*
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index af067e7..fe37ff9 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -68,6 +68,14 @@ configuration](https://redis.io/topics/sentinel).
## Testing
+Call `invoke tests` to run all tests, or `invoke all-tests` to run the
+linters as well. With the 'tests' and 'all-tests' targets, all Redis and
+RedisCluster tests will be run.
+
+It is possible to run only Redis client tests (with cluster mode disabled) by
+using `invoke redis-tests`; similarly, RedisCluster tests can be run by using
+`invoke cluster-tests`.
+
Each run of tox starts and stops the various dockers required. Sometimes
things get stuck, an `invoke clean` can help.
diff --git a/README.md b/README.md
index f03053e..d068c68 100644
--- a/README.md
+++ b/README.md
@@ -9,7 +9,7 @@ The Python interface to the Redis key-value store.
[![codecov](https://codecov.io/gh/redis/redis-py/branch/master/graph/badge.svg?token=yenl5fzxxr)](https://codecov.io/gh/redis/redis-py)
[![Total alerts](https://img.shields.io/lgtm/alerts/g/redis/redis-py.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/redis/redis-py/alerts/)
-[Installation](##installation) | [Contributing](##contributing) | [Getting Started](##getting-started) | [Connecting To Redis](##connecting-to-redis)
+[Installation](#installation) | [Contributing](#contributing) | [Getting Started](#getting-started) | [Connecting To Redis](#connecting-to-redis)
---------------------------------------------
@@ -948,8 +948,272 @@ C 3
### Cluster Mode
-redis-py does not currently support [Cluster
-Mode](https://redis.io/topics/cluster-tutorial).
+redis-py now supports cluster mode and provides a client for
+[Redis Cluster](<https://redis.io/topics/cluster-tutorial>).
+
+The cluster client is based on Grokzen's
+[redis-py-cluster](https://github.com/Grokzen/redis-py-cluster), adds bug
+fixes, and now supersedes that library. Support for these changes is thanks to
+his contributions.
+
+
+**Create RedisCluster:**
+
+Connecting redis-py to a Redis Cluster requires, at a minimum, a single node
+for cluster discovery. There are multiple ways in which a cluster instance can
+be created:
+
+- Using 'host' and 'port' arguments:
+
+``` pycon
+ >>> from redis.cluster import RedisCluster as Redis
+ >>> rc = Redis(host='localhost', port=6379)
+ >>> print(rc.get_nodes())
+ [[host=127.0.0.1,port=6379,name=127.0.0.1:6379,server_type=primary,redis_connection=Redis<ConnectionPool<Connection<host=127.0.0.1,port=6379,db=0>>>], [host=127.0.0.1,port=6378,name=127.0.0.1:6378,server_type=primary,redis_connection=Redis<ConnectionPool<Connection<host=127.0.0.1,port=6378,db=0>>>], [host=127.0.0.1,port=6377,name=127.0.0.1:6377,server_type=replica,redis_connection=Redis<ConnectionPool<Connection<host=127.0.0.1,port=6377,db=0>>>]]
+```
+- Using the Redis URL specification:
+
+``` pycon
+ >>> from redis.cluster import RedisCluster as Redis
+ >>> rc = Redis.from_url("redis://localhost:6379/0")
+```
+
+- Directly, via the ClusterNode class:
+
+``` pycon
+ >>> from redis.cluster import RedisCluster as Redis
+ >>> from redis.cluster import ClusterNode
+ >>> nodes = [ClusterNode('localhost', 6379), ClusterNode('localhost', 6378)]
+ >>> rc = Redis(startup_nodes=nodes)
+```
+
+When a RedisCluster instance is created, it first attempts to establish a
+connection to one of the provided startup nodes. If none of the startup nodes
+are reachable, a 'RedisClusterException' will be thrown.
+After a connection to one of the cluster's nodes is established, the
+RedisCluster instance will be initialized with 3 caches:
+a slots cache, which maps each of the 16384 slots to the node/s handling them;
+a nodes cache, which contains ClusterNode objects (name, host, port, redis
+connection) for all of the cluster's nodes; and a commands cache, which
+contains all the commands supported by the server, retrieved from the Redis
+'COMMAND' output.
+
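+A minimal sketch of peeking at these caches once the client is initialized
+(attribute and helper names taken from the cluster implementation added in
+this change):
+
+``` pycon
+ >>> rc.nodes_manager.slots_cache[0]  # node/s serving slot 0
+ >>> rc.get_nodes()                   # built from the nodes cache
+```
+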
+A RedisCluster instance can be used directly to execute Redis commands. When a
+command is executed through the cluster instance, the target node(s) will
+be determined internally. When using a key-based command, the target node will
+be the node that holds the key's slot.
+Cluster management commands and other commands that are not key-based have a
+parameter called 'target_nodes' where you can specify which nodes to execute
+the command on. In the absence of target_nodes, the command will be executed
+on the default cluster node. As part of cluster instance initialization, the
+cluster's default node is randomly selected from the cluster's primaries, and
+will be updated upon reinitialization. Using r.get_default_node(), you can
+get the cluster's default node, or you can change it using the
+'set_default_node' method.
+
+The 'target_nodes' parameter is explained in the following section,
+'Specifying Target Nodes'.
+
+``` pycon
+ >>> # target-nodes: the node that holds 'foo1's key slot
+ >>> rc.set('foo1', 'bar1')
+ >>> # target-nodes: the node that holds 'foo2's key slot
+ >>> rc.set('foo2', 'bar2')
+ >>> # target-nodes: the node that holds 'foo1's key slot
+ >>> print(rc.get('foo1'))
+ b'bar1'
+ >>> # target-node: default-node
+ >>> print(rc.keys())
+ [b'foo1']
+ >>> # target-node: default-node
+ >>> rc.ping()
+```
+
+**Specifying Target Nodes:**
+
+As mentioned above, all non-key-based RedisCluster commands accept the kwarg
+'target_nodes', which specifies the node/nodes that the command should be
+executed on.
+The best practice is to specify target nodes using the RedisCluster class's
+node flags: PRIMARIES, REPLICAS, ALL_NODES, RANDOM. When a node flag is passed
+along with a command, it will be internally resolved to the relevant node/s.
+If the cluster's node topology changes during the execution of a command, the
+client will be able to resolve the node flag again with the new topology and
+attempt to retry executing the command.
+
+``` pycon
+ >>> from redis.cluster import RedisCluster as Redis
+ >>> # run cluster-meet command on all of the cluster's nodes
+ >>> rc.cluster_meet('127.0.0.1', 6379, target_nodes=Redis.ALL_NODES)
+ >>> # ping all replicas
+ >>> rc.ping(target_nodes=Redis.REPLICAS)
+ >>> # ping a specific node
+ >>> rc.ping(target_nodes=Redis.RANDOM)
+ >>> # get the keys from all cluster nodes
+ >>> rc.keys(target_nodes=Redis.ALL_NODES)
+ [b'foo1', b'foo2']
+ >>> # execute bgsave in all primaries
+ >>> rc.bgsave(Redis.PRIMARIES)
+```
+
+You could also pass ClusterNodes directly if you want to execute a command on a
+specific node / node group that isn't addressed by the nodes flag. However, if
+the command execution fails due to cluster topology changes, a retry attempt
+will not be made, since the passed target node/s may no longer be valid, and
+the relevant cluster or connection error will be returned.
+
+``` pycon
+ >>> node = rc.get_node('localhost', 6379)
+ >>> # Get the keys only for that specific node
+ >>> rc.keys(target_nodes=node)
+ >>> # get Redis info from a subset of primaries
+ >>> subset_primaries = [node for node in rc.get_primaries() if node.port > 6378]
+ >>> rc.info(target_nodes=subset_primaries)
+```
+
+In addition, the RedisCluster instance can query the Redis instance of a
+specific node and execute commands on that node directly. The Redis client,
+however, does not handle cluster failures and retries.
+
+``` pycon
+ >>> cluster_node = rc.get_node(host='localhost', port=6379)
+ >>> print(cluster_node)
+ [host=127.0.0.1,port=6379,name=127.0.0.1:6379,server_type=primary,redis_connection=Redis<ConnectionPool<Connection<host=127.0.0.1,port=6379,db=0>>>]
+ >>> r = cluster_node.redis_connection
+ >>> r.client_list()
+ [{'id': '276', 'addr': '127.0.0.1:64108', 'fd': '16', 'name': '', 'age': '0', 'idle': '0', 'flags': 'N', 'db': '0', 'sub': '0', 'psub': '0', 'multi': '-1', 'qbuf': '26', 'qbuf-free': '32742', 'argv-mem': '10', 'obl': '0', 'oll': '0', 'omem': '0', 'tot-mem': '54298', 'events': 'r', 'cmd': 'client', 'user': 'default'}]
+ >>> # Get the keys only for that specific node
+ >>> r.keys()
+ [b'foo1']
+```
+
+**Multi-key commands:**
+
+Redis supports multi-key commands in Cluster Mode, such as Set type unions or
+intersections, mset and mget, as long as the keys all hash to the same slot.
+Using the RedisCluster client, you can use the known functions (e.g. mget,
+mset) to perform an atomic multi-key operation. However, you must ensure all
+keys are mapped to the same slot, otherwise a RedisClusterException will be
+thrown.
+Redis Cluster implements a concept called hash tags that can be used to force
+certain keys to be stored in the same hash slot; see
+[Keys hash tag](https://redis.io/topics/cluster-spec#keys-hash-tags).
+You can also use the nonatomic variants of some multi-key operations to pass
+keys that aren't mapped to the same slot. The client will then map the keys to
+the relevant slots, sending the commands to the slots' node owners. Non-atomic
+operations batch the keys according to their hash value, and then each batch
+is sent separately to the slot's owner.
+
+``` pycon
+ # Atomic operations can be used when all keys are mapped to the same slot
+ >>> rc.mset({'{foo}1': 'bar1', '{foo}2': 'bar2'})
+ >>> rc.mget('{foo}1', '{foo}2')
+ [b'bar1', b'bar2']
+ # Non-atomic multi-key operations split the keys across different slots
+ >>> rc.mset_nonatomic({'foo': 'value1', 'bar': 'value2', 'zzz': 'value3'})
+ >>> rc.mget_nonatomic('foo', 'bar', 'zzz')
+ [b'value1', b'value2', b'value3']
+```
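+
+As a quick check of the hash-tag rule described above (only the substring
+between '{' and '}' is hashed), the client's keyslot method, defined in this
+change, can be used:
+
+``` pycon
+ >>> rc.keyslot('{foo}1') == rc.keyslot('{foo}2')
+ True
+```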
+
+**Cluster PubSub:**
+
+When a ClusterPubSub instance is created without specifying a node, a single
+node will be transparently chosen for the pubsub connection on the
+first command execution. The node will be determined by:
+ 1. Hashing the channel name in the request to find its keyslot
+ 2. Selecting a node that handles the keyslot: If read_from_replicas is
+ set to true, a replica can be selected.
+
+*Known limitations with pubsub:*
+
+Pattern subscribe and publish do not currently work properly due to key slots.
+If we hash a pattern like fo* we will receive a keyslot for that string but
+there are endless possibilities for channel names based on this pattern -
+unknowable in advance. This feature is not disabled but the commands are not
+currently recommended for use.
+See [redis-py-cluster documentation](https://redis-py-cluster.readthedocs.io/en/stable/pubsub.html)
+ for more.
+
+``` pycon
+ >>> p1 = rc.pubsub()
+ # p1 connection will be set to the node that holds 'foo' keyslot
+ >>> p1.subscribe('foo')
+ # p2 connection will be set to node 'localhost:6379'
+ >>> p2 = rc.pubsub(rc.get_node('localhost', 6379))
+```
+
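+Messages can then be published and consumed with the standard redis-py pubsub
+API; a minimal sketch:
+
+``` pycon
+ >>> rc.publish('foo', 'hello')
+ >>> p1.get_message()
+```
+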
+**Read Only Mode**
+
+By default, Redis Cluster always returns a MOVED redirection response when
+accessing a replica node. You can overcome this limitation and scale read
+commands by triggering READONLY mode.
+
+To enable READONLY mode, pass read_from_replicas=True to the RedisCluster
+constructor. When set to true, read commands will be distributed between the
+primary and its replicas in a round-robin manner.
+
+READONLY mode can be set at runtime by calling the readonly() method with
+target_nodes='replicas', and read-write access can be restored by calling the
+readwrite() method.
+
+``` pycon
+ >>> from redis.cluster import RedisCluster as Redis
+ # Use 'debug' log level to print the node that the command is executed on
+ >>> rc_readonly = Redis(startup_nodes=startup_nodes,
+ read_from_replicas=True, debug=True)
+ >>> rc_readonly.set('{foo}1', 'bar1')
+ >>> for i in range(0, 4):
+ ...     # Assigns the read command to the slot's hosts in a round-robin manner
+ ...     rc_readonly.get('{foo}1')
+ # set command would be directed only to the slot's primary node
+ >>> rc_readonly.set('{foo}2', 'bar2')
+ # reset READONLY flag
+ >>> rc_readonly.readwrite(target_nodes='replicas')
+ # now the get command would be directed only to the slot's primary node
+ >>> rc_readonly.get('{foo}1')
+```
+
+**Cluster Pipeline**
+
+ClusterPipeline is a subclass of RedisCluster that provides support for Redis
+pipelines in cluster mode.
+When calling the execute() command, all the commands are grouped by the node
+on which they will be executed, and are then executed by the respective nodes
+in parallel. The pipeline instance will wait for all the nodes to respond
+before returning the result to the caller. Command responses are returned as a
+list sorted in the same order in which they were sent.
+Pipelines can be used to dramatically increase the throughput of Redis Cluster
+by significantly reducing the number of network round trips between the
+client and the server.
+
+``` pycon
+ >>> with rc.pipeline() as pipe:
+ ...     pipe.set('foo', 'value1')
+ ...     pipe.set('bar', 'value2')
+ ...     pipe.get('foo')
+ ...     pipe.get('bar')
+ ...     print(pipe.execute())
+ [True, True, b'value1', b'value2']
+ >>> pipe.set('foo1', 'bar1').get('foo1').execute()
+ [True, b'bar1']
+```
+Please note:
+- RedisCluster pipelines currently only support key-based commands.
+- The pipeline gets its 'read_from_replicas' value from the cluster's parameter.
+Thus, if reading from replicas is enabled in the cluster instance, the pipeline
+will also direct read commands to replicas.
+- The 'transaction' option is NOT supported in cluster-mode. In non-cluster mode,
+the 'transaction' option is available when executing pipelines. This wraps the
+pipeline commands with MULTI/EXEC commands, and effectively turns the pipeline
+commands into a single transaction block. This means that all commands are
+executed sequentially without any interruptions from other clients. However,
+in cluster-mode this is not possible, because commands are partitioned
+according to their respective destination nodes. This means that we cannot
+turn the pipeline commands into one transaction block, because in most cases
+they are split up into several smaller pipelines.
+
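+For contrast, a minimal sketch of the 'transaction' option with the standard
+(non-cluster) redis-py client:
+
+``` pycon
+ >>> import redis
+ >>> r = redis.Redis()
+ >>> with r.pipeline(transaction=True) as pipe:
+ ...     pipe.set('counter', 0)
+ ...     pipe.incr('counter')
+ ...     pipe.execute()  # runs atomically inside MULTI/EXEC
+ [True, 1]
+```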
+
+See [Redis Cluster tutorial](https://redis.io/topics/cluster-tutorial) and
+[Redis Cluster specifications](https://redis.io/topics/cluster-spec)
+to learn more about Redis Cluster.
### Author
diff --git a/benchmarks/base.py b/benchmarks/base.py
index 8c13afe..519c9cc 100644
--- a/benchmarks/base.py
+++ b/benchmarks/base.py
@@ -34,12 +34,12 @@ class Benchmark:
group_values = [group['values'] for group in self.ARGUMENTS]
for value_set in itertools.product(*group_values):
pairs = list(zip(group_names, value_set))
- arg_string = ', '.join(['%s=%s' % (p[0], p[1]) for p in pairs])
- sys.stdout.write('Benchmark: %s... ' % arg_string)
+ arg_string = ', '.join(f'{p[0]}={p[1]}' for p in pairs)
+ sys.stdout.write(f'Benchmark: {arg_string}... ')
sys.stdout.flush()
kwargs = dict(pairs)
setup = functools.partial(self.setup, **kwargs)
run = functools.partial(self.run, **kwargs)
t = timeit.timeit(stmt=run, setup=setup, number=1000)
- sys.stdout.write('%f\n' % t)
+ sys.stdout.write(f'{t:f}\n')
sys.stdout.flush()
diff --git a/benchmarks/basic_operations.py b/benchmarks/basic_operations.py
index 9446343..cb009de 100644
--- a/benchmarks/basic_operations.py
+++ b/benchmarks/basic_operations.py
@@ -49,9 +49,9 @@ def timer(func):
count = kwargs['num']
else:
count = args[1]
- print('{} - {} Requests'.format(func.__name__, count))
- print('Duration = {}'.format(duration))
- print('Rate = {}'.format(count/duration))
+ print(f'{func.__name__} - {count} Requests')
+ print(f'Duration = {duration}')
+ print(f'Rate = {count/duration}')
print()
return ret
return wrapper
@@ -62,10 +62,9 @@ def set_str(conn, num, pipeline_size, data_size):
if pipeline_size > 1:
conn = conn.pipeline()
- format_str = '{:0<%d}' % data_size
- set_data = format_str.format('a')
+ set_data = 'a'.ljust(data_size, '0')
for i in range(num):
- conn.set('set_str:%d' % i, set_data)
+ conn.set(f'set_str:{i}', set_data)
if pipeline_size > 1 and i % pipeline_size == 0:
conn.execute()
@@ -78,10 +77,9 @@ def set_int(conn, num, pipeline_size, data_size):
if pipeline_size > 1:
conn = conn.pipeline()
- format_str = '{:0<%d}' % data_size
- set_data = int(format_str.format('1'))
+ set_data = 10 ** (data_size - 1)
for i in range(num):
- conn.set('set_int:%d' % i, set_data)
+ conn.set(f'set_int:{i}', set_data)
if pipeline_size > 1 and i % pipeline_size == 0:
conn.execute()
@@ -95,7 +93,7 @@ def get_str(conn, num, pipeline_size, data_size):
conn = conn.pipeline()
for i in range(num):
- conn.get('set_str:%d' % i)
+ conn.get(f'set_str:{i}')
if pipeline_size > 1 and i % pipeline_size == 0:
conn.execute()
@@ -109,7 +107,7 @@ def get_int(conn, num, pipeline_size, data_size):
conn = conn.pipeline()
for i in range(num):
- conn.get('set_int:%d' % i)
+ conn.get(f'set_int:{i}')
if pipeline_size > 1 and i % pipeline_size == 0:
conn.execute()
@@ -136,8 +134,7 @@ def lpush(conn, num, pipeline_size, data_size):
if pipeline_size > 1:
conn = conn.pipeline()
- format_str = '{:0<%d}' % data_size
- set_data = int(format_str.format('1'))
+ set_data = 10 ** (data_size - 1)
for i in range(num):
conn.lpush('lpush_key', set_data)
if pipeline_size > 1 and i % pipeline_size == 0:
diff --git a/benchmarks/command_packer_benchmark.py b/benchmarks/command_packer_benchmark.py
index 823a8c8..3176c06 100644
--- a/benchmarks/command_packer_benchmark.py
+++ b/benchmarks/command_packer_benchmark.py
@@ -1,4 +1,3 @@
-import socket
from redis.connection import (Connection, SYM_STAR, SYM_DOLLAR, SYM_EMPTY,
SYM_CRLF)
from base import Benchmark
@@ -11,14 +10,13 @@ class StringJoiningConnection(Connection):
self.connect()
try:
self._sock.sendall(command)
- except socket.error as e:
+ except OSError as e:
self.disconnect()
if len(e.args) == 1:
_errno, errmsg = 'UNKNOWN', e.args[0]
else:
_errno, errmsg = e.args
- raise ConnectionError("Error %s while writing to socket. %s." %
- (_errno, errmsg))
+ raise ConnectionError(f"Error {_errno} while writing to socket. {errmsg}.")
except Exception:
self.disconnect()
raise
@@ -43,14 +41,13 @@ class ListJoiningConnection(Connection):
command = [command]
for item in command:
self._sock.sendall(item)
- except socket.error as e:
+ except OSError as e:
self.disconnect()
if len(e.args) == 1:
_errno, errmsg = 'UNKNOWN', e.args[0]
else:
_errno, errmsg = e.args
- raise ConnectionError("Error %s while writing to socket. %s." %
- (_errno, errmsg))
+ raise ConnectionError(f"Error {_errno} while writing to socket. {errmsg}.")
except Exception:
self.disconnect()
raise
diff --git a/dev_requirements.txt b/dev_requirements.txt
index 7f099cb..56ac08e 100644
--- a/dev_requirements.txt
+++ b/dev_requirements.txt
@@ -1,4 +1,5 @@
flake8>=3.9.2
+flynt~=0.69.0
pytest==6.2.5
pytest-timeout==2.0.1
tox==3.24.4
diff --git a/docker/base/Dockerfile.cluster b/docker/base/Dockerfile.cluster
new file mode 100644
index 0000000..70e8013
--- /dev/null
+++ b/docker/base/Dockerfile.cluster
@@ -0,0 +1,8 @@
+FROM redis:6.2.6-buster
+
+COPY create_cluster.sh /create_cluster.sh
+RUN chmod +x /create_cluster.sh
+
+EXPOSE 16379 16380 16381 16382 16383 16384
+
+CMD [ "/create_cluster.sh"]
\ No newline at end of file
diff --git a/docker/base/create_cluster.sh b/docker/base/create_cluster.sh
new file mode 100644
index 0000000..82a79c8
--- /dev/null
+++ b/docker/base/create_cluster.sh
@@ -0,0 +1,26 @@
+#! /bin/bash
+mkdir -p /nodes
+touch /nodes/nodemap
+for PORT in $(seq 16379 16384); do
+ mkdir -p /nodes/$PORT
+ if [[ -e /redis.conf ]]; then
+ cp /redis.conf /nodes/$PORT/redis.conf
+ else
+ touch /nodes/$PORT/redis.conf
+ fi
+ cat << EOF >> /nodes/$PORT/redis.conf
+port ${PORT}
+cluster-enabled yes
+daemonize yes
+logfile /redis.log
+dir /nodes/$PORT
+EOF
+ redis-server /nodes/$PORT/redis.conf
+ if [ $? -ne 0 ]; then
+ echo "Redis failed to start, exiting."
+ exit 3
+ fi
+ echo 127.0.0.1:$PORT >> /nodes/nodemap
+done
+echo yes | redis-cli --cluster create $(seq -f 127.0.0.1:%g 16379 16384) --cluster-replicas 1
+tail -f /redis.log
diff --git a/docker/cluster/redis.conf b/docker/cluster/redis.conf
new file mode 100644
index 0000000..dff658c
--- /dev/null
+++ b/docker/cluster/redis.conf
@@ -0,0 +1,3 @@
+# Redis Cluster config file will be shared across all nodes.
+# Do not change the following configurations that are already set:
+# port, cluster-enabled, daemonize, logfile, dir
diff --git a/docs/backoff.rst b/docs/backoff.rst
new file mode 100644
index 0000000..e640b56
--- /dev/null
+++ b/docs/backoff.rst
@@ -0,0 +1,5 @@
+Backoff
+#############
+
+.. automodule:: redis.backoff
+    :members:
\ No newline at end of file
diff --git a/docs/conf.py b/docs/conf.py
index f497e3d..8520969 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -29,7 +29,8 @@ sys.path.append(os.path.abspath(os.path.pardir))
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
- "sphinx.ext.viewcode"
+ "sphinx.ext.viewcode",
+ "sphinx.ext.autosectionlabel",
]
# Add any paths that contain templates here, relative to this directory.
@@ -53,10 +54,11 @@ copyright = "2021, Redis Inc"
# built documents.
#
# The short X.Y version.
-version = "4.0"
+import redis
+version = '.'.join(redis.__version__.split(".")[0:2])
# The full version, including alpha/beta/rc tags.
-release = "4.0.0"
+release = redis.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
@@ -93,17 +95,27 @@ pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
+nitpicky = True
+
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
-html_theme = "default"
+html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
-# html_theme_options = {}
+html_theme_options = {
+ 'display_version': True,
+ 'prev_next_buttons_location': 'bottom',
+ 'style_external_links': False,
+ # Toc options
+ 'collapse_navigation': True,
+ 'sticky_navigation': True,
+ 'navigation_depth': 4,
+}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
diff --git a/docs/connections.rst b/docs/connections.rst
new file mode 100644
index 0000000..973821b
--- /dev/null
+++ b/docs/connections.rst
@@ -0,0 +1,12 @@
+Connecting to Redis
+#####################
+
+Generic Client
+**************
+.. autoclass:: redis.client.Redis
+ :members:
+
+Connection Pools
+*****************
+.. autoclass:: redis.connection.ConnectionPool
+    :members:
\ No newline at end of file
diff --git a/docs/exceptions.rst b/docs/exceptions.rst
new file mode 100644
index 0000000..b8aeb33
--- /dev/null
+++ b/docs/exceptions.rst
@@ -0,0 +1,7 @@
+
+
+Exceptions
+##########
+
+.. automodule:: redis.exceptions
+    :members:
\ No newline at end of file
diff --git a/docs/genindex.rst b/docs/genindex.rst
new file mode 100644
index 0000000..c1f8355
--- /dev/null
+++ b/docs/genindex.rst
@@ -0,0 +1,2 @@
+Module Index
+============
\ No newline at end of file
diff --git a/docs/index.rst b/docs/index.rst
index 8af5385..8e243f3 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -6,37 +6,71 @@
Welcome to redis-py's documentation!
====================================
-Indices and tables
-------------------
+Getting Started
+****************
-* :ref:`genindex`
-* :ref:`modindex`
-* :ref:`search`
+`redis-py <https://pypi.org/project/redis>`_ requires a running Redis server, and Python 3.6+. See the `Redis
+quickstart <https://redis.io/topics/quickstart>`_ for Redis installation instructions.
-Contents:
----------
+redis-py can be installed using pip via ``pip install redis``.
+
+
+Quickly connecting to redis
+***************************
+
+There are two quick ways to connect to Redis.
+
+Assuming you run Redis on localhost:6379 (the default)::
+
+   import redis
+   r = redis.Redis()
+   r.ping()
+
+Running redis on foo.bar.com, port 12345::
+
+   import redis
+   r = redis.Redis(host='foo.bar.com', port=12345)
+   r.ping()
+
+Another example with foo.bar.com, port 12345::
+
+   import redis
+   r = redis.from_url('redis://foo.bar.com:12345')
+   r.ping()
+
+After that, you probably want to `run redis commands <redis_core_commands.html>`_.
.. toctree::
- :maxdepth: 2
+ :hidden:
+
+ genindex
-.. automodule:: redis
- :members:
+Redis Command Functions
+***********************
+.. toctree::
+ :maxdepth: 2
-.. automodule:: redis.backoff
- :members:
+ redis_core_commands
+ sentinel_commands
+ redismodules
-.. automodule:: redis.connection
- :members:
+Module Documentation
+********************
+.. toctree::
+ :maxdepth: 1
-.. automodule:: redis.commands
- :members:
+ backoff
+ connections
+ exceptions
+ lock
+ retry
-.. automodule:: redis.exceptions
- :members:
+Contributing
+*************
-.. automodule:: redis.lock
- :members:
+- `How to contribute <https://github.com/redis/redis-py/blob/master/CONTRIBUTING.md>`_
+- `Issue Tracker <https://github.com/redis/redis-py/issues>`_
+- `Source Code <https://github.com/redis/redis-py/>`_
+- `Release History <https://github.com/redis/redis-py/releases/>`_
-.. automodule:: redis.sentinel
- :members:
+License
+*******
+This project is licensed under the `MIT license <https://github.com/redis/redis-py/blob/master/LICENSE>`_.
diff --git a/docs/lock.rst b/docs/lock.rst
new file mode 100644
index 0000000..cce0867
--- /dev/null
+++ b/docs/lock.rst
@@ -0,0 +1,5 @@
+Lock
+#########
+
+.. automodule:: redis.lock
+    :members:
\ No newline at end of file
diff --git a/docs/redis_core_commands.rst b/docs/redis_core_commands.rst
new file mode 100644
index 0000000..edfd7fe
--- /dev/null
+++ b/docs/redis_core_commands.rst
@@ -0,0 +1,14 @@
+Redis Core Commands
+####################
+
+The following functions can be used to replicate their equivalent `Redis command <https://redis.io/commands>`_. Generally they can be used as functions on your redis connection. For the simplest example, see below:
+
+Getting and setting data in redis::
+
+ import redis
+ r = redis.Redis(decode_responses=True)
+ r.set('mykey', 'thevalueofmykey')
+ r.get('mykey')
+
+.. autoclass:: redis.commands.core.CoreCommands
+    :members:
\ No newline at end of file
diff --git a/docs/redismodules.rst b/docs/redismodules.rst
new file mode 100644
index 0000000..da8c36b
--- /dev/null
+++ b/docs/redismodules.rst
@@ -0,0 +1,19 @@
+Redis Modules Commands
+######################
+
+Accessing redis module commands requires the installation of the supported `Redis module <https://docs.redis.com/latest/modules/>`_. For a quick start with redis modules, try the `Redismod docker <https://hub.docker.com/r/redislabs/redismod>`_.
+
+RedisTimeSeries Commands
+************************
+.. automodule:: redis.commands.timeseries.commands
+ :members: TimeSeriesCommands
+
+RedisJSON Commands
+******************
+.. automodule:: redis.commands.json.commands
+ :members: JSONCommands
+
+RediSearch Commands
+*******************
+.. automodule:: redis.commands.search.commands
+    :members: SearchCommands
\ No newline at end of file
diff --git a/docs/requirements.txt b/docs/requirements.txt
index 2e1c4fb..6dc905f 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -1,2 +1,3 @@
sphinx<2
docutils<0.18
+sphinx-rtd-theme
diff --git a/docs/retry.rst b/docs/retry.rst
new file mode 100644
index 0000000..2b4f22c
--- /dev/null
+++ b/docs/retry.rst
@@ -0,0 +1,5 @@
+Retry Helpers
+#############
+
+.. automodule:: redis.retry
+    :members:
\ No newline at end of file
diff --git a/docs/sentinel_commands.rst b/docs/sentinel_commands.rst
new file mode 100644
index 0000000..e5be11e
--- /dev/null
+++ b/docs/sentinel_commands.rst
@@ -0,0 +1,20 @@
+Redis Sentinel Commands
+=======================
+
+redis-py can be used together with `Redis
+Sentinel <https://redis.io/topics/sentinel>`_ to discover Redis nodes. You
+need to have at least one Sentinel daemon running in order to use
+redis-py's Sentinel support.
+
+Connection example (assumes redis running on the ports listed below):
+
+ >>> from redis import Sentinel
+ >>> sentinel = Sentinel([('localhost', 26379)], socket_timeout=0.1)
+ >>> sentinel.discover_master('mymaster')
+ ('127.0.0.1', 6379)
+ >>> sentinel.discover_slaves('mymaster')
+ [('127.0.0.1', 6380)]
+
+
+.. autoclass:: redis.commands.sentinel.SentinelCommands
+    :members:
\ No newline at end of file
diff --git a/examples/README.md b/examples/README.md
new file mode 100644
index 0000000..ca6d5dc
--- /dev/null
+++ b/examples/README.md
@@ -0,0 +1,3 @@
+# Examples
+
+Examples of redis-py usage go here. They're being linked to the [generated documentation](https://redis-py.readthedocs.org).
diff --git a/redis/__init__.py b/redis/__init__.py
index dc9b11a..bc7f3c9 100644
--- a/redis/__init__.py
+++ b/redis/__init__.py
@@ -1,4 +1,5 @@
from redis.client import Redis, StrictRedis
+from redis.cluster import RedisCluster
from redis.connection import (
BlockingConnectionPool,
ConnectionPool,
@@ -37,7 +38,7 @@ def int_or_str(value):
return value
-__version__ = "4.0.2"
+__version__ = "4.1.0rc1"
VERSION = tuple(map(int_or_str, __version__.split('.')))
@@ -57,6 +58,7 @@ __all__ = [
'PubSubError',
'ReadOnlyError',
'Redis',
+ 'RedisCluster',
'RedisError',
'ResponseError',
'Sentinel',
diff --git a/redis/client.py b/redis/client.py
index dc6693d..9f2907e 100755
--- a/redis/client.py
+++ b/redis/client.py
@@ -1,7 +1,6 @@
from itertools import chain
import copy
import datetime
-import hashlib
import re
import threading
import time
@@ -15,7 +14,6 @@ from redis.exceptions import (
ConnectionError,
ExecAbortError,
ModuleError,
- NoScriptError,
PubSubError,
RedisError,
ResponseError,
@@ -27,6 +25,9 @@ from redis.utils import safe_str, str_if_bytes
SYM_EMPTY = b''
EMPTY_RESPONSE = 'EMPTY_RESPONSE'
+# some responses (i.e. dump) are binary and are never meant to be decoded
+NEVER_DECODE = 'NEVER_DECODE'
+
def timestamp_to_datetime(response):
"Converts a unix timestamp to a Python datetime object"
@@ -461,6 +462,7 @@ def _parse_node_line(line):
line_items = line.split(' ')
node_id, addr, flags, master_id, ping, pong, epoch, \
connected = line.split(' ')[:8]
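+    # strip the cluster bus port that newer Redis servers append,
+    # e.g. '127.0.0.1:6379@16379' -> '127.0.0.1:6379'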
+ addr = addr.split('@')[0]
slots = [sl.split('-') for sl in line_items[8:]]
node_dict = {
'node_id': node_id,
@@ -476,8 +478,13 @@ def _parse_node_line(line):
def parse_cluster_nodes(response, **options):
- raw_lines = str_if_bytes(response).splitlines()
- return dict(_parse_node_line(line) for line in raw_lines)
+ """
+ @see: https://redis.io/commands/cluster-nodes # string
+ @see: https://redis.io/commands/cluster-replicas # list of string
+ """
+ if isinstance(response, str):
+ response = response.splitlines()
+ return dict(_parse_node_line(str_if_bytes(node)) for node in response)
def parse_geosearch_generic(response, **options):
@@ -516,6 +523,21 @@ def parse_geosearch_generic(response, **options):
]
+def parse_command(response, **options):
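+    # Each item in a COMMAND reply is a nested array:
+    # [name, arity, flags, first_key_pos, last_key_pos, step_count, ...];
+    # any fields beyond index 5 are ignored here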
+ commands = {}
+ for command in response:
+ cmd_dict = {}
+ cmd_name = str_if_bytes(command[0])
+ cmd_dict['name'] = cmd_name
+ cmd_dict['arity'] = int(command[1])
+ cmd_dict['flags'] = [str_if_bytes(flag) for flag in command[2]]
+ cmd_dict['first_key_pos'] = command[3]
+ cmd_dict['last_key_pos'] = command[4]
+ cmd_dict['step_count'] = command[5]
+ commands[cmd_name] = cmd_dict
+ return commands
+
+
def parse_pubsub_numsub(response, **options):
return list(zip(response[0::2], response[1::2]))
@@ -607,7 +629,7 @@ def parse_set_result(response, **options):
return response and str_if_bytes(response) == 'OK'
-class Redis(RedisModuleCommands, CoreCommands, SentinelCommands, object):
+class Redis(RedisModuleCommands, CoreCommands, SentinelCommands):
"""
Implementation of the Redis protocol.
@@ -704,7 +726,10 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands, object):
'CLUSTER SET-CONFIG-EPOCH': bool_ok,
'CLUSTER SETSLOT': bool_ok,
'CLUSTER SLAVES': parse_cluster_nodes,
+ 'CLUSTER REPLICAS': parse_cluster_nodes,
+ 'COMMAND': parse_command,
'COMMAND COUNT': int,
+ 'COMMAND GETKEYS': lambda r: list(map(str_if_bytes, r)),
'CONFIG GET': parse_config_get,
'CONFIG RESETSTAT': bool_ok,
'CONFIG SET': bool_ok,
@@ -827,7 +852,7 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands, object):
ssl_check_hostname=False,
max_connections=None, single_connection_client=False,
health_check_interval=0, client_name=None, username=None,
- retry=None):
+ retry=None, redis_connect_func=None):
"""
Initialize a new Redis client.
To specify a retry policy, first set `retry_on_timeout` to `True`
@@ -855,7 +880,8 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands, object):
'retry': copy.deepcopy(retry),
'max_connections': max_connections,
'health_check_interval': health_check_interval,
- 'client_name': client_name
+ 'client_name': client_name,
+ 'redis_connect_func': redis_connect_func
}
# based on input, setup appropriate connection args
if unix_socket_path is not None:
@@ -892,7 +918,7 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands, object):
self.__class__.RESPONSE_CALLBACKS)
def __repr__(self):
- return "%s<%s>" % (type(self).__name__, repr(self.connection_pool))
+ return f"{type(self).__name__}<{repr(self.connection_pool)}>"
def set_response_callback(self, command, callback):
"Set a custom Response Callback"
@@ -1081,7 +1107,10 @@ class Redis(RedisModuleCommands, CoreCommands, SentinelCommands, object):
def parse_response(self, connection, command_name, **options):
"Parses a response from the Redis server"
try:
- response = connection.read_response()
+ if NEVER_DECODE in options:
+ response = connection.read_response(disable_decoding=True)
+ else:
+ response = connection.read_response()
except ResponseError:
if EMPTY_RESPONSE in options:
return options[EMPTY_RESPONSE]
@@ -1112,7 +1141,7 @@ class Monitor:
# check that monitor returns 'OK', but don't return it to user
response = self.connection.read_response()
if not bool_ok(response):
- raise RedisError('MONITOR failed: %s' % response)
+ raise RedisError(f'MONITOR failed: {response}')
return self
def __exit__(self, *args):
@@ -1173,14 +1202,16 @@ class PubSub:
HEALTH_CHECK_MESSAGE = 'redis-py-health-check'
def __init__(self, connection_pool, shard_hint=None,
- ignore_subscribe_messages=False):
+ ignore_subscribe_messages=False, encoder=None):
self.connection_pool = connection_pool
self.shard_hint = shard_hint
self.ignore_subscribe_messages = ignore_subscribe_messages
self.connection = None
# we need to know the encoding options for this connection in order
# to lookup channel and pattern names for callback handlers.
- self.encoder = self.connection_pool.get_encoder()
+ self.encoder = encoder
+ if self.encoder is None:
+ self.encoder = self.connection_pool.get_encoder()
if self.encoder.decode_responses:
self.health_check_response = ['pong', self.HEALTH_CHECK_MESSAGE]
else:
@@ -1486,12 +1517,10 @@ class PubSub:
exception_handler=None):
for channel, handler in self.channels.items():
if handler is None:
- raise PubSubError("Channel: '%s' has no handler registered" %
- channel)
+ raise PubSubError(f"Channel: '{channel}' has no handler registered")
for pattern, handler in self.patterns.items():
if handler is None:
- raise PubSubError("Pattern: '%s' has no handler registered" %
- pattern)
+ raise PubSubError(f"Pattern: '{pattern}' has no handler registered")
thread = PubSubWorkerThread(
self,
@@ -1776,8 +1805,10 @@ class Pipeline(Redis):
def annotate_exception(self, exception, number, command):
cmd = ' '.join(map(safe_str, command))
- msg = 'Command # %d (%s) of pipeline caused error: %s' % (
- number, cmd, exception.args[0])
+ msg = (
+ f'Command # {number} ({cmd}) of pipeline '
+ f'caused error: {exception.args[0]}'
+ )
exception.args = (msg,) + exception.args[1:]
def parse_response(self, connection, command_name, **options):
@@ -1863,37 +1894,3 @@ class Pipeline(Redis):
def unwatch(self):
"Unwatches all previously specified keys"
return self.watching and self.execute_command('UNWATCH') or True
-
-
-class Script:
- "An executable Lua script object returned by ``register_script``"
-
- def __init__(self, registered_client, script):
- self.registered_client = registered_client
- self.script = script
- # Precalculate and store the SHA1 hex digest of the script.
-
- if isinstance(script, str):
- # We need the encoding from the client in order to generate an
- # accurate byte representation of the script
- encoder = registered_client.connection_pool.get_encoder()
- script = encoder.encode(script)
- self.sha = hashlib.sha1(script).hexdigest()
-
- def __call__(self, keys=[], args=[], client=None):
- "Execute the script, passing any required ``args``"
- if client is None:
- client = self.registered_client
- args = tuple(keys) + tuple(args)
- # make sure the Redis server knows about the script
- if isinstance(client, Pipeline):
- # Make sure the pipeline can register the script before executing.
- client.scripts.add(self)
- try:
- return client.evalsha(self.sha, len(keys), *args)
- except NoScriptError:
- # Maybe the client is pointed to a different server than the client
- # that created this instance?
- # Overwrite the sha just in case there was a discrepancy.
- self.sha = client.script_load(self.script)
- return client.evalsha(self.sha, len(keys), *args)
diff --git a/redis/cluster.py b/redis/cluster.py
new file mode 100644
index 0000000..c1853aa
--- /dev/null
+++ b/redis/cluster.py
@@ -0,0 +1,2060 @@
+import copy
+import logging
+import random
+import socket
+import time
+import threading
+import sys
+
+from collections import OrderedDict
+from redis.client import CaseInsensitiveDict, Redis, PubSub
+from redis.commands import (
+ ClusterCommands,
+ CommandsParser
+)
+from redis.connection import DefaultParser, ConnectionPool, Encoder, parse_url
+from redis.crc import key_slot, REDIS_CLUSTER_HASH_SLOTS
+from redis.exceptions import (
+ AskError,
+ BusyLoadingError,
+ ClusterCrossSlotError,
+ ClusterDownError,
+ ClusterError,
+ DataError,
+ MasterDownError,
+ MovedError,
+ RedisClusterException,
+ RedisError,
+ ResponseError,
+ SlotNotCoveredError,
+ TimeoutError,
+ TryAgainError,
+)
+from redis.utils import (
+ dict_merge,
+ list_keys_to_dict,
+ merge_result,
+ str_if_bytes,
+ safe_str
+)
+
+log = logging.getLogger(__name__)
+
+
+def get_node_name(host, port):
+ return f'{host}:{port}'
+
+
+def get_connection(redis_node, *args, **options):
+ return redis_node.connection or redis_node.connection_pool.get_connection(
+ args[0], **options
+ )
+
+
+def parse_scan_result(command, res, **options):
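+    # Merge the SCAN results from all queried nodes; a single cursor is not
+    # meaningful across nodes, so a cursor of 0 is always returned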
+ keys_list = []
+ for primary_res in res.values():
+ keys_list += primary_res[1]
+ return 0, keys_list
+
+
+def parse_pubsub_numsub(command, res, **options):
+ numsub_d = OrderedDict()
+ for numsub_tups in res.values():
+ for channel, numsubbed in numsub_tups:
+ try:
+ numsub_d[channel] += numsubbed
+ except KeyError:
+ numsub_d[channel] = numsubbed
+
+ ret_numsub = [
+ (channel, numsub)
+ for channel, numsub in numsub_d.items()
+ ]
+ return ret_numsub
+
+
+def parse_cluster_slots(resp, **options):
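+    # Map each (start_slot, end_slot) range from CLUSTER SLOTS to its primary
+    # and replicas; empty host fields fall back to current_host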
+ current_host = options.get('current_host', '')
+
+ def fix_server(*args):
+ return str_if_bytes(args[0]) or current_host, args[1]
+
+ slots = {}
+ for slot in resp:
+ start, end, primary = slot[:3]
+ replicas = slot[3:]
+ slots[start, end] = {
+ 'primary': fix_server(*primary),
+ 'replicas': [fix_server(*replica) for replica in replicas],
+ }
+
+ return slots
+
+
+PRIMARY = "primary"
+REPLICA = "replica"
+SLOT_ID = "slot-id"
+
+REDIS_ALLOWED_KEYS = (
+ "charset",
+ "connection_class",
+ "connection_pool",
+ "db",
+ "decode_responses",
+ "encoding",
+ "encoding_errors",
+ "errors",
+ "host",
+ "max_connections",
+ "nodes_flag",
+ "redis_connect_func",
+ "password",
+ "port",
+ "retry",
+ "retry_on_timeout",
+ "socket_connect_timeout",
+ "socket_keepalive",
+ "socket_keepalive_options",
+ "socket_timeout",
+ "ssl",
+ "ssl_ca_certs",
+ "ssl_certfile",
+ "ssl_cert_reqs",
+ "ssl_keyfile",
+ "unix_socket_path",
+ "username",
+)
+KWARGS_DISABLED_KEYS = (
+ "host",
+ "port",
+)
+
+# Not complete, but covers the major ones
+# https://redis.io/commands
+READ_COMMANDS = frozenset([
+ "BITCOUNT",
+ "BITPOS",
+ "EXISTS",
+ "GEODIST",
+ "GEOHASH",
+ "GEOPOS",
+ "GEORADIUS",
+ "GEORADIUSBYMEMBER",
+ "GET",
+ "GETBIT",
+ "GETRANGE",
+ "HEXISTS",
+ "HGET",
+ "HGETALL",
+ "HKEYS",
+ "HLEN",
+ "HMGET",
+ "HSTRLEN",
+ "HVALS",
+ "KEYS",
+ "LINDEX",
+ "LLEN",
+ "LRANGE",
+ "MGET",
+ "PTTL",
+ "RANDOMKEY",
+ "SCARD",
+ "SDIFF",
+ "SINTER",
+ "SISMEMBER",
+ "SMEMBERS",
+ "SRANDMEMBER",
+ "STRLEN",
+ "SUNION",
+ "TTL",
+ "ZCARD",
+ "ZCOUNT",
+ "ZRANGE",
+ "ZSCORE",
+])
+
+
+def cleanup_kwargs(**kwargs):
+ """
+ Remove unsupported or disabled keys from kwargs
+ """
+ connection_kwargs = {
+ k: v
+ for k, v in kwargs.items()
+ if k in REDIS_ALLOWED_KEYS and k not in KWARGS_DISABLED_KEYS
+ }
+
+ return connection_kwargs
+
+
+class ClusterParser(DefaultParser):
+ EXCEPTION_CLASSES = dict_merge(
+ DefaultParser.EXCEPTION_CLASSES, {
+ 'ASK': AskError,
+ 'TRYAGAIN': TryAgainError,
+ 'MOVED': MovedError,
+ 'CLUSTERDOWN': ClusterDownError,
+ 'CROSSSLOT': ClusterCrossSlotError,
+ 'MASTERDOWN': MasterDownError,
+ })
+
+
+class RedisCluster(ClusterCommands):
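+    # Upper bound on attempts for a single command execution (e.g. while
+    # following MOVED/ASK redirects) before the request is abandoned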
+ RedisClusterRequestTTL = 16
+
+ PRIMARIES = "primaries"
+ REPLICAS = "replicas"
+ ALL_NODES = "all"
+ RANDOM = "random"
+ DEFAULT_NODE = "default-node"
+
+ NODE_FLAGS = {
+ PRIMARIES,
+ REPLICAS,
+ ALL_NODES,
+ RANDOM,
+ DEFAULT_NODE
+ }
+
+ COMMAND_FLAGS = dict_merge(
+ list_keys_to_dict(
+ [
+ "CLIENT LIST",
+ "CLIENT SETNAME",
+ "CLIENT GETNAME",
+ "CONFIG SET",
+ "CONFIG REWRITE",
+ "CONFIG RESETSTAT",
+ "TIME",
+ "PUBSUB CHANNELS",
+ "PUBSUB NUMPAT",
+ "PUBSUB NUMSUB",
+ "PING",
+ "INFO",
+ "SHUTDOWN",
+ "KEYS",
+ "SCAN",
+ "FLUSHALL",
+ "FLUSHDB",
+ "DBSIZE",
+ "BGSAVE",
+ "SLOWLOG GET",
+ "SLOWLOG LEN",
+ "SLOWLOG RESET",
+ "WAIT",
+ "SAVE",
+ "MEMORY PURGE",
+ "MEMORY MALLOC-STATS",
+ "MEMORY STATS",
+ "LASTSAVE",
+ "CLIENT TRACKINGINFO",
+ "CLIENT PAUSE",
+ "CLIENT UNPAUSE",
+ "CLIENT UNBLOCK",
+ "CLIENT ID",
+ "CLIENT REPLY",
+ "CLIENT GETREDIR",
+ "CLIENT INFO",
+ "CLIENT KILL",
+ "READONLY",
+ "READWRITE",
+ "CLUSTER INFO",
+ "CLUSTER MEET",
+ "CLUSTER NODES",
+ "CLUSTER REPLICAS",
+ "CLUSTER RESET",
+ "CLUSTER SET-CONFIG-EPOCH",
+ "CLUSTER SLOTS",
+ "CLUSTER COUNT-FAILURE-REPORTS",
+ "CLUSTER KEYSLOT",
+ "COMMAND",
+ "COMMAND COUNT",
+ "COMMAND GETKEYS",
+ "CONFIG GET",
+ "DEBUG",
+ "RANDOMKEY",
+ "READONLY",
+ "READWRITE",
+ "TIME",
+ ],
+ DEFAULT_NODE,
+ ),
+ list_keys_to_dict(
+ [
+ "CLUSTER COUNTKEYSINSLOT",
+ "CLUSTER DELSLOTS",
+ "CLUSTER GETKEYSINSLOT",
+ "CLUSTER SETSLOT",
+ ],
+ SLOT_ID,
+ ),
+ )
+
+ CLUSTER_COMMANDS_RESPONSE_CALLBACKS = {
+ 'CLUSTER ADDSLOTS': bool,
+ 'CLUSTER COUNT-FAILURE-REPORTS': int,
+ 'CLUSTER COUNTKEYSINSLOT': int,
+ 'CLUSTER DELSLOTS': bool,
+ 'CLUSTER FAILOVER': bool,
+ 'CLUSTER FORGET': bool,
+ 'CLUSTER GETKEYSINSLOT': list,
+ 'CLUSTER KEYSLOT': int,
+ 'CLUSTER MEET': bool,
+ 'CLUSTER REPLICATE': bool,
+ 'CLUSTER RESET': bool,
+ 'CLUSTER SAVECONFIG': bool,
+ 'CLUSTER SET-CONFIG-EPOCH': bool,
+ 'CLUSTER SETSLOT': bool,
+ 'CLUSTER SLOTS': parse_cluster_slots,
+ 'ASKING': bool,
+ 'READONLY': bool,
+ 'READWRITE': bool,
+ }
+
+ RESULT_CALLBACKS = dict_merge(
+ list_keys_to_dict([
+ "PUBSUB NUMSUB",
+ ], parse_pubsub_numsub),
+ list_keys_to_dict([
+ "PUBSUB NUMPAT",
+ ], lambda command, res: sum(list(res.values()))),
+ list_keys_to_dict([
+ "KEYS",
+ "PUBSUB CHANNELS",
+ ], merge_result),
+ list_keys_to_dict([
+ "PING",
+ "CONFIG SET",
+ "CONFIG REWRITE",
+ "CONFIG RESETSTAT",
+ "CLIENT SETNAME",
+ "BGSAVE",
+ "SLOWLOG RESET",
+ "SAVE",
+ "MEMORY PURGE",
+ "CLIENT PAUSE",
+ "CLIENT UNPAUSE",
+ ], lambda command, res: all(res.values()) if isinstance(res, dict)
+ else res),
+ list_keys_to_dict([
+ "DBSIZE",
+ "WAIT",
+ ], lambda command, res: sum(res.values()) if isinstance(res, dict)
+ else res),
+ list_keys_to_dict([
+ "CLIENT UNBLOCK",
+ ], lambda command, res: 1 if sum(res.values()) > 0 else 0),
+ list_keys_to_dict([
+ "SCAN",
+ ], parse_scan_result)
+ )
+
+ def __init__(
+ self,
+ host=None,
+ port=6379,
+ startup_nodes=None,
+ cluster_error_retry_attempts=3,
+ require_full_coverage=True,
+ skip_full_coverage_check=False,
+ reinitialize_steps=10,
+ read_from_replicas=False,
+ url=None,
+ retry_on_timeout=False,
+ retry=None,
+ **kwargs
+ ):
+ """
+ Initialize a new RedisCluster client.
+
+ :startup_nodes: 'list[ClusterNode]'
+ List of nodes from which initial bootstrapping can be done
+ :host: 'str'
+ Can be used to point to a startup node
+ :port: 'int'
+ Can be used to point to a startup node
+ :require_full_coverage: 'bool'
+ If set to True, as it is by default, all slots must be covered.
+ If set to False and not all slots are covered, the instance
+ creation will succeed only if 'cluster-require-full-coverage'
+ configuration is set to 'no' in all of the cluster's nodes.
+ Otherwise, RedisClusterException will be thrown.
+ :skip_full_coverage_check: 'bool'
+ If require_full_coverage is set to False, a check of
+ cluster-require-full-coverage config will be executed against all
+ nodes. Set skip_full_coverage_check to True to skip this check.
+ Useful for clusters without the CONFIG command (like ElastiCache)
+ :read_from_replicas: 'bool'
+ Enable read from replicas in READONLY mode. You can read possibly
+ stale data.
+            When set to true, read commands will be distributed between the
+            primary and its replicas in a round-robin manner.
+ :cluster_error_retry_attempts: 'int'
+ Retry command execution attempts when encountering ClusterDownError
+ or ConnectionError
+ :retry_on_timeout: 'bool'
+ To specify a retry policy, first set `retry_on_timeout` to `True`
+ then set `retry` to a valid `Retry` object
+ :retry: 'Retry'
+ a `Retry` object
+ :**kwargs:
+ Extra arguments that will be sent into Redis instance when created
+ (See Official redis-py doc for supported kwargs
+ [https://github.com/andymccurdy/redis-py/blob/master/redis/client.py])
+ Some kwargs are not supported and will raise a
+ RedisClusterException:
+            - db (Redis does not support database SELECT in cluster mode)
+ """
+ log.info("Creating a new instance of RedisCluster client")
+
+ if startup_nodes is None:
+ startup_nodes = []
+
+ if "db" in kwargs:
+ # Argument 'db' is not possible to use in cluster mode
+ raise RedisClusterException(
+ "Argument 'db' is not possible to use in cluster mode"
+ )
+
+ if retry_on_timeout:
+ kwargs.update({'retry_on_timeout': retry_on_timeout,
+ 'retry': retry})
+
+ # Get the startup node/s
+ from_url = False
+ if url is not None:
+ from_url = True
+ url_options = parse_url(url)
+ if "path" in url_options:
+ raise RedisClusterException(
+ "RedisCluster does not currently support Unix Domain "
+ "Socket connections")
+ if "db" in url_options and url_options["db"] != 0:
+ # Argument 'db' is not possible to use in cluster mode
+ raise RedisClusterException(
+ "A ``db`` querystring option can only be 0 in cluster mode"
+ )
+ kwargs.update(url_options)
+ host = kwargs.get('host')
+ port = kwargs.get('port', port)
+ startup_nodes.append(ClusterNode(host, port))
+ elif host is not None and port is not None:
+ startup_nodes.append(ClusterNode(host, port))
+ elif len(startup_nodes) == 0:
+ # No startup node was provided
+ raise RedisClusterException(
+ "RedisCluster requires at least one node to discover the "
+                "cluster. Please provide one of the following:\n"
+ "1. host and port, for example:\n"
+ " RedisCluster(host='localhost', port=6379)\n"
+ "2. list of startup nodes, for example:\n"
+ " RedisCluster(startup_nodes=[ClusterNode('localhost', 6379),"
+ " ClusterNode('localhost', 6378)])")
+ log.debug(f"startup_nodes : {startup_nodes}")
+ # Update the connection arguments
+ # Whenever a new connection is established, RedisCluster's on_connect
+ # method should be run
+ # If the user passed on_connect function we'll save it and run it
+ # inside the RedisCluster.on_connect() function
+ self.user_on_connect_func = kwargs.pop("redis_connect_func", None)
+ kwargs.update({"redis_connect_func": self.on_connect})
+ kwargs = cleanup_kwargs(**kwargs)
+
+ self.encoder = Encoder(
+ kwargs.get("encoding", "utf-8"),
+ kwargs.get("encoding_errors", "strict"),
+ kwargs.get("decode_responses", False),
+ )
+ self.cluster_error_retry_attempts = cluster_error_retry_attempts
+ self.command_flags = self.__class__.COMMAND_FLAGS.copy()
+ self.node_flags = self.__class__.NODE_FLAGS.copy()
+ self.read_from_replicas = read_from_replicas
+ self.reinitialize_counter = 0
+ self.reinitialize_steps = reinitialize_steps
+ self.nodes_manager = None
+ self.nodes_manager = NodesManager(
+ startup_nodes=startup_nodes,
+ from_url=from_url,
+ require_full_coverage=require_full_coverage,
+ skip_full_coverage_check=skip_full_coverage_check,
+ **kwargs,
+ )
+
+ self.cluster_response_callbacks = CaseInsensitiveDict(
+ self.__class__.CLUSTER_COMMANDS_RESPONSE_CALLBACKS)
+ self.result_callbacks = CaseInsensitiveDict(
+ self.__class__.RESULT_CALLBACKS)
+ self.commands_parser = CommandsParser(self)
+ self._lock = threading.Lock()
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ self.close()
+
+ def __del__(self):
+ self.close()
+
+ def disconnect_connection_pools(self):
+ for node in self.get_nodes():
+ if node.redis_connection:
+ try:
+ node.redis_connection.connection_pool.disconnect()
+ except OSError:
+ # Client was already disconnected. do nothing
+ pass
+
+ @classmethod
+ def from_url(cls, url, **kwargs):
+ """
+ Return a Redis client object configured from the given URL
+
+ For example::
+
+ redis://[[username]:[password]]@localhost:6379/0
+ rediss://[[username]:[password]]@localhost:6379/0
+ unix://[[username]:[password]]@/path/to/socket.sock?db=0
+
+ Three URL schemes are supported:
+
+ - `redis://` creates a TCP socket connection. See more at:
+ <https://www.iana.org/assignments/uri-schemes/prov/redis>
+    - `rediss://` creates an SSL wrapped TCP socket connection. See more at:
+ <https://www.iana.org/assignments/uri-schemes/prov/rediss>
+ - ``unix://``: creates a Unix Domain Socket connection.
+
+ The username, password, hostname, path and all querystring values
+ are passed through urllib.parse.unquote in order to replace any
+ percent-encoded values with their corresponding characters.
+
+ There are several ways to specify a database number. The first value
+ found will be used:
+ 1. A ``db`` querystring option, e.g. redis://localhost?db=0
+ 2. If using the redis:// or rediss:// schemes, the path argument
+ of the url, e.g. redis://localhost/0
+ 3. A ``db`` keyword argument to this function.
+
+ If none of these options are specified, the default db=0 is used.
+
+ All querystring options are cast to their appropriate Python types.
+ Boolean arguments can be specified with string values "True"/"False"
+ or "Yes"/"No". Values that cannot be properly cast cause a
+ ``ValueError`` to be raised. Once parsed, the querystring arguments
+ and keyword arguments are passed to the ``ConnectionPool``'s
+ class initializer. In the case of conflicting arguments, querystring
+ arguments always win.
+
+ """
+ return cls(url=url, **kwargs)
+
+ def on_connect(self, connection):
+ """
+ Initialize the connection, authenticate and select a database and send
+ READONLY if it is set during object initialization.
+ """
+ connection.set_parser(ClusterParser)
+ connection.on_connect()
+
+ if self.read_from_replicas:
+ # Sending READONLY command to server to configure connection as
+ # readonly. Since each cluster node may change its server type due
+ # to a failover, we should establish a READONLY connection
+ # regardless of the server type. If this is a primary connection,
+ # READONLY would not affect executing write commands.
+ connection.send_command('READONLY')
+ if str_if_bytes(connection.read_response()) != 'OK':
+ raise ConnectionError('READONLY command failed')
+
+ if self.user_on_connect_func is not None:
+ self.user_on_connect_func(connection)
+
+ def get_redis_connection(self, node):
+ if not node.redis_connection:
+ with self._lock:
+ if not node.redis_connection:
+ self.nodes_manager.create_redis_connections([node])
+ return node.redis_connection
+
+ def get_node(self, host=None, port=None, node_name=None):
+ return self.nodes_manager.get_node(host, port, node_name)
+
+ def get_primaries(self):
+ return self.nodes_manager.get_nodes_by_server_type(PRIMARY)
+
+ def get_replicas(self):
+ return self.nodes_manager.get_nodes_by_server_type(REPLICA)
+
+ def get_random_node(self):
+ return random.choice(list(self.nodes_manager.nodes_cache.values()))
+
+ def get_nodes(self):
+ return list(self.nodes_manager.nodes_cache.values())
+
+ def get_node_from_key(self, key, replica=False):
+ """
+ Get the node that holds the key's slot.
+ If replica is set to True but the slot doesn't have any replicas,
+ None is returned.
+ """
+ slot = self.keyslot(key)
+ slot_cache = self.nodes_manager.slots_cache.get(slot)
+ if slot_cache is None or len(slot_cache) == 0:
+ raise SlotNotCoveredError(
+ f'Slot "{slot}" is not covered by the cluster.'
+ )
+ if replica and len(self.nodes_manager.slots_cache[slot]) < 2:
+ return None
+ elif replica:
+ node_idx = 1
+ else:
+ # primary
+ node_idx = 0
+
+ return slot_cache[node_idx]
+
+ def get_default_node(self):
+ """
+ Get the cluster's default node
+ """
+ return self.nodes_manager.default_node
+
+ def set_default_node(self, node):
+ """
+ Set the default node of the cluster.
+ :param node: 'ClusterNode'
+ :return True if the default node was set, else False
+ """
+ if node is None or self.get_node(node_name=node.name) is None:
+ log.info("The requested node does not exist in the cluster, so "
+ "the default node was not changed.")
+ return False
+ self.nodes_manager.default_node = node
+ log.info(f"Changed the default cluster node to {node}")
+ return True
+
+ def pubsub(self, node=None, host=None, port=None, **kwargs):
+ """
+ Allows passing a ClusterNode, or host&port, to get a pubsub instance
+ connected to the specified node
+ """
+ return ClusterPubSub(self, node=node, host=host, port=port, **kwargs)
+
+ def pipeline(self, transaction=None, shard_hint=None):
+ """
+ Cluster impl:
+ Pipelines do not work in cluster mode the same way they
+ do in standalone mode. A clone of this object is created so
+ that pipeline behavior can be simulated: each command is
+ queued when called, and execute() sends the queued commands
+ and returns the stack of results.
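+
+ A hypothetical usage sketch (rc is an existing RedisCluster)::
+
+ with rc.pipeline() as pipe:
+ pipe.set('foo', 'bar')
+ pipe.get('foo')
+ results = pipe.execute()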
+ """
+ if shard_hint:
+ raise RedisClusterException(
+ "shard_hint is deprecated in cluster mode")
+
+ if transaction:
+ raise RedisClusterException(
+ "transaction is deprecated in cluster mode")
+
+ return ClusterPipeline(
+ nodes_manager=self.nodes_manager,
+ startup_nodes=self.nodes_manager.startup_nodes,
+ result_callbacks=self.result_callbacks,
+ cluster_response_callbacks=self.cluster_response_callbacks,
+ cluster_error_retry_attempts=self.cluster_error_retry_attempts,
+ read_from_replicas=self.read_from_replicas,
+ reinitialize_steps=self.reinitialize_steps
+ )
+
+ def _determine_nodes(self, *args, **kwargs):
+ command = args[0]
+ nodes_flag = kwargs.pop("nodes_flag", None)
+ if nodes_flag is not None:
+ # nodes flag passed by the user
+ command_flag = nodes_flag
+ else:
+ # get the nodes group for this command if it was predefined
+ command_flag = self.command_flags.get(command)
+ if command_flag:
+ log.debug(f"Target node/s for {command}: {command_flag}")
+ if command_flag == self.__class__.RANDOM:
+ # return a random node
+ return [self.get_random_node()]
+ elif command_flag == self.__class__.PRIMARIES:
+ # return all primaries
+ return self.get_primaries()
+ elif command_flag == self.__class__.REPLICAS:
+ # return all replicas
+ return self.get_replicas()
+ elif command_flag == self.__class__.ALL_NODES:
+ # return all nodes
+ return self.get_nodes()
+ elif command_flag == self.__class__.DEFAULT_NODE:
+ # return the cluster's default node
+ return [self.nodes_manager.default_node]
+ else:
+ # get the node that holds the key's slot
+ slot = self.determine_slot(*args)
+ node = self.nodes_manager.get_node_from_slot(
+ slot, self.read_from_replicas and command in READ_COMMANDS)
+ log.debug(f"Target for {args}: slot {slot}")
+ return [node]
+
+ def _should_reinitialized(self):
+ # In order not to reinitialize the cluster, the user can set
+ # reinitialize_steps to 0.
+ if self.reinitialize_steps == 0:
+ return False
+ else:
+ return self.reinitialize_counter % self.reinitialize_steps == 0
+
+ def keyslot(self, key):
+ """
+ Calculate keyslot for a given key.
+ See Keys distribution model in https://redis.io/topics/cluster-spec
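+
+ For example (the standard CRC16 mapping, shown for illustration)::
+
+ rc.keyslot("foo") # -> 12182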
+ """
+ k = self.encoder.encode(key)
+ return key_slot(k)
+
+ def _get_command_keys(self, *args):
+ """
+ Get the keys in the command. If the command has no keys in it,
+ None is returned.
+ """
+ redis_conn = self.get_default_node().redis_connection
+ return self.commands_parser.get_keys(redis_conn, *args)
+
+ def determine_slot(self, *args):
+ """
+ Figure out which slot to use based on the command and its args
+ """
+ if self.command_flags.get(args[0]) == SLOT_ID:
+ # The command contains the slot ID
+ return args[1]
+
+ # Get the keys in the command
+ keys = self._get_command_keys(*args)
+ if keys is None or len(keys) == 0:
+ raise RedisClusterException(
+ "No way to dispatch this command to Redis Cluster. "
+ "Missing key.\nYou can execute the command by specifying "
+ f"target nodes.\nCommand: {args}"
+ )
+
+ if len(keys) > 1:
+ # multi-key command, we need to make sure all keys are mapped to
+ # the same slot
+ slots = {self.keyslot(key) for key in keys}
+ if len(slots) != 1:
+ raise RedisClusterException(
+ f"{args[0]} - all keys must map to the same key slot"
+ )
+ return slots.pop()
+ else:
+ # single key command
+ return self.keyslot(keys[0])
+
+ def reinitialize_caches(self):
+ self.nodes_manager.initialize()
+
+ def _is_nodes_flag(self, target_nodes):
+ return isinstance(target_nodes, str) \
+ and target_nodes in self.node_flags
+
+ def _parse_target_nodes(self, target_nodes):
+ if isinstance(target_nodes, list):
+ nodes = target_nodes
+ elif isinstance(target_nodes, ClusterNode):
+ # Supports passing a single ClusterNode as a variable
+ nodes = [target_nodes]
+ elif isinstance(target_nodes, dict):
+ # Supports dictionaries of the format {node_name: node}.
+ # It enables executing commands on multiple nodes as follows:
+ # rc.cluster_save_config(rc.get_primaries())
+ nodes = target_nodes.values()
+ else:
+ raise TypeError(
+ "target_nodes type can be one of the following: "
+ "node_flag (PRIMARIES, REPLICAS, RANDOM, ALL_NODES),"
+ "ClusterNode, list<ClusterNode>, or dict<any, ClusterNode>. "
+ f"The passed type is {type(target_nodes)}"
+ )
+ return nodes
+
+ def execute_command(self, *args, **kwargs):
+ """
+ Wrapper for ClusterDownError and ConnectionError error handling.
+
+ It will retry the number of times specified by the config option
+ "self.cluster_error_retry_attempts", which defaults to 3 unless
+ manually configured.
+
+ If all attempts fail, the last exception is raised to the caller.
+
+ Key argument :target_nodes: can be passed with the following types:
+ nodes_flag: PRIMARIES, REPLICAS, ALL_NODES, RANDOM
+ ClusterNode
+ list<ClusterNode>
+ dict<Any, ClusterNode>
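+
+ A hypothetical sketch (rc is an existing RedisCluster instance)::
+
+ rc.execute_command('FLUSHALL', target_nodes=RedisCluster.PRIMARIES)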
+ """
+ target_nodes_specified = False
+ target_nodes = kwargs.pop("target_nodes", None)
+ if target_nodes is not None and not self._is_nodes_flag(target_nodes):
+ target_nodes = self._parse_target_nodes(target_nodes)
+ target_nodes_specified = True
+ # If ClusterDownError/ConnectionError were thrown, the nodes
+ # and slots cache were reinitialized. We will retry executing the
+ # command with the updated cluster setup only when the target nodes
+ # can be determined again with the new cache tables. Therefore,
+ # when target nodes were passed to this function, we cannot retry
+ # the command execution since the nodes may not be valid anymore
+ # after the tables were reinitialized. So in case of passed target
+ # nodes, retry_attempts will be set to 1.
+ retry_attempts = 1 if target_nodes_specified else \
+ self.cluster_error_retry_attempts
+ exception = None
+ for _ in range(0, retry_attempts):
+ try:
+ res = {}
+ if not target_nodes_specified:
+ # Determine the nodes to execute the command on
+ target_nodes = self._determine_nodes(
+ *args, **kwargs, nodes_flag=target_nodes)
+ if not target_nodes:
+ raise RedisClusterException(
+ f"No targets were found to execute {args} command on")
+ for node in target_nodes:
+ res[node.name] = self._execute_command(
+ node, *args, **kwargs)
+ # Return the processed result
+ return self._process_result(args[0], res, **kwargs)
+ except (ClusterDownError, ConnectionError) as e:
+ # The nodes and slots cache were reinitialized.
+ # Try again with the new cluster setup. All other errors
+ # should be raised.
+ exception = e
+
+ # If it fails the configured number of times then raise exception back
+ # to caller of this method
+ raise exception
+
+ def _execute_command(self, target_node, *args, **kwargs):
+ """
+ Send a command to a node in the cluster
+ """
+ command = args[0]
+ redis_node = None
+ connection = None
+ redirect_addr = None
+ asking = False
+ moved = False
+ ttl = int(self.RedisClusterRequestTTL)
+ connection_error_retry_counter = 0
+
+ while ttl > 0:
+ ttl -= 1
+ try:
+ if asking:
+ target_node = self.get_node(node_name=redirect_addr)
+ elif moved:
+ # MOVED occurred and the slots cache was updated,
+ # refresh the target node
+ slot = self.determine_slot(*args)
+ target_node = self.nodes_manager. \
+ get_node_from_slot(slot, self.read_from_replicas and
+ command in READ_COMMANDS)
+ moved = False
+
+ log.debug(
+ f"Executing command {command} on target node: "
+ f"{target_node.server_type} {target_node.name}"
+ )
+ redis_node = self.get_redis_connection(target_node)
+ connection = get_connection(redis_node, *args, **kwargs)
+ if asking:
+ connection.send_command("ASKING")
+ redis_node.parse_response(connection, "ASKING", **kwargs)
+ asking = False
+
+ connection.send_command(*args)
+ response = redis_node.parse_response(connection, command,
+ **kwargs)
+ if command in self.cluster_response_callbacks:
+ response = self.cluster_response_callbacks[command](
+ response, **kwargs)
+ return response
+
+ except (RedisClusterException, BusyLoadingError):
+ log.exception("RedisClusterException || BusyLoadingError")
+ raise
+ except ConnectionError:
+ log.exception("ConnectionError")
+ # ConnectionError can also be raised if we couldn't get a
+ # connection from the pool before timing out, so check that
+ # this is an actual connection before attempting to disconnect.
+ if connection is not None:
+ connection.disconnect()
+ connection_error_retry_counter += 1
+
+ # Give the node 0.25 seconds to get back up and retry again
+ # with the same node and configuration. After 5 attempts, try
+ # to reinitialize the cluster and see if the node
+ # configuration has changed or not
+ if connection_error_retry_counter < 5:
+ time.sleep(0.25)
+ else:
+ # Hard force of reinitialize of the node/slots setup
+ # and try again with the new setup
+ self.nodes_manager.initialize()
+ raise
+ except TimeoutError:
+ log.exception("TimeoutError")
+ if connection is not None:
+ connection.disconnect()
+
+ if ttl < self.RedisClusterRequestTTL / 2:
+ time.sleep(0.05)
+ except MovedError as e:
+ # First, we will try to patch the slots/nodes cache with the
+ # redirected node output and try again. If MovedError exceeds
+ # 'reinitialize_steps' number of times, we will force
+ # reinitializing the tables, and then try again.
+ # The 'reinitialize_counter' will increase faster when the
+ # same client object is shared between multiple threads. To
+ # reduce the reinitialization frequency, you can set
+ # 'reinitialize_steps' in the RedisCluster constructor.
+ log.exception("MovedError")
+ self.reinitialize_counter += 1
+ if self._should_reinitialized():
+ self.nodes_manager.initialize()
+ else:
+ self.nodes_manager.update_moved_exception(e)
+ moved = True
+ except TryAgainError:
+ log.exception("TryAgainError")
+
+ if ttl < self.RedisClusterRequestTTL / 2:
+ time.sleep(0.05)
+ except AskError as e:
+ log.exception("AskError")
+
+ redirect_addr = get_node_name(host=e.host, port=e.port)
+ asking = True
+ except ClusterDownError as e:
+ log.exception("ClusterDownError")
+ # ClusterDownError can occur during a failover; to let the
+ # cluster self-heal, we try to reinitialize the cluster layout
+ # and retry executing the command
+ time.sleep(0.05)
+ self.nodes_manager.initialize()
+ raise e
+ except ResponseError as e:
+ message = e.__str__()
+ log.exception(f"ResponseError: {message}")
+ raise e
+ except BaseException as e:
+ log.exception("BaseException")
+ if connection:
+ connection.disconnect()
+ raise e
+ finally:
+ if connection is not None:
+ redis_node.connection_pool.release(connection)
+
+ raise ClusterError("TTL exhausted.")
+
+ def close(self):
+ try:
+ with self._lock:
+ if self.nodes_manager:
+ self.nodes_manager.close()
+ except AttributeError:
+ # RedisCluster's __init__ can fail before nodes_manager is set
+ pass
+
+ def _process_result(self, command, res, **kwargs):
+ """
+ Process the result of the executed command.
+ The function would return a dict or a single value.
+
+ :type command: str
+ :type res: dict
+
+ `res` should be in the following format:
+ Dict<node_name, command_result>
+ """
+ if command in self.result_callbacks:
+ return self.result_callbacks[command](command, res, **kwargs)
+ elif len(res) == 1:
+ # When we execute the command on a single node, we can
+ # remove the dictionary and return a single response
+ return list(res.values())[0]
+ else:
+ return res
+
+
+class ClusterNode:
+ def __init__(self, host, port, server_type=None, redis_connection=None):
+ if host == 'localhost':
+ host = socket.gethostbyname(host)
+
+ self.host = host
+ self.port = port
+ self.name = get_node_name(host, port)
+ self.server_type = server_type
+ self.redis_connection = redis_connection
+
+ def __repr__(self):
+ return (
+ f'[host={self.host},'
+ f'port={self.port},'
+ f'name={self.name},'
+ f'server_type={self.server_type},'
+ f'redis_connection={self.redis_connection}]'
+ )
+
+ def __eq__(self, obj):
+ return isinstance(obj, ClusterNode) and obj.name == self.name
+
+
+class LoadBalancer:
+ """
+ Round-Robin Load Balancing
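+
+ For example, for a slot served by [primary, replica1, replica2],
+ successive calls to get_server_index(primary_name, 3) return
+ 0, 1, 2, 0, ...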
+ """
+
+ def __init__(self, start_index=0):
+ self.primary_to_idx = {}
+ self.start_index = start_index
+
+ def get_server_index(self, primary, list_size):
+ server_index = self.primary_to_idx.setdefault(primary,
+ self.start_index)
+ # Update the index
+ self.primary_to_idx[primary] = (server_index + 1) % list_size
+ return server_index
+
+ def reset(self):
+ self.primary_to_idx.clear()
+
+
+class NodesManager:
+ def __init__(self, startup_nodes, from_url=False,
+ require_full_coverage=True, skip_full_coverage_check=False,
+ lock=None, **kwargs):
+ self.nodes_cache = {}
+ self.slots_cache = {}
+ self.startup_nodes = {}
+ self.default_node = None
+ self.populate_startup_nodes(startup_nodes)
+ self.from_url = from_url
+ self._require_full_coverage = require_full_coverage
+ self._skip_full_coverage_check = skip_full_coverage_check
+ self._moved_exception = None
+ self.connection_kwargs = kwargs
+ self.read_load_balancer = LoadBalancer()
+ if lock is None:
+ lock = threading.Lock()
+ self._lock = lock
+ self.initialize()
+
+ def get_node(self, host=None, port=None, node_name=None):
+ """
+ Get the requested node from the cluster's nodes.
+ :return: ClusterNode if the node exists, else None
+ """
+ if host and port:
+ # the user passed host and port
+ if host == "localhost":
+ host = socket.gethostbyname(host)
+ return self.nodes_cache.get(get_node_name(host=host, port=port))
+ elif node_name:
+ return self.nodes_cache.get(node_name)
+ else:
+ log.error(
+ "get_node requires one of the following: "
+ "1. node name "
+ "2. host and port"
+ )
+ return None
+
+ def update_moved_exception(self, exception):
+ self._moved_exception = exception
+
+ def _update_moved_slots(self):
+ """
+ Update the slot's node with the redirected one
+ """
+ e = self._moved_exception
+ redirected_node = self.get_node(host=e.host, port=e.port)
+ if redirected_node is not None:
+ # The node already exists
+ if redirected_node.server_type is not PRIMARY:
+ # Update the node's server type
+ redirected_node.server_type = PRIMARY
+ else:
+ # This is a new node, we will add it to the nodes cache
+ redirected_node = ClusterNode(e.host, e.port, PRIMARY)
+ self.nodes_cache[redirected_node.name] = redirected_node
+ if redirected_node in self.slots_cache[e.slot_id]:
+ # The MOVED error resulted from a failover, and the new slot owner
+ # had previously been a replica.
+ old_primary = self.slots_cache[e.slot_id][0]
+ # Update the old primary to be a replica and add it to the end of
+ # the slot's node list
+ old_primary.server_type = REPLICA
+ self.slots_cache[e.slot_id].append(old_primary)
+ # Remove the old replica, which is now a primary, from the slot's
+ # node list
+ self.slots_cache[e.slot_id].remove(redirected_node)
+ # Override the old primary with the new one
+ self.slots_cache[e.slot_id][0] = redirected_node
+ if self.default_node == old_primary:
+ # Update the default node with the new primary
+ self.default_node = redirected_node
+ else:
+ # The new slot owner is a new server, or a server from a different
+ # shard. We need to remove all current nodes from the slot's list
+ # (including replications) and add just the new node.
+ self.slots_cache[e.slot_id] = [redirected_node]
+ # Reset moved_exception
+ self._moved_exception = None
+
+ def get_node_from_slot(self, slot, read_from_replicas=False,
+ server_type=None):
+ """
+ Gets a node that serves this hash slot
+ """
+ if self._moved_exception:
+ with self._lock:
+ if self._moved_exception:
+ self._update_moved_slots()
+
+ if self.slots_cache.get(slot) is None or \
+ len(self.slots_cache[slot]) == 0:
+ raise SlotNotCoveredError(
+ f'Slot "{slot}" not covered by the cluster. '
+ f'"require_full_coverage={self._require_full_coverage}"'
+ )
+
+ if read_from_replicas is True:
+ # get the server index in a Round-Robin manner
+ primary_name = self.slots_cache[slot][0].name
+ node_idx = self.read_load_balancer.get_server_index(
+ primary_name, len(self.slots_cache[slot]))
+ elif (
+ server_type is None
+ or server_type == PRIMARY
+ or len(self.slots_cache[slot]) == 1
+ ):
+ # return a primary
+ node_idx = 0
+ else:
+ # return a replica
+ # randomly choose one of the replicas
+ node_idx = random.randint(
+ 1, len(self.slots_cache[slot]) - 1)
+
+ return self.slots_cache[slot][node_idx]
+
+ def get_nodes_by_server_type(self, server_type):
+ """
+ Get all nodes with the specified server type
+ :param server_type: 'primary' or 'replica'
+ :return: list of ClusterNode
+ """
+ return [
+ node
+ for node in self.nodes_cache.values()
+ if node.server_type == server_type
+ ]
+
+ def populate_startup_nodes(self, nodes):
+ """
+ Populate all startup nodes and filter out any duplicates
+ """
+ for n in nodes:
+ self.startup_nodes[n.name] = n
+
+ def cluster_require_full_coverage(self, cluster_nodes):
+ """
+ If the 'cluster-require-full-coverage' config is set to 'no' on the
+ Redis servers, the cluster is still able to respond even when not
+ all slots are covered
+ """
+
+ def node_require_full_coverage(node):
+ try:
+ return ("yes" in node.redis_connection.config_get(
+ "cluster-require-full-coverage").values()
+ )
+ except ConnectionError:
+ return False
+ except Exception as e:
+ raise RedisClusterException(
+ 'ERROR sending "config get cluster-require-full-coverage"'
+ f' command to redis server: {node.name}, {e}'
+ )
+
+ # at least one node should have cluster-require-full-coverage yes
+ return any(node_require_full_coverage(node)
+ for node in cluster_nodes.values())
+
+ def check_slots_coverage(self, slots_cache):
+ # Validate if all slots are covered or if we should try next
+ # startup node
+ for i in range(0, REDIS_CLUSTER_HASH_SLOTS):
+ if i not in slots_cache:
+ return False
+ return True
+
+ def create_redis_connections(self, nodes):
+ """
+ This function creates a Redis connection to every node in :nodes:
+ """
+ for node in nodes:
+ if node.redis_connection is None:
+ node.redis_connection = self.create_redis_node(
+ host=node.host,
+ port=node.port,
+ **self.connection_kwargs,
+ )
+
+ def create_redis_node(self, host, port, **kwargs):
+ if self.from_url:
+ # Create a redis node with a custom connection pool
+ kwargs.update({"host": host})
+ kwargs.update({"port": port})
+ r = Redis(connection_pool=ConnectionPool(**kwargs))
+ else:
+ r = Redis(
+ host=host,
+ port=port,
+ **kwargs
+ )
+ return r
+
+ def initialize(self):
+ """
+ Initializes the nodes cache, slots cache and redis connections.
+ :startup_nodes:
+ Responsible for discovering other nodes in the cluster
+ """
+ log.debug("Initializing the nodes' topology of the cluster")
+ self.reset()
+ tmp_nodes_cache = {}
+ tmp_slots = {}
+ disagreements = []
+ startup_nodes_reachable = False
+ kwargs = self.connection_kwargs
+ for startup_node in self.startup_nodes.values():
+ try:
+ if startup_node.redis_connection:
+ r = startup_node.redis_connection
+ else:
+ # Create a new Redis connection and let Redis decode the
+ # responses so we won't need to handle that
+ copy_kwargs = copy.deepcopy(kwargs)
+ copy_kwargs.update({"decode_responses": True,
+ "encoding": "utf-8"})
+ r = self.create_redis_node(
+ startup_node.host, startup_node.port, **copy_kwargs)
+ self.startup_nodes[startup_node.name].redis_connection = r
+ cluster_slots = r.execute_command("CLUSTER SLOTS")
+ startup_nodes_reachable = True
+ except (ConnectionError, TimeoutError) as e:
+ msg = e.__str__()
+ log.exception('An exception occurred while trying to'
+ ' initialize the cluster using the seed node'
+ f' {startup_node.name}:\n{msg}')
+ continue
+ except ResponseError as e:
+ log.exception(
+ 'ResponseError sending "cluster slots" to redis server')
+
+ # This isn't a cluster connection, so it won't parse these
+ # exceptions automatically
+ message = e.__str__()
+ if "CLUSTERDOWN" in message or "MASTERDOWN" in message:
+ continue
+ else:
+ raise RedisClusterException(
+ 'ERROR sending "cluster slots" command to redis '
+ f'server: {startup_node}. error: {message}'
+ )
+ except Exception as e:
+ message = e.__str__()
+ raise RedisClusterException(
+ 'ERROR sending "cluster slots" command to redis '
+ f'server: {startup_node}. error: {message}'
+ )
+
+ # CLUSTER SLOTS command results in the following output:
+ # [[slot_section[from_slot,to_slot,master,replica1,...,replicaN]]]
+ # where each node contains the following list: [IP, port, node_id]
+ # Therefore, cluster_slots[0][2][0] will be the IP address of the
+ # primary node of the first slot section.
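+ # Illustrative shape (addresses and ids are placeholders):
+ # [[0, 5460, ['127.0.0.1', 7000, 'id1'],
+ # ['127.0.0.1', 7003, 'id4']], ...]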
+ # If there's only one server in the cluster, its ``host`` is ''
+ # Fix it to the host in startup_nodes
+ if (len(cluster_slots) == 1
+ and len(cluster_slots[0][2][0]) == 0
+ and len(self.startup_nodes) == 1):
+ cluster_slots[0][2][0] = startup_node.host
+
+ for slot in cluster_slots:
+ primary_node = slot[2]
+ host = primary_node[0]
+ if host == "":
+ host = startup_node.host
+ port = int(primary_node[1])
+
+ target_node = tmp_nodes_cache.get(get_node_name(host, port))
+ if target_node is None:
+ target_node = ClusterNode(host, port, PRIMARY)
+ # add this node to the nodes cache
+ tmp_nodes_cache[target_node.name] = target_node
+
+ for i in range(int(slot[0]), int(slot[1]) + 1):
+ if i not in tmp_slots:
+ tmp_slots[i] = []
+ tmp_slots[i].append(target_node)
+ replica_nodes = [slot[j] for j in range(3, len(slot))]
+
+ for replica_node in replica_nodes:
+ host = replica_node[0]
+ port = replica_node[1]
+
+ target_replica_node = tmp_nodes_cache.get(
+ get_node_name(host, port))
+ if target_replica_node is None:
+ target_replica_node = ClusterNode(
+ host, port, REPLICA)
+ tmp_slots[i].append(target_replica_node)
+ # add this node to the nodes cache
+ tmp_nodes_cache[
+ target_replica_node.name
+ ] = target_replica_node
+ else:
+ # Validate that the node already cached for this slot agrees
+ # with the node reported by this startup node
+ tmp_slot = tmp_slots[i][0]
+ if tmp_slot.name != target_node.name:
+ disagreements.append(
+ f'{tmp_slot.name} vs {target_node.name} on slot: {i}'
+ )
+
+ if len(disagreements) > 5:
+ raise RedisClusterException(
+ f'startup_nodes could not agree on a valid '
+ f'slots cache: {", ".join(disagreements)}'
+ )
+
+ if not startup_nodes_reachable:
+ raise RedisClusterException(
+ "Redis Cluster cannot be connected. Please provide at least "
+ "one reachable node. "
+ )
+
+ # Create Redis connections to all nodes
+ self.create_redis_connections(list(tmp_nodes_cache.values()))
+
+ fully_covered = self.check_slots_coverage(tmp_slots)
+ # Check if the slots are not fully covered
+ if not fully_covered and self._require_full_coverage:
+ # Full slot coverage was required, but not all slots are
+ # covered
+ raise RedisClusterException(
+ f'Not all slots are covered after querying all startup nodes. '
+ f'{len(tmp_slots)} of {REDIS_CLUSTER_HASH_SLOTS} covered...'
+ )
+ elif not fully_covered and not self._require_full_coverage:
+ # The user set require_full_coverage to False.
+ # In case of full coverage requirement in the cluster's Redis
+ # configurations, we will raise an exception. Otherwise, we may
+ # continue with partial coverage.
+ # see Redis Cluster configuration parameters in
+ # https://redis.io/topics/cluster-tutorial
+ if not self._skip_full_coverage_check and \
+ self.cluster_require_full_coverage(tmp_nodes_cache):
+ raise RedisClusterException(
+ 'Not all slots are covered but the cluster\'s '
+ 'configuration requires full coverage. Set '
+ 'cluster-require-full-coverage configuration to no on '
+ 'all of the cluster nodes if you wish the cluster to '
+ 'be able to serve without being fully covered. '
+ f'{len(tmp_slots)} of {REDIS_CLUSTER_HASH_SLOTS} covered...'
+ )
+
+ # Set the tmp variables to the real variables
+ self.nodes_cache = tmp_nodes_cache
+ self.slots_cache = tmp_slots
+ # Set the default node
+ self.default_node = self.get_nodes_by_server_type(PRIMARY)[0]
+ # Populate the startup nodes with all discovered nodes
+ self.populate_startup_nodes(self.nodes_cache.values())
+
+ def close(self):
+ self.default_node = None
+ for node in self.nodes_cache.values():
+ if node.redis_connection:
+ node.redis_connection.close()
+
+ def reset(self):
+ try:
+ self.read_load_balancer.reset()
+ except TypeError:
+ # The read_load_balancer is None, do nothing
+ pass
+
+
+class ClusterPubSub(PubSub):
+ """
+ Wrapper for PubSub class.
+
+ IMPORTANT: before using ClusterPubSub, read about the known limitations
+ with pubsub in Cluster mode and learn how to workaround them:
+ https://redis-py-cluster.readthedocs.io/en/stable/pubsub.html
+ """
+
+ def __init__(self, redis_cluster, node=None, host=None, port=None,
+ **kwargs):
+ """
+ When a pubsub instance is created without specifying a node, a single
+ node will be transparently chosen for the pubsub connection on the
+ first command execution. The node will be determined by:
+ 1. Hashing the channel name in the request to find its keyslot
+ 2. Selecting a node that handles the keyslot: If read_from_replicas is
+ set to true, a replica can be selected.
+
+ :type redis_cluster: RedisCluster
+ :type node: ClusterNode
+ :type host: str
+ :type port: int
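+
+ A hypothetical usage sketch (rc is an existing RedisCluster)::
+
+ p = rc.pubsub()
+ p.subscribe('my-channel')
+ message = p.get_message()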
+ """
+ log.info("Creating new instance of ClusterPubSub")
+ self.node = None
+ self.set_pubsub_node(redis_cluster, node, host, port)
+ connection_pool = None if self.node is None else \
+ redis_cluster.get_redis_connection(self.node).connection_pool
+ self.cluster = redis_cluster
+ super().__init__(**kwargs, connection_pool=connection_pool,
+ encoder=redis_cluster.encoder)
+
+ def set_pubsub_node(self, cluster, node=None, host=None, port=None):
+ """
+ The pubsub node will be set according to the passed node, host
+ and port. When none of node, host, or port are specified, the
+ node is set to None and will be determined by the keyslot of the
+ channel in the first command to be executed.
+ RedisClusterException will be thrown if the passed node does not exist
+ in the cluster.
+ If host is passed without port, or vice versa, a DataError will be
+ thrown.
+ :type cluster: RedisCluster
+ :type node: ClusterNode
+ :type host: str
+ :type port: int
+ """
+ if node is not None:
+ # node is passed by the user
+ self._raise_on_invalid_node(cluster, node, node.host, node.port)
+ pubsub_node = node
+ elif host is not None and port is not None:
+ # host and port passed by the user
+ node = cluster.get_node(host=host, port=port)
+ self._raise_on_invalid_node(cluster, node, host, port)
+ pubsub_node = node
+ elif any([host, port]) is True:
+ # only 'host' or 'port' passed
+ raise DataError('Passing a host requires passing a port, '
+ 'and vice versa')
+ else:
+ # nothing passed by the user. set node to None
+ pubsub_node = None
+
+ self.node = pubsub_node
+
+ def get_pubsub_node(self):
+ """
+ Get the node that is being used as the pubsub connection
+ """
+ return self.node
+
+ def _raise_on_invalid_node(self, redis_cluster, node, host, port):
+ """
+ Raise a RedisClusterException if the node is None or doesn't exist in
+ the cluster.
+ """
+ if node is None or redis_cluster.get_node(node_name=node.name) is None:
+ raise RedisClusterException(
+ f"Node {host}:{port} doesn't exist in the cluster")
+
+ def execute_command(self, *args, **kwargs):
+ """
+ Execute a publish/subscribe command.
+
+ Code taken from redis-py and tweaked to work within a cluster.
+ """
+ # NOTE: don't parse the response in this function -- it could pull a
+ # legitimate message off the stack if the connection is already
+ # subscribed to one or more channels
+
+ if self.connection is None:
+ if self.connection_pool is None:
+ if len(args) > 1:
+ # Hash the first channel and get one of the nodes holding
+ # this slot
+ channel = args[1]
+ slot = self.cluster.keyslot(channel)
+ node = self.cluster.nodes_manager. \
+ get_node_from_slot(slot, self.cluster.
+ read_from_replicas)
+ else:
+ # Get a random node
+ node = self.cluster.get_random_node()
+ self.node = node
+ redis_connection = self.cluster.get_redis_connection(node)
+ self.connection_pool = redis_connection.connection_pool
+ self.connection = self.connection_pool.get_connection(
+ 'pubsub',
+ self.shard_hint
+ )
+ # register a callback that re-subscribes to any channels we
+ # were listening to when we were disconnected
+ self.connection.register_connect_callback(self.on_connect)
+ connection = self.connection
+ self._execute(connection, connection.send_command, *args)
+
+ def get_redis_connection(self):
+ """
+ Get the Redis connection of the pubsub connected node.
+ """
+ if self.node is not None:
+ return self.node.redis_connection
+
+
+ERRORS_ALLOW_RETRY = (ConnectionError, TimeoutError,
+ MovedError, AskError, TryAgainError)
+
+
+class ClusterPipeline(RedisCluster):
+ """
+ Support for Redis pipeline
+ in cluster mode
+ """
+
+ def __init__(self, nodes_manager, result_callbacks=None,
+ cluster_response_callbacks=None, startup_nodes=None,
+ read_from_replicas=False, cluster_error_retry_attempts=3,
+ reinitialize_steps=10, **kwargs):
+ """
+ """
+ log.info("Creating new instance of ClusterPipeline")
+ self.command_stack = []
+ self.nodes_manager = nodes_manager
+ self.refresh_table_asap = False
+ self.result_callbacks = (result_callbacks or
+ self.__class__.RESULT_CALLBACKS.copy())
+ self.startup_nodes = startup_nodes if startup_nodes else []
+ self.read_from_replicas = read_from_replicas
+ self.command_flags = self.__class__.COMMAND_FLAGS.copy()
+ self.cluster_response_callbacks = cluster_response_callbacks
+ self.cluster_error_retry_attempts = cluster_error_retry_attempts
+ self.reinitialize_counter = 0
+ self.reinitialize_steps = reinitialize_steps
+ self.encoder = Encoder(
+ kwargs.get("encoding", "utf-8"),
+ kwargs.get("encoding_errors", "strict"),
+ kwargs.get("decode_responses", False),
+ )
+
+ # The commands parser refers to the parent
+ # so that we don't push the COMMAND command
+ # onto the stack
+ self.commands_parser = CommandsParser(super())
+
+ def __repr__(self):
+ """
+ """
+ return f"{type(self).__name__}"
+
+ def __enter__(self):
+ """
+ """
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ """
+ """
+ self.reset()
+
+ def __del__(self):
+ try:
+ self.reset()
+ except Exception:
+ pass
+
+ def __len__(self):
+ """
+ """
+ return len(self.command_stack)
+
+ def __nonzero__(self):
+ "Pipeline instances should always evaluate to True on Python 2.7"
+ return True
+
+ def __bool__(self):
+ "Pipeline instances should always evaluate to True on Python 3+"
+ return True
+
+ def execute_command(self, *args, **kwargs):
+ """
+ Wrapper function for pipeline_execute_command
+ """
+ return self.pipeline_execute_command(*args, **kwargs)
+
+ def pipeline_execute_command(self, *args, **options):
+ """
+ Appends the executed command to the pipeline's command stack
+ """
+ self.command_stack.append(
+ PipelineCommand(args, options, len(self.command_stack)))
+ return self
+
+ def raise_first_error(self, stack):
+ """
+ Raise the first exception on the stack
+ """
+ for c in stack:
+ r = c.result
+ if isinstance(r, Exception):
+ self.annotate_exception(r, c.position + 1, c.args)
+ raise r
+
+ def annotate_exception(self, exception, number, command):
+ """
+ Provides extra context to the exception prior to it being handled
+ """
+ cmd = ' '.join(map(safe_str, command))
+ msg = (
+ f'Command # {number} ({cmd}) of pipeline '
+ f'caused error: {exception.args[0]}'
+ )
+ exception.args = (msg,) + exception.args[1:]
+
+ def execute(self, raise_on_error=True):
+ """
+ Execute all the commands in the current pipeline
+ """
+ stack = self.command_stack
+ try:
+ return self.send_cluster_commands(stack, raise_on_error)
+ finally:
+ self.reset()
+
+ def reset(self):
+ """
+ Reset back to empty pipeline.
+ """
+ self.command_stack = []
+
+ self.scripts = set()
+
+ # TODO: Implement
+ # make sure to reset the connection state in the event that we were
+ # watching something
+ # if self.watching and self.connection:
+ # try:
+ # # call this manually since our unwatch or
+ # # immediate_execute_command methods can call reset()
+ # self.connection.send_command('UNWATCH')
+ # self.connection.read_response()
+ # except ConnectionError:
+ # # disconnect will also remove any previous WATCHes
+ # self.connection.disconnect()
+
+ # clean up the other instance attributes
+ self.watching = False
+ self.explicit_transaction = False
+
+ # TODO: Implement
+ # we can safely return the connection to the pool here since we're
+ # sure we're no longer WATCHing anything
+ # if self.connection:
+ # self.connection_pool.release(self.connection)
+ # self.connection = None
+
+ def send_cluster_commands(self, stack,
+ raise_on_error=True, allow_redirections=True):
+ """
+ Wrapper for CLUSTERDOWN error handling.
+
+ If the cluster reports it is down, it is assumed that:
+ - connection_pool was disconnected
+ - connection_pool was reset
+ - refresh_table_asap was set to True
+
+ It will retry the number of times specified by
+ the config option "self.cluster_error_retry_attempts",
+ which defaults to 3 unless manually configured.
+
+ If all attempts fail, a ClusterDownError
+ is raised.
+ """
+ if not stack:
+ return []
+
+ for _ in range(0, self.cluster_error_retry_attempts):
+ try:
+ return self._send_cluster_commands(
+ stack,
+ raise_on_error=raise_on_error,
+ allow_redirections=allow_redirections,
+ )
+ except ClusterDownError:
+ # Try again with the new cluster setup. All other errors
+ # should be raised.
+ pass
+
+ # If it fails the configured number of times then raise
+ # exception back to caller of this method
+ raise ClusterDownError(
+ "CLUSTERDOWN error. Unable to rebuild the cluster")
+
+ def _send_cluster_commands(self, stack,
+ raise_on_error=True,
+ allow_redirections=True):
+ """
+ Send a bunch of cluster commands to the redis cluster.
+
+ `allow_redirections` If the pipeline should follow
+ `ASK` & `MOVED` responses automatically. If set
+ to false it will raise RedisClusterException.
+ """
+ # the first time sending the commands we send all of
+ # the commands that were queued up.
+ # if we have to run through it again, we only retry
+ # the commands that failed.
+ attempt = sorted(stack, key=lambda x: x.position)
+
+ # build a mapping from node name to a NodeCommands object for
+ # the nodes we need to send commands to
+ nodes = {}
+
+ # as we move through each command that still needs to be processed,
+ # we figure out the slot number that command maps to, then from
+ # the slot determine the node.
+ for c in attempt:
+ # refer to our internal node -> slot table that
+ # tells us where a given
+ # command should route to.
+ slot = self.determine_slot(*c.args)
+ node = self.nodes_manager.get_node_from_slot(
+ slot, self.read_from_replicas and c.args[0] in READ_COMMANDS)
+
+ # now that we know the name of the node
+ # ( it's just a string in the form of host:port )
+ # we can build a list of commands for each node.
+ node_name = node.name
+ if node_name not in nodes:
+ redis_node = self.get_redis_connection(node)
+ connection = get_connection(redis_node, c.args)
+ nodes[node_name] = NodeCommands(redis_node.parse_response,
+ redis_node.connection_pool,
+ connection)
+
+ nodes[node_name].append(c)
+
+ # send the commands in sequence.
+ # we write to all the open sockets for each node first,
+ # before reading anything
+ # this allows us to flush all the requests out across the
+ # network essentially in parallel
+ # so that we can read them all in parallel as they come back.
+ # we don't multiplex on the sockets as they become available,
+ # but that shouldn't make too much difference.
+ node_commands = nodes.values()
+ for n in node_commands:
+ n.write()
+
+ for n in node_commands:
+ n.read()
+
+ # release all of the redis connections we allocated earlier
+ # back into the connection pool.
+ # we used to do this step as part of a try/finally block,
+ # but it is really dangerous to
+ # release connections back into the pool if for some
+ # reason the socket has data still left in it
+ # from a previous operation. The write and
+ # read operations already have try/catch around them for
+ # all known types of errors including connection
+ # and socket level errors.
+ # So if we hit an exception, something really bad
+ # happened and putting any of
+ # these connections back into the pool is a very bad idea.
+ # the socket might have unread buffer still sitting in it,
+ # and then the next time we read from it we pass the
+ # buffered result back from a previous command and
+ # every single request after to that connection will always get
+ # a mismatched result.
+ for n in nodes.values():
+ n.connection_pool.release(n.connection)
+
+ # if the response isn't an exception it is a
+ # valid response from the node
+ # we're all done with that command, YAY!
+ # if we have more commands to attempt, we've run into problems.
+ # collect all the commands we are allowed to retry.
+ # (MOVED, ASK, or connection errors or timeout errors)
+ attempt = sorted((c for c in attempt
+ if isinstance(c.result, ERRORS_ALLOW_RETRY)),
+ key=lambda x: x.position)
+ if attempt and allow_redirections:
+ # RETRY MAGIC HAPPENS HERE!
+ # send these remaining commands one at a time using `execute_command`
+ # in the main client. This keeps our retry logic
+ # in one place mostly,
+ # and allows us to be more confident in correctness of behavior.
+ # at this point any speed gains from pipelining have been lost
+ # anyway, so we might as well make the best
+ # attempt to get the correct behavior.
+ #
+ # The client command will handle retries for each
+ # individual command sequentially as we pass each
+ # one into `execute_command`. Any exceptions
+ # that bubble out should only appear once all
+ # retries have been exhausted.
+ #
+ # If a lot of commands have failed, we'll be setting the
+ # flag to rebuild the slots table from scratch.
+ # So MOVED errors should correct themselves fairly quickly.
+ log.exception(
+ f'An exception occurred during pipeline execution. '
+ f'args: {attempt[-1].args}, '
+ f'error: {type(attempt[-1].result).__name__} '
+ f'{str(attempt[-1].result)}'
+ )
+ self.reinitialize_counter += 1
+ if self._should_reinitialized():
+ self.nodes_manager.initialize()
+ for c in attempt:
+ try:
+ # send each command individually like we
+ # do in the main client.
+ c.result = super().execute_command(*c.args, **c.options)
+ except RedisError as e:
+ c.result = e
+
+ # turn the response back into a simple flat array that corresponds
+ # to the sequence of commands issued in the stack in pipeline.execute()
+ response = [c.result for c in sorted(stack, key=lambda x: x.position)]
+
+ if raise_on_error:
+ self.raise_first_error(stack)
+
+ return response
+
+ def _fail_on_redirect(self, allow_redirections):
+ """
+ """
+ if not allow_redirections:
+ raise RedisClusterException(
+ "ASK & MOVED redirection not allowed in this pipeline")
+
+ def eval(self):
+ """
+ """
+ raise RedisClusterException("method eval() is not implemented")
+
+ def multi(self):
+ """
+ """
+ raise RedisClusterException("method multi() is not implemented")
+
+ def immediate_execute_command(self, *args, **options):
+ """
+ """
+ raise RedisClusterException(
+ "method immediate_execute_command() is not implemented")
+
+ def _execute_transaction(self, *args, **kwargs):
+ """
+ """
+ raise RedisClusterException(
+ "method _execute_transaction() is not implemented")
+
+ def load_scripts(self):
+ """
+ """
+ raise RedisClusterException(
+ "method load_scripts() is not implemented")
+
+ def watch(self, *names):
+ """
+ """
+ raise RedisClusterException("method watch() is not implemented")
+
+ def unwatch(self):
+ """
+ """
+ raise RedisClusterException("method unwatch() is not implemented")
+
+ def script_load_for_pipeline(self, *args, **kwargs):
+ """
+ """
+ raise RedisClusterException(
+ "method script_load_for_pipeline() is not implemented")
+
+ def delete(self, *names):
+ """
+ "Delete a key specified by ``names``"
+ """
+ if len(names) != 1:
+ raise RedisClusterException(
+ "deleting multiple keys is not "
+ "implemented in pipeline command")
+
+ return self.execute_command('DEL', names[0])
+
+
+def block_pipeline_command(func):
+ """
+ Raises an error because some pipelined commands must
+ be blocked when running in cluster-mode
+ """
+
+ def inner(*args, **kwargs):
+ raise RedisClusterException(
+ f"ERROR: Calling pipelined function {func.__name__} is blocked when "
+ f"running redis in cluster mode...")
+
+ return inner
+
+
+# Blocked pipeline commands
+ClusterPipeline.bitop = block_pipeline_command(RedisCluster.bitop)
+ClusterPipeline.brpoplpush = block_pipeline_command(RedisCluster.brpoplpush)
+ClusterPipeline.client_getname = \
+ block_pipeline_command(RedisCluster.client_getname)
+ClusterPipeline.client_list = block_pipeline_command(RedisCluster.client_list)
+ClusterPipeline.client_setname = \
+ block_pipeline_command(RedisCluster.client_setname)
+ClusterPipeline.config_set = block_pipeline_command(RedisCluster.config_set)
+ClusterPipeline.dbsize = block_pipeline_command(RedisCluster.dbsize)
+ClusterPipeline.flushall = block_pipeline_command(RedisCluster.flushall)
+ClusterPipeline.flushdb = block_pipeline_command(RedisCluster.flushdb)
+ClusterPipeline.keys = block_pipeline_command(RedisCluster.keys)
+ClusterPipeline.mget = block_pipeline_command(RedisCluster.mget)
+ClusterPipeline.move = block_pipeline_command(RedisCluster.move)
+ClusterPipeline.mset = block_pipeline_command(RedisCluster.mset)
+ClusterPipeline.msetnx = block_pipeline_command(RedisCluster.msetnx)
+ClusterPipeline.pfmerge = block_pipeline_command(RedisCluster.pfmerge)
+ClusterPipeline.pfcount = block_pipeline_command(RedisCluster.pfcount)
+ClusterPipeline.ping = block_pipeline_command(RedisCluster.ping)
+ClusterPipeline.publish = block_pipeline_command(RedisCluster.publish)
+ClusterPipeline.randomkey = block_pipeline_command(RedisCluster.randomkey)
+ClusterPipeline.rename = block_pipeline_command(RedisCluster.rename)
+ClusterPipeline.renamenx = block_pipeline_command(RedisCluster.renamenx)
+ClusterPipeline.rpoplpush = block_pipeline_command(RedisCluster.rpoplpush)
+ClusterPipeline.scan = block_pipeline_command(RedisCluster.scan)
+ClusterPipeline.sdiff = block_pipeline_command(RedisCluster.sdiff)
+ClusterPipeline.sdiffstore = block_pipeline_command(RedisCluster.sdiffstore)
+ClusterPipeline.sinter = block_pipeline_command(RedisCluster.sinter)
+ClusterPipeline.sinterstore = block_pipeline_command(RedisCluster.sinterstore)
+ClusterPipeline.smove = block_pipeline_command(RedisCluster.smove)
+ClusterPipeline.sort = block_pipeline_command(RedisCluster.sort)
+ClusterPipeline.sunion = block_pipeline_command(RedisCluster.sunion)
+ClusterPipeline.sunionstore = block_pipeline_command(RedisCluster.sunionstore)
+ClusterPipeline.readwrite = block_pipeline_command(RedisCluster.readwrite)
+ClusterPipeline.readonly = block_pipeline_command(RedisCluster.readonly)
+
+
+class PipelineCommand:
+ """
+ """
+
+ def __init__(self, args, options=None, position=None):
+ self.args = args
+ if options is None:
+ options = {}
+ self.options = options
+ self.position = position
+ self.result = None
+ self.node = None
+ self.asking = False
+
+
+class NodeCommands:
+ """
+ """
+
+ def __init__(self, parse_response, connection_pool, connection):
+ """
+ """
+ self.parse_response = parse_response
+ self.connection_pool = connection_pool
+ self.connection = connection
+ self.commands = []
+
+ def append(self, c):
+ """
+ """
+ self.commands.append(c)
+
+ def write(self):
+ """
+ Code borrowed from Redis so it can be fixed
+ """
+ connection = self.connection
+ commands = self.commands
+
+ # We are going to clobber the commands with the write, so go ahead
+ # and ensure that nothing is sitting there from a previous run.
+ for c in commands:
+ c.result = None
+
+ # build up all commands into a single request to increase network perf
+ # send all the commands and catch connection and timeout errors.
+ try:
+ connection.send_packed_command(
+ connection.pack_commands([c.args for c in commands]))
+ except (ConnectionError, TimeoutError) as e:
+ for c in commands:
+ c.result = e
+
+ def read(self):
+ """
+ """
+ connection = self.connection
+ for c in self.commands:
+
+ # if there is a result on this command,
+ # it means we ran into an exception
+ # like a connection error. Trying to parse
+ # a response on a connection that
+ # is no longer open will result in a
+ # connection error raised by redis-py.
+ # but redis-py doesn't check in parse_response
+ # that the sock object is
+ # still set and if you try to
+ # read from a closed connection, it will
+ # result in an AttributeError because
+ # it will do a readline() call on None.
+ # This can have all kinds of nasty side-effects.
+ # Treating this case as a connection error
+ # is fine because it will dump
+ # the connection object back into the
+ # pool and on the next write, it will
+ # explicitly open the connection and all will be well.
+ if c.result is None:
+ try:
+ c.result = self.parse_response(
+ connection, c.args[0], **c.options)
+ except (ConnectionError, TimeoutError) as e:
+ for c in self.commands:
+ c.result = e
+ return
+ except RedisError:
+ c.result = sys.exc_info()[1]
diff --git a/redis/commands/__init__.py b/redis/commands/__init__.py
index f1ddaaa..a4728d0 100644
--- a/redis/commands/__init__.py
+++ b/redis/commands/__init__.py
@@ -1,11 +1,15 @@
+from .cluster import ClusterCommands
from .core import CoreCommands
-from .redismodules import RedisModuleCommands
from .helpers import list_or_args
+from .parser import CommandsParser
+from .redismodules import RedisModuleCommands
from .sentinel import SentinelCommands
__all__ = [
+ 'ClusterCommands',
+ 'CommandsParser',
'CoreCommands',
+ 'list_or_args',
'RedisModuleCommands',
- 'SentinelCommands',
- 'list_or_args'
+ 'SentinelCommands'
]
diff --git a/redis/commands/cluster.py b/redis/commands/cluster.py
new file mode 100644
index 0000000..e6b0a08
--- /dev/null
+++ b/redis/commands/cluster.py
@@ -0,0 +1,922 @@
+from redis.exceptions import (
+ ConnectionError,
+ DataError,
+ RedisError,
+)
+from redis.crc import key_slot
+from .core import DataAccessCommands
+from .helpers import list_or_args
+
+
+class ClusterMultiKeyCommands:
+ """
+ A class containing commands that handle more than one key
+ """
+
+ def _partition_keys_by_slot(self, keys):
+ """
+ Split keys into a dictionary that maps a slot to
+ a list of keys.
+ """
+ slots_to_keys = {}
+ for key in keys:
+ k = self.encoder.encode(key)
+ slot = key_slot(k)
+ slots_to_keys.setdefault(slot, []).append(key)
+
+ return slots_to_keys
+
+ def mget_nonatomic(self, keys, *args):
+ """
+ Splits the keys into different slots and then calls MGET
+ for the keys of every slot. This operation will not be atomic
+ if keys belong to more than one slot.
+
+ Returns a list of values ordered identically to ``keys``
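+
+ A hypothetical sketch (rc is an existing RedisCluster)::
+
+ rc.mset_nonatomic({'foo': '1', 'bar': '2'})
+ rc.mget_nonatomic('foo', 'bar') # ['1', '2']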
+ """
+
+ from redis.client import EMPTY_RESPONSE
+ options = {}
+ if not args:
+ options[EMPTY_RESPONSE] = []
+
+ # Concatenate all keys into a list
+ keys = list_or_args(keys, args)
+ # Split keys into slots
+ slots_to_keys = self._partition_keys_by_slot(keys)
+
+ # Call MGET for every slot and concatenate
+ # the results
+ # We must make sure that the keys are returned in order
+ all_results = {}
+ for slot_keys in slots_to_keys.values():
+ slot_values = self.execute_command(
+ 'MGET', *slot_keys, **options)
+
+ slot_results = dict(zip(slot_keys, slot_values))
+ all_results.update(slot_results)
+
+ # Sort the results
+ vals_in_order = [all_results[key] for key in keys]
+ return vals_in_order
+
+ def mset_nonatomic(self, mapping):
+ """
+ Sets key/values based on a mapping. Mapping is a dictionary of
+ key/value pairs. Both keys and values should be strings or types that
+ can be cast to a string via str().
+
+ Splits the keys into different slots and then calls MSET
+ for the keys of every slot. This operation will not be atomic
+ if keys belong to more than one slot.
+ """
+
+ # Partition the keys by slot
+ slots_to_pairs = {}
+ for pair in mapping.items():
+ # encode the key
+ k = self.encoder.encode(pair[0])
+ slot = key_slot(k)
+ slots_to_pairs.setdefault(slot, []).extend(pair)
+
+ # Call MSET for every slot and concatenate
+ # the results (one result per slot)
+ res = []
+ for pairs in slots_to_pairs.values():
+ res.append(self.execute_command('MSET', *pairs))
+
+ return res
+
+ def _split_command_across_slots(self, command, *keys):
+ """
+ Runs the given command once for the keys
+ of each slot. Returns the sum of the return values.
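+
+ For example, deleting keys that span two slots issues two DEL
+ commands and returns the summed reply.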
+ """
+ # Partition the keys by slot
+ slots_to_keys = self._partition_keys_by_slot(keys)
+
+ # Sum up the reply from each command
+ total = 0
+ for slot_keys in slots_to_keys.values():
+ total += self.execute_command(command, *slot_keys)
+
+ return total
+
+ def exists(self, *keys):
+ """
+ Returns the number of ``keys`` that exist in the
+ whole cluster. The keys are first split up into slots
+ and then an EXISTS command is sent for every slot
+ """
+ return self._split_command_across_slots('EXISTS', *keys)
+
+ def delete(self, *keys):
+ """
+ Deletes the given keys in the cluster.
+ The keys are first split up into slots
+ and then a DEL command is sent for every slot
+
+ Non-existent keys are ignored.
+ Returns the number of keys that were deleted.
+ """
+ return self._split_command_across_slots('DEL', *keys)
+
+ def touch(self, *keys):
+ """
+ Updates the last access time of given keys across the
+ cluster.
+
+ The keys are first split up into slots
+ and then a TOUCH command is sent for every slot
+
+ Non-existent keys are ignored.
+ Returns the number of keys that were touched.
+ """
+ return self._split_command_across_slots('TOUCH', *keys)
+
+ def unlink(self, *keys):
+ """
+ Remove the specified keys in a different thread.
+
+ The keys are first split up into slots
+ and then an UNLINK command is sent for every slot
+
+ Non-existent keys are ignored.
+ Returns the number of keys that were unlinked.
+ """
+ return self._split_command_across_slots('UNLINK', *keys)
+
+
+class ClusterManagementCommands:
+ """
+ Redis Cluster management commands
+
+ Commands with the 'target_nodes' argument can be executed on specified
+ nodes. By default, if target_nodes is not specified, the command will be
+ executed on the default cluster node.
+
+ :param target_nodes: can be one of the following:
+ - nodes flag: 'all', 'primaries', 'replicas', 'random'
+ - 'ClusterNode'
+ - 'list(ClusterNode)'
+ - 'dict(any:ClusterNode)'
+
+ for example:
+ primary = r.get_primaries()[0]
+ r.bgsave(target_nodes=primary)
+ r.bgsave(target_nodes='primaries')
+ """
+ def bgsave(self, schedule=True, target_nodes=None):
+ """
+ Tell the Redis server to save its data to disk. Unlike save(),
+ this method is asynchronous and returns immediately.
+ """
+ pieces = []
+ if schedule:
+ pieces.append("SCHEDULE")
+ return self.execute_command('BGSAVE',
+ *pieces,
+ target_nodes=target_nodes)
+
+ def client_getname(self, target_nodes=None):
+ """
+ Returns the current connection name from all nodes.
+ The result will be a dictionary with the IP and
+ connection name.
+ """
+ return self.execute_command('CLIENT GETNAME',
+ target_nodes=target_nodes)
+
+ def client_getredir(self, target_nodes=None):
+ """Returns the ID (an integer) of the client to whom we are
+ redirecting tracking notifications.
+
+ see: https://redis.io/commands/client-getredir
+ """
+ return self.execute_command('CLIENT GETREDIR',
+ target_nodes=target_nodes)
+
+ def client_id(self, target_nodes=None):
+ """Returns the current connection id"""
+ return self.execute_command('CLIENT ID',
+ target_nodes=target_nodes)
+
+ def client_info(self, target_nodes=None):
+ """
+ Returns information and statistics about the current
+ client connection.
+ """
+ return self.execute_command('CLIENT INFO',
+ target_nodes=target_nodes)
+
+ def client_kill_filter(self, _id=None, _type=None, addr=None,
+ skipme=None, laddr=None, user=None,
+ target_nodes=None):
+ """
+ Disconnects client(s) using a variety of filter options
+ :param id: Kills a client by its unique ID field
+ :param type: Kills a client by type where type is one of 'normal',
+ 'master', 'slave' or 'pubsub'
+ :param addr: Kills a client by its 'address:port'
+ :param skipme: If True, then the client calling the command
+ will not get killed even if it is identified by one of the filter
+ options. If skipme is not provided, the server defaults to skipme=True
+ :param laddr: Kills a client by its 'local (bind) address:port'
+ :param user: Kills a client for a specific user name
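+
+ A hypothetical sketch (the address is a placeholder)::
+
+ rc.client_kill_filter(laddr='127.0.0.1:6379')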
+ """
+ args = []
+ if _type is not None:
+ client_types = ('normal', 'master', 'slave', 'pubsub')
+ if str(_type).lower() not in client_types:
+ raise DataError(f"CLIENT KILL type must be one of {client_types!r}")
+ args.extend((b'TYPE', _type))
+ if skipme is not None:
+ if not isinstance(skipme, bool):
+ raise DataError("CLIENT KILL skipme must be a bool")
+ if skipme:
+ args.extend((b'SKIPME', b'YES'))
+ else:
+ args.extend((b'SKIPME', b'NO'))
+ if _id is not None:
+ args.extend((b'ID', _id))
+ if addr is not None:
+ args.extend((b'ADDR', addr))
+ if laddr is not None:
+ args.extend((b'LADDR', laddr))
+ if user is not None:
+ args.extend((b'USER', user))
+ if not args:
+ raise DataError("CLIENT KILL <filter> <value> ... ... <filter> "
+ "<value> must specify at least one filter")
+ return self.execute_command('CLIENT KILL', *args,
+ target_nodes=target_nodes)
+
+ def client_kill(self, address, target_nodes=None):
+ "Disconnects the client at ``address`` (ip:port)"
+ return self.execute_command('CLIENT KILL', address,
+ target_nodes=target_nodes)
+
+ def client_list(self, _type=None, target_nodes=None):
+ """
+ Returns a list of currently connected clients across the entire
+ cluster. If a client type is specified, only clients of that type
+ are returned.
+ :param _type: optional. one of the client types (normal, master,
+ replica, pubsub)
+ """
+ if _type is not None:
+ client_types = ('normal', 'master', 'replica', 'pubsub')
+ if str(_type).lower() not in client_types:
+ raise DataError(f"CLIENT LIST _type must be one of {client_types!r}")
+ return self.execute_command('CLIENT LIST',
+ b'TYPE',
+ _type,
+ target_nodes=target_nodes)
+ return self.execute_command('CLIENT LIST',
+ target_nodes=target_nodes)
+
+ def client_pause(self, timeout, target_nodes=None):
+ """
+ Suspend all the Redis clients for the specified amount of time
+ :param timeout: milliseconds to pause clients
+ """
+ if not isinstance(timeout, int):
+ raise DataError("CLIENT PAUSE timeout must be an integer")
+ return self.execute_command('CLIENT PAUSE', str(timeout),
+ target_nodes=target_nodes)
+
+ def client_reply(self, reply, target_nodes=None):
+ """Enable and disable redis server replies.
+        ``reply`` Must be ON, OFF or SKIP,
+        ON - The default mode in which the server replies to commands
+ OFF - Disable server responses to commands
+ SKIP - Skip the response of the immediately following command.
+
+ Note: When setting OFF or SKIP replies, you will need a client object
+ with a timeout specified in seconds, and will need to catch the
+ TimeoutError.
+ The test_client_reply unit test illustrates this, and
+ conftest.py has a client with a timeout.
+ See https://redis.io/commands/client-reply
+ """
+ replies = ['ON', 'OFF', 'SKIP']
+ if reply not in replies:
+ raise DataError(f'CLIENT REPLY must be one of {replies!r}')
+ return self.execute_command("CLIENT REPLY", reply,
+ target_nodes=target_nodes)
+
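+    # A hedged sketch of the OFF caveat above: assuming ``redis`` is
+    # imported and ``r`` was created with a socket timeout (e.g.
+    # ``socket_timeout=1``), the next command gets no reply, so the
+    # resulting TimeoutError must be caught by the caller:
+    #
+    #     r.client_reply('OFF')
+    #     try:
+    #         r.set('foo', 'bar')
+    #     except redis.exceptions.TimeoutError:
+    #         pass  # expected: replies are disabled
+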
+ def client_setname(self, name, target_nodes=None):
+ "Sets the current connection name"
+ return self.execute_command('CLIENT SETNAME', name,
+ target_nodes=target_nodes)
+
+ def client_trackinginfo(self, target_nodes=None):
+ """
+ Returns the information about the current client connection's
+ use of the server assisted client side cache.
+ See https://redis.io/commands/client-trackinginfo
+ """
+ return self.execute_command('CLIENT TRACKINGINFO',
+ target_nodes=target_nodes)
+
+ def client_unblock(self, client_id, error=False, target_nodes=None):
+ """
+ Unblocks a connection by its client id.
+ If ``error`` is True, unblocks the client with a special error message.
+ If ``error`` is False (default), the client is unblocked using the
+ regular timeout mechanism.
+ """
+ args = ['CLIENT UNBLOCK', int(client_id)]
+ if error:
+ args.append(b'ERROR')
+ return self.execute_command(*args, target_nodes=target_nodes)
+
+ def client_unpause(self, target_nodes=None):
+ """
+ Unpause all redis clients
+ """
+ return self.execute_command('CLIENT UNPAUSE',
+ target_nodes=target_nodes)
+
+ def command(self, target_nodes=None):
+ """
+ Returns dict reply of details about all Redis commands.
+ """
+ return self.execute_command('COMMAND', target_nodes=target_nodes)
+
+ def command_count(self, target_nodes=None):
+ """
+        Returns an integer reply of the number of commands in this Redis server.
+ """
+ return self.execute_command('COMMAND COUNT', target_nodes=target_nodes)
+
+ def config_get(self, pattern="*", target_nodes=None):
+ """
+ Return a dictionary of configuration based on the ``pattern``
+ """
+ return self.execute_command('CONFIG GET',
+ pattern,
+ target_nodes=target_nodes)
+
+ def config_resetstat(self, target_nodes=None):
+ """Reset runtime statistics"""
+ return self.execute_command('CONFIG RESETSTAT',
+ target_nodes=target_nodes)
+
+ def config_rewrite(self, target_nodes=None):
+ """
+ Rewrite config file with the minimal change to reflect running config.
+ """
+ return self.execute_command('CONFIG REWRITE',
+ target_nodes=target_nodes)
+
+ def config_set(self, name, value, target_nodes=None):
+ "Set config item ``name`` with ``value``"
+ return self.execute_command('CONFIG SET',
+ name,
+ value,
+ target_nodes=target_nodes)
+
+ def dbsize(self, target_nodes=None):
+ """
+ Sums the number of keys in the target nodes' DB.
+
+ :target_nodes: 'ClusterNode' or 'list(ClusterNodes)'
+ The node/s to execute the command on
+ """
+ return self.execute_command('DBSIZE',
+ target_nodes=target_nodes)
+
+ def debug_object(self, key):
+ raise NotImplementedError(
+ "DEBUG OBJECT is intentionally not implemented in the client."
+ )
+
+ def debug_segfault(self):
+ raise NotImplementedError(
+ "DEBUG SEGFAULT is intentionally not implemented in the client."
+ )
+
+ def echo(self, value, target_nodes):
+ """Echo the string back from the server"""
+ return self.execute_command('ECHO', value,
+ target_nodes=target_nodes)
+
+ def flushall(self, asynchronous=False, target_nodes=None):
+ """
+ Delete all keys in the database.
+ In cluster mode this method is the same as flushdb
+
+ ``asynchronous`` indicates whether the operation is
+ executed asynchronously by the server.
+ """
+ args = []
+ if asynchronous:
+ args.append(b'ASYNC')
+ return self.execute_command('FLUSHALL',
+ *args,
+ target_nodes=target_nodes)
+
+ def flushdb(self, asynchronous=False, target_nodes=None):
+ """
+ Delete all keys in the database.
+
+ ``asynchronous`` indicates whether the operation is
+ executed asynchronously by the server.
+ """
+ args = []
+ if asynchronous:
+ args.append(b'ASYNC')
+ return self.execute_command('FLUSHDB',
+ *args,
+ target_nodes=target_nodes)
+
+ def info(self, section=None, target_nodes=None):
+ """
+ Returns a dictionary containing information about the Redis server
+
+ The ``section`` option can be used to select a specific section
+ of information
+
+        The section option is not supported by older versions of Redis Server,
+        and will generate a ResponseError
+ """
+ if section is None:
+ return self.execute_command('INFO',
+ target_nodes=target_nodes)
+ else:
+ return self.execute_command('INFO',
+ section,
+ target_nodes=target_nodes)
+
+ def keys(self, pattern='*', target_nodes=None):
+ "Returns a list of keys matching ``pattern``"
+ return self.execute_command('KEYS', pattern, target_nodes=target_nodes)
+
+ def lastsave(self, target_nodes=None):
+ """
+ Return a Python datetime object representing the last time the
+ Redis database was saved to disk
+ """
+ return self.execute_command('LASTSAVE',
+ target_nodes=target_nodes)
+
+ def memory_doctor(self):
+ raise NotImplementedError(
+ "MEMORY DOCTOR is intentionally not implemented in the client."
+ )
+
+ def memory_help(self):
+ raise NotImplementedError(
+ "MEMORY HELP is intentionally not implemented in the client."
+ )
+
+ def memory_malloc_stats(self, target_nodes=None):
+ """Return an internal statistics report from the memory allocator."""
+ return self.execute_command('MEMORY MALLOC-STATS',
+ target_nodes=target_nodes)
+
+ def memory_purge(self, target_nodes=None):
+ """Attempts to purge dirty pages for reclamation by allocator"""
+ return self.execute_command('MEMORY PURGE',
+ target_nodes=target_nodes)
+
+ def memory_stats(self, target_nodes=None):
+ """Return a dictionary of memory stats"""
+ return self.execute_command('MEMORY STATS',
+ target_nodes=target_nodes)
+
+ def memory_usage(self, key, samples=None):
+ """
+ Return the total memory usage for key, its value and associated
+ administrative overheads.
+
+ For nested data structures, ``samples`` is the number of elements to
+ sample. If left unspecified, the server's default is 5. Use 0 to sample
+ all elements.
+ """
+ args = []
+ if isinstance(samples, int):
+ args.extend([b'SAMPLES', samples])
+ return self.execute_command('MEMORY USAGE', key, *args)
+
+ def object(self, infotype, key):
+ """Return the encoding, idletime, or refcount about the key"""
+ return self.execute_command('OBJECT', infotype, key, infotype=infotype)
+
+ def ping(self, target_nodes=None):
+ """
+ Ping the cluster's servers.
+        If no target nodes are specified, the ping is sent to all nodes and
+        True is returned if it was successful across all of them.
+ """
+ return self.execute_command('PING',
+ target_nodes=target_nodes)
+
+ def randomkey(self, target_nodes=None):
+ """
+        Returns the name of a random key
+ """
+ return self.execute_command('RANDOMKEY', target_nodes=target_nodes)
+
+ def save(self, target_nodes=None):
+ """
+ Tell the Redis server to save its data to disk,
+ blocking until the save is complete
+ """
+ return self.execute_command('SAVE', target_nodes=target_nodes)
+
+ def scan(self, cursor=0, match=None, count=None, _type=None,
+ target_nodes=None):
+ """
+ Incrementally return lists of key names. Also return a cursor
+ indicating the scan position.
+
+ ``match`` allows for filtering the keys by pattern
+
+ ``count`` provides a hint to Redis about the number of keys to
+ return per batch.
+
+ ``_type`` filters the returned values by a particular Redis type.
+ Stock Redis instances allow for the following types:
+ HASH, LIST, SET, STREAM, STRING, ZSET
+ Additionally, Redis modules can expose other types as well.
+ """
+ pieces = [cursor]
+ if match is not None:
+ pieces.extend([b'MATCH', match])
+ if count is not None:
+ pieces.extend([b'COUNT', count])
+ if _type is not None:
+ pieces.extend([b'TYPE', _type])
+ return self.execute_command('SCAN', *pieces, target_nodes=target_nodes)
+
+ def scan_iter(self, match=None, count=None, _type=None, target_nodes=None):
+ """
+ Make an iterator using the SCAN command so that the client doesn't
+ need to remember the cursor position.
+
+ ``match`` allows for filtering the keys by pattern
+
+ ``count`` provides a hint to Redis about the number of keys to
+ return per batch.
+
+ ``_type`` filters the returned values by a particular Redis type.
+ Stock Redis instances allow for the following types:
+ HASH, LIST, SET, STREAM, STRING, ZSET
+ Additionally, Redis modules can expose other types as well.
+ """
+ cursor = '0'
+ while cursor != 0:
+ cursor, data = self.scan(cursor=cursor, match=match,
+ count=count, _type=_type,
+ target_nodes=target_nodes)
+ yield from data
+
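+    # Illustrative use (editor's sketch): scan_iter hides the cursor
+    # bookkeeping done above, so matching keys can be consumed lazily;
+    # ``r`` is an assumed RedisCluster instance:
+    #
+    #     user_keys = [k for k in r.scan_iter(match='user:*', _type='STRING')]
+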
+ def shutdown(self, save=False, nosave=False, target_nodes=None):
+ """Shutdown the Redis server. If Redis has persistence configured,
+ data will be flushed before shutdown. If the "save" option is set,
+ a data flush will be attempted even if there is no persistence
+ configured. If the "nosave" option is set, no data flush will be
+ attempted. The "save" and "nosave" options cannot both be set.
+ """
+ if save and nosave:
+ raise DataError('SHUTDOWN save and nosave cannot both be set')
+ args = ['SHUTDOWN']
+ if save:
+ args.append('SAVE')
+ if nosave:
+ args.append('NOSAVE')
+ try:
+ self.execute_command(*args, target_nodes=target_nodes)
+ except ConnectionError:
+ # a ConnectionError here is expected
+ return
+ raise RedisError("SHUTDOWN seems to have failed.")
+
+ def slowlog_get(self, num=None, target_nodes=None):
+ """
+ Get the entries from the slowlog. If ``num`` is specified, get the
+ most recent ``num`` items.
+ """
+ args = ['SLOWLOG GET']
+ if num is not None:
+ args.append(num)
+
+ return self.execute_command(*args,
+ target_nodes=target_nodes)
+
+ def slowlog_len(self, target_nodes=None):
+ "Get the number of items in the slowlog"
+ return self.execute_command('SLOWLOG LEN',
+ target_nodes=target_nodes)
+
+ def slowlog_reset(self, target_nodes=None):
+ "Remove all items in the slowlog"
+ return self.execute_command('SLOWLOG RESET',
+ target_nodes=target_nodes)
+
+ def stralgo(self, algo, value1, value2, specific_argument='strings',
+ len=False, idx=False, minmatchlen=None, withmatchlen=False,
+ target_nodes=None):
+ """
+ Implements complex algorithms that operate on strings.
+ Right now the only algorithm implemented is the LCS algorithm
+        (longest common substring). However, new algorithms could be
+ implemented in the future.
+
+ ``algo`` Right now must be LCS
+ ``value1`` and ``value2`` Can be two strings or two keys
+        ``specific_argument`` Specifies whether the arguments to the algorithm
+        are keys or strings. strings is the default.
+        ``len`` Returns just the len of the match.
+        ``idx`` Returns the match positions in each string.
+        ``minmatchlen`` Restrict the list of matches to the ones of a given
+        minimal length. Can be provided only when ``idx`` is set to True.
+        ``withmatchlen`` Returns the matches with the len of the match.
+        Can be provided only when ``idx`` is set to True.
+ """
+ # check validity
+ supported_algo = ['LCS']
+ if algo not in supported_algo:
+ supported_algos_str = ', '.join(supported_algo)
+ raise DataError(f"The supported algorithms are: {supported_algos_str}")
+ if specific_argument not in ['keys', 'strings']:
+ raise DataError("specific_argument can be only keys or strings")
+ if len and idx:
+ raise DataError("len and idx cannot be provided together.")
+
+ pieces = [algo, specific_argument.upper(), value1, value2]
+ if len:
+ pieces.append(b'LEN')
+ if idx:
+ pieces.append(b'IDX')
+ try:
+ int(minmatchlen)
+ pieces.extend([b'MINMATCHLEN', minmatchlen])
+ except TypeError:
+ pass
+ if withmatchlen:
+ pieces.append(b'WITHMATCHLEN')
+ if specific_argument == 'strings' and target_nodes is None:
+ target_nodes = 'default-node'
+ return self.execute_command('STRALGO', *pieces, len=len, idx=idx,
+ minmatchlen=minmatchlen,
+ withmatchlen=withmatchlen,
+ target_nodes=target_nodes)
+
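+    # Example call (editor's sketch; values taken from the Redis docs): in
+    # 'strings' mode no keys are involved, so the command is routed to the
+    # default node as handled above:
+    #
+    #     r.stralgo('LCS', 'ohmytext', 'mynewtext')  # -> 'mytext'
+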
+ def time(self, target_nodes=None):
+ """
+ Returns the server time as a 2-item tuple of ints:
+ (seconds since epoch, microseconds into this second).
+ """
+ return self.execute_command('TIME', target_nodes=target_nodes)
+
+ def wait(self, num_replicas, timeout, target_nodes=None):
+ """
+        Redis synchronous replication.
+        Returns the number of replicas that processed the query when
+        we finally have at least ``num_replicas``, or when the ``timeout`` was
+        reached.
+
+        If more than one target node is passed, the results will be summed up.
+ """
+ return self.execute_command('WAIT', num_replicas,
+ timeout,
+ target_nodes=target_nodes)
+
+
+class ClusterPubSubCommands:
+ """
+ Redis PubSub commands for RedisCluster use.
+ see https://redis.io/topics/pubsub
+ """
+ def publish(self, channel, message, target_nodes=None):
+ """
+ Publish ``message`` on ``channel``.
+ Returns the number of subscribers the message was delivered to.
+ """
+ return self.execute_command('PUBLISH', channel, message,
+ target_nodes=target_nodes)
+
+ def pubsub_channels(self, pattern='*', target_nodes=None):
+ """
+ Return a list of channels that have at least one subscriber
+ """
+ return self.execute_command('PUBSUB CHANNELS', pattern,
+ target_nodes=target_nodes)
+
+ def pubsub_numpat(self, target_nodes=None):
+ """
+ Returns the number of subscriptions to patterns
+ """
+ return self.execute_command('PUBSUB NUMPAT', target_nodes=target_nodes)
+
+ def pubsub_numsub(self, *args, target_nodes=None):
+ """
+ Return a list of (channel, number of subscribers) tuples
+ for each channel given in ``*args``
+ """
+ return self.execute_command('PUBSUB NUMSUB', *args,
+ target_nodes=target_nodes)
+
+
+class ClusterCommands(ClusterManagementCommands, ClusterMultiKeyCommands,
+ ClusterPubSubCommands, DataAccessCommands):
+ """
+ Redis Cluster commands
+
+ Commands with the 'target_nodes' argument can be executed on specified
+ nodes. By default, if target_nodes is not specified, the command will be
+ executed on the default cluster node.
+
+    :param target_nodes: type can be one of the following:
+ - nodes flag: 'all', 'primaries', 'replicas', 'random'
+ - 'ClusterNode'
+ - 'list(ClusterNodes)'
+ - 'dict(any:clusterNodes)'
+
+ for example:
+ r.cluster_info(target_nodes='all')
+ """
+ def cluster_addslots(self, target_node, *slots):
+ """
+ Assign new hash slots to receiving node. Sends to specified node.
+
+ :target_node: 'ClusterNode'
+ The node to execute the command on
+ """
+ return self.execute_command('CLUSTER ADDSLOTS', *slots,
+ target_nodes=target_node)
+
+ def cluster_countkeysinslot(self, slot_id):
+ """
+ Return the number of local keys in the specified hash slot
+        Sends to the node based on the specified slot_id
+ """
+ return self.execute_command('CLUSTER COUNTKEYSINSLOT', slot_id)
+
+ def cluster_count_failure_report(self, node_id):
+ """
+ Return the number of failure reports active for a given node
+ Sends to a random node
+ """
+ return self.execute_command('CLUSTER COUNT-FAILURE-REPORTS', node_id)
+
+ def cluster_delslots(self, *slots):
+ """
+ Set hash slots as unbound in the cluster.
+        It determines by itself to which node each slot belongs and sends
+        the command there
+
+ Returns a list of the results for each processed slot.
+ """
+ return [
+ self.execute_command('CLUSTER DELSLOTS', slot)
+ for slot in slots
+ ]
+
+ def cluster_failover(self, target_node, option=None):
+ """
+ Forces a slave to perform a manual failover of its master
+ Sends to specified node
+
+ :target_node: 'ClusterNode'
+ The node to execute the command on
+ """
+ if option:
+ if option.upper() not in ['FORCE', 'TAKEOVER']:
+ raise RedisError(
+ f'Invalid option for CLUSTER FAILOVER command: {option}')
+ else:
+ return self.execute_command('CLUSTER FAILOVER', option,
+ target_nodes=target_node)
+ else:
+ return self.execute_command('CLUSTER FAILOVER',
+ target_nodes=target_node)
+
+ def cluster_info(self, target_nodes=None):
+ """
+ Provides info about Redis Cluster node state.
+ The command will be sent to a random node in the cluster if no target
+ node is specified.
+ """
+ return self.execute_command('CLUSTER INFO', target_nodes=target_nodes)
+
+ def cluster_keyslot(self, key):
+ """
+ Returns the hash slot of the specified key
+ Sends to random node in the cluster
+ """
+ return self.execute_command('CLUSTER KEYSLOT', key)
+
+ def cluster_meet(self, host, port, target_nodes=None):
+ """
+ Force a node cluster to handshake with another node.
+ Sends to specified node.
+ """
+ return self.execute_command('CLUSTER MEET', host, port,
+ target_nodes=target_nodes)
+
+ def cluster_nodes(self):
+ """
+        Get Cluster config for the node.
+
+        Sends to a random node in the cluster
+ """
+ return self.execute_command('CLUSTER NODES')
+
+ def cluster_replicate(self, target_nodes, node_id):
+ """
+ Reconfigure a node as a slave of the specified master node
+ """
+ return self.execute_command('CLUSTER REPLICATE', node_id,
+ target_nodes=target_nodes)
+
+ def cluster_reset(self, soft=True, target_nodes=None):
+ """
+ Reset a Redis Cluster node
+
+ If 'soft' is True then it will send 'SOFT' argument
+ If 'soft' is False then it will send 'HARD' argument
+ """
+ return self.execute_command('CLUSTER RESET',
+ b'SOFT' if soft else b'HARD',
+ target_nodes=target_nodes)
+
+ def cluster_save_config(self, target_nodes=None):
+ """
+ Forces the node to save cluster state on disk
+ """
+ return self.execute_command('CLUSTER SAVECONFIG',
+ target_nodes=target_nodes)
+
+ def cluster_get_keys_in_slot(self, slot, num_keys):
+ """
+        Returns up to ``num_keys`` key names in the specified cluster slot
+ """
+ return self.execute_command('CLUSTER GETKEYSINSLOT', slot, num_keys)
+
+ def cluster_set_config_epoch(self, epoch, target_nodes=None):
+ """
+ Set the configuration epoch in a new node
+ """
+ return self.execute_command('CLUSTER SET-CONFIG-EPOCH', epoch,
+ target_nodes=target_nodes)
+
+ def cluster_setslot(self, target_node, node_id, slot_id, state):
+ """
+        Bind a hash slot to a specific node
+
+ :target_node: 'ClusterNode'
+ The node to execute the command on
+ """
+ if state.upper() in ('IMPORTING', 'NODE', 'MIGRATING'):
+ return self.execute_command('CLUSTER SETSLOT', slot_id, state,
+ node_id, target_nodes=target_node)
+ elif state.upper() == 'STABLE':
+ raise RedisError('For "stable" state please use '
+ 'cluster_setslot_stable')
+ else:
+ raise RedisError(f'Invalid slot state: {state}')
+
+ def cluster_setslot_stable(self, slot_id):
+ """
+ Clears migrating / importing state from the slot.
+        It determines by itself which node the slot is in and sends the
+        command there.
+ """
+ return self.execute_command('CLUSTER SETSLOT', slot_id, 'STABLE')
+
+ def cluster_replicas(self, node_id, target_nodes=None):
+ """
+ Provides a list of replica nodes replicating from the specified primary
+ target node.
+ """
+ return self.execute_command('CLUSTER REPLICAS', node_id,
+ target_nodes=target_nodes)
+
+ def cluster_slots(self, target_nodes=None):
+ """
+ Get array of Cluster slot to node mappings
+ """
+ return self.execute_command('CLUSTER SLOTS', target_nodes=target_nodes)
+
+ def readonly(self, target_nodes=None):
+ """
+ Enables read queries.
+ The command will be sent to the default cluster node if target_nodes is
+ not specified.
+ """
+ if target_nodes == 'replicas' or target_nodes == 'all':
+ # read_from_replicas will only be enabled if the READONLY command
+ # is sent to all replicas
+ self.read_from_replicas = True
+ return self.execute_command('READONLY', target_nodes=target_nodes)
+
+ def readwrite(self, target_nodes=None):
+ """
+ Disables read queries.
+ The command will be sent to the default cluster node if target_nodes is
+ not specified.
+ """
+ # Reset read from replicas flag
+ self.read_from_replicas = False
+ return self.execute_command('READWRITE', target_nodes=target_nodes)
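+
+    # Hedged sketch of the replica-reading flow implemented above:
+    #
+    #     r.readonly(target_nodes='replicas')  # sets read_from_replicas
+    #     r.get('foo')                         # may be served by a replica
+    #     r.readwrite()                        # back to primary-only reads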
diff --git a/redis/commands/core.py b/redis/commands/core.py
index 865f3f7..b769847 100644
--- a/redis/commands/core.py
+++ b/redis/commands/core.py
@@ -12,15 +12,11 @@ from redis.exceptions import (
)
-class CoreCommands:
+class ACLCommands:
"""
- A class containing all of the implemented redis commands. This class is
- to be used as a mixin.
+ Redis Access Control List (ACL) commands.
+ see: https://redis.io/topics/acl
"""
-
- # SERVER INFORMATION
-
- # ACL methods
def acl_cat(self, category=None):
"""
Returns a list of categories or commands within a category.
@@ -231,8 +227,8 @@ class CoreCommands:
elif password.startswith(b'-'):
pieces.append(b'<%s' % password[1:])
else:
- raise DataError('Password %d must be prefixeed with a '
- '"+" to add or a "-" to remove' % i)
+ raise DataError(f'Password {i} must be prefixed with a '
+ f'"+" to add or a "-" to remove')
if hashed_passwords:
# as most users will have only one password, allow remove_passwords
@@ -245,8 +241,8 @@ class CoreCommands:
elif hashed_password.startswith(b'-'):
pieces.append(b'!%s' % hashed_password[1:])
else:
- raise DataError('Hashed %d password must be prefixeed '
- 'with a "+" to add or a "-" to remove' % i)
+ raise DataError(f'Hashed password {i} must be prefixed with a '
+ f'"+" to add or a "-" to remove')
if nopass:
pieces.append(b'nopass')
@@ -264,16 +260,18 @@ class CoreCommands:
elif category.startswith(b'-'):
pieces.append(b'-@%s' % category[1:])
else:
- raise DataError('Category "%s" must be prefixed with '
- '"+" or "-"'
- % encoder.decode(category, force=True))
+ raise DataError(
+ f'Category "{encoder.decode(category, force=True)}" '
+ 'must be prefixed with "+" or "-"'
+ )
if commands:
for cmd in commands:
cmd = encoder.encode(cmd)
if not cmd.startswith(b'+') and not cmd.startswith(b'-'):
- raise DataError('Command "%s" must be prefixed with '
- '"+" or "-"'
- % encoder.decode(cmd, force=True))
+ raise DataError(
+ f'Command "{encoder.decode(cmd, force=True)}" '
+ 'must be prefixed with "+" or "-"'
+ )
pieces.append(cmd)
if keys:
@@ -297,6 +295,11 @@ class CoreCommands:
"""
return self.execute_command('ACL WHOAMI')
+
+class ManagementCommands:
+ """
+ Redis management commands
+ """
def bgrewriteaof(self):
"""Tell the Redis server to rewrite the AOF file from data in memory.
@@ -349,8 +352,7 @@ class CoreCommands:
if _type is not None:
client_types = ('normal', 'master', 'slave', 'pubsub')
if str(_type).lower() not in client_types:
- raise DataError("CLIENT KILL type must be one of %r" % (
- client_types,))
+ raise DataError(f"CLIENT KILL type must be one of {client_types!r}")
args.extend((b'TYPE', _type))
if skipme is not None:
if not isinstance(skipme, bool):
@@ -395,8 +397,7 @@ class CoreCommands:
if _type is not None:
client_types = ('normal', 'master', 'replica', 'pubsub')
if str(_type).lower() not in client_types:
- raise DataError("CLIENT LIST _type must be one of %r" % (
- client_types,))
+ raise DataError(f"CLIENT LIST _type must be one of {client_types!r}")
args.append(b'TYPE')
args.append(_type)
if not isinstance(client_id, list):
@@ -441,7 +442,7 @@ class CoreCommands:
"""
replies = ['ON', 'OFF', 'SKIP']
if reply not in replies:
- raise DataError('CLIENT REPLY must be one of %r' % replies)
+ raise DataError(f'CLIENT REPLY must be one of {replies!r}')
return self.execute_command("CLIENT REPLY", reply)
def client_id(self):
@@ -502,6 +503,14 @@ class CoreCommands:
"""
return self.execute_command('CLIENT UNPAUSE')
+ def command_info(self):
+ raise NotImplementedError(
+ "COMMAND INFO is intentionally not implemented in the client."
+ )
+
+ def command_count(self):
+ return self.execute_command('COMMAND COUNT')
+
def readwrite(self):
"""
Disables read queries for a connection to a Redis Cluster slave node.
@@ -549,6 +558,9 @@ class CoreCommands:
"""
return self.execute_command('CONFIG REWRITE')
+ def cluster(self, cluster_arg, *args):
+ return self.execute_command(f'CLUSTER {cluster_arg.upper()}', *args)
+
def dbsize(self):
"""
Returns the number of keys in the current database
@@ -772,6 +784,17 @@ class CoreCommands:
"""
return self.execute_command('QUIT')
+ def replicaof(self, *args):
+ """
+ Update the replication settings of a redis replica, on the fly.
+ Examples of valid arguments include:
+ NO ONE (set no replication)
+ host port (set to the host and port of a redis server)
+
+ For more information check https://redis.io/commands/replicaof
+ """
+ return self.execute_command('REPLICAOF', *args)
+
def save(self):
"""
Tell the Redis server to save its data to disk,
@@ -866,7 +889,11 @@ class CoreCommands:
"""
return self.execute_command('WAIT', num_replicas, timeout)
- # BASIC KEY COMMANDS
+
+class BasicKeyCommands:
+ """
+ Redis basic key-based commands
+ """
def append(self, key, value):
"""
Appends the string ``value`` to the value at ``key``. If ``key``
@@ -989,7 +1016,10 @@ class CoreCommands:
For more information check https://redis.io/commands/dump
"""
- return self.execute_command('DUMP', name)
+ from redis.client import NEVER_DECODE
+ options = {}
+ options[NEVER_DECODE] = []
+ return self.execute_command('DUMP', name, **options)
def exists(self, *names):
"""
@@ -1064,7 +1094,7 @@ class CoreCommands:
For more information check https://redis.io/commands/getex
"""
- opset = set([ex, px, exat, pxat])
+ opset = {ex, px, exat, pxat}
if len(opset) > 2 or len(opset) > 1 and persist:
raise DataError("``ex``, ``px``, ``exat``, ``pxat``, "
"and ``persist`` are mutually exclusive.")
@@ -1532,11 +1562,10 @@ class CoreCommands:
# check validity
supported_algo = ['LCS']
if algo not in supported_algo:
- raise DataError("The supported algorithms are: %s"
- % (', '.join(supported_algo)))
+ supported_algos_str = ', '.join(supported_algo)
+ raise DataError(f"The supported algorithms are: {supported_algos_str}")
if specific_argument not in ['keys', 'strings']:
- raise DataError("specific_argument can be only"
- " keys or strings")
+ raise DataError("specific_argument can be only keys or strings")
if len and idx:
raise DataError("len and idx cannot be provided together.")
@@ -1622,7 +1651,12 @@ class CoreCommands:
"""
return self.execute_command('UNLINK', *names)
- # LIST COMMANDS
+
+class ListCommands:
+ """
+ Redis commands for List data type.
+ see: https://redis.io/topics/data-types#lists
+ """
def blpop(self, keys, timeout=0):
"""
LPOP a value off of the first non-empty list
@@ -1923,7 +1957,12 @@ class CoreCommands:
options = {'groups': len(get) if groups else None}
return self.execute_command('SORT', *pieces, **options)
- # SCAN COMMANDS
+
+class ScanCommands:
+ """
+ Redis SCAN commands.
+ see: https://redis.io/commands/scan
+ """
def scan(self, cursor=0, match=None, count=None, _type=None):
"""
Incrementally return lists of key names. Also return a cursor
@@ -2078,7 +2117,12 @@ class CoreCommands:
score_cast_func=score_cast_func)
yield from data
- # SET COMMANDS
+
+class SetCommands:
+ """
+ Redis commands for Set data type.
+ see: https://redis.io/topics/data-types#sets
+ """
def sadd(self, name, *values):
"""
Add ``value(s)`` to set ``name``
@@ -2216,7 +2260,12 @@ class CoreCommands:
args = list_or_args(keys, args)
return self.execute_command('SUNIONSTORE', dest, *args)
- # STREAMS COMMANDS
+
+class StreamCommands:
+ """
+ Redis commands for Stream data type.
+ see: https://redis.io/topics/streams-intro
+ """
def xack(self, name, groupname, *ids):
"""
Acknowledges the successful processing of one or more messages.
@@ -2693,7 +2742,12 @@ class CoreCommands:
return self.execute_command('XTRIM', name, *pieces)
- # SORTED SET COMMANDS
+
+class SortedSetCommands:
+ """
+ Redis commands for Sorted Sets data type.
+ see: https://redis.io/topics/data-types-intro#redis-sorted-sets
+ """
def zadd(self, name, mapping, nx=False, xx=False, ch=False, incr=False,
gt=None, lt=None):
"""
@@ -3281,7 +3335,12 @@ class CoreCommands:
pieces.append(b'WITHSCORES')
return self.execute_command(*pieces, **options)
- # HYPERLOGLOG COMMANDS
+
+class HyperlogCommands:
+ """
+ Redis commands of HyperLogLogs data type.
+ see: https://redis.io/topics/data-types-intro#hyperloglogs
+ """
def pfadd(self, name, *values):
"""
Adds the specified elements to the specified HyperLogLog.
@@ -3307,7 +3366,12 @@ class CoreCommands:
"""
return self.execute_command('PFMERGE', dest, *sources)
- # HASH COMMANDS
+
+class HashCommands:
+ """
+ Redis commands for Hash data type.
+ see: https://redis.io/topics/data-types-intro#redis-hashes
+ """
def hdel(self, name, *keys):
"""
Delete ``keys`` from hash ``name``
@@ -3409,8 +3473,8 @@ class CoreCommands:
For more information check https://redis.io/commands/hmset
"""
warnings.warn(
- '%s.hmset() is deprecated. Use %s.hset() instead.'
- % (self.__class__.__name__, self.__class__.__name__),
+ f'{self.__class__.__name__}.hmset() is deprecated. '
+ f'Use {self.__class__.__name__}.hset() instead.',
DeprecationWarning,
stacklevel=2,
)
@@ -3447,6 +3511,12 @@ class CoreCommands:
"""
return self.execute_command('HSTRLEN', name, key)
+
+class PubSubCommands:
+ """
+ Redis PubSub commands.
+ see https://redis.io/topics/pubsub
+ """
def publish(self, channel, message):
"""
Publish ``message`` on ``channel``.
@@ -3481,20 +3551,12 @@ class CoreCommands:
"""
return self.execute_command('PUBSUB NUMSUB', *args)
- def cluster(self, cluster_arg, *args):
- return self.execute_command('CLUSTER %s' % cluster_arg.upper(), *args)
-
- def replicaof(self, *args):
- """
- Update the replication settings of a redis replica, on the fly.
- Examples of valid arguments include:
- NO ONE (set no replication)
- host port (set to the host and port of a redis server)
-
- For more information check https://redis.io/commands/replicaof
- """
- return self.execute_command('REPLICAOF', *args)
+class ScriptCommands:
+ """
+ Redis Lua script commands. see:
+ https://redis.com/ebook/part-3-next-steps/chapter-11-scripting-redis-with-lua/
+ """
def eval(self, script, numkeys, *keys_and_args):
"""
Execute the Lua ``script``, specifying the ``numkeys`` the script
@@ -3580,7 +3642,12 @@ class CoreCommands:
"""
return Script(self, script)
- # GEO COMMANDS
+
+class GeoCommands:
+ """
+ Redis Geospatial commands.
+ see: https://redis.com/redis-best-practices/indexing-patterns/geospatial/
+ """
def geoadd(self, name, values, nx=False, xx=False, ch=False):
"""
Add the specified geospatial items to the specified key identified
@@ -3890,7 +3957,12 @@ class CoreCommands:
return self.execute_command(command, *pieces, **kwargs)
- # MODULE COMMANDS
+
+class ModuleCommands:
+ """
+ Redis Module commands.
+ see: https://redis.io/topics/modules-intro
+ """
def module_load(self, path, *args):
"""
Loads the module from ``path``.
@@ -3927,12 +3999,17 @@ class CoreCommands:
def command_count(self):
return self.execute_command('COMMAND COUNT')
+ def command_getkeys(self, *args):
+ return self.execute_command('COMMAND GETKEYS', *args)
+
def command(self):
return self.execute_command('COMMAND')
class Script:
- "An executable Lua script object returned by ``register_script``"
+ """
+ An executable Lua script object returned by ``register_script``
+ """
def __init__(self, registered_client, script):
self.registered_client = registered_client
@@ -4061,3 +4138,22 @@ class BitFieldOperation:
command = self.command
self.reset()
return self.client.execute_command(*command)
+
+
+class DataAccessCommands(BasicKeyCommands, ListCommands,
+ ScanCommands, SetCommands, StreamCommands,
+ SortedSetCommands,
+ HyperlogCommands, HashCommands, GeoCommands,
+ ):
+ """
+ A class containing all of the implemented data access redis commands.
+ This class is to be used as a mixin.
+ """
+
+
+class CoreCommands(ACLCommands, DataAccessCommands, ManagementCommands,
+ ModuleCommands, PubSubCommands, ScriptCommands):
+ """
+ A class containing all of the implemented redis commands. This class is
+ to be used as a mixin.
+ """
diff --git a/redis/commands/helpers.py b/redis/commands/helpers.py
index 46eb83d..dc5705b 100644
--- a/redis/commands/helpers.py
+++ b/redis/commands/helpers.py
@@ -35,9 +35,12 @@ def delist(x):
def parse_to_list(response):
- """Optimistally parse the response to a list.
- """
+ """Optimistically parse the response to a list."""
res = []
+
+ if response is None:
+ return res
+
for item in response:
try:
res.append(int(item))
@@ -51,6 +54,40 @@ def parse_to_list(response):
return res
+def parse_list_to_dict(response):
+ res = {}
+ for i in range(0, len(response), 2):
+ if isinstance(response[i], list):
+            # first nested list may arrive before the key exists
+            res.setdefault('Child iterators', []).append(
+                parse_list_to_dict(response[i]))
+ elif isinstance(response[i+1], list):
+ res['Child iterators'] = [parse_list_to_dict(response[i+1])]
+ else:
+ try:
+ res[response[i]] = float(response[i+1])
+ except (TypeError, ValueError):
+ res[response[i]] = response[i+1]
+ return res
+
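+# Hedged illustration of the flat [name, value, ...] shape this helper
+# expects, where a value may itself be a nested list of the same shape:
+#
+#     parse_list_to_dict(['Counter', '3', 'Type', 'TEXT'])
+#     # -> {'Counter': 3.0, 'Type': 'TEXT'}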
+
+def parse_to_dict(response):
+ if response is None:
+ return {}
+
+ res = {}
+ for det in response:
+ if isinstance(det[1], list):
+ res[det[0]] = parse_list_to_dict(det[1])
+ else:
+ try: # try to set the attribute. may be provided without value
+ try: # try to convert the value to float
+ res[det[0]] = float(det[1])
+ except (TypeError, ValueError):
+ res[det[0]] = det[1]
+ except IndexError:
+ pass
+ return res
+
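+# Similarly hedged: parse_to_dict expects a list of [name, value] pairs,
+# such as the profile details section returned by FT.PROFILE:
+#
+#     parse_to_dict([['Total profile time', '0.5'], ['Parsing time', '0.2']])
+#     # -> {'Total profile time': 0.5, 'Parsing time': 0.2}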
+
def random_string(length=10):
"""
Returns a random N character long string.
@@ -76,4 +113,4 @@ def quote_string(v):
v = v.replace('"', '\\"')
- return '"{}"'.format(v)
+ return f'"{v}"'
diff --git a/redis/commands/json/commands.py b/redis/commands/json/commands.py
index 4436f6a..1affaaf 100644
--- a/redis/commands/json/commands.py
+++ b/redis/commands/json/commands.py
@@ -10,7 +10,9 @@ class JSONCommands:
def arrappend(self, name, path=Path.rootPath(), *args):
"""Append the objects ``args`` to the array under the
``path` in key ``name``.
- """
+
+ For more information: https://oss.redis.com/redisjson/commands/#jsonarrappend
+ """ # noqa
pieces = [name, str(path)]
for o in args:
pieces.append(self._encode(o))
@@ -23,7 +25,9 @@ class JSONCommands:
The search can be limited using the optional inclusive ``start``
and exclusive ``stop`` indices.
- """
+
+ For more information: https://oss.redis.com/redisjson/commands/#jsonarrindex
+ """ # noqa
return self.execute_command(
"JSON.ARRINDEX", name, str(path), self._encode(scalar),
start, stop
@@ -32,7 +36,9 @@ class JSONCommands:
def arrinsert(self, name, path, index, *args):
"""Insert the objects ``args`` to the array at index ``index``
under the ``path` in key ``name``.
- """
+
+ For more information: https://oss.redis.com/redisjson/commands/#jsonarrinsert
+ """ # noqa
pieces = [name, str(path), index]
for o in args:
pieces.append(self._encode(o))
@@ -41,45 +47,64 @@ class JSONCommands:
def arrlen(self, name, path=Path.rootPath()):
"""Return the length of the array JSON value under ``path``
at key``name``.
- """
+
+ For more information: https://oss.redis.com/redisjson/commands/#jsonarrlen
+ """ # noqa
return self.execute_command("JSON.ARRLEN", name, str(path))
def arrpop(self, name, path=Path.rootPath(), index=-1):
"""Pop the element at ``index`` in the array JSON value under
``path`` at key ``name``.
- """
+
+ For more information: https://oss.redis.com/redisjson/commands/#jsonarrpop
+ """ # noqa
return self.execute_command("JSON.ARRPOP", name, str(path), index)
def arrtrim(self, name, path, start, stop):
"""Trim the array JSON value under ``path`` at key ``name`` to the
inclusive range given by ``start`` and ``stop``.
- """
+
+ For more information: https://oss.redis.com/redisjson/commands/#jsonarrtrim
+ """ # noqa
return self.execute_command("JSON.ARRTRIM", name, str(path),
start, stop)
def type(self, name, path=Path.rootPath()):
- """Get the type of the JSON value under ``path`` from key ``name``."""
+ """Get the type of the JSON value under ``path`` from key ``name``.
+
+ For more information: https://oss.redis.com/redisjson/commands/#jsontype
+ """ # noqa
return self.execute_command("JSON.TYPE", name, str(path))
def resp(self, name, path=Path.rootPath()):
- """Return the JSON value under ``path`` at key ``name``."""
+ """Return the JSON value under ``path`` at key ``name``.
+
+ For more information: https://oss.redis.com/redisjson/commands/#jsonresp
+ """ # noqa
return self.execute_command("JSON.RESP", name, str(path))
def objkeys(self, name, path=Path.rootPath()):
"""Return the key names in the dictionary JSON value under ``path`` at
- key ``name``."""
+ key ``name``.
+
+ For more information: https://oss.redis.com/redisjson/commands/#jsonobjkeys
+ """ # noqa
return self.execute_command("JSON.OBJKEYS", name, str(path))
def objlen(self, name, path=Path.rootPath()):
"""Return the length of the dictionary JSON value under ``path`` at key
``name``.
- """
+
+ For more information: https://oss.redis.com/redisjson/commands/#jsonobjlen
+ """ # noqa
return self.execute_command("JSON.OBJLEN", name, str(path))
def numincrby(self, name, path, number):
"""Increment the numeric (integer or floating point) JSON value under
``path`` at key ``name`` by the provided ``number``.
- """
+
+ For more information: https://oss.redis.com/redisjson/commands/#jsonnumincrby
+ """ # noqa
return self.execute_command(
"JSON.NUMINCRBY", name, str(path), self._encode(number)
)
@@ -88,7 +113,9 @@ class JSONCommands:
def nummultby(self, name, path, number):
"""Multiply the numeric (integer or floating point) JSON value under
``path`` at key ``name`` with the provided ``number``.
- """
+
+ For more information: https://oss.redis.com/redisjson/commands/#jsonnummultby
+ """ # noqa
return self.execute_command(
"JSON.NUMMULTBY", name, str(path), self._encode(number)
)
@@ -100,11 +127,16 @@ class JSONCommands:
Return the count of cleared paths (ignoring non-array and non-objects
paths).
- """
+
+ For more information: https://oss.redis.com/redisjson/commands/#jsonclear
+ """ # noqa
return self.execute_command("JSON.CLEAR", name, str(path))
def delete(self, key, path=Path.rootPath()):
- """Delete the JSON value stored at key ``key`` under ``path``."""
+ """Delete the JSON value stored at key ``key`` under ``path``.
+
+ For more information: https://oss.redis.com/redisjson/commands/#jsondel
+ """
return self.execute_command("JSON.DEL", key, str(path))
# forget is an alias for delete
@@ -117,7 +149,9 @@ class JSONCommands:
``args`` is zero or more paths, and defaults to root path
```no_escape`` is a boolean flag to add no_escape option to get
non-ascii characters
- """
+
+ For more information: https://oss.redis.com/redisjson/commands/#jsonget
+ """ # noqa
pieces = [name]
if no_escape:
pieces.append("noescape")
@@ -140,7 +174,9 @@ class JSONCommands:
"""
Get the objects stored as a JSON values under ``path``. ``keys``
is a list of one or more keys.
- """
+
+ For more information: https://oss.redis.com/redisjson/commands/#jsonmget
+ """ # noqa
pieces = []
pieces += keys
pieces.append(str(path))
@@ -157,6 +193,8 @@ class JSONCommands:
For the purpose of using this within a pipeline, this command is also
aliased to jsonset.
+
+ For more information: https://oss.redis.com/redisjson/commands/#jsonset
"""
if decode_keys:
obj = decode_dict_keys(obj)
@@ -178,7 +216,9 @@ class JSONCommands:
def strlen(self, name, path=None):
"""Return the length of the string JSON value under ``path`` at key
``name``.
- """
+
+ For more information: https://oss.redis.com/redisjson/commands/#jsonstrlen
+ """ # noqa
pieces = [name]
if path is not None:
pieces.append(str(path))
@@ -187,14 +227,18 @@ class JSONCommands:
def toggle(self, name, path=Path.rootPath()):
"""Toggle boolean value under ``path`` at key ``name``.
returning the new value.
- """
+
+ For more information: https://oss.redis.com/redisjson/commands/#jsontoggle
+ """ # noqa
return self.execute_command("JSON.TOGGLE", name, str(path))
def strappend(self, name, value, path=Path.rootPath()):
"""Append to the string JSON value. If two options are specified after
the key name, the path is determined to be the first. If a single
option is passed, then the rootpath (i.e Path.rootPath()) is used.
- """
+
+ For more information: https://oss.redis.com/redisjson/commands/#jsonstrappend
+ """ # noqa
pieces = [name, str(path), self._encode(value)]
return self.execute_command(
"JSON.STRAPPEND", *pieces
@@ -203,7 +247,9 @@ class JSONCommands:
def debug(self, subcommand, key=None, path=Path.rootPath()):
"""Return the memory usage in bytes of a value under ``path`` from
key ``name``.
- """
+
+    For more information: https://oss.redis.com/redisjson/commands/#jsondebug
+ """ # noqa
valid_subcommands = ["MEMORY", "HELP"]
if subcommand not in valid_subcommands:
raise DataError("The only valid subcommands are ",
diff --git a/redis/commands/json/path.py b/redis/commands/json/path.py
index 6d87045..f0a413a 100644
--- a/redis/commands/json/path.py
+++ b/redis/commands/json/path.py
@@ -1,4 +1,4 @@
-class Path(object):
+class Path:
"""This class represents a path in a JSON value."""
strPath = ""
diff --git a/redis/commands/parser.py b/redis/commands/parser.py
new file mode 100644
index 0000000..26b190c
--- /dev/null
+++ b/redis/commands/parser.py
@@ -0,0 +1,119 @@
+from redis.exceptions import (
+ RedisError,
+ ResponseError
+)
+from redis.utils import str_if_bytes
+
+
+class CommandsParser:
+ """
+ Parses Redis commands to get command keys.
+ COMMAND output is used to determine key locations.
+ Commands that do not have a predefined key location are flagged with
+ 'movablekeys', and these commands' keys are determined by the command
+ 'COMMAND GETKEYS'.
+ """
+ def __init__(self, redis_connection):
+ self.initialized = False
+ self.commands = {}
+ self.initialize(redis_connection)
+
+ def initialize(self, r):
+ self.commands = r.execute_command("COMMAND")
+
+ # As soon as this PR is merged into Redis, we should reimplement
+ # our logic to use COMMAND INFO changes to determine the key positions
+ # https://github.com/redis/redis/pull/8324
+ def get_keys(self, redis_conn, *args):
+ """
+ Get the keys from the passed command
+ """
+ if len(args) < 2:
+ # The command has no keys in it
+ return None
+
+ cmd_name = args[0].lower()
+ if cmd_name not in self.commands:
+ # try to split the command name and to take only the main command,
+ # e.g. 'memory' for 'memory usage'
+ cmd_name_split = cmd_name.split()
+ cmd_name = cmd_name_split[0]
+ if cmd_name in self.commands:
+                # save the split command to args
+ args = cmd_name_split + list(args[1:])
+ else:
+                # We'll try to reinitialize the commands cache; if the engine
+                # version has changed, the cached commands may be stale
+ self.initialize(redis_conn)
+ if cmd_name not in self.commands:
+ raise RedisError(
+ f"{cmd_name.upper()} command doesn't exist in Redis commands"
+ )
+
+ command = self.commands.get(cmd_name)
+ if 'movablekeys' in command['flags']:
+ keys = self._get_moveable_keys(redis_conn, *args)
+ elif 'pubsub' in command['flags']:
+ keys = self._get_pubsub_keys(*args)
+ else:
+ if command['step_count'] == 0 and command['first_key_pos'] == 0 \
+ and command['last_key_pos'] == 0:
+ # The command doesn't have keys in it
+ return None
+ last_key_pos = command['last_key_pos']
+ if last_key_pos < 0:
+ last_key_pos = len(args) - abs(last_key_pos)
+ keys_pos = list(range(command['first_key_pos'], last_key_pos + 1,
+ command['step_count']))
+ keys = [args[pos] for pos in keys_pos]
+
+ return keys
+
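+    # Editor's sketch of the three lookup paths above (``conn`` is an
+    # assumed Redis connection, results are illustrative):
+    #
+    #     parser.get_keys(conn, 'GET', 'foo')                 # -> ['foo']
+    #     parser.get_keys(conn, 'MSET', 'a', '1', 'b', '2')   # -> ['a', 'b']
+    #     parser.get_keys(conn, 'EVAL', 'return 1', '1', 'k')
+    #     # -> ['k'], resolved via COMMAND GETKEYS (movablekeys)
+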
+ def _get_moveable_keys(self, redis_conn, *args):
+ pieces = []
+ cmd_name = args[0]
+        # The command name should be split into separate arguments,
+        # e.g. 'MEMORY USAGE' will be split into ['MEMORY', 'USAGE']
+ pieces = pieces + cmd_name.split()
+ pieces = pieces + list(args[1:])
+ try:
+ keys = redis_conn.execute_command('COMMAND GETKEYS', *pieces)
+ except ResponseError as e:
+ message = e.__str__()
+ if 'Invalid arguments' in message or \
+ 'The command has no key arguments' in message:
+ return None
+ else:
+ raise e
+ return keys
+
+ def _get_pubsub_keys(self, *args):
+ """
+ Get the keys from pubsub command.
+ Although PubSub commands have predetermined key locations, they are not
+        reflected in the COMMAND output, so the key positions are hardcoded
+ in this method
+ """
+ if len(args) < 2:
+ # The command has no keys in it
+ return None
+ args = [str_if_bytes(arg) for arg in args]
+        command = args[0].upper()
+        keys = None  # avoid UnboundLocalError for unhandled PUBSUB subtypes
+ if command == 'PUBSUB':
+ # the second argument is a part of the command name, e.g.
+ # ['PUBSUB', 'NUMSUB', 'foo'].
+ pubsub_type = args[1].upper()
+ if pubsub_type in ['CHANNELS', 'NUMSUB']:
+ keys = args[2:]
+ elif command in ['SUBSCRIBE', 'PSUBSCRIBE', 'UNSUBSCRIBE',
+ 'PUNSUBSCRIBE']:
+ # format example:
+ # SUBSCRIBE channel [channel ...]
+ keys = list(args[1:])
+ elif command == 'PUBLISH':
+ # format example:
+ # PUBLISH channel message
+ keys = [args[1]]
+ else:
+ keys = None
+ return keys
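+
+    # e.g. (hedged, illustrative):
+    #     parser._get_pubsub_keys('PUBLISH', 'ch', 'msg')     # -> ['ch']
+    #     parser._get_pubsub_keys('PUBSUB', 'NUMSUB', 'a')    # -> ['a']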
diff --git a/redis/commands/search/__init__.py b/redis/commands/search/__init__.py
index 8320ad4..a30cebe 100644
--- a/redis/commands/search/__init__.py
+++ b/redis/commands/search/__init__.py
@@ -7,7 +7,7 @@ class Search(SearchCommands):
It abstracts the API of the module and lets you just use the engine.
"""
- class BatchIndexer(object):
+ class BatchIndexer:
"""
A batch indexer allows you to automatically batch
document indexing in pipelines, flushing it every N documents.
diff --git a/redis/commands/search/aggregation.py b/redis/commands/search/aggregation.py
index b391d1f..b1ac6b0 100644
--- a/redis/commands/search/aggregation.py
+++ b/redis/commands/search/aggregation.py
@@ -1,7 +1,7 @@
FIELDNAME = object()
-class Limit(object):
+class Limit:
def __init__(self, offset=0, count=0):
self.offset = offset
self.count = count
@@ -13,7 +13,7 @@ class Limit(object):
return []
-class Reducer(object):
+class Reducer:
"""
Base reducer object for all reducers.
@@ -55,7 +55,7 @@ class Reducer(object):
return self._args
-class SortDirection(object):
+class SortDirection:
"""
This special class is used to indicate sort direction.
"""
@@ -82,7 +82,7 @@ class Desc(SortDirection):
DIRSTRING = "DESC"
-class Group(object):
+class Group:
"""
This object automatically created in the `AggregateRequest.group_by()`
"""
@@ -109,7 +109,7 @@ class Group(object):
return ret
-class Projection(object):
+class Projection:
"""
This object automatically created in the `AggregateRequest.apply()`
"""
@@ -126,7 +126,7 @@ class Projection(object):
return ret
-class SortBy(object):
+class SortBy:
"""
This object automatically created in the `AggregateRequest.sort_by()`
"""
@@ -151,7 +151,7 @@ class SortBy(object):
return ret
-class AggregateRequest(object):
+class AggregateRequest:
"""
Aggregation request which can be passed to `Client.aggregate`.
"""
@@ -345,12 +345,6 @@ class AggregateRequest(object):
self._cursor = args
return self
- def _limit_2_args(self, limit):
- if limit[1]:
- return ["LIMIT"] + [str(x) for x in limit]
- else:
- return []
-
def build_args(self):
# @foo:bar ...
ret = [self._query]
@@ -376,7 +370,7 @@ class AggregateRequest(object):
return ret
-class Cursor(object):
+class Cursor:
def __init__(self, cid):
self.cid = cid
self.max_idle = 0
@@ -391,16 +385,15 @@ class Cursor(object):
return args
-class AggregateResult(object):
+class AggregateResult:
def __init__(self, rows, cursor, schema):
self.rows = rows
self.cursor = cursor
self.schema = schema
def __repr__(self):
- return "<{} at 0x{:x} Rows={}, Cursor={}>".format(
- self.__class__.__name__,
- id(self),
- len(self.rows),
- self.cursor.cid if self.cursor else -1,
+ cid = self.cursor.cid if self.cursor else -1
+ return (
+ f"<{self.__class__.__name__} at 0x{id(self):x} "
+ f"Rows={len(self.rows)}, Cursor={cid}>"
)
diff --git a/redis/commands/search/commands.py b/redis/commands/search/commands.py
index 0cee2ad..553bc39 100644
--- a/redis/commands/search/commands.py
+++ b/redis/commands/search/commands.py
@@ -7,6 +7,7 @@ from .query import Query
from ._util import to_string
from .aggregation import AggregateRequest, AggregateResult, Cursor
from .suggestion import SuggestionParser
+from ..helpers import parse_to_dict
NUMERIC = "NUMERIC"
@@ -20,6 +21,7 @@ EXPLAIN_CMD = "FT.EXPLAIN"
EXPLAINCLI_CMD = "FT.EXPLAINCLI"
DEL_CMD = "FT.DEL"
AGGREGATE_CMD = "FT.AGGREGATE"
+PROFILE_CMD = "FT.PROFILE"
CURSOR_CMD = "FT.CURSOR"
SPELLCHECK_CMD = "FT.SPELLCHECK"
DICT_ADD_CMD = "FT.DICTADD"
@@ -77,7 +79,9 @@ class SearchCommands:
allow searching in specific fields
- **stopwords**: If not None, we create the index with this custom
stopword list. The list can be empty
- """
+
+ For more information: https://oss.redis.com/redisearch/Commands/#ftcreate
+ """ # noqa
args = [CREATE_CMD, self.index_name]
if definition is not None:
@@ -107,7 +111,9 @@ class SearchCommands:
### Parameters:
- **fields**: a list of Field objects to add for the index
- """
+
+ For more information: https://oss.redis.com/redisearch/Commands/#ftalter_schema_add
+ """ # noqa
args = [ALTER_CMD, self.index_name, "SCHEMA", "ADD"]
try:
@@ -117,17 +123,6 @@ class SearchCommands:
return self.execute_command(*args)
- def drop_index(self, delete_documents=True):
- """
- Drop the index if it exists. Deprecated from RediSearch 2.0.
-
- ### Parameters:
-
- - **delete_documents**: If `True`, all documents will be deleted.
- """
- keep_str = "" if delete_documents else "KEEPDOCS"
- return self.execute_command(DROP_CMD, self.index_name, keep_str)
-
def dropindex(self, delete_documents=False):
"""
Drop the index if it exists.
@@ -137,7 +132,8 @@ class SearchCommands:
### Parameters:
- **delete_documents**: If `True`, all documents will be deleted.
- """
+ For more information: https://oss.redis.com/redisearch/Commands/#ftdropindex
+ """ # noqa
keep_str = "" if delete_documents else "KEEPDOCS"
return self.execute_command(DROP_CMD, self.index_name, keep_str)
@@ -244,7 +240,9 @@ class SearchCommands:
- **fields** kwargs dictionary of the document fields to be saved
and/or indexed.
        NOTE: Geo points should be encoded as strings of "lon,lat"
- """
+
+ For more information: https://oss.redis.com/redisearch/Commands/#ftadd
+ """ # noqa
return self._add_document(
doc_id,
conn=None,
@@ -276,7 +274,9 @@ class SearchCommands:
- **replace**: if True, and the document already is in the index, we
perform an update and reindex the document
- **language**: Specify the language used for document tokenization.
- """
+
+ For more information: https://oss.redis.com/redisearch/Commands/#ftaddhash
+ """ # noqa
return self._add_document_hash(
doc_id,
conn=None,
@@ -294,7 +294,9 @@ class SearchCommands:
- **delete_actual_document**: if set to True, RediSearch also delete
the actual document if it is in the index
- """
+
+ For more information: https://oss.redis.com/redisearch/Commands/#ftdel
+ """ # noqa
args = [DEL_CMD, self.index_name, doc_id]
if conn is None:
conn = self.client
@@ -325,6 +327,8 @@ class SearchCommands:
### Parameters
- **ids**: the ids of the saved documents.
+
+ For more information https://oss.redis.com/redisearch/Commands/#ftget
"""
return self.client.execute_command(MGET_CMD, self.index_name, *ids)
@@ -333,6 +337,8 @@ class SearchCommands:
"""
        Get info and stats about the current index, including the number of
        documents, memory consumption, etc
+
+ For more information https://oss.redis.com/redisearch/Commands/#ftinfo
"""
res = self.client.execute_command(INFO_CMD, self.index_name)
@@ -346,7 +352,7 @@ class SearchCommands:
# convert the query from a text to a query object
query = Query(query)
if not isinstance(query, Query):
- raise ValueError("Bad query type %s" % type(query))
+ raise ValueError(f"Bad query type {type(query)}")
args += query.get_args()
return args, query
@@ -360,7 +366,9 @@ class SearchCommands:
- **query**: the search query. Either a text for simple queries with
default parameters, or a Query object for complex queries.
See RediSearch's documentation on query format
- """
+
+ For more information: https://oss.redis.com/redisearch/Commands/#ftsearch
+ """ # noqa
args, query = self._mk_query_args(query)
st = time.time()
res = self.execute_command(SEARCH_CMD, *args)
@@ -374,6 +382,10 @@ class SearchCommands:
)
def explain(self, query):
+ """Returns the execution plan for a complex query.
+
+ For more information: https://oss.redis.com/redisearch/Commands/#ftexplain
+ """ # noqa
args, query_text = self._mk_query_args(query)
return self.execute_command(EXPLAIN_CMD, *args)
@@ -382,15 +394,17 @@ class SearchCommands:
def aggregate(self, query):
"""
- Issue an aggregation query
+ Issue an aggregation query.
### Parameters
- **query**: This can be either an `AggeregateRequest`, or a `Cursor`
+ **query**: This can be either an `AggregateRequest`, or a `Cursor`
An `AggregateResult` object is returned. You can access the rows from
its `rows` property, which will always yield the rows of the result.
- """
+
+        For more information: https://oss.redis.com/redisearch/Commands/#ftaggregate
+ """ # noqa
if isinstance(query, AggregateRequest):
has_cursor = bool(query._cursor)
cmd = [AGGREGATE_CMD, self.index_name] + query.build_args()
@@ -401,6 +415,9 @@ class SearchCommands:
raise ValueError("Bad query", query)
raw = self.execute_command(*cmd)
+ return self._get_AggregateResult(raw, query, has_cursor)
+
+ def _get_AggregateResult(self, raw, query, has_cursor):
if has_cursor:
if isinstance(query, Cursor):
query.cid = raw[1]
@@ -418,8 +435,48 @@ class SearchCommands:
schema = None
rows = raw[1:]
- res = AggregateResult(rows, cursor, schema)
- return res
+ return AggregateResult(rows, cursor, schema)
+
+ def profile(self, query, limited=False):
+ """
+ Performs a search or aggregate command and collects performance
+ information.
+
+ ### Parameters
+
+ **query**: This can be either an `AggregateRequest`, `Query` or
+ string.
+ **limited**: If set to True, removes details of reader iterator.
+
+ """
+ st = time.time()
+ cmd = [PROFILE_CMD, self.index_name, ""]
+ if limited:
+ cmd.append("LIMITED")
+ cmd.append('QUERY')
+
+ if isinstance(query, AggregateRequest):
+ cmd[2] = "AGGREGATE"
+ cmd += query.build_args()
+ elif isinstance(query, Query):
+ cmd[2] = "SEARCH"
+ cmd += query.get_args()
+ else:
+ raise ValueError("Must provide AggregateRequest object or "
+ "Query object.")
+
+ res = self.execute_command(*cmd)
+
+ if isinstance(query, AggregateRequest):
+ result = self._get_AggregateResult(res[0], query, query._cursor)
+ else:
+ result = Result(res[0],
+ not query._no_content,
+ duration=(time.time() - st) * 1000.0,
+ has_payload=query._with_payloads,
+ with_scores=query._with_scores,)
+
+ return result, parse_to_dict(res[1])
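+
+    # Hedged usage sketch: profile a simple text query and inspect the
+    # timing details parsed by parse_to_dict (field names are defined by
+    # the RediSearch server; ``search`` is an assumed instance of this
+    # commands class):
+    #
+    #     res, details = search.profile(Query('hello'))
+    #     print(details.get('Total profile time'))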
def spellcheck(self, query, distance=None, include=None, exclude=None):
"""
@@ -432,7 +489,9 @@ class SearchCommands:
suggestions (default: 1, max: 4).
**include**: specifies an inclusion custom dictionary.
**exclude**: specifies an exclusion custom dictionary.
- """
+
+ For more information: https://oss.redis.com/redisearch/Commands/#ftspellcheck
+ """ # noqa
cmd = [SPELLCHECK_CMD, self.index_name, query]
if distance:
cmd.extend(["DISTANCE", distance])
@@ -489,7 +548,9 @@ class SearchCommands:
- **name**: Dictionary name.
- **terms**: List of items for adding to the dictionary.
- """
+
+ For more information: https://oss.redis.com/redisearch/Commands/#ftdictadd
+ """ # noqa
cmd = [DICT_ADD_CMD, name]
cmd.extend(terms)
return self.execute_command(*cmd)
@@ -501,7 +562,9 @@ class SearchCommands:
- **name**: Dictionary name.
- **terms**: List of items for removing from the dictionary.
- """
+
+ For more information: https://oss.redis.com/redisearch/Commands/#ftdictdel
+ """ # noqa
cmd = [DICT_DEL_CMD, name]
cmd.extend(terms)
return self.execute_command(*cmd)
@@ -512,7 +575,9 @@ class SearchCommands:
### Parameters
- **name**: Dictionary name.
- """
+
+ For more information: https://oss.redis.com/redisearch/Commands/#ftdictdump
+ """ # noqa
cmd = [DICT_DUMP_CMD, name]
return self.execute_command(*cmd)
@@ -523,7 +588,9 @@ class SearchCommands:
- **option**: the name of the configuration option.
- **value**: a value for the configuration option.
- """
+
+ For more information: https://oss.redis.com/redisearch/Commands/#ftconfig
+ """ # noqa
cmd = [CONFIG_CMD, "SET", option, value]
raw = self.execute_command(*cmd)
return raw == "OK"
@@ -534,7 +601,9 @@ class SearchCommands:
### Parameters
- **option**: the name of the configuration option.
- """
+
+ For more information: https://oss.redis.com/redisearch/Commands/#ftconfig
+ """ # noqa
cmd = [CONFIG_CMD, "GET", option]
res = {}
raw = self.execute_command(*cmd)
@@ -550,7 +619,9 @@ class SearchCommands:
### Parameters
- **tagfield**: Tag field name
- """
+
+ For more information: https://oss.redis.com/redisearch/Commands/#fttagvals
+ """ # noqa
return self.execute_command(TAGVALS_CMD, self.index_name, tagfield)
@@ -561,7 +632,9 @@ class SearchCommands:
### Parameters
- **alias**: Name of the alias to create
- """
+
+ For more information: https://oss.redis.com/redisearch/Commands/#ftaliasadd
+ """ # noqa
return self.execute_command(ALIAS_ADD_CMD, alias, self.index_name)
@@ -572,7 +645,9 @@ class SearchCommands:
### Parameters
- **alias**: Name of the alias to create
- """
+
+ For more information: https://oss.redis.com/redisearch/Commands/#ftaliasupdate
+ """ # noqa
return self.execute_command(ALIAS_UPDATE_CMD, alias, self.index_name)
@@ -583,7 +658,9 @@ class SearchCommands:
### Parameters
- **alias**: Name of the alias to delete
- """
+
+ For more information: https://oss.redis.com/redisearch/Commands/#ftaliasdel
+ """ # noqa
return self.execute_command(ALIAS_DEL_CMD, alias)
def sugadd(self, key, *suggestions, **kwargs):
@@ -592,8 +669,9 @@ class SearchCommands:
a score and string.
If kwargs["increment"] is true and the terms are already in the
server's dictionary, we increment their scores.
- More information `here <https://oss.redis.com/redisearch/master/Commands/#ftsugadd>`_. # noqa
- """
+
+ For more information: https://oss.redis.com/redisearch/master/Commands/#ftsugadd
+ """ # noqa
# If Transaction is not False it will MULTI/EXEC which will error
pipe = self.pipeline(transaction=False)
for sug in suggestions:
@@ -611,16 +689,18 @@ class SearchCommands:
def suglen(self, key):
"""
Return the number of entries in the AutoCompleter index.
- More information `here <https://oss.redis.com/redisearch/master/Commands/#ftsuglen>`_. # noqa
- """
+
+ For more information: https://oss.redis.com/redisearch/master/Commands/#ftsuglen
+ """ # noqa
return self.execute_command(SUGLEN_COMMAND, key)
def sugdel(self, key, string):
"""
Delete a string from the AutoCompleter index.
Returns 1 if the string was found and deleted, 0 otherwise.
- More information `here <https://oss.redis.com/redisearch/master/Commands/#ftsugdel>`_. # noqa
- """
+
+ For more information: https://oss.redis.com/redisearch/master/Commands/#ftsugdel
+ """ # noqa
return self.execute_command(SUGDEL_COMMAND, key, string)
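sugadd/suglen/sugdel above, together with sugget defined next, manage a RediSearch autocompleter key. A minimal sketch, assuming a RediSearch-enabled server (the key name "sug" is hypothetical):

    from redis import Redis
    from redis.commands.search.suggestion import Suggestion

    ac = Redis(decode_responses=True).ft()
    ac.sugadd("sug", Suggestion("hello world", score=1.0))
    print(ac.suglen("sug"))                        # -> 1
    for s in ac.sugget("sug", "hel", fuzzy=True):
        print(s.string, s.score)
    ac.sugdel("sug", "hello world")                # -> 1 if deleted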
def sugget(
@@ -629,7 +709,6 @@ class SearchCommands:
):
"""
Get a list of suggestions from the AutoCompleter, for a given prefix.
- More information `here <https://oss.redis.com/redisearch/master/Commands/#ftsugget>`_. # noqa
Parameters:
@@ -656,7 +735,9 @@ class SearchCommands:
list:
A list of Suggestion objects. If with_scores was False, the
score of all suggestions is 1.
- """
+
+ For more information: https://oss.redis.com/redisearch/master/Commands/#ftsugget
+ """ # noqa
args = [SUGGET_COMMAND, key, prefix, "MAX", num]
if fuzzy:
args.append(FUZZY)
@@ -688,7 +769,9 @@ class SearchCommands:
If set to true, we do not scan and index.
terms :
The terms.
- """
+
+ For more information: https://oss.redis.com/redisearch/Commands/#ftsynupdate
+ """ # noqa
cmd = [SYNUPDATE_CMD, self.index_name, groupid]
if skipinitial:
cmd.extend(["SKIPINITIALSCAN"])
@@ -701,6 +784,8 @@ class SearchCommands:
The command is used to dump the synonyms data structure.
Returns a list of synonym terms and their synonym group ids.
- """
+
+ For more information: https://oss.redis.com/redisearch/Commands/#ftsyndump
+ """ # noqa
raw = self.execute_command(SYNDUMP_CMD, self.index_name)
return {raw[i]: raw[i + 1] for i in range(0, len(raw), 2)}
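synupdate and syndump maintain RediSearch synonym groups for an index. A minimal sketch, assuming a RediSearch-enabled server and a hypothetical index "idx":

    from redis import Redis

    search = Redis(decode_responses=True).ft("idx")
    # Put "tv" and "television" into synonym group "1"
    search.synupdate("1", False, "tv", "television")
    # syndump maps each term to the list of group ids it belongs to
    print(search.syndump())    # e.g. {'tv': ['1'], 'television': ['1']}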
diff --git a/redis/commands/search/document.py b/redis/commands/search/document.py
index 0d4255d..5b30505 100644
--- a/redis/commands/search/document.py
+++ b/redis/commands/search/document.py
@@ -1,4 +1,4 @@
-class Document(object):
+class Document:
"""
Represents a single document in a result set
"""
@@ -10,4 +10,4 @@ class Document(object):
setattr(self, k, v)
def __repr__(self):
- return "Document %s" % self.__dict__
+ return f"Document {self.__dict__}"
diff --git a/redis/commands/search/field.py b/redis/commands/search/field.py
index 45114a4..076c872 100644
--- a/redis/commands/search/field.py
+++ b/redis/commands/search/field.py
@@ -1,4 +1,4 @@
-class Field(object):
+class Field:
NUMERIC = "NUMERIC"
TEXT = "TEXT"
diff --git a/redis/commands/search/indexDefinition.py b/redis/commands/search/indexDefinition.py
index 4fbc609..0c7a3b0 100644
--- a/redis/commands/search/indexDefinition.py
+++ b/redis/commands/search/indexDefinition.py
@@ -8,7 +8,7 @@ class IndexType(Enum):
JSON = 2
-class IndexDefinition(object):
+class IndexDefinition:
"""IndexDefinition is used to define a index definition for automatic
indexing on Hash or Json update."""
@@ -38,8 +38,7 @@ class IndexDefinition(object):
elif index_type is IndexType.JSON:
self.args.extend(["ON", "JSON"])
elif index_type is not None:
- raise RuntimeError("index_type must be one of {}".
- format(list(IndexType)))
+ raise RuntimeError(f"index_type must be one of {list(IndexType)}")
def _appendPrefix(self, prefix):
"""Append PREFIX."""
diff --git a/redis/commands/search/query.py b/redis/commands/search/query.py
index 85a8255..5534f7b 100644
--- a/redis/commands/search/query.py
+++ b/redis/commands/search/query.py
@@ -1,4 +1,4 @@
-class Query(object):
+class Query:
"""
Query is used to build complex queries that have more parameters than just
the query string. The query string is set in the constructor, and other
@@ -291,7 +291,7 @@ class Query(object):
return self
-class Filter(object):
+class Filter:
def __init__(self, keyword, field, *args):
self.args = [keyword, field] + list(args)
@@ -303,8 +303,8 @@ class NumericFilter(Filter):
def __init__(self, field, minval, maxval, minExclusive=False,
maxExclusive=False):
args = [
- minval if not minExclusive else "({}".format(minval),
- maxval if not maxExclusive else "({}".format(maxval),
+ minval if not minExclusive else f"({minval}",
+ maxval if not maxExclusive else f"({maxval}",
]
Filter.__init__(self, "FILTER", field, *args)
@@ -320,6 +320,6 @@ class GeoFilter(Filter):
Filter.__init__(self, "GEOFILTER", field, lon, lat, radius, unit)
-class SortbyField(object):
+class SortbyField:
def __init__(self, field, asc=True):
self.args = [field, "ASC" if asc else "DESC"]
diff --git a/redis/commands/search/querystring.py b/redis/commands/search/querystring.py
index aecd3b8..ffba542 100644
--- a/redis/commands/search/querystring.py
+++ b/redis/commands/search/querystring.py
@@ -61,7 +61,7 @@ def geo(lat, lon, radius, unit="km"):
return GeoValue(lat, lon, radius, unit)
-class Value(object):
+class Value:
@property
def combinable(self):
"""
@@ -134,7 +134,7 @@ class GeoValue(Value):
self.unit = unit
-class Node(object):
+class Node:
def __init__(self, *children, **kwparams):
"""
Create a node
@@ -197,13 +197,11 @@ class Node(object):
def join_fields(self, key, vals):
if len(vals) == 1:
- return [BaseNode("@{}:{}".format(key, vals[0].to_string()))]
+ return [BaseNode(f"@{key}:{vals[0].to_string()}")]
if not vals[0].combinable:
- return [BaseNode("@{}:{}".format(key,
- v.to_string())) for v in vals]
+ return [BaseNode(f"@{key}:{v.to_string()}") for v in vals]
s = BaseNode(
- "@{}:({})".format(key,
- self.JOINSTR.join(v.to_string() for v in vals))
+ f"@{key}:({self.JOINSTR.join(v.to_string() for v in vals)})"
)
return [s]
@@ -220,9 +218,7 @@ class Node(object):
def to_string(self, with_parens=None):
with_parens = self._should_use_paren(with_parens)
pre, post = ("(", ")") if with_parens else ("", "")
- return "{}{}{}".format(
- pre, self.JOINSTR.join(n.to_string() for n in self.params), post
- )
+ return f"{pre}{self.JOINSTR.join(n.to_string() for n in self.params)}{post}"
def _should_use_paren(self, optval):
if optval is not None:
@@ -235,7 +231,7 @@ class Node(object):
class BaseNode(Node):
def __init__(self, s):
- super(BaseNode, self).__init__()
+ super().__init__()
self.s = str(s)
def to_string(self, with_parens=None):
@@ -268,7 +264,7 @@ class DisjunctNode(IntersectNode):
def to_string(self, with_parens=None):
with_parens = self._should_use_paren(with_parens)
- ret = super(DisjunctNode, self).to_string(with_parens=False)
+ ret = super().to_string(with_parens=False)
if with_parens:
return "(-" + ret + ")"
else:
@@ -294,7 +290,7 @@ class OptionalNode(IntersectNode):
def to_string(self, with_parens=None):
with_parens = self._should_use_paren(with_parens)
- ret = super(OptionalNode, self).to_string(with_parens=False)
+ ret = super().to_string(with_parens=False)
if with_parens:
return "(~" + ret + ")"
else:
diff --git a/redis/commands/search/reducers.py b/redis/commands/search/reducers.py
index 6cbbf2f..41ed11a 100644
--- a/redis/commands/search/reducers.py
+++ b/redis/commands/search/reducers.py
@@ -3,7 +3,7 @@ from .aggregation import Reducer, SortDirection
class FieldOnlyReducer(Reducer):
def __init__(self, field):
- super(FieldOnlyReducer, self).__init__(field)
+ super().__init__(field)
self._field = field
@@ -15,7 +15,7 @@ class count(Reducer):
NAME = "COUNT"
def __init__(self):
- super(count, self).__init__()
+ super().__init__()
class sum(FieldOnlyReducer):
@@ -26,7 +26,7 @@ class sum(FieldOnlyReducer):
NAME = "SUM"
def __init__(self, field):
- super(sum, self).__init__(field)
+ super().__init__(field)
class min(FieldOnlyReducer):
@@ -37,7 +37,7 @@ class min(FieldOnlyReducer):
NAME = "MIN"
def __init__(self, field):
- super(min, self).__init__(field)
+ super().__init__(field)
class max(FieldOnlyReducer):
@@ -48,7 +48,7 @@ class max(FieldOnlyReducer):
NAME = "MAX"
def __init__(self, field):
- super(max, self).__init__(field)
+ super().__init__(field)
class avg(FieldOnlyReducer):
@@ -59,7 +59,7 @@ class avg(FieldOnlyReducer):
NAME = "AVG"
def __init__(self, field):
- super(avg, self).__init__(field)
+ super().__init__(field)
class tolist(FieldOnlyReducer):
@@ -70,7 +70,7 @@ class tolist(FieldOnlyReducer):
NAME = "TOLIST"
def __init__(self, field):
- super(tolist, self).__init__(field)
+ super().__init__(field)
class count_distinct(FieldOnlyReducer):
@@ -82,7 +82,7 @@ class count_distinct(FieldOnlyReducer):
NAME = "COUNT_DISTINCT"
def __init__(self, field):
- super(count_distinct, self).__init__(field)
+ super().__init__(field)
class count_distinctish(FieldOnlyReducer):
@@ -104,7 +104,7 @@ class quantile(Reducer):
NAME = "QUANTILE"
def __init__(self, field, pct):
- super(quantile, self).__init__(field, str(pct))
+ super().__init__(field, str(pct))
self._field = field
@@ -116,7 +116,7 @@ class stddev(FieldOnlyReducer):
NAME = "STDDEV"
def __init__(self, field):
- super(stddev, self).__init__(field)
+ super().__init__(field)
class first_value(Reducer):
@@ -155,7 +155,7 @@ class first_value(Reducer):
args = [field]
if fieldstrs:
args += ["BY"] + fieldstrs
- super(first_value, self).__init__(*args)
+ super().__init__(*args)
self._field = field
@@ -174,5 +174,5 @@ class random_sample(Reducer):
**size**: Return this many items (can be less)
"""
args = [field, str(size)]
- super(random_sample, self).__init__(*args)
+ super().__init__(*args)
self._field = field
diff --git a/redis/commands/search/result.py b/redis/commands/search/result.py
index 9cd922a..57ba53d 100644
--- a/redis/commands/search/result.py
+++ b/redis/commands/search/result.py
@@ -2,7 +2,7 @@ from .document import Document
from ._util import to_string
-class Result(object):
+class Result:
"""
Represents the result of a search query, and has an array of Document
objects
@@ -70,4 +70,4 @@ class Result(object):
self.docs.append(doc)
def __repr__(self):
- return "Result{%d total, docs: %s}" % (self.total, self.docs)
+ return f"Result{{{self.total} total, docs: {self.docs}}}"
diff --git a/redis/commands/search/suggestion.py b/redis/commands/search/suggestion.py
index 3401af9..6d295a6 100644
--- a/redis/commands/search/suggestion.py
+++ b/redis/commands/search/suggestion.py
@@ -1,7 +1,7 @@
from ._util import to_string
-class Suggestion(object):
+class Suggestion:
"""
Represents a single suggestion being sent or returned from the
autocomplete server
@@ -16,7 +16,7 @@ class Suggestion(object):
return self.string
-class SuggestionParser(object):
+class SuggestionParser:
"""
Internal class used to parse results from the `SUGGET` command.
This needs to consume either 1, 2, or 3 values at a time from
diff --git a/redis/commands/timeseries/commands.py b/redis/commands/timeseries/commands.py
index 3b9ee0f..460ba76 100644
--- a/redis/commands/timeseries/commands.py
+++ b/redis/commands/timeseries/commands.py
@@ -26,8 +26,6 @@ class TimeSeriesCommands:
def create(self, key, **kwargs):
"""
Create a new time-series.
- For more information see
- `TS.CREATE <https://oss.redis.com/redistimeseries/master/commands/#tscreate>`_. # noqa
Args:
@@ -60,7 +58,9 @@ class TimeSeriesCommands:
- 'min': only override if the value is lower than the existing value.
- 'max': only override if the value is higher than the existing value.
When this is not set, the server-wide default will be used.
- """
+
+ For more information: https://oss.redis.com/redistimeseries/commands/#tscreate
+ """ # noqa
retention_msecs = kwargs.get("retention_msecs", None)
uncompressed = kwargs.get("uncompressed", False)
labels = kwargs.get("labels", {})
@@ -79,10 +79,11 @@ class TimeSeriesCommands:
"""
Update the retention, labels of an existing key.
- For more information see
- `TS.ALTER <https://oss.redis.com/redistimeseries/master/commands/#tsalter>`_. # noqa
The parameters are the same as TS.CREATE.
- """
+
+ For more information: https://oss.redis.com/redistimeseries/commands/#tsalter
+ """ # noqa
retention_msecs = kwargs.get("retention_msecs", None)
labels = kwargs.get("labels", {})
duplicate_policy = kwargs.get("duplicate_policy", None)
@@ -97,7 +98,6 @@ class TimeSeriesCommands:
"""
Append (or create and append) a new sample to the series.
- For more information see
- `TS.ADD <https://oss.redis.com/redistimeseries/master/commands/#tsadd>`_. # noqa
Args:
@@ -129,7 +129,9 @@ class TimeSeriesCommands:
- 'min': only override if the value is lower than the existing value.
- 'max': only override if the value is higher than the existing value.
When this is not set, the server-wide default will be used.
- """
+
+ For more information: https://oss.redis.com/redistimeseries/master/commands/#tsadd
+ """ # noqa
retention_msecs = kwargs.get("retention_msecs", None)
uncompressed = kwargs.get("uncompressed", False)
labels = kwargs.get("labels", {})
@@ -150,9 +152,9 @@ class TimeSeriesCommands:
`key` with `timestamp`.
Expects a list of `tuples` as (`key`,`timestamp`, `value`).
Return value is an array with timestamps of insertions.
- For more information see
- `TS.MADD <https://oss.redis.com/redistimeseries/master/commands/#tsmadd>`_. # noqa
- """
+
+ For more information: https://oss.redis.com/redistimeseries/master/commands/#tsmadd
+ """ # noqa
params = []
for ktv in ktv_tuples:
for item in ktv:
@@ -166,8 +168,6 @@ class TimeSeriesCommands:
sample of a series.
This command can be used as a counter or gauge that automatically gets
history as a time series.
- For more information see
- `TS.INCRBY <https://oss.redis.com/redistimeseries/master/commands/#tsincrbytsdecrby>`_. # noqa
Args:
@@ -189,7 +189,9 @@ class TimeSeriesCommands:
chunk_size:
Each time-series uses chunks of memory of fixed size for time series samples.
You can alter the default TSDB chunk size by passing the chunk_size argument (in Bytes).
- """
+
+ For more information: https://oss.redis.com/redistimeseries/master/commands/#tsincrbytsdecrby
+ """ # noqa
timestamp = kwargs.get("timestamp", None)
retention_msecs = kwargs.get("retention_msecs", None)
uncompressed = kwargs.get("uncompressed", False)
@@ -210,8 +212,6 @@ class TimeSeriesCommands:
latest sample of a series.
This command can be used as a counter or gauge that
automatically gets history as a time series.
- For more information see
- `TS.DECRBY <https://oss.redis.com/redistimeseries/master/commands/#tsincrbytsdecrby>`_. # noqa
Args:
@@ -237,7 +237,9 @@ class TimeSeriesCommands:
chunk_size:
Each time-series uses chunks of memory of fixed size for time series samples.
You can alter the default TSDB chunk size by passing the chunk_size argument (in Bytes).
- """
+
+ For more information: https://oss.redis.com/redistimeseries/master/commands/#tsincrbytsdecrby
+ """ # noqa
timestamp = kwargs.get("timestamp", None)
retention_msecs = kwargs.get("retention_msecs", None)
uncompressed = kwargs.get("uncompressed", False)
@@ -260,7 +262,6 @@ class TimeSeriesCommands:
and end data points will also be deleted.
Return the count for deleted items.
- For more information see
- `TS.DEL <https://oss.redis.com/redistimeseries/master/commands/#tsdel>`_. # noqa
Args:
@@ -270,7 +271,9 @@ class TimeSeriesCommands:
Start timestamp for the range deletion.
to_time:
End timestamp for the range deletion.
- """
+
+ For more information: https://oss.redis.com/redistimeseries/master/commands/#tsdel
+ """ # noqa
return self.execute_command(DEL_CMD, key, from_time, to_time)
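The TS.* wrappers above all follow the same pattern: collect options from kwargs, build the argument list, and hand it to execute_command. A minimal end-to-end sketch, assuming a server with the RedisTimeSeries module loaded (the key "temp:1" is hypothetical):

    from redis import Redis

    ts = Redis(decode_responses=True).ts()
    ts.create("temp:1", retention_msecs=60000, labels={"sensor": "1"})
    ts.add("temp:1", 1, 21.5)
    ts.madd([("temp:1", 2, 22.0), ("temp:1", 3, 22.5)])
    print(ts.range("temp:1", "-", "+"))   # -> [(1, 21.5), (2, 22.0), (3, 22.5)]
    # TS.DEL over a timestamp range (assumes the wrapper is named delete())
    ts.delete("temp:1", 0, 2)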
def createrule(
@@ -285,9 +288,9 @@ class TimeSeriesCommands:
Aggregating for `bucket_size_msec` where an `aggregation_type` can be
[`avg`, `sum`, `min`, `max`, `range`, `count`, `first`, `last`,
`std.p`, `std.s`, `var.p`, `var.s`]
- For more information see
- `TS.CREATERULE <https://oss.redis.com/redistimeseries/master/commands/#tscreaterule>`_. # noqa
- """
+
+ For more information: https://oss.redis.com/redistimeseries/master/commands/#tscreaterule
+ """ # noqa
params = [source_key, dest_key]
self._appendAggregation(params, aggregation_type, bucket_size_msec)
@@ -297,8 +300,9 @@ class TimeSeriesCommands:
"""
Delete a compaction rule.
- For more information see
- `TS.DELETERULE <https://oss.redis.com/redistimeseries/master/commands/#tsdeleterule>`_. # noqa
- """
+
+ For more information: https://oss.redis.com/redistimeseries/master/commands/#tsdeleterule
+ """ # noqa
return self.execute_command(DELETERULE_CMD, source_key, dest_key)
def __range_params(
@@ -343,8 +347,6 @@ class TimeSeriesCommands:
):
"""
Query a range in forward direction for a specific time-series.
- For more information see
- `TS.RANGE <https://oss.redis.com/redistimeseries/master/commands/#tsrangetsrevrange>`_. # noqa
Args:
@@ -374,7 +376,9 @@ class TimeSeriesCommands:
by_min_value).
align:
Timestamp for alignment control for aggregation.
- """
+
+ For more information: https://oss.redis.com/redistimeseries/master/commands/#tsrangetsrevrange
+ """ # noqa
params = self.__range_params(
key,
from_time,
@@ -404,8 +408,6 @@ class TimeSeriesCommands:
):
"""
Query a range in reverse direction for a specific time-series.
- For more information see
- `TS.REVRANGE <https://oss.redis.com/redistimeseries/master/commands/#tsrangetsrevrange>`_. # noqa
**Note**: This command is only available since RedisTimeSeries >= v1.4
@@ -432,7 +434,9 @@ class TimeSeriesCommands:
Filter result by maximum value (must mention also filter_by_min_value).
align:
Timestamp for alignment control for aggregation.
- """
+
+ For more information: https://oss.redis.com/redistimeseries/master/commands/#tsrangetsrevrange
+ """ # noqa
params = self.__range_params(
key,
from_time,
@@ -500,8 +504,6 @@ class TimeSeriesCommands:
):
"""
Query a range across multiple time-series by filters in forward direction.
- For more information see
- `TS.MRANGE <https://oss.redis.com/redistimeseries/master/commands/#tsmrangetsmrevrange>`_. # noqa
Args:
@@ -544,7 +546,9 @@ class TimeSeriesCommands:
pair labels of a series.
align:
Timestamp for alignment control for aggregation.
- """
+
+ For more information: https://oss.redis.com/redistimeseries/master/commands/#tsmrangetsmrevrange
+ """ # noqa
params = self.__mrange_params(
aggregation_type,
bucket_size_msec,
@@ -583,8 +587,6 @@ class TimeSeriesCommands:
):
"""
Query a range across multiple time-series by filters in reverse direction.
- For more information see
- `TS.MREVRANGE <https://oss.redis.com/redistimeseries/master/commands/#tsmrangetsmrevrange>`_. # noqa
Args:
@@ -629,7 +631,9 @@ class TimeSeriesCommands:
labels of a series.
align:
Timestamp for alignment control for aggregation.
- """
+
+ For more information: https://oss.redis.com/redistimeseries/master/commands/#tsmrangetsmrevrange
+ """ # noqa
params = self.__mrange_params(
aggregation_type,
bucket_size_msec,
@@ -652,15 +656,17 @@ class TimeSeriesCommands:
def get(self, key):
""" # noqa
Get the last sample of `key`.
- For more information see `TS.GET <https://oss.redis.com/redistimeseries/master/commands/#tsget>`_.
- """
+
+ For more information: https://oss.redis.com/redistimeseries/master/commands/#tsget
+ """ # noqa
return self.execute_command(GET_CMD, key)
def mget(self, filters, with_labels=False):
""" # noqa
Get the last samples matching the specific `filter`.
- For more information see `TS.MGET <https://oss.redis.com/redistimeseries/master/commands/#tsmget>`_.
- """
+
+ For more information: https://oss.redis.com/redistimeseries/master/commands/#tsmget
+ """ # noqa
params = []
self._appendWithLabels(params, with_labels)
params.extend(["FILTER"])
@@ -670,15 +676,17 @@ class TimeSeriesCommands:
def info(self, key):
""" # noqa
Get information of `key`.
- For more information see `TS.INFO <https://oss.redis.com/redistimeseries/master/commands/#tsinfo>`_.
- """
+
+ For more information: https://oss.redis.com/redistimeseries/master/commands/#tsinfo
+ """ # noqa
return self.execute_command(INFO_CMD, key)
def queryindex(self, filters):
""" # noqa
Get all the keys matching the `filter` list.
- For more information see `TS.QUERYINDEX <https://oss.redis.com/redistimeseries/master/commands/#tsqueryindex>`_.
- """
+
+ For more information: https://oss.redis.com/redistimeseries/master/commands/#tsqueryindex
+ """ # noq
return self.execute_command(QUERYINDEX_CMD, *filters)
@staticmethod
diff --git a/redis/commands/timeseries/info.py b/redis/commands/timeseries/info.py
index 3b89503..2b8acd1 100644
--- a/redis/commands/timeseries/info.py
+++ b/redis/commands/timeseries/info.py
@@ -2,7 +2,7 @@ from .utils import list_to_dict
from ..helpers import nativestr
-class TSInfo(object):
+class TSInfo:
"""
Hold information and statistics on the time-series.
Can be created using ``tsinfo`` command
diff --git a/redis/connection.py b/redis/connection.py
index e01742d..ef3a667 100755
--- a/redis/connection.py
+++ b/redis/connection.py
@@ -1,4 +1,4 @@
-from distutils.version import LooseVersion
+from packaging.version import Version
from itertools import chain
from time import time
from queue import LifoQueue, Empty, Full
@@ -11,6 +11,7 @@ import socket
import threading
import weakref
+from redis.backoff import NoBackoff
from redis.exceptions import (
AuthenticationError,
AuthenticationWrongNumberOfArgsError,
@@ -28,9 +29,9 @@ from redis.exceptions import (
TimeoutError,
ModuleError,
)
-from redis.utils import HIREDIS_AVAILABLE, str_if_bytes
-from redis.backoff import NoBackoff
+
from redis.retry import Retry
+from redis.utils import HIREDIS_AVAILABLE, str_if_bytes
try:
import ssl
@@ -54,13 +55,13 @@ NONBLOCKING_EXCEPTIONS = tuple(NONBLOCKING_EXCEPTION_ERROR_NUMBERS.keys())
if HIREDIS_AVAILABLE:
import hiredis
- hiredis_version = LooseVersion(hiredis.__version__)
+ hiredis_version = Version(hiredis.__version__)
HIREDIS_SUPPORTS_CALLABLE_ERRORS = \
- hiredis_version >= LooseVersion('0.1.3')
+ hiredis_version >= Version('0.1.3')
HIREDIS_SUPPORTS_BYTE_BUFFER = \
- hiredis_version >= LooseVersion('0.1.4')
+ hiredis_version >= Version('0.1.4')
HIREDIS_SUPPORTS_ENCODING_ERRORS = \
- hiredis_version >= LooseVersion('1.0.0')
+ hiredis_version >= Version('1.0.0')
HIREDIS_USE_BYTE_BUFFER = True
# only use byte buffer if hiredis supports it
@@ -106,8 +107,8 @@ class Encoder:
elif not isinstance(value, str):
# a value we don't know how to deal with. throw an error
typename = type(value).__name__
- raise DataError("Invalid input of type: '%s'. Convert to a "
- "bytes, string, int or float first." % typename)
+ raise DataError(f"Invalid input of type: '{typename}'. "
+ f"Convert to a bytes, string, int or float first.")
if isinstance(value, str):
value = value.encode(self.encoding, self.encoding_errors)
return value
@@ -213,8 +214,7 @@ class SocketBuffer:
allowed = NONBLOCKING_EXCEPTION_ERROR_NUMBERS.get(ex.__class__, -1)
if not raise_on_timeout and ex.errno == allowed:
return False
- raise ConnectionError("Error while reading from socket: %s" %
- (ex.args,))
+ raise ConnectionError(f"Error while reading from socket: {ex.args}")
finally:
if custom_timeout:
sock.settimeout(self.socket_timeout)
@@ -314,7 +314,7 @@ class PythonParser(BaseParser):
def can_read(self, timeout):
return self._buffer and self._buffer.can_read(timeout)
- def read_response(self):
+ def read_response(self, disable_decoding=False):
raw = self._buffer.readline()
if not raw:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
@@ -322,7 +322,7 @@ class PythonParser(BaseParser):
byte, response = raw[:1], raw[1:]
if byte not in (b'-', b'+', b':', b'$', b'*'):
- raise InvalidResponse("Protocol Error: %r" % raw)
+ raise InvalidResponse(f"Protocol Error: {raw!r}")
# server returned an error
if byte == b'-':
@@ -354,8 +354,9 @@ class PythonParser(BaseParser):
length = int(response)
if length == -1:
return None
- response = [self.read_response() for i in range(length)]
- if isinstance(response, bytes):
+ response = [self.read_response(disable_decoding=disable_decoding)
+ for i in range(length)]
+ if isinstance(response, bytes) and disable_decoding is False:
response = self.encoder.decode(response)
return response
@@ -443,13 +444,12 @@ class HiredisParser(BaseParser):
allowed = NONBLOCKING_EXCEPTION_ERROR_NUMBERS.get(ex.__class__, -1)
if not raise_on_timeout and ex.errno == allowed:
return False
- raise ConnectionError("Error while reading from socket: %s" %
- (ex.args,))
+ raise ConnectionError(f"Error while reading from socket: {ex.args}")
finally:
if custom_timeout:
sock.settimeout(self._socket_timeout)
- def read_response(self):
+ def read_response(self, disable_decoding=False):
if not self._reader:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
@@ -498,7 +498,7 @@ class Connection:
encoding_errors='strict', decode_responses=False,
parser_class=DefaultParser, socket_read_size=65536,
health_check_interval=0, client_name=None, username=None,
- retry=None):
+ retry=None, redis_connect_func=None):
"""
Initialize a new Connection.
To specify a retry policy, first set `retry_on_timeout` to `True`
@@ -528,14 +528,16 @@ class Connection:
self.health_check_interval = health_check_interval
self.next_health_check = 0
self.encoder = Encoder(encoding, encoding_errors, decode_responses)
+ self.redis_connect_func = redis_connect_func
self._sock = None
- self._parser = parser_class(socket_read_size=socket_read_size)
+ self._socket_read_size = socket_read_size
+ self.set_parser(parser_class)
self._connect_callbacks = []
self._buffer_cutoff = 6000
def __repr__(self):
- repr_args = ','.join(['%s=%s' % (k, v) for k, v in self.repr_pieces()])
- return '%s<%s>' % (self.__class__.__name__, repr_args)
+ repr_args = ','.join([f'{k}={v}' for k, v in self.repr_pieces()])
+ return f'{self.__class__.__name__}<{repr_args}>'
def repr_pieces(self):
pieces = [
@@ -559,6 +561,14 @@ class Connection:
def clear_connect_callbacks(self):
self._connect_callbacks = []
+ def set_parser(self, parser_class):
+ """
+ Create a new instance of parser_class with this connection's socket
+ read size and assign it as the connection's parser.
+ :param parser_class: The required parser class
+ """
+ self._parser = parser_class(socket_read_size=self._socket_read_size)
+
def connect(self):
"Connects to the Redis server if not already connected"
if self._sock:
@@ -567,12 +577,17 @@ class Connection:
sock = self._connect()
except socket.timeout:
raise TimeoutError("Timeout connecting to server")
- except socket.error as e:
+ except OSError as e:
raise ConnectionError(self._error_message(e))
self._sock = sock
try:
- self.on_connect()
+ if self.redis_connect_func is None:
+ # Use the default on_connect function
+ self.on_connect()
+ else:
+ # Use the passed function redis_connect_func
+ self.redis_connect_func(self)
except RedisError:
# clean up after any error in on_connect
self.disconnect()
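The new redis_connect_func hook lets a caller replace the default on_connect() handshake; the cluster client, for example, uses it to run extra setup on replica connections. A sketch of a custom callback (the function and client name below are hypothetical, not part of the library):

    import redis
    from redis.utils import str_if_bytes

    def my_connect(connection):
        # Hypothetical handshake: run the default setup, then set a client name
        connection.on_connect()
        connection.send_command("CLIENT", "SETNAME", "my-app")
        if str_if_bytes(connection.read_response()) != "OK":
            raise redis.ConnectionError("CLIENT SETNAME failed")

    pool = redis.ConnectionPool(redis_connect_func=my_connect)
    r = redis.Redis(connection_pool=pool)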
@@ -629,11 +644,12 @@ class Connection:
# args for socket.error can either be (errno, "message")
# or just "message"
if len(exception.args) == 1:
- return "Error connecting to %s:%s. %s." % \
- (self.host, self.port, exception.args[0])
+ return f"Error connecting to {self.host}:{self.port}. {exception.args[0]}."
else:
- return "Error %s connecting to %s:%s. %s." % \
- (exception.args[0], self.host, self.port, exception.args[1])
+ return (
+ f"Error {exception.args[0]} connecting to "
+ f"{self.host}:{self.port}. {exception.args[1]}."
+ )
def on_connect(self):
"Initialize the connection, authenticate and select a database"
@@ -717,15 +733,14 @@ class Connection:
except socket.timeout:
self.disconnect()
raise TimeoutError("Timeout writing to socket")
- except socket.error as e:
+ except OSError as e:
self.disconnect()
if len(e.args) == 1:
errno, errmsg = 'UNKNOWN', e.args[0]
else:
errno = e.args[0]
errmsg = e.args[1]
- raise ConnectionError("Error %s while writing to socket. %s." %
- (errno, errmsg))
+ raise ConnectionError(f"Error {errno} while writing to socket. {errmsg}.")
except BaseException:
self.disconnect()
raise
@@ -742,18 +757,20 @@ class Connection:
self.connect()
return self._parser.can_read(timeout)
- def read_response(self):
+ def read_response(self, disable_decoding=False):
"""Read the response from a previously sent command"""
try:
- response = self._parser.read_response()
+ response = self._parser.read_response(
+ disable_decoding=disable_decoding
+ )
except socket.timeout:
self.disconnect()
- raise TimeoutError("Timeout reading from %s:%s" %
- (self.host, self.port))
- except socket.error as e:
+ raise TimeoutError(f"Timeout reading from {self.host}:{self.port}")
+ except OSError as e:
self.disconnect()
- raise ConnectionError("Error while reading from %s:%s : %s" %
- (self.host, self.port, e.args))
+ raise ConnectionError(
+ f"Error while reading from {self.host}:{self.port}"
+ f" : {e.args}")
except BaseException:
self.disconnect()
raise
@@ -848,8 +865,7 @@ class SSLConnection(Connection):
}
if ssl_cert_reqs not in CERT_REQS:
raise RedisError(
- "Invalid SSL Certificate Requirements Flag: %s" %
- ssl_cert_reqs)
+ f"Invalid SSL Certificate Requirements Flag: {ssl_cert_reqs}")
ssl_cert_reqs = CERT_REQS[ssl_cert_reqs]
self.cert_reqs = ssl_cert_reqs
self.ca_certs = ssl_ca_certs
@@ -903,7 +919,8 @@ class UnixDomainSocketConnection(Connection):
self.next_health_check = 0
self.encoder = Encoder(encoding, encoding_errors, decode_responses)
self._sock = None
- self._parser = parser_class(socket_read_size=socket_read_size)
+ self._socket_read_size = socket_read_size
+ self.set_parser(parser_class)
self._connect_callbacks = []
self._buffer_cutoff = 6000
@@ -927,11 +944,12 @@ class UnixDomainSocketConnection(Connection):
# args for socket.error can either be (errno, "message")
# or just "message"
if len(exception.args) == 1:
- return "Error connecting to unix socket: %s. %s." % \
- (self.path, exception.args[0])
+ return f"Error connecting to unix socket: {self.path}. {exception.args[0]}."
else:
- return "Error %s connecting to unix socket: %s. %s." % \
- (exception.args[0], self.path, exception.args[1])
+ return (
+ f"Error {exception.args[0]} connecting to unix socket: "
+ f"{self.path}. {exception.args[1]}."
+ )
FALSE_STRINGS = ('0', 'F', 'FALSE', 'N', 'NO')
@@ -970,7 +988,7 @@ def parse_url(url):
kwargs[name] = parser(value)
except (TypeError, ValueError):
raise ValueError(
- "Invalid value for `%s` in connection URL." % name
+ f"Invalid value for `{name}` in connection URL."
)
else:
kwargs[name] = value
@@ -1003,9 +1021,8 @@ def parse_url(url):
if url.scheme == 'rediss':
kwargs['connection_class'] = SSLConnection
else:
- valid_schemes = 'redis://, rediss://, unix://'
raise ValueError('Redis URL must specify one of the following '
- 'schemes (%s)' % valid_schemes)
+ 'schemes (redis://, rediss://, unix://)')
return kwargs
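parse_url() backs Redis.from_url(); the URL scheme selects the connection class. A short sketch of the three accepted schemes (hosts and paths are placeholders):

    import redis

    r1 = redis.Redis.from_url("redis://localhost:6379/0")     # plain TCP
    r2 = redis.Redis.from_url("rediss://localhost:6379/0")    # TLS, SSLConnection
    r3 = redis.Redis.from_url("unix:///tmp/redis.sock?db=0")  # unix domain socket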
@@ -1089,9 +1106,9 @@ class ConnectionPool:
self.reset()
def __repr__(self):
- return "%s<%s>" % (
- type(self).__name__,
- repr(self.connection_class(**self.connection_kwargs)),
+ return (
+ f"{type(self).__name__}"
+ f"<{repr(self.connection_class(**self.connection_kwargs))}>"
)
def reset(self):
diff --git a/redis/crc.py b/redis/crc.py
new file mode 100644
index 0000000..7d2ee50
--- /dev/null
+++ b/redis/crc.py
@@ -0,0 +1,24 @@
+from binascii import crc_hqx
+
+# Redis Cluster's key space is divided into 16384 slots.
+# For more information see: https://github.com/redis/redis/issues/2576
+REDIS_CLUSTER_HASH_SLOTS = 16384
+
+__all__ = [
+ "key_slot",
+ "REDIS_CLUSTER_HASH_SLOTS"
+]
+
+
+def key_slot(key, bucket=REDIS_CLUSTER_HASH_SLOTS):
+ """Calculate key slot for a given key.
+ See Keys distribution model in https://redis.io/topics/cluster-spec
+ :param key - bytes
+ :param bucket - int
+ """
+ start = key.find(b"{")
+ if start > -1:
+ end = key.find(b"}", start + 1)
+ if end > -1 and end != start + 1:
+ key = key[start + 1: end]
+ return crc_hqx(key, 0) % bucket
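key_slot() implements the cluster key-distribution model, including the hash-tag rule: if the key contains a non-empty {...} section, only that section is hashed. A small sketch:

    from redis.crc import key_slot

    # Keys sharing a hash tag always land in the same slot
    assert key_slot(b"{user:1000}.following") == key_slot(b"{user:1000}.followers")

    # Without a hash tag the whole key is hashed
    print(key_slot(b"foo"))   # -> 12182, matching CLUSTER KEYSLOT foo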
diff --git a/redis/exceptions.py b/redis/exceptions.py
index 91eb3c7..eb6ecc2 100644
--- a/redis/exceptions.py
+++ b/redis/exceptions.py
@@ -84,3 +84,105 @@ class AuthenticationWrongNumberOfArgsError(ResponseError):
were sent to the AUTH command
"""
pass
+
+
+class RedisClusterException(Exception):
+ """
+ Base exception for the RedisCluster client
+ """
+ pass
+
+
+class ClusterError(RedisError):
+ """
+ Cluster errors occurred multiple times, resulting in an exhaustion of the
+ command execution TTL
+ """
+ pass
+
+
+class ClusterDownError(ClusterError, ResponseError):
+ """
+ Error raised when a CLUSTERDOWN error is received from the cluster.
+ By default Redis Cluster nodes stop accepting queries if they detect that
+ at least one hash slot is uncovered (no available node is serving it).
+ This way, if the cluster is partially down (for example, a range of hash
+ slots is no longer covered), the entire cluster eventually becomes
+ unavailable. It automatically becomes available again as soon as all the
+ slots are covered.
+ """
+ def __init__(self, resp):
+ self.args = (resp,)
+ self.message = resp
+
+
+class AskError(ResponseError):
+ """
+ Error raised when an ASK error is received from the cluster.
+ When a slot is set as MIGRATING, the node will accept all queries that
+ pertain to this hash slot, but only if the key in question exists,
+ otherwise the query is forwarded using a -ASK redirection to the node that
+ is target of the migration.
+ src node: MIGRATING to dst node
+ get > ASK error
+ ask dst node > ASKING command
+ dst node: IMPORTING from src node
+ asking command only affects next command
+ any op will be allowed after asking command
+ """
+
+ def __init__(self, resp):
+ """should only redirect to master node"""
+ self.args = (resp,)
+ self.message = resp
+ slot_id, new_node = resp.split(' ')
+ host, port = new_node.rsplit(':', 1)
+ self.slot_id = int(slot_id)
+ self.node_addr = self.host, self.port = host, int(port)
+
+
+class TryAgainError(ResponseError):
+ """
+ Error raised when a TRYAGAIN error is received from the cluster.
+ During resharding, operations on keys that don't exist or that are split
+ between the source and destination nodes generate a -TRYAGAIN error.
+ """
+ def __init__(self, *args, **kwargs):
+ pass
+
+
+class ClusterCrossSlotError(ResponseError):
+ """
+ Error raised when a CROSSSLOT error is received from the cluster.
+ A CROSSSLOT error is generated when keys in a request don't hash to the
+ same slot.
+ """
+ message = "Keys in request don't hash to the same slot"
+
+
+class MovedError(AskError):
+ """
+ Error raised when a MOVED error is received from the cluster.
+ A request sent to a node that doesn't serve the key is answered with a
+ MOVED error that points to the correct node.
+ """
+ pass
+
+
+class MasterDownError(ClusterDownError):
+ """
+ Error raised when a MASTERDOWN error is received from the cluster.
+ Link with MASTER is down and replica-serve-stale-data is set to 'no'.
+ """
+ pass
+
+
+class SlotNotCoveredError(RedisClusterException):
+ """
+ This error only happens when the connection pool tries to look up which
+ node covers a given slot.
+
+ If this error is raised the client should drop the current node layout and
+ attempt to reconnect and refresh the node layout again
+ """
+ pass
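These exceptions surface cluster redirections and outages to calling code. A sketch of how an application might react to two of them (the port and key names are placeholders, and the retry decision shown is illustrative, not built-in behavior):

    import redis
    from redis.exceptions import ClusterCrossSlotError, ClusterDownError

    rc = redis.RedisCluster(host="localhost", port=16379)
    try:
        # Keys must share a hash tag, otherwise the server answers CROSSSLOT
        rc.mget("{user:1}:a", "{user:1}:b")
    except ClusterCrossSlotError:
        print("keys in one request must hash to the same slot")
    except ClusterDownError:
        print("cluster temporarily unavailable; retry after a short sleep")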
diff --git a/redis/sentinel.py b/redis/sentinel.py
index 17dd75b..06877bd 100644
--- a/redis/sentinel.py
+++ b/redis/sentinel.py
@@ -24,9 +24,9 @@ class SentinelManagedConnection(Connection):
def __repr__(self):
pool = self.connection_pool
- s = '%s<service=%s%%s>' % (type(self).__name__, pool.service_name)
+ s = f'{type(self).__name__}<service={pool.service_name}%s>'
if self.host:
- host_info = ',host=%s,port=%s' % (self.host, self.port)
+ host_info = f',host={self.host},port={self.port}'
s = s % host_info
return s
@@ -51,9 +51,9 @@ class SentinelManagedConnection(Connection):
continue
raise SlaveNotFoundError # Never be here
- def read_response(self):
+ def read_response(self, disable_decoding=False):
try:
- return super().read_response()
+ return super().read_response(disable_decoding=disable_decoding)
except ReadOnlyError:
if self.connection_pool.is_master:
# When talking to a master, a ReadOnlyError likely
@@ -91,11 +91,8 @@ class SentinelConnectionPool(ConnectionPool):
self.sentinel_manager = sentinel_manager
def __repr__(self):
- return "%s<service=%s(%s)" % (
- type(self).__name__,
- self.service_name,
- self.is_master and 'master' or 'slave',
- )
+ role = 'master' if self.is_master else 'slave'
+ return f"{type(self).__name__}<service={self.service_name}({role})"
def reset(self):
super().reset()
@@ -106,7 +103,7 @@ class SentinelConnectionPool(ConnectionPool):
check = not self.is_master or \
(self.is_master and
self.master_address == (connection.host, connection.port))
- parent = super(SentinelConnectionPool, self)
+ parent = super()
return check and parent.owns_connection(connection)
def get_master_address(self):
@@ -136,10 +133,10 @@ class SentinelConnectionPool(ConnectionPool):
yield self.get_master_address()
except MasterNotFoundError:
pass
- raise SlaveNotFoundError('No slave found for %r' % (self.service_name))
+ raise SlaveNotFoundError(f'No slave found for {self.service_name!r}')
-class Sentinel(SentinelCommands, object):
+class Sentinel(SentinelCommands):
"""
Redis Sentinel cluster client
@@ -205,13 +202,10 @@ class Sentinel(SentinelCommands, object):
def __repr__(self):
sentinel_addresses = []
for sentinel in self.sentinels:
- sentinel_addresses.append('%s:%s' % (
- sentinel.connection_pool.connection_kwargs['host'],
- sentinel.connection_pool.connection_kwargs['port'],
+ sentinel_addresses.append('{host}:{port}'.format_map(
+ sentinel.connection_pool.connection_kwargs,
))
- return '%s<sentinels=[%s]>' % (
- type(self).__name__,
- ','.join(sentinel_addresses))
+ return f'{type(self).__name__}<sentinels=[{",".join(sentinel_addresses)}]>'
def check_master_state(self, state, service_name):
if not state['is_master'] or state['is_sdown'] or state['is_odown']:
@@ -240,7 +234,7 @@ class Sentinel(SentinelCommands, object):
self.sentinels[0], self.sentinels[sentinel_no] = (
sentinel, self.sentinels[0])
return state['ip'], state['port']
- raise MasterNotFoundError("No master found for %r" % (service_name,))
+ raise MasterNotFoundError(f"No master found for {service_name!r}")
def filter_slaves(self, slaves):
"Remove slaves that are in an ODOWN or SDOWN state"
diff --git a/redis/utils.py b/redis/utils.py
index 26fb002..0e78cc5 100644
--- a/redis/utils.py
+++ b/redis/utils.py
@@ -36,3 +36,39 @@ def str_if_bytes(value):
def safe_str(value):
return str(str_if_bytes(value))
+
+
+def dict_merge(*dicts):
+ """
+ Merge all provided dicts into 1 dict.
+ *dicts : `dict`
+ dictionaries to merge
+ """
+ merged = {}
+
+ for d in dicts:
+ merged.update(d)
+
+ return merged
+
+
+def list_keys_to_dict(key_list, callback):
+ return dict.fromkeys(key_list, callback)
+
+
+def merge_result(command, res):
+ """
+ Merge all items in `res` into a list.
+
+ This command is used when sending a command to multiple nodes
+ and they result from each node should be merged into a single list.
+
+ res : 'dict'
+ """
+ result = set()
+
+ for v in res.values():
+ for value in v:
+ result.add(value)
+
+ return list(result)
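dict_merge and merge_result are small helpers the cluster client uses to combine per-node replies. A quick behavioral sketch (the node names are placeholders):

    from redis.utils import dict_merge, merge_result

    print(dict_merge({"a": 1}, {"b": 2}, {"a": 3}))   # -> {'a': 3, 'b': 2}

    # Replies keyed by node name, merged into one de-duplicated list
    res = {"node1:6379": ["k1", "k2"], "node2:6379": ["k2", "k3"]}
    print(sorted(merge_result("KEYS", res)))          # -> ['k1', 'k2', 'k3']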
diff --git a/requirements.txt b/requirements.txt
index 9f8d550..f1e7e7e 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1 +1,2 @@
deprecated
+packaging
diff --git a/setup.py b/setup.py
index 6c712bd..9acb501 100644
--- a/setup.py
+++ b/setup.py
@@ -24,7 +24,8 @@ setup(
author_email="oss@redis.com",
python_requires=">=3.6",
install_requires=[
- 'deprecated'
+ 'deprecated==1.2.3',
+ 'packaging==21.3',
],
classifiers=[
"Development Status :: 5 - Production/Stable",
diff --git a/tasks.py b/tasks.py
index 306291c..8d9c4c6 100644
--- a/tasks.py
+++ b/tasks.py
@@ -16,11 +16,17 @@ def devenv(c):
clean(c)
cmd = 'tox -e devenv'
for d in dockers:
- cmd += " --docker-dont-stop={}".format(d)
+ cmd += f" --docker-dont-stop={d}"
run(cmd)
@task
+def build_docs(c):
+ """Generates the sphinx documentation."""
+ run("tox -e docs")
+
+
+@task
def linters(c):
"""Run code linters"""
run("tox -e linters")
@@ -40,7 +46,24 @@ def tests(c):
"""Run the redis-py test suite against the current python,
with and without hiredis.
"""
- run("tox -e plain -e hiredis")
+ print("Starting Redis tests")
+ run("tox -e '{standalone,cluster}'-'{plain,hiredis}'")
+
+
+@task
+def standalone_tests(c):
+ """Run all Redis tests against the current python,
+ with and without hiredis."""
+ print("Starting Redis tests")
+ run("tox -e standalone-'{hiredis}'")
+
+
+@task
+def cluster_tests(c):
+ """Run all Redis Cluster tests against the current python,
+ with and without hiredis."""
+ print("Starting RedisCluster tests")
+ run("tox -e cluster-'{plain,hiredis}'")
@task
@@ -50,7 +73,7 @@ def clean(c):
shutil.rmtree("build")
if os.path.isdir("dist"):
shutil.rmtree("dist")
- run("docker rm -f {}".format(' '.join(dockers)))
+ run(f"docker rm -f {' '.join(dockers)}")
@task
diff --git a/tests/conftest.py b/tests/conftest.py
index 9504333..8ed39ab 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -3,8 +3,10 @@ from redis.retry import Retry
import pytest
import random
import redis
+import time
from distutils.version import LooseVersion
from redis.connection import parse_url
+from redis.exceptions import RedisClusterException
from unittest.mock import Mock
from urllib.parse import urlparse
@@ -13,6 +15,7 @@ REDIS_INFO = {}
default_redis_url = "redis://localhost:6379/9"
default_redismod_url = "redis://localhost:36379"
+default_cluster_nodes = 6
def pytest_addoption(parser):
@@ -27,11 +30,17 @@ def pytest_addoption(parser):
" with loaded modules,"
" defaults to `%(default)s`")
+ parser.addoption('--redis-cluster-nodes', default=default_cluster_nodes,
+ action="store",
+ help="The number of cluster nodes that need to be "
+ "available before the test can start,"
+ " defaults to `%(default)s`")
+
def _get_info(redis_url):
client = redis.Redis.from_url(redis_url)
info = client.info()
- cmds = [c[0].upper().decode() for c in client.command()]
+ cmds = [command.upper() for command in client.command().keys()]
if 'dping' in cmds:
info["enterprise"] = True
else:
@@ -45,8 +54,10 @@ def pytest_sessionstart(session):
info = _get_info(redis_url)
version = info["redis_version"]
arch_bits = info["arch_bits"]
+ cluster_enabled = info["cluster_enabled"]
REDIS_INFO["version"] = version
REDIS_INFO["arch_bits"] = arch_bits
+ REDIS_INFO["cluster_enabled"] = cluster_enabled
REDIS_INFO["enterprise"] = info["enterprise"]
# module info, if the second redis is running
@@ -59,13 +70,47 @@ def pytest_sessionstart(session):
except KeyError:
pass
+ if cluster_enabled:
+ cluster_nodes = session.config.getoption("--redis-cluster-nodes")
+ wait_for_cluster_creation(redis_url, cluster_nodes)
+
+
+def wait_for_cluster_creation(redis_url, cluster_nodes, timeout=20):
+ """
+ Waits for the cluster creation to complete.
+ As soon as all :cluster_nodes: nodes become available, the cluster will be
+ considered ready.
+ :param redis_url: the cluster's url, e.g. redis://localhost:16379/0
+ :param cluster_nodes: The number of nodes in the cluster
+ :param timeout: the amount of time to wait (in seconds)
+ """
+ now = time.time()
+ end_time = now + timeout
+ client = None
+ print(f"Waiting for {cluster_nodes} cluster nodes to become available")
+ while now < end_time:
+ try:
+ client = redis.RedisCluster.from_url(redis_url)
+ if len(client.get_nodes()) == cluster_nodes:
+ print("All nodes are available!")
+ break
+ except RedisClusterException:
+ pass
+ time.sleep(1)
+ now = time.time()
+ if now >= end_time:
+ available_nodes = 0 if client is None else len(client.get_nodes())
+ raise RedisClusterException(
+ f"The cluster did not become available after {timeout} seconds. "
+ f"Only {available_nodes} nodes out of {cluster_nodes} are available")
+
def skip_if_server_version_lt(min_version):
redis_version = REDIS_INFO["version"]
check = LooseVersion(redis_version) < LooseVersion(min_version)
return pytest.mark.skipif(
check,
- reason="Redis version required >= {}".format(min_version))
+ reason=f"Redis version required >= {min_version}")
def skip_if_server_version_gte(min_version):
@@ -73,12 +118,12 @@ def skip_if_server_version_gte(min_version):
check = LooseVersion(redis_version) >= LooseVersion(min_version)
return pytest.mark.skipif(
check,
- reason="Redis version required < {}".format(min_version))
+ reason=f"Redis version required < {min_version}")
def skip_unless_arch_bits(arch_bits):
return pytest.mark.skipif(REDIS_INFO["arch_bits"] != arch_bits,
- reason="server is not {}-bit".format(arch_bits))
+ reason=f"server is not {arch_bits}-bit")
def skip_ifmodversion_lt(min_version: str, module_name: str):
@@ -97,18 +142,17 @@ def skip_ifmodversion_lt(min_version: str, module_name: str):
check = version < mv
return pytest.mark.skipif(check, reason="Redis module version")
- raise AttributeError("No redis module named {}".format(module_name))
+ raise AttributeError(f"No redis module named {module_name}")
def skip_if_redis_enterprise(func):
check = REDIS_INFO["enterprise"] is True
- return pytest.mark.skipif(check, reason="Redis enterprise"
- )
+ return pytest.mark.skipif(check, reason="Redis enterprise")
def skip_ifnot_redis_enterprise(func):
check = REDIS_INFO["enterprise"] is False
- return pytest.mark.skipif(check, reason="Redis enterprise")
+ return pytest.mark.skipif(check, reason="Not running in redis enterprise")
def _get_client(cls, request, single_connection_client=True, flushdb=True,
@@ -125,27 +169,47 @@ def _get_client(cls, request, single_connection_client=True, flushdb=True,
redis_url = request.config.getoption("--redis-url")
else:
redis_url = from_url
- url_options = parse_url(redis_url)
- url_options.update(kwargs)
- pool = redis.ConnectionPool(**url_options)
- client = cls(connection_pool=pool)
+ cluster_mode = REDIS_INFO["cluster_enabled"]
+ if not cluster_mode:
+ url_options = parse_url(redis_url)
+ url_options.update(kwargs)
+ pool = redis.ConnectionPool(**url_options)
+ client = cls(connection_pool=pool)
+ else:
+ client = redis.RedisCluster.from_url(redis_url, **kwargs)
+ single_connection_client = False
if single_connection_client:
client = client.client()
if request:
def teardown():
- if flushdb:
- try:
- client.flushdb()
- except redis.ConnectionError:
- # handle cases where a test disconnected a client
- # just manually retry the flushdb
- client.flushdb()
- client.close()
- client.connection_pool.disconnect()
+ if not cluster_mode:
+ if flushdb:
+ try:
+ client.flushdb()
+ except redis.ConnectionError:
+ # handle cases where a test disconnected a client
+ # just manually retry the flushdb
+ client.flushdb()
+ client.close()
+ client.connection_pool.disconnect()
+ else:
+ cluster_teardown(client, flushdb)
request.addfinalizer(teardown)
return client
+def cluster_teardown(client, flushdb):
+ if flushdb:
+ try:
+ client.flushdb(target_nodes='primaries')
+ except redis.ConnectionError:
+ # handle cases where a test disconnected a client
+ # just manually retry the flushdb
+ client.flushdb(target_nodes='primaries')
+ client.close()
+ client.disconnect_connection_pools()
+
+
# specifically set to the zero database, because creating
# an index on db != 0 raises a ResponseError in redis
@pytest.fixture()
@@ -254,8 +318,8 @@ def wait_for_command(client, monitor, command):
if LooseVersion(redis_version) >= LooseVersion('5.0.0'):
id_str = str(client.client_id())
else:
- id_str = '%08x' % random.randrange(2**32)
- key = '__REDIS-PY-%s__' % id_str
+ id_str = f'{random.randrange(2 ** 32):08x}'
+ key = f'__REDIS-PY-{id_str}__'
client.get(key)
while True:
monitor_response = monitor.next_command()
diff --git a/tests/test_cluster.py b/tests/test_cluster.py
new file mode 100644
index 0000000..d12e47e
--- /dev/null
+++ b/tests/test_cluster.py
@@ -0,0 +1,2477 @@
+import binascii
+import datetime
+import pytest
+import warnings
+
+from time import sleep
+from tests.test_pubsub import wait_for_message
+from unittest.mock import call, patch, DEFAULT, Mock
+from redis import Redis
+from redis.cluster import get_node_name, ClusterNode, \
+ RedisCluster, NodesManager, PRIMARY, REDIS_CLUSTER_HASH_SLOTS, REPLICA
+from redis.commands import CommandsParser
+from redis.connection import Connection
+from redis.utils import str_if_bytes
+from redis.exceptions import (
+ AskError,
+ ClusterDownError,
+ DataError,
+ MovedError,
+ RedisClusterException,
+ RedisError
+)
+
+from redis.crc import key_slot
+from .conftest import (
+ _get_client,
+ skip_if_server_version_lt,
+ skip_unless_arch_bits
+)
+
+default_host = "127.0.0.1"
+default_port = 7000
+default_cluster_slots = [
+ [
+ 0, 8191,
+ ['127.0.0.1', 7000, 'node_0'],
+ ['127.0.0.1', 7003, 'node_3'],
+ ],
+ [
+ 8192, 16383,
+ ['127.0.0.1', 7001, 'node_1'],
+ ['127.0.0.1', 7002, 'node_2']
+ ]
+]
+
+
+@pytest.fixture()
+def slowlog(request, r):
+ """
+ Set the slowlog threshold to 0, and the
+ max length to 128. This will force every
+ command into the slowlog and allow us
+ to test it
+ """
+ # Save old values
+ current_config = r.config_get(
+ target_nodes=r.get_primaries()[0])
+ old_slower_than_value = current_config['slowlog-log-slower-than']
+ old_max_length_value = current_config['slowlog-max-len']
+
+ # Function to restore the old values
+ def cleanup():
+ r.config_set('slowlog-log-slower-than', old_slower_than_value)
+ r.config_set('slowlog-max-len', old_max_length_value)
+
+ request.addfinalizer(cleanup)
+
+ # Set the new values
+ r.config_set('slowlog-log-slower-than', 0)
+ r.config_set('slowlog-max-len', 128)
+
+
+def get_mocked_redis_client(func=None, *args, **kwargs):
+ """
+ Return a stable RedisCluster object that has a deterministic nodes and
+ slots setup, removing the problem of different IP addresses on
+ different installations and machines.
+ """
+ cluster_slots = kwargs.pop('cluster_slots', default_cluster_slots)
+ coverage_res = kwargs.pop('coverage_result', 'yes')
+ with patch.object(Redis, 'execute_command') as execute_command_mock:
+ def execute_command(*_args, **_kwargs):
+ if _args[0] == 'CLUSTER SLOTS':
+ mock_cluster_slots = cluster_slots
+ return mock_cluster_slots
+ elif _args[0] == 'COMMAND':
+ return {'get': [], 'set': []}
+ elif _args[1] == 'cluster-require-full-coverage':
+ return {'cluster-require-full-coverage': coverage_res}
+ elif func is not None:
+ return func(*args, **kwargs)
+ else:
+ return execute_command_mock(*_args, **_kwargs)
+
+ execute_command_mock.side_effect = execute_command
+
+ with patch.object(CommandsParser, 'initialize',
+ autospec=True) as cmd_parser_initialize:
+
+ def cmd_init_mock(self, r):
+ self.commands = {'get': {'name': 'get', 'arity': 2,
+ 'flags': ['readonly',
+ 'fast'],
+ 'first_key_pos': 1,
+ 'last_key_pos': 1,
+ 'step_count': 1}}
+
+ cmd_parser_initialize.side_effect = cmd_init_mock
+
+ return RedisCluster(*args, **kwargs)
+
+
+def mock_node_resp(node, response):
+ connection = Mock()
+ connection.read_response.return_value = response
+ node.redis_connection.connection = connection
+ return node
+
+
+def mock_node_resp_func(node, func):
+ connection = Mock()
+ connection.read_response.side_effect = func
+ node.redis_connection.connection = connection
+ return node
+
+
+def mock_all_nodes_resp(rc, response):
+ for node in rc.get_nodes():
+ mock_node_resp(node, response)
+ return rc
+
+
+def find_node_ip_based_on_port(cluster_client, port):
+ for node in cluster_client.get_nodes():
+ if node.port == port:
+ return node.host
+
+
+def moved_redirection_helper(request, failover=False):
+ """
+ Test that the client handles a MOVED response.
+ Redirection after a failover means that the redirection address is of a
+ replica that was promoted to a primary.
+
+ At the first call it should return a MOVED ResponseError that will point
+ the client to the next server it should talk to.
+
+ Verify that:
+ 1. it tries to talk to the redirected node
+ 2. it updates the slot's primary to the redirected node
+
+ For a failover, also verify:
+ 3. the redirected node's server type updated to 'primary'
+ 4. the server type of the previous slot owner updated to 'replica'
+ """
+ rc = _get_client(RedisCluster, request, flushdb=False)
+ slot = 12182
+ redirect_node = None
+ # Get the current primary that holds this slot
+ prev_primary = rc.nodes_manager.get_node_from_slot(slot)
+ if failover:
+ if len(rc.nodes_manager.slots_cache[slot]) < 2:
+ warnings.warn("Skipping this test since it requires to have a "
+ "replica")
+ return
+ redirect_node = rc.nodes_manager.slots_cache[slot][1]
+ else:
+ # Use one of the primaries to be the redirected node
+ redirect_node = rc.get_primaries()[0]
+ r_host = redirect_node.host
+ r_port = redirect_node.port
+ with patch.object(Redis, 'parse_response') as parse_response:
+ def moved_redirect_effect(connection, *args, **options):
+ def ok_response(connection, *args, **options):
+ assert connection.host == r_host
+ assert connection.port == r_port
+
+ return "MOCK_OK"
+
+ parse_response.side_effect = ok_response
+ raise MovedError(f"{slot} {r_host}:{r_port}")
+
+ parse_response.side_effect = moved_redirect_effect
+ assert rc.execute_command("SET", "foo", "bar") == "MOCK_OK"
+ slot_primary = rc.nodes_manager.slots_cache[slot][0]
+ assert slot_primary == redirect_node
+ if failover:
+ assert rc.get_node(host=r_host, port=r_port).server_type == PRIMARY
+ assert prev_primary.server_type == REPLICA
+
+
+@pytest.mark.onlycluster
+class TestRedisClusterObj:
+ """
+ Tests for the RedisCluster class
+ """
+
+ def test_host_port_startup_node(self):
+ """
+ Test that it is possible to use host & port arguments as startup node
+ args
+ """
+ cluster = get_mocked_redis_client(host=default_host, port=default_port)
+ assert cluster.get_node(host=default_host,
+ port=default_port) is not None
+
+ def test_startup_nodes(self):
+ """
+ Test that it is possible to use startup_nodes
+ argument to init the cluster
+ """
+ port_1 = 7000
+ port_2 = 7001
+ startup_nodes = [ClusterNode(default_host, port_1),
+ ClusterNode(default_host, port_2)]
+ cluster = get_mocked_redis_client(startup_nodes=startup_nodes)
+ assert cluster.get_node(host=default_host, port=port_1) is not None \
+ and cluster.get_node(host=default_host, port=port_2) is not None
+
+ def test_empty_startup_nodes(self):
+ """
+ Test that an exception is raised when providing empty startup_nodes
+ """
+ with pytest.raises(RedisClusterException) as ex:
+ RedisCluster(startup_nodes=[])
+
+ assert str(ex.value).startswith(
+ "RedisCluster requires at least one node to discover the "
+ "cluster"), str_if_bytes(ex.value)
+
+ def test_from_url(self, r):
+ redis_url = f"redis://{default_host}:{default_port}/0"
+ with patch.object(RedisCluster, 'from_url') as from_url:
+ def from_url_mocked(_url, **_kwargs):
+ return get_mocked_redis_client(url=_url, **_kwargs)
+
+ from_url.side_effect = from_url_mocked
+ cluster = RedisCluster.from_url(redis_url)
+ assert cluster.get_node(host=default_host,
+ port=default_port) is not None
+
+ def test_execute_command_errors(self, r):
+ """
+ Test that if no key is provided then an exception is raised.
+ """
+ with pytest.raises(RedisClusterException) as ex:
+ r.execute_command("GET")
+ assert str(ex.value).startswith("No way to dispatch this command to "
+ "Redis Cluster. Missing key.")
+
+ def test_execute_command_node_flag_primaries(self, r):
+ """
+ Test command execution with nodes flag PRIMARIES
+ """
+ primaries = r.get_primaries()
+ replicas = r.get_replicas()
+ mock_all_nodes_resp(r, 'PONG')
+ assert r.ping(RedisCluster.PRIMARIES) is True
+ for primary in primaries:
+ conn = primary.redis_connection.connection
+ assert conn.read_response.called is True
+ for replica in replicas:
+ conn = replica.redis_connection.connection
+ assert conn.read_response.called is not True
+
+ def test_execute_command_node_flag_replicas(self, r):
+ """
+ Test command execution with nodes flag REPLICAS
+ """
+ replicas = r.get_replicas()
+ if not replicas:
+ r = get_mocked_redis_client(default_host, default_port)
+ primaries = r.get_primaries()
+ mock_all_nodes_resp(r, 'PONG')
+ assert r.ping(RedisCluster.REPLICAS) is True
+ for replica in replicas:
+ conn = replica.redis_connection.connection
+ assert conn.read_response.called is True
+ for primary in primaries:
+ conn = primary.redis_connection.connection
+ assert conn.read_response.called is not True
+
+ def test_execute_command_node_flag_all_nodes(self, r):
+ """
+ Test command execution with nodes flag ALL_NODES
+ """
+ mock_all_nodes_resp(r, 'PONG')
+ assert r.ping(RedisCluster.ALL_NODES) is True
+ for node in r.get_nodes():
+ conn = node.redis_connection.connection
+ assert conn.read_response.called is True
+
+ def test_execute_command_node_flag_random(self, r):
+ """
+ Test command execution with nodes flag RANDOM
+ """
+ mock_all_nodes_resp(r, 'PONG')
+ assert r.ping(RedisCluster.RANDOM) is True
+ called_count = 0
+ for node in r.get_nodes():
+ conn = node.redis_connection.connection
+ if conn.read_response.called is True:
+ called_count += 1
+ assert called_count == 1
+
+ def test_execute_command_default_node(self, r):
+ """
+        Test that command execution without a node flag is executed on the
+        default node
+ """
+ def_node = r.get_default_node()
+ mock_node_resp(def_node, 'PONG')
+ assert r.ping() is True
+ conn = def_node.redis_connection.connection
+ assert conn.read_response.called
+
+ def test_ask_redirection(self, r):
+ """
+        Test that the client handles an ASK response.
+
+        The first call should return an ASK ResponseError that points the
+        client to the next server it should talk to.
+
+        The important thing to verify is that it tries to talk to the
+        second node.
+ """
+ redirect_node = r.get_nodes()[0]
+ with patch.object(Redis, 'parse_response') as parse_response:
+ def ask_redirect_effect(connection, *args, **options):
+ def ok_response(connection, *args, **options):
+ assert connection.host == redirect_node.host
+ assert connection.port == redirect_node.port
+
+ return "MOCK_OK"
+
+ parse_response.side_effect = ok_response
+ raise AskError(f"12182 {redirect_node.host}:{redirect_node.port}")
+
+ parse_response.side_effect = ask_redirect_effect
+
+ assert r.execute_command("SET", "foo", "bar") == "MOCK_OK"
+
+ def test_moved_redirection(self, request):
+ """
+ Test that the client handles MOVED response.
+ """
+ moved_redirection_helper(request, failover=False)
+
+ def test_moved_redirection_after_failover(self, request):
+ """
+ Test that the client handles MOVED response after a failover.
+ """
+ moved_redirection_helper(request, failover=True)
+
+ def test_refresh_using_specific_nodes(self, request):
+ """
+ Test making calls on specific nodes when the cluster has failed over to
+ another node
+ """
+ node_7006 = ClusterNode(host=default_host, port=7006,
+ server_type=PRIMARY)
+ node_7007 = ClusterNode(host=default_host, port=7007,
+ server_type=PRIMARY)
+ with patch.object(Redis, 'parse_response') as parse_response:
+ with patch.object(NodesManager, 'initialize', autospec=True) as \
+ initialize:
+ with patch.multiple(Connection,
+ send_command=DEFAULT,
+ connect=DEFAULT,
+ can_read=DEFAULT) as mocks:
+ # simulate 7006 as a failed node
+ def parse_response_mock(connection, command_name,
+ **options):
+ if connection.port == 7006:
+ parse_response.failed_calls += 1
+ raise ClusterDownError(
+ 'CLUSTERDOWN The cluster is '
+ 'down. Use CLUSTER INFO for '
+ 'more information')
+ elif connection.port == 7007:
+ parse_response.successful_calls += 1
+
+ def initialize_mock(self):
+ # start with all slots mapped to 7006
+ self.nodes_cache = {node_7006.name: node_7006}
+ self.default_node = node_7006
+ self.slots_cache = {}
+
+                        for i in range(0, 16384):
+ self.slots_cache[i] = [node_7006]
+
+ # After the first connection fails, a reinitialize
+ # should follow the cluster to 7007
+ def map_7007(self):
+ self.nodes_cache = {
+ node_7007.name: node_7007}
+ self.default_node = node_7007
+ self.slots_cache = {}
+
+                        for i in range(0, 16384):
+ self.slots_cache[i] = [node_7007]
+
+ # Change initialize side effect for the second call
+ initialize.side_effect = map_7007
+
+ parse_response.side_effect = parse_response_mock
+ parse_response.successful_calls = 0
+ parse_response.failed_calls = 0
+ initialize.side_effect = initialize_mock
+ mocks['can_read'].return_value = False
+ mocks['send_command'].return_value = "MOCK_OK"
+ mocks['connect'].return_value = None
+ with patch.object(CommandsParser, 'initialize',
+ autospec=True) as cmd_parser_initialize:
+
+ def cmd_init_mock(self, r):
+ self.commands = {'get': {'name': 'get', 'arity': 2,
+ 'flags': ['readonly',
+ 'fast'],
+ 'first_key_pos': 1,
+ 'last_key_pos': 1,
+ 'step_count': 1}}
+
+ cmd_parser_initialize.side_effect = cmd_init_mock
+
+ rc = _get_client(
+ RedisCluster, request, flushdb=False)
+ assert len(rc.get_nodes()) == 1
+ assert rc.get_node(node_name=node_7006.name) is not \
+ None
+
+ rc.get('foo')
+
+ # Cluster should now point to 7007, and there should be
+ # one failed and one successful call
+ assert len(rc.get_nodes()) == 1
+ assert rc.get_node(node_name=node_7007.name) is not \
+ None
+ assert rc.get_node(node_name=node_7006.name) is None
+ assert parse_response.failed_calls == 1
+ assert parse_response.successful_calls == 1
+
+ def test_reading_from_replicas_in_round_robin(self):
+ with patch.multiple(Connection, send_command=DEFAULT,
+ read_response=DEFAULT, _connect=DEFAULT,
+ can_read=DEFAULT, on_connect=DEFAULT) as mocks:
+ with patch.object(Redis, 'parse_response') as parse_response:
+ def parse_response_mock_first(connection, *args, **options):
+ # Primary
+ assert connection.port == 7001
+ parse_response.side_effect = parse_response_mock_second
+ return "MOCK_OK"
+
+ def parse_response_mock_second(connection, *args, **options):
+ # Replica
+ assert connection.port == 7002
+ parse_response.side_effect = parse_response_mock_third
+ return "MOCK_OK"
+
+ def parse_response_mock_third(connection, *args, **options):
+ # Primary
+ assert connection.port == 7001
+ return "MOCK_OK"
+
+ # We don't need to create a real cluster connection but we
+ # do want RedisCluster.on_connect function to get called,
+ # so we'll mock some of the Connection's functions to allow it
+ parse_response.side_effect = parse_response_mock_first
+ mocks['send_command'].return_value = True
+ mocks['read_response'].return_value = "OK"
+ mocks['_connect'].return_value = True
+ mocks['can_read'].return_value = False
+ mocks['on_connect'].return_value = True
+
+            # Create a cluster with reading from replicas
+ read_cluster = get_mocked_redis_client(host=default_host,
+ port=default_port,
+ read_from_replicas=True)
+ assert read_cluster.read_from_replicas is True
+ # Check that we read from the slot's nodes in a round robin
+            # manner.
+ # 'foo' belongs to slot 12182 and the slot's nodes are:
+ # [(127.0.0.1,7001,primary), (127.0.0.1,7002,replica)]
+ read_cluster.get("foo")
+ read_cluster.get("foo")
+ read_cluster.get("foo")
+ mocks['send_command'].assert_has_calls([call('READONLY')])
+
+ def test_keyslot(self, r):
+ """
+        Test that the method computes the correct slot in all supported cases
+ """
+ assert r.keyslot("foo") == 12182
+ assert r.keyslot("{foo}bar") == 12182
+ assert r.keyslot("{foo}") == 12182
+ assert r.keyslot(1337) == 4314
+
+ assert r.keyslot(125) == r.keyslot(b"125")
+ assert r.keyslot(125) == r.keyslot("\x31\x32\x35")
+ assert r.keyslot("大奖") == r.keyslot(b"\xe5\xa4\xa7\xe5\xa5\x96")
+ assert r.keyslot(1337.1234) == r.keyslot("1337.1234")
+ assert r.keyslot(1337) == r.keyslot("1337")
+ assert r.keyslot(b"abc") == r.keyslot("abc")
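+
+        # The slot values above follow the standard Redis Cluster scheme:
+        # CRC16-CCITT of the key (or of the {hash tag} inside it) modulo
+        # 16384. A minimal sketch, assuming binascii.crc_hqx as the CRC16
+        # implementation (redis-py's own version lives in redis.crc):
+        import binascii
+
+        def sketch_key_slot(key, bucket=16384):
+            start = key.find(b"{")
+            if start > -1:
+                end = key.find(b"}", start + 1)
+                if end > -1 and end != start + 1:
+                    key = key[start + 1:end]  # hash only the tag
+            return binascii.crc_hqx(key, 0) % bucket
+
+        assert sketch_key_slot(b"foo") == r.keyslot("foo") == 12182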
+
+ def test_get_node_name(self):
+ assert get_node_name(default_host, default_port) == \
+ f"{default_host}:{default_port}"
+
+ def test_all_nodes(self, r):
+ """
+ Set a list of nodes and it should be possible to iterate over all
+ """
+        nodes = list(r.nodes_manager.nodes_cache.values())
+
+        for node in r.get_nodes():
+ assert node in nodes
+
+ def test_all_nodes_masters(self, r):
+ """
+        Set a list of nodes with random primaries/replicas config and it
+        should be possible to iterate over all of them.
+ """
+ nodes = [node for node in r.nodes_manager.nodes_cache.values()
+ if node.server_type == PRIMARY]
+
+ for node in r.get_primaries():
+ assert node in nodes
+
+ def test_cluster_down_overreaches_retry_attempts(self):
+ """
+ When ClusterDownError is thrown, test that we retry executing the
+ command as many times as configured in cluster_error_retry_attempts
+ and then raise the exception
+ """
+ with patch.object(RedisCluster, '_execute_command') as execute_command:
+ def raise_cluster_down_error(target_node, *args, **kwargs):
+ execute_command.failed_calls += 1
+ raise ClusterDownError(
+ 'CLUSTERDOWN The cluster is down. Use CLUSTER INFO for '
+ 'more information')
+
+ execute_command.side_effect = raise_cluster_down_error
+
+ rc = get_mocked_redis_client(host=default_host, port=default_port)
+
+ with pytest.raises(ClusterDownError):
+ rc.get("bar")
+ assert execute_command.failed_calls == \
+ rc.cluster_error_retry_attempts
+
+ def test_connection_error_overreaches_retry_attempts(self):
+ """
+ When ConnectionError is thrown, test that we retry executing the
+ command as many times as configured in cluster_error_retry_attempts
+ and then raise the exception
+ """
+ with patch.object(RedisCluster, '_execute_command') as execute_command:
+ def raise_conn_error(target_node, *args, **kwargs):
+ execute_command.failed_calls += 1
+ raise ConnectionError()
+
+ execute_command.side_effect = raise_conn_error
+
+ rc = get_mocked_redis_client(host=default_host, port=default_port)
+
+ with pytest.raises(ConnectionError):
+ rc.get("bar")
+ assert execute_command.failed_calls == \
+ rc.cluster_error_retry_attempts
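+
+            # The retry budget asserted above is configurable per client; a
+            # sketch of raising it at construction time (other kwargs
+            # elided for brevity):
+            #   RedisCluster(host=default_host, port=default_port,
+            #                cluster_error_retry_attempts=5)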
+
+ def test_user_on_connect_function(self, request):
+ """
+ Test support in passing on_connect function by the user
+ """
+
+ def on_connect(connection):
+ assert connection is not None
+
+ mock = Mock(side_effect=on_connect)
+
+ _get_client(RedisCluster, request, redis_connect_func=mock)
+ assert mock.called is True
+
+ def test_set_default_node_success(self, r):
+ """
+ test successful replacement of the default cluster node
+ """
+ default_node = r.get_default_node()
+ # get a different node
+ new_def_node = None
+ for node in r.get_nodes():
+ if node != default_node:
+ new_def_node = node
+ break
+ assert r.set_default_node(new_def_node) is True
+ assert r.get_default_node() == new_def_node
+
+ def test_set_default_node_failure(self, r):
+ """
+ test failed replacement of the default cluster node
+ """
+ default_node = r.get_default_node()
+ new_def_node = ClusterNode('1.1.1.1', 1111)
+ assert r.set_default_node(None) is False
+ assert r.set_default_node(new_def_node) is False
+ assert r.get_default_node() == default_node
+
+ def test_get_node_from_key(self, r):
+ """
+ Test that get_node_from_key function returns the correct node
+ """
+ key = 'bar'
+ slot = r.keyslot(key)
+ slot_nodes = r.nodes_manager.slots_cache.get(slot)
+ primary = slot_nodes[0]
+ assert r.get_node_from_key(key, replica=False) == primary
+ replica = r.get_node_from_key(key, replica=True)
+ if replica is not None:
+ assert replica.server_type == REPLICA
+ assert replica in slot_nodes
+
+
+@pytest.mark.onlycluster
+class TestClusterRedisCommands:
+ """
+ Tests for RedisCluster unique commands
+ """
+
+ def test_case_insensitive_command_names(self, r):
+ assert r.cluster_response_callbacks['cluster addslots'] == \
+ r.cluster_response_callbacks['CLUSTER ADDSLOTS']
+
+ def test_get_and_set(self, r):
+ # get and set can't be tested independently of each other
+ assert r.get('a') is None
+ byte_string = b'value'
+ integer = 5
+ unicode_string = chr(3456) + 'abcd' + chr(3421)
+ assert r.set('byte_string', byte_string)
+ assert r.set('integer', 5)
+ assert r.set('unicode_string', unicode_string)
+ assert r.get('byte_string') == byte_string
+ assert r.get('integer') == str(integer).encode()
+ assert r.get('unicode_string').decode('utf-8') == unicode_string
+
+ def test_mget_nonatomic(self, r):
+ assert r.mget_nonatomic([]) == []
+ assert r.mget_nonatomic(['a', 'b']) == [None, None]
+ r['a'] = '1'
+ r['b'] = '2'
+ r['c'] = '3'
+
+ assert (r.mget_nonatomic('a', 'other', 'b', 'c') ==
+ [b'1', None, b'2', b'3'])
+
+ def test_mset_nonatomic(self, r):
+ d = {'a': b'1', 'b': b'2', 'c': b'3', 'd': b'4'}
+ assert r.mset_nonatomic(d)
+ for k, v in d.items():
+ assert r[k] == v
+
+ def test_config_set(self, r):
+ assert r.config_set('slowlog-log-slower-than', 0)
+
+ def test_cluster_config_resetstat(self, r):
+ r.ping(target_nodes='all')
+ all_info = r.info(target_nodes='all')
+ prior_commands_processed = -1
+ for node_info in all_info.values():
+ prior_commands_processed = node_info['total_commands_processed']
+ assert prior_commands_processed >= 1
+ r.config_resetstat(target_nodes='all')
+ all_info = r.info(target_nodes='all')
+ for node_info in all_info.values():
+ reset_commands_processed = node_info['total_commands_processed']
+ assert reset_commands_processed < prior_commands_processed
+
+ def test_client_setname(self, r):
+ node = r.get_random_node()
+ r.client_setname('redis_py_test', target_nodes=node)
+ client_name = r.client_getname(target_nodes=node)
+ assert client_name == 'redis_py_test'
+
+ def test_exists(self, r):
+ d = {'a': b'1', 'b': b'2', 'c': b'3', 'd': b'4'}
+ r.mset_nonatomic(d)
+ assert r.exists(*d.keys()) == len(d)
+
+ def test_delete(self, r):
+ d = {'a': b'1', 'b': b'2', 'c': b'3', 'd': b'4'}
+ r.mset_nonatomic(d)
+ assert r.delete(*d.keys()) == len(d)
+ assert r.delete(*d.keys()) == 0
+
+ def test_touch(self, r):
+ d = {'a': b'1', 'b': b'2', 'c': b'3', 'd': b'4'}
+ r.mset_nonatomic(d)
+ assert r.touch(*d.keys()) == len(d)
+
+ def test_unlink(self, r):
+ d = {'a': b'1', 'b': b'2', 'c': b'3', 'd': b'4'}
+ r.mset_nonatomic(d)
+ assert r.unlink(*d.keys()) == len(d)
+ # Unlink is non-blocking so we sleep before
+ # verifying the deletion
+ sleep(0.1)
+ assert r.unlink(*d.keys()) == 0
+
+ def test_pubsub_channels_merge_results(self, r):
+ nodes = r.get_nodes()
+ channels = []
+ pubsub_nodes = []
+ i = 0
+ for node in nodes:
+ channel = f"foo{i}"
+ # We will create different pubsub clients where each one is
+ # connected to a different node
+ p = r.pubsub(node)
+ pubsub_nodes.append(p)
+ p.subscribe(channel)
+ b_channel = channel.encode('utf-8')
+ channels.append(b_channel)
+ # Assert that each node returns only the channel it subscribed to
+ sub_channels = node.redis_connection.pubsub_channels()
+ if not sub_channels:
+ # Try again after a short sleep
+ sleep(0.3)
+ sub_channels = node.redis_connection.pubsub_channels()
+ assert sub_channels == [b_channel]
+ i += 1
+ # Assert that the cluster's pubsub_channels function returns ALL of
+ # the cluster's channels
+ result = r.pubsub_channels(target_nodes='all')
+ result.sort()
+ assert result == channels
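+        # Conceptually, that merged view is just the union of each node's
+        # own PUBSUB CHANNELS reply; the same result, sketched by hand:
+        merged = sorted(ch for node in nodes
+                        for ch in node.redis_connection.pubsub_channels())
+        assert merged == channels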
+
+ def test_pubsub_numsub_merge_results(self, r):
+ nodes = r.get_nodes()
+ pubsub_nodes = []
+ channel = "foo"
+ b_channel = channel.encode('utf-8')
+ for node in nodes:
+ # We will create different pubsub clients where each one is
+ # connected to a different node
+ p = r.pubsub(node)
+ pubsub_nodes.append(p)
+ p.subscribe(channel)
+ # Assert that each node returns that only one client is subscribed
+ sub_chann_num = node.redis_connection.pubsub_numsub(channel)
+ if sub_chann_num == [(b_channel, 0)]:
+ sleep(0.3)
+ sub_chann_num = node.redis_connection.pubsub_numsub(channel)
+ assert sub_chann_num == [(b_channel, 1)]
+ # Assert that the cluster's pubsub_numsub function returns ALL clients
+ # subscribed to this channel in the entire cluster
+ assert r.pubsub_numsub(channel, target_nodes='all') == \
+ [(b_channel, len(nodes))]
+
+ def test_pubsub_numpat_merge_results(self, r):
+ nodes = r.get_nodes()
+ pubsub_nodes = []
+ pattern = "foo*"
+ for node in nodes:
+ # We will create different pubsub clients where each one is
+ # connected to a different node
+ p = r.pubsub(node)
+ pubsub_nodes.append(p)
+ p.psubscribe(pattern)
+ # Assert that each node returns that only one client is subscribed
+ sub_num_pat = node.redis_connection.pubsub_numpat()
+ if sub_num_pat == 0:
+ sleep(0.3)
+ sub_num_pat = node.redis_connection.pubsub_numpat()
+ assert sub_num_pat == 1
+        # Assert that the cluster's pubsub_numpat function returns the total
+        # number of patterns subscribed to across the entire cluster
+ assert r.pubsub_numpat(target_nodes='all') == len(nodes)
+
+ @skip_if_server_version_lt('2.8.0')
+ def test_cluster_pubsub_channels(self, r):
+ p = r.pubsub()
+ p.subscribe('foo', 'bar', 'baz', 'quux')
+ for i in range(4):
+ assert wait_for_message(p, timeout=0.5)['type'] == 'subscribe'
+ expected = [b'bar', b'baz', b'foo', b'quux']
+ assert all([channel in r.pubsub_channels(target_nodes='all')
+ for channel in expected])
+
+ @skip_if_server_version_lt('2.8.0')
+ def test_cluster_pubsub_numsub(self, r):
+ p1 = r.pubsub()
+ p1.subscribe('foo', 'bar', 'baz')
+ for i in range(3):
+ assert wait_for_message(p1, timeout=0.5)['type'] == 'subscribe'
+ p2 = r.pubsub()
+ p2.subscribe('bar', 'baz')
+ for i in range(2):
+ assert wait_for_message(p2, timeout=0.5)['type'] == 'subscribe'
+ p3 = r.pubsub()
+ p3.subscribe('baz')
+ assert wait_for_message(p3, timeout=0.5)['type'] == 'subscribe'
+
+ channels = [(b'foo', 1), (b'bar', 2), (b'baz', 3)]
+ assert r.pubsub_numsub('foo', 'bar', 'baz', target_nodes='all') \
+ == channels
+
+ def test_cluster_slots(self, r):
+ mock_all_nodes_resp(r, default_cluster_slots)
+ cluster_slots = r.cluster_slots()
+ assert isinstance(cluster_slots, dict)
+ assert len(default_cluster_slots) == len(cluster_slots)
+ assert cluster_slots.get((0, 8191)) is not None
+ assert cluster_slots.get((0, 8191)).get('primary') == \
+ ('127.0.0.1', 7000)
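+        # For context, each raw CLUSTER SLOTS entry mocked above has the
+        # shape [start_slot, end_slot, [primary_host, primary_port, ...],
+        # [replica_host, replica_port, ...], ...]; the client folds those
+        # entries into the {(start, end): {...}} dict asserted on here.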
+
+ def test_cluster_addslots(self, r):
+ node = r.get_random_node()
+ mock_node_resp(node, 'OK')
+ assert r.cluster_addslots(node, 1, 2, 3) is True
+
+ def test_cluster_countkeysinslot(self, r):
+ node = r.nodes_manager.get_node_from_slot(1)
+ mock_node_resp(node, 2)
+ assert r.cluster_countkeysinslot(1) == 2
+
+ def test_cluster_count_failure_report(self, r):
+ mock_all_nodes_resp(r, 0)
+ assert r.cluster_count_failure_report('node_0') == 0
+
+ def test_cluster_delslots(self):
+ cluster_slots = [
+ [
+ 0, 8191,
+ ['127.0.0.1', 7000, 'node_0'],
+ ],
+ [
+ 8192, 16383,
+ ['127.0.0.1', 7001, 'node_1'],
+ ]
+ ]
+ r = get_mocked_redis_client(host=default_host, port=default_port,
+ cluster_slots=cluster_slots)
+ mock_all_nodes_resp(r, 'OK')
+ node0 = r.get_node(default_host, 7000)
+ node1 = r.get_node(default_host, 7001)
+ assert r.cluster_delslots(0, 8192) == [True, True]
+ assert node0.redis_connection.connection.read_response.called
+ assert node1.redis_connection.connection.read_response.called
+
+ def test_cluster_failover(self, r):
+ node = r.get_random_node()
+ mock_node_resp(node, 'OK')
+ assert r.cluster_failover(node) is True
+ assert r.cluster_failover(node, 'FORCE') is True
+ assert r.cluster_failover(node, 'TAKEOVER') is True
+ with pytest.raises(RedisError):
+ r.cluster_failover(node, 'FORCT')
+
+ def test_cluster_info(self, r):
+ info = r.cluster_info()
+ assert isinstance(info, dict)
+ assert info['cluster_state'] == 'ok'
+
+ def test_cluster_keyslot(self, r):
+ mock_all_nodes_resp(r, 12182)
+ assert r.cluster_keyslot('foo') == 12182
+
+ def test_cluster_meet(self, r):
+ node = r.get_default_node()
+ mock_node_resp(node, 'OK')
+ assert r.cluster_meet('127.0.0.1', 6379) is True
+
+ def test_cluster_nodes(self, r):
+ response = (
+ 'c8253bae761cb1ecb2b61857d85dfe455a0fec8b 172.17.0.7:7006 '
+ 'slave aa90da731f673a99617dfe930306549a09f83a6b 0 '
+ '1447836263059 5 connected\n'
+ '9bd595fe4821a0e8d6b99d70faa660638a7612b3 172.17.0.7:7008 '
+ 'master - 0 1447836264065 0 connected\n'
+ 'aa90da731f673a99617dfe930306549a09f83a6b 172.17.0.7:7003 '
+ 'myself,master - 0 0 2 connected 5461-10922\n'
+ '1df047e5a594f945d82fc140be97a1452bcbf93e 172.17.0.7:7007 '
+ 'slave 19efe5a631f3296fdf21a5441680f893e8cc96ec 0 '
+ '1447836262556 3 connected\n'
+ '4ad9a12e63e8f0207025eeba2354bcf4c85e5b22 172.17.0.7:7005 '
+ 'master - 0 1447836262555 7 connected 0-5460\n'
+ '19efe5a631f3296fdf21a5441680f893e8cc96ec 172.17.0.7:7004 '
+ 'master - 0 1447836263562 3 connected 10923-16383\n'
+ 'fbb23ed8cfa23f17eaf27ff7d0c410492a1093d6 172.17.0.7:7002 '
+ 'master,fail - 1447829446956 1447829444948 1 disconnected\n'
+ )
+ mock_all_nodes_resp(r, response)
+ nodes = r.cluster_nodes()
+ assert len(nodes) == 7
+ assert nodes.get('172.17.0.7:7006') is not None
+ assert nodes.get('172.17.0.7:7006').get('node_id') == \
+ "c8253bae761cb1ecb2b61857d85dfe455a0fec8b"
+
+ def test_cluster_replicate(self, r):
+ node = r.get_random_node()
+ all_replicas = r.get_replicas()
+ mock_all_nodes_resp(r, 'OK')
+ assert r.cluster_replicate(node, 'c8253bae761cb61857d') is True
+ results = r.cluster_replicate(all_replicas, 'c8253bae761cb61857d')
+ if isinstance(results, dict):
+ for res in results.values():
+ assert res is True
+ else:
+ assert results is True
+
+ def test_cluster_reset(self, r):
+ mock_all_nodes_resp(r, 'OK')
+ assert r.cluster_reset() is True
+ assert r.cluster_reset(False) is True
+ all_results = r.cluster_reset(False, target_nodes='all')
+ for res in all_results.values():
+ assert res is True
+
+ def test_cluster_save_config(self, r):
+ node = r.get_random_node()
+ all_nodes = r.get_nodes()
+ mock_all_nodes_resp(r, 'OK')
+ assert r.cluster_save_config(node) is True
+ all_results = r.cluster_save_config(all_nodes)
+ for res in all_results.values():
+ assert res is True
+
+ def test_cluster_get_keys_in_slot(self, r):
+ response = [b'{foo}1', b'{foo}2']
+ node = r.nodes_manager.get_node_from_slot(12182)
+ mock_node_resp(node, response)
+ keys = r.cluster_get_keys_in_slot(12182, 4)
+ assert keys == response
+
+ def test_cluster_set_config_epoch(self, r):
+ mock_all_nodes_resp(r, 'OK')
+ assert r.cluster_set_config_epoch(3) is True
+ all_results = r.cluster_set_config_epoch(3, target_nodes='all')
+ for res in all_results.values():
+ assert res is True
+
+ def test_cluster_setslot(self, r):
+ node = r.get_random_node()
+ mock_node_resp(node, 'OK')
+ assert r.cluster_setslot(node, 'node_0', 1218, 'IMPORTING') is True
+ assert r.cluster_setslot(node, 'node_0', 1218, 'NODE') is True
+ assert r.cluster_setslot(node, 'node_0', 1218, 'MIGRATING') is True
+        with pytest.raises(RedisError):
+            r.cluster_setslot(node, 'node_0', 1218, 'STABLE')
+        with pytest.raises(RedisError):
+            r.cluster_setslot(node, 'node_0', 1218, 'STATE')
+
+ def test_cluster_setslot_stable(self, r):
+ node = r.nodes_manager.get_node_from_slot(12182)
+ mock_node_resp(node, 'OK')
+ assert r.cluster_setslot_stable(12182) is True
+ assert node.redis_connection.connection.read_response.called
+
+ def test_cluster_replicas(self, r):
+ response = [b'01eca22229cf3c652b6fca0d09ff6941e0d2e3 '
+ b'127.0.0.1:6377@16377 slave '
+ b'52611e796814b78e90ad94be9d769a4f668f9a 0 '
+ b'1634550063436 4 connected',
+ b'r4xfga22229cf3c652b6fca0d09ff69f3e0d4d '
+ b'127.0.0.1:6378@16378 slave '
+ b'52611e796814b78e90ad94be9d769a4f668f9a 0 '
+ b'1634550063436 4 connected']
+ mock_all_nodes_resp(r, response)
+ replicas = r.cluster_replicas('52611e796814b78e90ad94be9d769a4f668f9a')
+ assert replicas.get('127.0.0.1:6377') is not None
+ assert replicas.get('127.0.0.1:6378') is not None
+ assert replicas.get('127.0.0.1:6378').get('node_id') == \
+ 'r4xfga22229cf3c652b6fca0d09ff69f3e0d4d'
+
+ def test_readonly(self):
+ r = get_mocked_redis_client(host=default_host, port=default_port)
+ mock_all_nodes_resp(r, 'OK')
+ assert r.readonly() is True
+ all_replicas_results = r.readonly(target_nodes='replicas')
+ for res in all_replicas_results.values():
+ assert res is True
+ for replica in r.get_replicas():
+ assert replica.redis_connection.connection.read_response.called
+
+ def test_readwrite(self):
+ r = get_mocked_redis_client(host=default_host, port=default_port)
+ mock_all_nodes_resp(r, 'OK')
+ assert r.readwrite() is True
+ all_replicas_results = r.readwrite(target_nodes='replicas')
+ for res in all_replicas_results.values():
+ assert res is True
+ for replica in r.get_replicas():
+ assert replica.redis_connection.connection.read_response.called
+
+ def test_bgsave(self, r):
+ assert r.bgsave()
+ sleep(0.3)
+ assert r.bgsave(True)
+
+ def test_info(self, r):
+ # Map keys to same slot
+ r.set('x{1}', 1)
+ r.set('y{1}', 2)
+ r.set('z{1}', 3)
+ # Get node that handles the slot
+ slot = r.keyslot('x{1}')
+ node = r.nodes_manager.get_node_from_slot(slot)
+ # Run info on that node
+ info = r.info(target_nodes=node)
+ assert isinstance(info, dict)
+ assert info['db0']['keys'] == 3
+
+ def _init_slowlog_test(self, r, node):
+ slowlog_lim = r.config_get('slowlog-log-slower-than',
+ target_nodes=node)
+ assert r.config_set('slowlog-log-slower-than', 0, target_nodes=node) \
+ is True
+ return slowlog_lim['slowlog-log-slower-than']
+
+ def _teardown_slowlog_test(self, r, node, prev_limit):
+ assert r.config_set('slowlog-log-slower-than', prev_limit,
+ target_nodes=node) is True
+
+ def test_slowlog_get(self, r, slowlog):
+ unicode_string = chr(3456) + 'abcd' + chr(3421)
+ node = r.get_node_from_key(unicode_string)
+ slowlog_limit = self._init_slowlog_test(r, node)
+ assert r.slowlog_reset(target_nodes=node)
+ r.get(unicode_string)
+ slowlog = r.slowlog_get(target_nodes=node)
+ assert isinstance(slowlog, list)
+ commands = [log['command'] for log in slowlog]
+
+ get_command = b' '.join((b'GET', unicode_string.encode('utf-8')))
+ assert get_command in commands
+ assert b'SLOWLOG RESET' in commands
+
+ # the order should be ['GET <uni string>', 'SLOWLOG RESET'],
+        # but if other clients are executing commands at the same time, there
+        # could be other commands before, between, or after ours, so just
+        # check that the two we care about are in the appropriate order.
+ assert commands.index(get_command) < commands.index(b'SLOWLOG RESET')
+
+ # make sure other attributes are typed correctly
+ assert isinstance(slowlog[0]['start_time'], int)
+ assert isinstance(slowlog[0]['duration'], int)
+ # rollback the slowlog limit to its original value
+ self._teardown_slowlog_test(r, node, slowlog_limit)
+
+ def test_slowlog_get_limit(self, r, slowlog):
+ assert r.slowlog_reset()
+ node = r.get_node_from_key('foo')
+ slowlog_limit = self._init_slowlog_test(r, node)
+ r.get('foo')
+ slowlog = r.slowlog_get(1, target_nodes=node)
+ assert isinstance(slowlog, list)
+ # only one command, based on the number we passed to slowlog_get()
+ assert len(slowlog) == 1
+ self._teardown_slowlog_test(r, node, slowlog_limit)
+
+ def test_slowlog_length(self, r, slowlog):
+ r.get('foo')
+ node = r.nodes_manager.get_node_from_slot(key_slot(b'foo'))
+ slowlog_len = r.slowlog_len(target_nodes=node)
+ assert isinstance(slowlog_len, int)
+
+ def test_time(self, r):
+ t = r.time(target_nodes=r.get_primaries()[0])
+ assert len(t) == 2
+ assert isinstance(t[0], int)
+ assert isinstance(t[1], int)
+
+ @skip_if_server_version_lt('4.0.0')
+ def test_memory_usage(self, r):
+ r.set('foo', 'bar')
+ assert isinstance(r.memory_usage('foo'), int)
+
+ @skip_if_server_version_lt('4.0.0')
+ def test_memory_malloc_stats(self, r):
+ assert r.memory_malloc_stats()
+
+ @skip_if_server_version_lt('4.0.0')
+ def test_memory_stats(self, r):
+ # put a key into the current db to make sure that "db.<current-db>"
+ # has data
+ r.set('foo', 'bar')
+ node = r.nodes_manager.get_node_from_slot(key_slot(b'foo'))
+ stats = r.memory_stats(target_nodes=node)
+ assert isinstance(stats, dict)
+ for key, value in stats.items():
+ if key.startswith('db.'):
+ assert isinstance(value, dict)
+
+ @skip_if_server_version_lt('4.0.0')
+ def test_memory_help(self, r):
+ with pytest.raises(NotImplementedError):
+ r.memory_help()
+
+ @skip_if_server_version_lt('4.0.0')
+ def test_memory_doctor(self, r):
+ with pytest.raises(NotImplementedError):
+ r.memory_doctor()
+
+ def test_lastsave(self, r):
+ node = r.get_primaries()[0]
+ assert isinstance(r.lastsave(target_nodes=node),
+ datetime.datetime)
+
+ def test_cluster_echo(self, r):
+ node = r.get_primaries()[0]
+ assert r.echo('foo bar', node) == b'foo bar'
+
+ @skip_if_server_version_lt('1.0.0')
+ def test_debug_segfault(self, r):
+ with pytest.raises(NotImplementedError):
+ r.debug_segfault()
+
+ def test_config_resetstat(self, r):
+ node = r.get_primaries()[0]
+ r.ping(target_nodes=node)
+ prior_commands_processed = \
+ int(r.info(target_nodes=node)['total_commands_processed'])
+ assert prior_commands_processed >= 1
+ r.config_resetstat(target_nodes=node)
+ reset_commands_processed = \
+ int(r.info(target_nodes=node)['total_commands_processed'])
+ assert reset_commands_processed < prior_commands_processed
+
+ @skip_if_server_version_lt('6.2.0')
+ def test_client_trackinginfo(self, r):
+ node = r.get_primaries()[0]
+ res = r.client_trackinginfo(target_nodes=node)
+ assert len(res) > 2
+ assert 'prefixes' in res
+
+ @skip_if_server_version_lt('2.9.50')
+ def test_client_pause(self, r):
+ node = r.get_primaries()[0]
+ assert r.client_pause(1, target_nodes=node)
+ assert r.client_pause(timeout=1, target_nodes=node)
+ with pytest.raises(RedisError):
+ r.client_pause(timeout='not an integer', target_nodes=node)
+
+ @skip_if_server_version_lt('6.2.0')
+ def test_client_unpause(self, r):
+ assert r.client_unpause()
+
+ @skip_if_server_version_lt('5.0.0')
+ def test_client_id(self, r):
+ node = r.get_primaries()[0]
+ assert r.client_id(target_nodes=node) > 0
+
+ @skip_if_server_version_lt('5.0.0')
+ def test_client_unblock(self, r):
+ node = r.get_primaries()[0]
+ myid = r.client_id(target_nodes=node)
+ assert not r.client_unblock(myid, target_nodes=node)
+ assert not r.client_unblock(myid, error=True, target_nodes=node)
+ assert not r.client_unblock(myid, error=False, target_nodes=node)
+
+ @skip_if_server_version_lt('6.0.0')
+ def test_client_getredir(self, r):
+ node = r.get_primaries()[0]
+ assert isinstance(r.client_getredir(target_nodes=node), int)
+ assert r.client_getredir(target_nodes=node) == -1
+
+ @skip_if_server_version_lt('6.2.0')
+ def test_client_info(self, r):
+ node = r.get_primaries()[0]
+ info = r.client_info(target_nodes=node)
+ assert isinstance(info, dict)
+ assert 'addr' in info
+
+ @skip_if_server_version_lt('2.6.9')
+ def test_client_kill(self, r, r2):
+ node = r.get_primaries()[0]
+ r.client_setname('redis-py-c1', target_nodes='all')
+ r2.client_setname('redis-py-c2', target_nodes='all')
+ clients = [client for client in r.client_list(target_nodes=node)
+ if client.get('name') in ['redis-py-c1', 'redis-py-c2']]
+ assert len(clients) == 2
+ clients_by_name = {client.get('name'): client for client in clients}
+
+ client_addr = clients_by_name['redis-py-c2'].get('addr')
+ assert r.client_kill(client_addr, target_nodes=node) is True
+
+ clients = [client for client in r.client_list(target_nodes=node)
+ if client.get('name') in ['redis-py-c1', 'redis-py-c2']]
+ assert len(clients) == 1
+ assert clients[0].get('name') == 'redis-py-c1'
+
+ @skip_if_server_version_lt('2.6.0')
+ def test_cluster_bitop_not_empty_string(self, r):
+ r['{foo}a'] = ''
+ r.bitop('not', '{foo}r', '{foo}a')
+ assert r.get('{foo}r') is None
+
+ @skip_if_server_version_lt('2.6.0')
+ def test_cluster_bitop_not(self, r):
+ test_str = b'\xAA\x00\xFF\x55'
+ correct = ~0xAA00FF55 & 0xFFFFFFFF
+ r['{foo}a'] = test_str
+ r.bitop('not', '{foo}r', '{foo}a')
+ assert int(binascii.hexlify(r['{foo}r']), 16) == correct
+
+ @skip_if_server_version_lt('2.6.0')
+ def test_cluster_bitop_not_in_place(self, r):
+ test_str = b'\xAA\x00\xFF\x55'
+ correct = ~0xAA00FF55 & 0xFFFFFFFF
+ r['{foo}a'] = test_str
+ r.bitop('not', '{foo}a', '{foo}a')
+ assert int(binascii.hexlify(r['{foo}a']), 16) == correct
+
+ @skip_if_server_version_lt('2.6.0')
+ def test_cluster_bitop_single_string(self, r):
+ test_str = b'\x01\x02\xFF'
+ r['{foo}a'] = test_str
+ r.bitop('and', '{foo}res1', '{foo}a')
+ r.bitop('or', '{foo}res2', '{foo}a')
+ r.bitop('xor', '{foo}res3', '{foo}a')
+ assert r['{foo}res1'] == test_str
+ assert r['{foo}res2'] == test_str
+ assert r['{foo}res3'] == test_str
+
+ @skip_if_server_version_lt('2.6.0')
+ def test_cluster_bitop_string_operands(self, r):
+ r['{foo}a'] = b'\x01\x02\xFF\xFF'
+ r['{foo}b'] = b'\x01\x02\xFF'
+ r.bitop('and', '{foo}res1', '{foo}a', '{foo}b')
+ r.bitop('or', '{foo}res2', '{foo}a', '{foo}b')
+ r.bitop('xor', '{foo}res3', '{foo}a', '{foo}b')
+ assert int(binascii.hexlify(r['{foo}res1']), 16) == 0x0102FF00
+ assert int(binascii.hexlify(r['{foo}res2']), 16) == 0x0102FFFF
+ assert int(binascii.hexlify(r['{foo}res3']), 16) == 0x000000FF
+
+ @skip_if_server_version_lt('6.2.0')
+ def test_cluster_copy(self, r):
+ assert r.copy("{foo}a", "{foo}b") == 0
+ r.set("{foo}a", "bar")
+ assert r.copy("{foo}a", "{foo}b") == 1
+ assert r.get("{foo}a") == b"bar"
+ assert r.get("{foo}b") == b"bar"
+
+ @skip_if_server_version_lt('6.2.0')
+ def test_cluster_copy_and_replace(self, r):
+ r.set("{foo}a", "foo1")
+ r.set("{foo}b", "foo2")
+ assert r.copy("{foo}a", "{foo}b") == 0
+ assert r.copy("{foo}a", "{foo}b", replace=True) == 1
+
+ @skip_if_server_version_lt('6.2.0')
+ def test_cluster_lmove(self, r):
+ r.rpush('{foo}a', 'one', 'two', 'three', 'four')
+ assert r.lmove('{foo}a', '{foo}b')
+ assert r.lmove('{foo}a', '{foo}b', 'right', 'left')
+
+ @skip_if_server_version_lt('6.2.0')
+ def test_cluster_blmove(self, r):
+ r.rpush('{foo}a', 'one', 'two', 'three', 'four')
+ assert r.blmove('{foo}a', '{foo}b', 5)
+ assert r.blmove('{foo}a', '{foo}b', 1, 'RIGHT', 'LEFT')
+
+ def test_cluster_msetnx(self, r):
+ d = {'{foo}a': b'1', '{foo}b': b'2', '{foo}c': b'3'}
+ assert r.msetnx(d)
+ d2 = {'{foo}a': b'x', '{foo}d': b'4'}
+ assert not r.msetnx(d2)
+ for k, v in d.items():
+ assert r[k] == v
+ assert r.get('{foo}d') is None
+
+ def test_cluster_rename(self, r):
+ r['{foo}a'] = '1'
+ assert r.rename('{foo}a', '{foo}b')
+ assert r.get('{foo}a') is None
+ assert r['{foo}b'] == b'1'
+
+ def test_cluster_renamenx(self, r):
+ r['{foo}a'] = '1'
+ r['{foo}b'] = '2'
+ assert not r.renamenx('{foo}a', '{foo}b')
+ assert r['{foo}a'] == b'1'
+ assert r['{foo}b'] == b'2'
+
+ # LIST COMMANDS
+ def test_cluster_blpop(self, r):
+ r.rpush('{foo}a', '1', '2')
+ r.rpush('{foo}b', '3', '4')
+ assert r.blpop(['{foo}b', '{foo}a'], timeout=1) == (b'{foo}b', b'3')
+ assert r.blpop(['{foo}b', '{foo}a'], timeout=1) == (b'{foo}b', b'4')
+ assert r.blpop(['{foo}b', '{foo}a'], timeout=1) == (b'{foo}a', b'1')
+ assert r.blpop(['{foo}b', '{foo}a'], timeout=1) == (b'{foo}a', b'2')
+ assert r.blpop(['{foo}b', '{foo}a'], timeout=1) is None
+ r.rpush('{foo}c', '1')
+ assert r.blpop('{foo}c', timeout=1) == (b'{foo}c', b'1')
+
+ def test_cluster_brpop(self, r):
+ r.rpush('{foo}a', '1', '2')
+ r.rpush('{foo}b', '3', '4')
+ assert r.brpop(['{foo}b', '{foo}a'], timeout=1) == (b'{foo}b', b'4')
+ assert r.brpop(['{foo}b', '{foo}a'], timeout=1) == (b'{foo}b', b'3')
+ assert r.brpop(['{foo}b', '{foo}a'], timeout=1) == (b'{foo}a', b'2')
+ assert r.brpop(['{foo}b', '{foo}a'], timeout=1) == (b'{foo}a', b'1')
+ assert r.brpop(['{foo}b', '{foo}a'], timeout=1) is None
+ r.rpush('{foo}c', '1')
+ assert r.brpop('{foo}c', timeout=1) == (b'{foo}c', b'1')
+
+ def test_cluster_brpoplpush(self, r):
+ r.rpush('{foo}a', '1', '2')
+ r.rpush('{foo}b', '3', '4')
+ assert r.brpoplpush('{foo}a', '{foo}b') == b'2'
+ assert r.brpoplpush('{foo}a', '{foo}b') == b'1'
+ assert r.brpoplpush('{foo}a', '{foo}b', timeout=1) is None
+ assert r.lrange('{foo}a', 0, -1) == []
+ assert r.lrange('{foo}b', 0, -1) == [b'1', b'2', b'3', b'4']
+
+ def test_cluster_brpoplpush_empty_string(self, r):
+ r.rpush('{foo}a', '')
+ assert r.brpoplpush('{foo}a', '{foo}b') == b''
+
+ def test_cluster_rpoplpush(self, r):
+ r.rpush('{foo}a', 'a1', 'a2', 'a3')
+ r.rpush('{foo}b', 'b1', 'b2', 'b3')
+ assert r.rpoplpush('{foo}a', '{foo}b') == b'a3'
+ assert r.lrange('{foo}a', 0, -1) == [b'a1', b'a2']
+ assert r.lrange('{foo}b', 0, -1) == [b'a3', b'b1', b'b2', b'b3']
+
+ def test_cluster_sdiff(self, r):
+ r.sadd('{foo}a', '1', '2', '3')
+ assert r.sdiff('{foo}a', '{foo}b') == {b'1', b'2', b'3'}
+ r.sadd('{foo}b', '2', '3')
+ assert r.sdiff('{foo}a', '{foo}b') == {b'1'}
+
+ def test_cluster_sdiffstore(self, r):
+ r.sadd('{foo}a', '1', '2', '3')
+ assert r.sdiffstore('{foo}c', '{foo}a', '{foo}b') == 3
+ assert r.smembers('{foo}c') == {b'1', b'2', b'3'}
+ r.sadd('{foo}b', '2', '3')
+ assert r.sdiffstore('{foo}c', '{foo}a', '{foo}b') == 1
+ assert r.smembers('{foo}c') == {b'1'}
+
+ def test_cluster_sinter(self, r):
+ r.sadd('{foo}a', '1', '2', '3')
+ assert r.sinter('{foo}a', '{foo}b') == set()
+ r.sadd('{foo}b', '2', '3')
+ assert r.sinter('{foo}a', '{foo}b') == {b'2', b'3'}
+
+ def test_cluster_sinterstore(self, r):
+ r.sadd('{foo}a', '1', '2', '3')
+ assert r.sinterstore('{foo}c', '{foo}a', '{foo}b') == 0
+ assert r.smembers('{foo}c') == set()
+ r.sadd('{foo}b', '2', '3')
+ assert r.sinterstore('{foo}c', '{foo}a', '{foo}b') == 2
+ assert r.smembers('{foo}c') == {b'2', b'3'}
+
+ def test_cluster_smove(self, r):
+ r.sadd('{foo}a', 'a1', 'a2')
+ r.sadd('{foo}b', 'b1', 'b2')
+ assert r.smove('{foo}a', '{foo}b', 'a1')
+ assert r.smembers('{foo}a') == {b'a2'}
+ assert r.smembers('{foo}b') == {b'b1', b'b2', b'a1'}
+
+ def test_cluster_sunion(self, r):
+ r.sadd('{foo}a', '1', '2')
+ r.sadd('{foo}b', '2', '3')
+ assert r.sunion('{foo}a', '{foo}b') == {b'1', b'2', b'3'}
+
+ def test_cluster_sunionstore(self, r):
+ r.sadd('{foo}a', '1', '2')
+ r.sadd('{foo}b', '2', '3')
+ assert r.sunionstore('{foo}c', '{foo}a', '{foo}b') == 3
+ assert r.smembers('{foo}c') == {b'1', b'2', b'3'}
+
+ @skip_if_server_version_lt('6.2.0')
+ def test_cluster_zdiff(self, r):
+ r.zadd('{foo}a', {'a1': 1, 'a2': 2, 'a3': 3})
+ r.zadd('{foo}b', {'a1': 1, 'a2': 2})
+ assert r.zdiff(['{foo}a', '{foo}b']) == [b'a3']
+ assert r.zdiff(['{foo}a', '{foo}b'], withscores=True) == [b'a3', b'3']
+
+ @skip_if_server_version_lt('6.2.0')
+ def test_cluster_zdiffstore(self, r):
+ r.zadd('{foo}a', {'a1': 1, 'a2': 2, 'a3': 3})
+ r.zadd('{foo}b', {'a1': 1, 'a2': 2})
+ assert r.zdiffstore("{foo}out", ['{foo}a', '{foo}b'])
+ assert r.zrange("{foo}out", 0, -1) == [b'a3']
+ assert r.zrange("{foo}out", 0, -1, withscores=True) == [(b'a3', 3.0)]
+
+ @skip_if_server_version_lt('6.2.0')
+ def test_cluster_zinter(self, r):
+ r.zadd('{foo}a', {'a1': 1, 'a2': 2, 'a3': 1})
+ r.zadd('{foo}b', {'a1': 2, 'a2': 2, 'a3': 2})
+ r.zadd('{foo}c', {'a1': 6, 'a3': 5, 'a4': 4})
+ assert r.zinter(['{foo}a', '{foo}b', '{foo}c']) == [b'a3', b'a1']
+ # invalid aggregation
+ with pytest.raises(DataError):
+ r.zinter(['{foo}a', '{foo}b', '{foo}c'],
+ aggregate='foo', withscores=True)
+ # aggregate with SUM
+ assert r.zinter(['{foo}a', '{foo}b', '{foo}c'], withscores=True) \
+ == [(b'a3', 8), (b'a1', 9)]
+ # aggregate with MAX
+ assert r.zinter(['{foo}a', '{foo}b', '{foo}c'], aggregate='MAX',
+ withscores=True) \
+ == [(b'a3', 5), (b'a1', 6)]
+ # aggregate with MIN
+ assert r.zinter(['{foo}a', '{foo}b', '{foo}c'], aggregate='MIN',
+ withscores=True) \
+ == [(b'a1', 1), (b'a3', 1)]
+ # with weights
+ assert r.zinter({'{foo}a': 1, '{foo}b': 2, '{foo}c': 3},
+ withscores=True) \
+ == [(b'a3', 20), (b'a1', 23)]
+
+ def test_cluster_zinterstore_sum(self, r):
+ r.zadd('{foo}a', {'a1': 1, 'a2': 1, 'a3': 1})
+ r.zadd('{foo}b', {'a1': 2, 'a2': 2, 'a3': 2})
+ r.zadd('{foo}c', {'a1': 6, 'a3': 5, 'a4': 4})
+ assert r.zinterstore('{foo}d', ['{foo}a', '{foo}b', '{foo}c']) == 2
+ assert r.zrange('{foo}d', 0, -1, withscores=True) == \
+ [(b'a3', 8), (b'a1', 9)]
+
+ def test_cluster_zinterstore_max(self, r):
+ r.zadd('{foo}a', {'a1': 1, 'a2': 1, 'a3': 1})
+ r.zadd('{foo}b', {'a1': 2, 'a2': 2, 'a3': 2})
+ r.zadd('{foo}c', {'a1': 6, 'a3': 5, 'a4': 4})
+ assert r.zinterstore(
+ '{foo}d', ['{foo}a', '{foo}b', '{foo}c'], aggregate='MAX') == 2
+ assert r.zrange('{foo}d', 0, -1, withscores=True) == \
+ [(b'a3', 5), (b'a1', 6)]
+
+ def test_cluster_zinterstore_min(self, r):
+ r.zadd('{foo}a', {'a1': 1, 'a2': 2, 'a3': 3})
+ r.zadd('{foo}b', {'a1': 2, 'a2': 3, 'a3': 5})
+ r.zadd('{foo}c', {'a1': 6, 'a3': 5, 'a4': 4})
+ assert r.zinterstore(
+ '{foo}d', ['{foo}a', '{foo}b', '{foo}c'], aggregate='MIN') == 2
+ assert r.zrange('{foo}d', 0, -1, withscores=True) == \
+ [(b'a1', 1), (b'a3', 3)]
+
+ def test_cluster_zinterstore_with_weight(self, r):
+ r.zadd('{foo}a', {'a1': 1, 'a2': 1, 'a3': 1})
+ r.zadd('{foo}b', {'a1': 2, 'a2': 2, 'a3': 2})
+ r.zadd('{foo}c', {'a1': 6, 'a3': 5, 'a4': 4})
+ assert r.zinterstore(
+ '{foo}d', {'{foo}a': 1, '{foo}b': 2, '{foo}c': 3}) == 2
+ assert r.zrange('{foo}d', 0, -1, withscores=True) == \
+ [(b'a3', 20), (b'a1', 23)]
+
+ @skip_if_server_version_lt('4.9.0')
+ def test_cluster_bzpopmax(self, r):
+ r.zadd('{foo}a', {'a1': 1, 'a2': 2})
+ r.zadd('{foo}b', {'b1': 10, 'b2': 20})
+ assert r.bzpopmax(['{foo}b', '{foo}a'], timeout=1) == (
+ b'{foo}b', b'b2', 20)
+ assert r.bzpopmax(['{foo}b', '{foo}a'], timeout=1) == (
+ b'{foo}b', b'b1', 10)
+ assert r.bzpopmax(['{foo}b', '{foo}a'], timeout=1) == (
+ b'{foo}a', b'a2', 2)
+ assert r.bzpopmax(['{foo}b', '{foo}a'], timeout=1) == (
+ b'{foo}a', b'a1', 1)
+ assert r.bzpopmax(['{foo}b', '{foo}a'], timeout=1) is None
+ r.zadd('{foo}c', {'c1': 100})
+ assert r.bzpopmax('{foo}c', timeout=1) == (b'{foo}c', b'c1', 100)
+
+ @skip_if_server_version_lt('4.9.0')
+ def test_cluster_bzpopmin(self, r):
+ r.zadd('{foo}a', {'a1': 1, 'a2': 2})
+ r.zadd('{foo}b', {'b1': 10, 'b2': 20})
+ assert r.bzpopmin(['{foo}b', '{foo}a'], timeout=1) == (
+ b'{foo}b', b'b1', 10)
+ assert r.bzpopmin(['{foo}b', '{foo}a'], timeout=1) == (
+ b'{foo}b', b'b2', 20)
+ assert r.bzpopmin(['{foo}b', '{foo}a'], timeout=1) == (
+ b'{foo}a', b'a1', 1)
+ assert r.bzpopmin(['{foo}b', '{foo}a'], timeout=1) == (
+ b'{foo}a', b'a2', 2)
+ assert r.bzpopmin(['{foo}b', '{foo}a'], timeout=1) is None
+ r.zadd('{foo}c', {'c1': 100})
+ assert r.bzpopmin('{foo}c', timeout=1) == (b'{foo}c', b'c1', 100)
+
+ @skip_if_server_version_lt('6.2.0')
+ def test_cluster_zrangestore(self, r):
+ r.zadd('{foo}a', {'a1': 1, 'a2': 2, 'a3': 3})
+ assert r.zrangestore('{foo}b', '{foo}a', 0, 1)
+ assert r.zrange('{foo}b', 0, -1) == [b'a1', b'a2']
+ assert r.zrangestore('{foo}b', '{foo}a', 1, 2)
+ assert r.zrange('{foo}b', 0, -1) == [b'a2', b'a3']
+ assert r.zrange('{foo}b', 0, -1, withscores=True) == \
+ [(b'a2', 2), (b'a3', 3)]
+ # reversed order
+ assert r.zrangestore('{foo}b', '{foo}a', 1, 2, desc=True)
+ assert r.zrange('{foo}b', 0, -1) == [b'a1', b'a2']
+ # by score
+ assert r.zrangestore('{foo}b', '{foo}a', 2, 1, byscore=True,
+ offset=0, num=1, desc=True)
+ assert r.zrange('{foo}b', 0, -1) == [b'a2']
+ # by lex
+ assert r.zrangestore('{foo}b', '{foo}a', '[a2', '(a3', bylex=True,
+ offset=0, num=1)
+ assert r.zrange('{foo}b', 0, -1) == [b'a2']
+
+ @skip_if_server_version_lt('6.2.0')
+ def test_cluster_zunion(self, r):
+ r.zadd('{foo}a', {'a1': 1, 'a2': 1, 'a3': 1})
+ r.zadd('{foo}b', {'a1': 2, 'a2': 2, 'a3': 2})
+ r.zadd('{foo}c', {'a1': 6, 'a3': 5, 'a4': 4})
+ # sum
+ assert r.zunion(['{foo}a', '{foo}b', '{foo}c']) == \
+ [b'a2', b'a4', b'a3', b'a1']
+ assert r.zunion(['{foo}a', '{foo}b', '{foo}c'], withscores=True) == \
+ [(b'a2', 3), (b'a4', 4), (b'a3', 8), (b'a1', 9)]
+ # max
+ assert r.zunion(['{foo}a', '{foo}b', '{foo}c'], aggregate='MAX',
+ withscores=True) \
+ == [(b'a2', 2), (b'a4', 4), (b'a3', 5), (b'a1', 6)]
+ # min
+ assert r.zunion(['{foo}a', '{foo}b', '{foo}c'], aggregate='MIN',
+ withscores=True) \
+ == [(b'a1', 1), (b'a2', 1), (b'a3', 1), (b'a4', 4)]
+ # with weight
+ assert r.zunion({'{foo}a': 1, '{foo}b': 2, '{foo}c': 3},
+ withscores=True) \
+ == [(b'a2', 5), (b'a4', 12), (b'a3', 20), (b'a1', 23)]
+
+ def test_cluster_zunionstore_sum(self, r):
+ r.zadd('{foo}a', {'a1': 1, 'a2': 1, 'a3': 1})
+ r.zadd('{foo}b', {'a1': 2, 'a2': 2, 'a3': 2})
+ r.zadd('{foo}c', {'a1': 6, 'a3': 5, 'a4': 4})
+ assert r.zunionstore('{foo}d', ['{foo}a', '{foo}b', '{foo}c']) == 4
+ assert r.zrange('{foo}d', 0, -1, withscores=True) == \
+ [(b'a2', 3), (b'a4', 4), (b'a3', 8), (b'a1', 9)]
+
+ def test_cluster_zunionstore_max(self, r):
+ r.zadd('{foo}a', {'a1': 1, 'a2': 1, 'a3': 1})
+ r.zadd('{foo}b', {'a1': 2, 'a2': 2, 'a3': 2})
+ r.zadd('{foo}c', {'a1': 6, 'a3': 5, 'a4': 4})
+ assert r.zunionstore(
+ '{foo}d', ['{foo}a', '{foo}b', '{foo}c'], aggregate='MAX') == 4
+ assert r.zrange('{foo}d', 0, -1, withscores=True) == \
+ [(b'a2', 2), (b'a4', 4), (b'a3', 5), (b'a1', 6)]
+
+ def test_cluster_zunionstore_min(self, r):
+ r.zadd('{foo}a', {'a1': 1, 'a2': 2, 'a3': 3})
+ r.zadd('{foo}b', {'a1': 2, 'a2': 2, 'a3': 4})
+ r.zadd('{foo}c', {'a1': 6, 'a3': 5, 'a4': 4})
+ assert r.zunionstore(
+ '{foo}d', ['{foo}a', '{foo}b', '{foo}c'], aggregate='MIN') == 4
+ assert r.zrange('{foo}d', 0, -1, withscores=True) == \
+ [(b'a1', 1), (b'a2', 2), (b'a3', 3), (b'a4', 4)]
+
+ def test_cluster_zunionstore_with_weight(self, r):
+ r.zadd('{foo}a', {'a1': 1, 'a2': 1, 'a3': 1})
+ r.zadd('{foo}b', {'a1': 2, 'a2': 2, 'a3': 2})
+ r.zadd('{foo}c', {'a1': 6, 'a3': 5, 'a4': 4})
+ assert r.zunionstore(
+ '{foo}d', {'{foo}a': 1, '{foo}b': 2, '{foo}c': 3}) == 4
+ assert r.zrange('{foo}d', 0, -1, withscores=True) == \
+ [(b'a2', 5), (b'a4', 12), (b'a3', 20), (b'a1', 23)]
+
+ @skip_if_server_version_lt('2.8.9')
+ def test_cluster_pfcount(self, r):
+ members = {b'1', b'2', b'3'}
+ r.pfadd('{foo}a', *members)
+ assert r.pfcount('{foo}a') == len(members)
+ members_b = {b'2', b'3', b'4'}
+ r.pfadd('{foo}b', *members_b)
+ assert r.pfcount('{foo}b') == len(members_b)
+ assert r.pfcount('{foo}a', '{foo}b') == len(members_b.union(members))
+
+ @skip_if_server_version_lt('2.8.9')
+ def test_cluster_pfmerge(self, r):
+ mema = {b'1', b'2', b'3'}
+ memb = {b'2', b'3', b'4'}
+ memc = {b'5', b'6', b'7'}
+ r.pfadd('{foo}a', *mema)
+ r.pfadd('{foo}b', *memb)
+ r.pfadd('{foo}c', *memc)
+ r.pfmerge('{foo}d', '{foo}c', '{foo}a')
+ assert r.pfcount('{foo}d') == 6
+ r.pfmerge('{foo}d', '{foo}b')
+ assert r.pfcount('{foo}d') == 7
+
+ def test_cluster_sort_store(self, r):
+ r.rpush('{foo}a', '2', '3', '1')
+ assert r.sort('{foo}a', store='{foo}sorted_values') == 3
+ assert r.lrange('{foo}sorted_values', 0, -1) == [b'1', b'2', b'3']
+
+ # GEO COMMANDS
+ @skip_if_server_version_lt('6.2.0')
+ def test_cluster_geosearchstore(self, r):
+ values = (2.1909389952632, 41.433791470673, 'place1') + \
+ (2.1873744593677, 41.406342043777, 'place2')
+
+ r.geoadd('{foo}barcelona', values)
+ r.geosearchstore('{foo}places_barcelona', '{foo}barcelona',
+ longitude=2.191, latitude=41.433, radius=1000)
+ assert r.zrange('{foo}places_barcelona', 0, -1) == [b'place1']
+
+ @skip_unless_arch_bits(64)
+ @skip_if_server_version_lt('6.2.0')
+ def test_geosearchstore_dist(self, r):
+ values = (2.1909389952632, 41.433791470673, 'place1') + \
+ (2.1873744593677, 41.406342043777, 'place2')
+
+ r.geoadd('{foo}barcelona', values)
+ r.geosearchstore('{foo}places_barcelona', '{foo}barcelona',
+ longitude=2.191, latitude=41.433,
+ radius=1000, storedist=True)
+        # instead of saving the geo score, the distance is saved.
+ assert r.zscore('{foo}places_barcelona', 'place1') == 88.05060698409301
+
+ @skip_if_server_version_lt('3.2.0')
+ def test_cluster_georadius_store(self, r):
+ values = (2.1909389952632, 41.433791470673, 'place1') + \
+ (2.1873744593677, 41.406342043777, 'place2')
+
+ r.geoadd('{foo}barcelona', values)
+ r.georadius('{foo}barcelona', 2.191, 41.433,
+ 1000, store='{foo}places_barcelona')
+ assert r.zrange('{foo}places_barcelona', 0, -1) == [b'place1']
+
+ @skip_unless_arch_bits(64)
+ @skip_if_server_version_lt('3.2.0')
+ def test_cluster_georadius_store_dist(self, r):
+ values = (2.1909389952632, 41.433791470673, 'place1') + \
+ (2.1873744593677, 41.406342043777, 'place2')
+
+ r.geoadd('{foo}barcelona', values)
+ r.georadius('{foo}barcelona', 2.191, 41.433, 1000,
+ store_dist='{foo}places_barcelona')
+        # instead of saving the geo score, the distance is saved.
+ assert r.zscore('{foo}places_barcelona', 'place1') == 88.05060698409301
+
+ def test_cluster_dbsize(self, r):
+ d = {'a': b'1', 'b': b'2', 'c': b'3', 'd': b'4'}
+ assert r.mset_nonatomic(d)
+ assert r.dbsize(target_nodes='primaries') == len(d)
+
+ def test_cluster_keys(self, r):
+ assert r.keys() == []
+ keys_with_underscores = {b'test_a', b'test_b'}
+ keys = keys_with_underscores.union({b'testc'})
+ for key in keys:
+ r[key] = 1
+ assert set(r.keys(pattern='test_*', target_nodes='primaries')) == \
+ keys_with_underscores
+ assert set(r.keys(pattern='test*', target_nodes='primaries')) == keys
+
+ # SCAN COMMANDS
+ @skip_if_server_version_lt('2.8.0')
+ def test_cluster_scan(self, r):
+ r.set('a', 1)
+ r.set('b', 2)
+ r.set('c', 3)
+ cursor, keys = r.scan(target_nodes='primaries')
+ assert cursor == 0
+ assert set(keys) == {b'a', b'b', b'c'}
+ _, keys = r.scan(match='a', target_nodes='primaries')
+ assert set(keys) == {b'a'}
+
+ @skip_if_server_version_lt("6.0.0")
+ def test_cluster_scan_type(self, r):
+ r.sadd('a-set', 1)
+ r.hset('a-hash', 'foo', 2)
+ r.lpush('a-list', 'aux', 3)
+ _, keys = r.scan(match='a*', _type='SET', target_nodes='primaries')
+ assert set(keys) == {b'a-set'}
+
+ @skip_if_server_version_lt('2.8.0')
+ def test_cluster_scan_iter(self, r):
+ r.set('a', 1)
+ r.set('b', 2)
+ r.set('c', 3)
+ keys = list(r.scan_iter(target_nodes='primaries'))
+ assert set(keys) == {b'a', b'b', b'c'}
+ keys = list(r.scan_iter(match='a', target_nodes='primaries'))
+ assert set(keys) == {b'a'}
+
+ def test_cluster_randomkey(self, r):
+ node = r.get_node_from_key('{foo}')
+ assert r.randomkey(target_nodes=node) is None
+ for key in ('{foo}a', '{foo}b', '{foo}c'):
+ r[key] = 1
+ assert r.randomkey(target_nodes=node) in \
+ (b'{foo}a', b'{foo}b', b'{foo}c')
+
+
+@pytest.mark.onlycluster
+class TestNodesManager:
+ """
+ Tests for the NodesManager class
+ """
+
+ def test_load_balancer(self, r):
+ n_manager = r.nodes_manager
+ lb = n_manager.read_load_balancer
+ slot_1 = 1257
+ slot_2 = 8975
+ node_1 = ClusterNode(default_host, 6379, PRIMARY)
+ node_2 = ClusterNode(default_host, 6378, REPLICA)
+ node_3 = ClusterNode(default_host, 6377, REPLICA)
+ node_4 = ClusterNode(default_host, 6376, PRIMARY)
+ node_5 = ClusterNode(default_host, 6375, REPLICA)
+ n_manager.slots_cache = {
+ slot_1: [node_1, node_2, node_3],
+ slot_2: [node_4, node_5]
+ }
+ primary1_name = n_manager.slots_cache[slot_1][0].name
+ primary2_name = n_manager.slots_cache[slot_2][0].name
+ list1_size = len(n_manager.slots_cache[slot_1])
+ list2_size = len(n_manager.slots_cache[slot_2])
+ # slot 1
+ assert lb.get_server_index(primary1_name, list1_size) == 0
+ assert lb.get_server_index(primary1_name, list1_size) == 1
+ assert lb.get_server_index(primary1_name, list1_size) == 2
+ assert lb.get_server_index(primary1_name, list1_size) == 0
+ # slot 2
+ assert lb.get_server_index(primary2_name, list2_size) == 0
+ assert lb.get_server_index(primary2_name, list2_size) == 1
+ assert lb.get_server_index(primary2_name, list2_size) == 0
+
+ lb.reset()
+ assert lb.get_server_index(primary1_name, list1_size) == 0
+ assert lb.get_server_index(primary2_name, list2_size) == 0
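+
+        # The behavior verified above amounts to a per-primary round-robin
+        # counter; a minimal sketch of that logic (modelled after, but not
+        # identical to, the client's LoadBalancer):
+        class SketchLoadBalancer:
+            def __init__(self):
+                self.idx = {}
+
+            def get_server_index(self, primary, list_size):
+                i = self.idx.get(primary, 0)
+                # advance and wrap so the next call picks the next server
+                self.idx[primary] = (i + 1) % list_size
+                return i
+
+        sketch = SketchLoadBalancer()
+        assert [sketch.get_server_index('p1', 3) for _ in range(4)] == \
+            [0, 1, 2, 0]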
+
+ def test_init_slots_cache_not_all_slots_covered(self):
+ """
+        Test that an exception is raised if not all slots are covered
+ """
+ # Missing slot 5460
+ cluster_slots = [
+ [0, 5459, ['127.0.0.1', 7000], ['127.0.0.1', 7003]],
+ [5461, 10922, ['127.0.0.1', 7001],
+ ['127.0.0.1', 7004]],
+ [10923, 16383, ['127.0.0.1', 7002],
+ ['127.0.0.1', 7005]],
+ ]
+ with pytest.raises(RedisClusterException) as ex:
+ get_mocked_redis_client(host=default_host, port=default_port,
+ cluster_slots=cluster_slots)
+ assert str(ex.value).startswith(
+ "All slots are not covered after query all startup_nodes.")
+
+ def test_init_slots_cache_not_require_full_coverage_error(self):
+ """
+        When require_full_coverage is set to False and not all slots are
+        covered, if one of the nodes has the 'cluster-require-full-coverage'
+        config set to 'yes', the cluster initialization should fail
+ """
+ # Missing slot 5460
+ cluster_slots = [
+ [0, 5459, ['127.0.0.1', 7000], ['127.0.0.1', 7003]],
+ [5461, 10922, ['127.0.0.1', 7001],
+ ['127.0.0.1', 7004]],
+ [10923, 16383, ['127.0.0.1', 7002],
+ ['127.0.0.1', 7005]],
+ ]
+
+ with pytest.raises(RedisClusterException):
+ get_mocked_redis_client(host=default_host, port=default_port,
+ cluster_slots=cluster_slots,
+ require_full_coverage=False,
+ coverage_result='yes')
+
+ def test_init_slots_cache_not_require_full_coverage_success(self):
+ """
+        When require_full_coverage is set to False and not all slots are
+        covered, if all of the nodes have the 'cluster-require-full-coverage'
+        config set to 'no', the cluster initialization should succeed
+ """
+ # Missing slot 5460
+ cluster_slots = [
+ [0, 5459, ['127.0.0.1', 7000], ['127.0.0.1', 7003]],
+ [5461, 10922, ['127.0.0.1', 7001],
+ ['127.0.0.1', 7004]],
+ [10923, 16383, ['127.0.0.1', 7002],
+ ['127.0.0.1', 7005]],
+ ]
+
+ rc = get_mocked_redis_client(host=default_host, port=default_port,
+ cluster_slots=cluster_slots,
+ require_full_coverage=False,
+ coverage_result='no')
+
+ assert 5460 not in rc.nodes_manager.slots_cache
+
+ def test_init_slots_cache_not_require_full_coverage_skips_check(self):
+ """
+        Test that when require_full_coverage is set to False and
+        skip_full_coverage_check is set to True, the cluster initialization
+        succeeds without checking the nodes' Redis configurations
+ """
+ # Missing slot 5460
+ cluster_slots = [
+ [0, 5459, ['127.0.0.1', 7000], ['127.0.0.1', 7003]],
+ [5461, 10922, ['127.0.0.1', 7001],
+ ['127.0.0.1', 7004]],
+ [10923, 16383, ['127.0.0.1', 7002],
+ ['127.0.0.1', 7005]],
+ ]
+
+ with patch.object(NodesManager,
+ 'cluster_require_full_coverage') as conf_check_mock:
+ rc = get_mocked_redis_client(host=default_host, port=default_port,
+ cluster_slots=cluster_slots,
+ require_full_coverage=False,
+ skip_full_coverage_check=True,
+ coverage_result='no')
+
+ assert conf_check_mock.called is False
+ assert 5460 not in rc.nodes_manager.slots_cache
+
+ def test_init_slots_cache(self):
+ """
+        Test that the slots cache can be initialized and that all slots
+        are covered
+ """
+ good_slots_resp = [
+ [0, 5460, ['127.0.0.1', 7000], ['127.0.0.2', 7003]],
+ [5461, 10922, ['127.0.0.1', 7001], ['127.0.0.2', 7004]],
+ [10923, 16383, ['127.0.0.1', 7002], ['127.0.0.2', 7005]],
+ ]
+
+ rc = get_mocked_redis_client(host=default_host, port=default_port,
+ cluster_slots=good_slots_resp)
+ n_manager = rc.nodes_manager
+ assert len(n_manager.slots_cache) == REDIS_CLUSTER_HASH_SLOTS
+ for slot_info in good_slots_resp:
+ all_hosts = ['127.0.0.1', '127.0.0.2']
+ all_ports = [7000, 7001, 7002, 7003, 7004, 7005]
+ slot_start = slot_info[0]
+ slot_end = slot_info[1]
+ for i in range(slot_start, slot_end + 1):
+ assert len(n_manager.slots_cache[i]) == len(slot_info[2:])
+ assert n_manager.slots_cache[i][0].host in all_hosts
+ assert n_manager.slots_cache[i][1].host in all_hosts
+ assert n_manager.slots_cache[i][0].port in all_ports
+ assert n_manager.slots_cache[i][1].port in all_ports
+
+ assert len(n_manager.nodes_cache) == 6
+
+ def test_empty_startup_nodes(self):
+ """
+ It should not be possible to create a node manager with no nodes
+ specified
+ """
+ with pytest.raises(RedisClusterException):
+ NodesManager([])
+
+ def test_wrong_startup_nodes_type(self):
+ """
+        If something other than a list-type iterable is provided, it should
+        fail
+ """
+ with pytest.raises(RedisClusterException):
+ NodesManager({})
+
+ def test_init_slots_cache_slots_collision(self, request):
+ """
+ Test that if 2 nodes do not agree on the same slots setup it should
+ raise an error. In this test both nodes will say that the first
+ slots block should be bound to different servers.
+ """
+ with patch.object(NodesManager,
+ 'create_redis_node') as create_redis_node:
+ def create_mocked_redis_node(host, port, **kwargs):
+ """
+ Helper function to return custom slots cache data from
+ different redis nodes
+ """
+ if port == 7000:
+ result = [
+ [
+ 0,
+ 5460,
+ ['127.0.0.1', 7000],
+ ['127.0.0.1', 7003],
+ ],
+ [
+ 5461,
+ 10922,
+ ['127.0.0.1', 7001],
+ ['127.0.0.1', 7004],
+ ],
+ ]
+
+ elif port == 7001:
+ result = [
+ [
+ 0,
+ 5460,
+ ['127.0.0.1', 7001],
+ ['127.0.0.1', 7003],
+ ],
+ [
+ 5461,
+ 10922,
+ ['127.0.0.1', 7000],
+ ['127.0.0.1', 7004],
+ ],
+ ]
+ else:
+ result = []
+
+ r_node = Redis(
+ host=host,
+ port=port
+ )
+
+ orig_execute_command = r_node.execute_command
+
+ def execute_command(*args, **kwargs):
+ if args[0] == 'CLUSTER SLOTS':
+ return result
+ elif args[1] == 'cluster-require-full-coverage':
+ return {'cluster-require-full-coverage': 'yes'}
+ else:
+ return orig_execute_command(*args, **kwargs)
+
+ r_node.execute_command = execute_command
+ return r_node
+
+ create_redis_node.side_effect = create_mocked_redis_node
+
+ with pytest.raises(RedisClusterException) as ex:
+ node_1 = ClusterNode('127.0.0.1', 7000)
+ node_2 = ClusterNode('127.0.0.1', 7001)
+ RedisCluster(startup_nodes=[node_1, node_2])
+ assert str(ex.value).startswith(
+ "startup_nodes could not agree on a valid slots cache"), str(
+ ex.value)
+
+ def test_cluster_one_instance(self):
+ """
+        If the cluster consists of only one node, there are some hacks whose
+        behavior must be validated.
+ """
+ node = ClusterNode(default_host, default_port)
+ cluster_slots = [[0, 16383, ['', default_port]]]
+ rc = get_mocked_redis_client(startup_nodes=[node],
+ cluster_slots=cluster_slots)
+
+ n = rc.nodes_manager
+ assert len(n.nodes_cache) == 1
+ n_node = rc.get_node(node_name=node.name)
+ assert n_node is not None
+ assert n_node == node
+ assert n_node.server_type == PRIMARY
+ assert len(n.slots_cache) == REDIS_CLUSTER_HASH_SLOTS
+ for i in range(0, REDIS_CLUSTER_HASH_SLOTS):
+ assert n.slots_cache[i] == [n_node]
+
+ def test_init_with_down_node(self):
+ """
+ If I can't connect to one of the nodes, everything should still work.
+        But if I can't connect to any of the nodes, an exception should be thrown.
+ """
+ with patch.object(NodesManager,
+ 'create_redis_node') as create_redis_node:
+ def create_mocked_redis_node(host, port, **kwargs):
+ if port == 7000:
+ raise ConnectionError('mock connection error for 7000')
+
+ r_node = Redis(host=host, port=port, decode_responses=True)
+
+ def execute_command(*args, **kwargs):
+ if args[0] == 'CLUSTER SLOTS':
+ return [
+ [
+ 0, 8191,
+ ['127.0.0.1', 7001, 'node_1'],
+ ],
+ [
+ 8192, 16383,
+ ['127.0.0.1', 7002, 'node_2'],
+ ]
+ ]
+ elif args[1] == 'cluster-require-full-coverage':
+ return {'cluster-require-full-coverage': 'yes'}
+
+ r_node.execute_command = execute_command
+
+ return r_node
+
+ create_redis_node.side_effect = create_mocked_redis_node
+
+ node_1 = ClusterNode('127.0.0.1', 7000)
+ node_2 = ClusterNode('127.0.0.1', 7001)
+
+            # If all startup nodes fail to connect, a RedisClusterException
+            # should be raised
+ with pytest.raises(RedisClusterException) as e:
+ RedisCluster(startup_nodes=[node_1])
+ assert 'Redis Cluster cannot be connected' in str(e.value)
+
+ with patch.object(CommandsParser, 'initialize',
+ autospec=True) as cmd_parser_initialize:
+
+ def cmd_init_mock(self, r):
+ self.commands = {'get': {'name': 'get', 'arity': 2,
+ 'flags': ['readonly',
+ 'fast'],
+ 'first_key_pos': 1,
+ 'last_key_pos': 1,
+ 'step_count': 1}}
+
+ cmd_parser_initialize.side_effect = cmd_init_mock
+ # When at least one startup node is reachable, the cluster
+            # initialization should succeed
+ rc = RedisCluster(startup_nodes=[node_1, node_2])
+ assert rc.get_node(host=default_host, port=7001) is not None
+ assert rc.get_node(host=default_host, port=7002) is not None
+
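The COMMAND metadata mocked above (arity, flags, first_key_pos, last_key_pos, step_count) is all a client needs to locate the keys of commands with fixed key positions. A minimal sketch of that rule, with a hypothetical fixed_keys helper:

    def fixed_keys(args, first_key_pos, last_key_pos, step_count):
        # A negative last_key_pos counts from the end of the argument list.
        last = last_key_pos if last_key_pos >= 0 else len(args) + last_key_pos
        return args[first_key_pos:last + 1:step_count]

    assert fixed_keys(['GET', 'foo'], 1, 1, 1) == ['foo']
    assert fixed_keys(['MSET', 'k1', 'v1', 'k2', 'v2'], 1, -1, 2) == ['k1', 'k2']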
+
+@pytest.mark.onlycluster
+class TestClusterPubSubObject:
+ """
+ Tests for the ClusterPubSub class
+ """
+
+ def test_init_pubsub_with_host_and_port(self, r):
+ """
+ Test creation of pubsub instance with passed host and port
+ """
+ node = r.get_default_node()
+ p = r.pubsub(host=node.host, port=node.port)
+ assert p.get_pubsub_node() == node
+
+ def test_init_pubsub_with_node(self, r):
+ """
+ Test creation of pubsub instance with passed node
+ """
+ node = r.get_default_node()
+ p = r.pubsub(node=node)
+ assert p.get_pubsub_node() == node
+
+    def test_init_pubsub_without_specifying_node(self, r):
+ """
+ Test creation of pubsub instance without specifying a node. The node
+ should be determined based on the keyslot of the first command
+ execution.
+ """
+ channel_name = 'foo'
+ node = r.get_node_from_key(channel_name)
+ p = r.pubsub()
+ assert p.get_pubsub_node() is None
+ p.subscribe(channel_name)
+ assert p.get_pubsub_node() == node
+
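In practice this means a bare pubsub object stays unbound until the first subscribe. A hedged usage sketch, assuming a cluster is reachable at 127.0.0.1:7000:

    from redis.cluster import ClusterNode, RedisCluster

    rc = RedisCluster(startup_nodes=[ClusterNode('127.0.0.1', 7000)])
    p = rc.pubsub()
    assert p.get_pubsub_node() is None    # no node bound yet
    p.subscribe('foo')                    # the channel name is hashed like a key
    assert p.get_pubsub_node() == rc.get_node_from_key('foo')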
+ def test_init_pubsub_with_a_non_existent_node(self, r):
+ """
+        Test creation of a pubsub instance with a node that doesn't exist in
+        the cluster. RedisClusterException should be raised.
+ """
+ node = ClusterNode('1.1.1.1', 1111)
+ with pytest.raises(RedisClusterException):
+ r.pubsub(node)
+
+ def test_init_pubsub_with_a_non_existent_host_port(self, r):
+ """
+ Test creation of pubsub instance with host and port that don't belong
+ to a node in the cluster.
+ RedisClusterException should be raised.
+ """
+ with pytest.raises(RedisClusterException):
+ r.pubsub(host='1.1.1.1', port=1111)
+
+ def test_init_pubsub_host_or_port(self, r):
+ """
+ Test creation of pubsub instance with host but without port, and vice
+ versa. DataError should be raised.
+ """
+ with pytest.raises(DataError):
+ r.pubsub(host='localhost')
+
+ with pytest.raises(DataError):
+ r.pubsub(port=16379)
+
+ def test_get_redis_connection(self, r):
+ """
+        Test that get_redis_connection() returns the redis connection of the
+        configured pubsub node
+ """
+ node = r.get_default_node()
+ p = r.pubsub(node=node)
+ assert p.get_redis_connection() == node.redis_connection
+
+
+@pytest.mark.onlycluster
+class TestClusterPipeline:
+ """
+ Tests for the ClusterPipeline class
+ """
+
+ def test_blocked_methods(self, r):
+ """
+        Currently, some method calls on a cluster pipeline
+        are blocked when used in cluster mode.
+        They may be implemented in the future.
+ """
+ pipe = r.pipeline()
+ with pytest.raises(RedisClusterException):
+ pipe.multi()
+
+ with pytest.raises(RedisClusterException):
+ pipe.immediate_execute_command()
+
+ with pytest.raises(RedisClusterException):
+ pipe._execute_transaction(None, None, None)
+
+ with pytest.raises(RedisClusterException):
+ pipe.load_scripts()
+
+ with pytest.raises(RedisClusterException):
+ pipe.watch()
+
+ with pytest.raises(RedisClusterException):
+ pipe.unwatch()
+
+ with pytest.raises(RedisClusterException):
+ pipe.script_load_for_pipeline(None)
+
+ with pytest.raises(RedisClusterException):
+ pipe.eval()
+
+ def test_blocked_arguments(self, r):
+ """
+        Currently, some arguments are blocked when used in cluster mode.
+        They may be implemented in the future.
+ """
+ with pytest.raises(RedisClusterException) as ex:
+ r.pipeline(transaction=True)
+
+ assert str(ex.value).startswith(
+ "transaction is deprecated in cluster mode") is True
+
+ with pytest.raises(RedisClusterException) as ex:
+ r.pipeline(shard_hint=True)
+
+ assert str(ex.value).startswith(
+ "shard_hint is deprecated in cluster mode") is True
+
+ def test_redis_cluster_pipeline(self, r):
+ """
+ Test that we can use a pipeline with the RedisCluster class
+ """
+ with r.pipeline() as pipe:
+ pipe.set("foo", "bar")
+ pipe.get("foo")
+ assert pipe.execute() == [True, b'bar']
+
+ def test_mget_disabled(self, r):
+ """
+ Test that mget is disabled for ClusterPipeline
+ """
+ with r.pipeline() as pipe:
+ with pytest.raises(RedisClusterException):
+ pipe.mget(['a'])
+
+ def test_mset_disabled(self, r):
+ """
+ Test that mset is disabled for ClusterPipeline
+ """
+ with r.pipeline() as pipe:
+ with pytest.raises(RedisClusterException):
+ pipe.mset({'a': 1, 'b': 2})
+
+ def test_rename_disabled(self, r):
+ """
+ Test that rename is disabled for ClusterPipeline
+ """
+ with r.pipeline(transaction=False) as pipe:
+ with pytest.raises(RedisClusterException):
+ pipe.rename('a', 'b')
+
+ def test_renamenx_disabled(self, r):
+ """
+ Test that renamenx is disabled for ClusterPipeline
+ """
+ with r.pipeline(transaction=False) as pipe:
+ with pytest.raises(RedisClusterException):
+ pipe.renamenx('a', 'b')
+
+ def test_delete_single(self, r):
+ """
+ Test a single delete operation
+ """
+ r['a'] = 1
+ with r.pipeline(transaction=False) as pipe:
+ pipe.delete('a')
+ assert pipe.execute() == [1]
+
+ def test_multi_delete_unsupported(self, r):
+ """
+ Test that multi delete operation is unsupported
+ """
+ with r.pipeline(transaction=False) as pipe:
+ r['a'] = 1
+ r['b'] = 2
+ with pytest.raises(RedisClusterException):
+ pipe.delete('a', 'b')
+
+ def test_brpoplpush_disabled(self, r):
+ """
+ Test that brpoplpush is disabled for ClusterPipeline
+ """
+ with r.pipeline(transaction=False) as pipe:
+ with pytest.raises(RedisClusterException):
+ pipe.brpoplpush()
+
+ def test_rpoplpush_disabled(self, r):
+ """
+ Test that rpoplpush is disabled for ClusterPipeline
+ """
+ with r.pipeline(transaction=False) as pipe:
+ with pytest.raises(RedisClusterException):
+ pipe.rpoplpush()
+
+ def test_sort_disabled(self, r):
+ """
+ Test that sort is disabled for ClusterPipeline
+ """
+ with r.pipeline(transaction=False) as pipe:
+ with pytest.raises(RedisClusterException):
+ pipe.sort()
+
+ def test_sdiff_disabled(self, r):
+ """
+ Test that sdiff is disabled for ClusterPipeline
+ """
+ with r.pipeline(transaction=False) as pipe:
+ with pytest.raises(RedisClusterException):
+ pipe.sdiff()
+
+ def test_sdiffstore_disabled(self, r):
+ """
+ Test that sdiffstore is disabled for ClusterPipeline
+ """
+ with r.pipeline(transaction=False) as pipe:
+ with pytest.raises(RedisClusterException):
+ pipe.sdiffstore()
+
+ def test_sinter_disabled(self, r):
+ """
+ Test that sinter is disabled for ClusterPipeline
+ """
+ with r.pipeline(transaction=False) as pipe:
+ with pytest.raises(RedisClusterException):
+ pipe.sinter()
+
+ def test_sinterstore_disabled(self, r):
+ """
+ Test that sinterstore is disabled for ClusterPipeline
+ """
+ with r.pipeline(transaction=False) as pipe:
+ with pytest.raises(RedisClusterException):
+ pipe.sinterstore()
+
+ def test_smove_disabled(self, r):
+ """
+        Test that smove is disabled for ClusterPipeline
+ """
+ with r.pipeline(transaction=False) as pipe:
+ with pytest.raises(RedisClusterException):
+ pipe.smove()
+
+ def test_sunion_disabled(self, r):
+ """
+ Test that sunion is disabled for ClusterPipeline
+ """
+ with r.pipeline(transaction=False) as pipe:
+ with pytest.raises(RedisClusterException):
+ pipe.sunion()
+
+ def test_sunionstore_disabled(self, r):
+ """
+ Test that sunionstore is disabled for ClusterPipeline
+ """
+ with r.pipeline(transaction=False) as pipe:
+ with pytest.raises(RedisClusterException):
+ pipe.sunionstore()
+
+    def test_pfmerge_disabled(self, r):
+        """
+        Test that pfmerge is disabled for ClusterPipeline
+ """
+ with r.pipeline(transaction=False) as pipe:
+ with pytest.raises(RedisClusterException):
+ pipe.pfmerge()
+
+ def test_multi_key_operation_with_a_single_slot(self, r):
+ """
+ Test multi key operation with a single slot
+ """
+ pipe = r.pipeline(transaction=False)
+ pipe.set('a{foo}', 1)
+ pipe.set('b{foo}', 2)
+ pipe.set('c{foo}', 3)
+ pipe.get('a{foo}')
+ pipe.get('b{foo}')
+ pipe.get('c{foo}')
+
+ res = pipe.execute()
+ assert res == [True, True, True, b'1', b'2', b'3']
+
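The {foo} hash tags above are what force all the keys into a single slot: when a key contains a non-empty {...} section, only the text inside the first one is hashed. A self-contained sketch of the slot calculation (CRC16/XMODEM modulo 16384, per the Redis Cluster spec; not redis-py's own implementation):

    def crc16_xmodem(data):
        # CRC16-CCITT (XMODEM): polynomial 0x1021, initial value 0x0000.
        crc = 0
        for byte in data:
            crc ^= byte << 8
            for _ in range(8):
                crc = ((crc << 1) ^ 0x1021) if crc & 0x8000 else (crc << 1)
                crc &= 0xFFFF
        return crc

    def keyslot(key):
        start = key.find('{')
        if start != -1:
            end = key.find('}', start + 1)
            if end > start + 1:  # the hash tag must be non-empty
                key = key[start + 1:end]
        return crc16_xmodem(key.encode()) % 16384

    assert keyslot('a{foo}') == keyslot('b{foo}') == keyslot('foo')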
+ def test_multi_key_operation_with_multi_slots(self, r):
+ """
+ Test multi key operation with more than one slot
+ """
+ pipe = r.pipeline(transaction=False)
+ pipe.set('a{foo}', 1)
+ pipe.set('b{foo}', 2)
+ pipe.set('c{foo}', 3)
+ pipe.set('bar', 4)
+ pipe.set('bazz', 5)
+ pipe.get('a{foo}')
+ pipe.get('b{foo}')
+ pipe.get('c{foo}')
+ pipe.get('bar')
+ pipe.get('bazz')
+ res = pipe.execute()
+ assert res == [True, True, True, True, True, b'1', b'2', b'3', b'4',
+ b'5']
+
+ def test_connection_error_not_raised(self, r):
+ """
+ Test that the pipeline doesn't raise an error on connection error when
+ raise_on_error=False
+ """
+ key = 'foo'
+ node = r.get_node_from_key(key, False)
+
+ def raise_connection_error():
+ e = ConnectionError("error")
+ return e
+
+ with r.pipeline() as pipe:
+ mock_node_resp_func(node, raise_connection_error)
+ res = pipe.get(key).get(key).execute(raise_on_error=False)
+ assert node.redis_connection.connection.read_response.called
+ assert isinstance(res[0], ConnectionError)
+
+ def test_connection_error_raised(self, r):
+ """
+ Test that the pipeline raises an error on connection error when
+ raise_on_error=True
+ """
+ key = 'foo'
+ node = r.get_node_from_key(key, False)
+
+ def raise_connection_error():
+ e = ConnectionError("error")
+ return e
+
+ with r.pipeline() as pipe:
+ mock_node_resp_func(node, raise_connection_error)
+ with pytest.raises(ConnectionError):
+ pipe.get(key).get(key).execute(raise_on_error=True)
+
+ def test_asking_error(self, r):
+ """
+ Test redirection on ASK error
+ """
+ key = 'foo'
+ first_node = r.get_node_from_key(key, False)
+ ask_node = None
+ for node in r.get_nodes():
+ if node != first_node:
+ ask_node = node
+ break
+ if ask_node is None:
+ warnings.warn("skipping this test since the cluster has only one "
+ "node")
+ return
+ ask_msg = f"{r.keyslot(key)} {ask_node.host}:{ask_node.port}"
+
+ def raise_ask_error():
+ raise AskError(ask_msg)
+
+ with r.pipeline() as pipe:
+ mock_node_resp_func(first_node, raise_ask_error)
+ mock_node_resp(ask_node, 'MOCK_OK')
+ res = pipe.get(key).execute()
+ assert first_node.redis_connection.connection.read_response.called
+ assert ask_node.redis_connection.connection.read_response.called
+ assert res == ['MOCK_OK']
+
+ def test_empty_stack(self, r):
+ """
+        If a pipeline is executed with no commands, it should
+        return an empty list.
+ """
+ p = r.pipeline()
+ result = p.execute()
+ assert result == []
+
+
+@pytest.mark.onlycluster
+class TestReadOnlyPipeline:
+ """
+ Tests for ClusterPipeline class in readonly mode
+ """
+
+ def test_pipeline_readonly(self, r):
+ """
+        In readonly mode, only read commands are supported.
+ """
+ r.readonly(target_nodes='all')
+ r.set('foo71', 'a1') # we assume this key is set on 127.0.0.1:7001
+ r.zadd('foo88',
+ {'z1': 1}) # we assume this key is set on 127.0.0.1:7002
+ r.zadd('foo88', {'z2': 4})
+
+ with r.pipeline() as readonly_pipe:
+ readonly_pipe.get('foo71').zrange('foo88', 0, 5, withscores=True)
+ assert readonly_pipe.execute() == [
+ b'a1',
+ [(b'z1', 1.0), (b'z2', 4)],
+ ]
+
+ def test_moved_redirection_on_slave_with_default(self, r):
+ """
+        In a pipeline, a readonly client is redirected once and finally
+        reads from the master when the data has been completely moved.
+ """
+ key = 'bar'
+ r.set(key, 'foo')
+ # set read_from_replicas to True
+ r.read_from_replicas = True
+ primary = r.get_node_from_key(key, False)
+ replica = r.get_node_from_key(key, True)
+ with r.pipeline() as readwrite_pipe:
+ mock_node_resp(primary, "MOCK_FOO")
+ if replica is not None:
+ moved_error = f"{r.keyslot(key)} {primary.host}:{primary.port}"
+
+ def raise_moved_error():
+ raise MovedError(moved_error)
+
+ mock_node_resp_func(replica, raise_moved_error)
+ assert readwrite_pipe.reinitialize_counter == 0
+ readwrite_pipe.get(key).get(key)
+ assert readwrite_pipe.execute() == ["MOCK_FOO", "MOCK_FOO"]
+ if replica is not None:
+ # the slot has a replica as well, so MovedError should have
+ # occurred. If MovedError occurs, we should see the
+ # reinitialize_counter increase.
+ assert readwrite_pipe.reinitialize_counter == 1
+ conn = replica.redis_connection.connection
+ assert conn.read_response.called is True
+
+ def test_readonly_pipeline_from_readonly_client(self, request):
+ """
+ Test that the pipeline is initialized with readonly mode if the client
+ has it enabled
+ """
+        # Create a cluster client that reads from replicas
+ ro = _get_client(RedisCluster, request, read_from_replicas=True)
+ key = 'bar'
+ ro.set(key, 'foo')
+        import time
+        time.sleep(0.2)  # give the write a moment to reach the replicas
+ with ro.pipeline() as readonly_pipe:
+ mock_all_nodes_resp(ro, 'MOCK_OK')
+ assert readonly_pipe.read_from_replicas is True
+ assert readonly_pipe.get(key).get(
+ key).execute() == ['MOCK_OK', 'MOCK_OK']
+ slot_nodes = ro.nodes_manager.slots_cache[ro.keyslot(key)]
+ if len(slot_nodes) > 1:
+ executed_on_replica = False
+ for node in slot_nodes:
+ if node.server_type == REPLICA:
+ conn = node.redis_connection.connection
+ executed_on_replica = conn.read_response.called
+ if executed_on_replica:
+ break
+ assert executed_on_replica is True
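Taken together, these tests exercise a single client option. A hedged usage sketch, assuming a cluster is reachable at 127.0.0.1:7000 with at least one replica per shard:

    from redis.cluster import ClusterNode, RedisCluster

    rc = RedisCluster(startup_nodes=[ClusterNode('127.0.0.1', 7000)],
                      read_from_replicas=True)
    rc.set('bar', 'foo')    # writes always go to the slot's primary
    rc.get('bar')           # reads may be served by a replica
    with rc.pipeline() as pipe:
        assert pipe.read_from_replicas is True    # pipelines inherit the flag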
diff --git a/tests/test_command_parser.py b/tests/test_command_parser.py
new file mode 100644
index 0000000..ba129ba
--- /dev/null
+++ b/tests/test_command_parser.py
@@ -0,0 +1,62 @@
+import pytest
+
+from redis.commands import CommandsParser
+
+
+class TestCommandsParser:
+ def test_init_commands(self, r):
+ commands_parser = CommandsParser(r)
+ assert commands_parser.commands is not None
+ assert 'get' in commands_parser.commands
+
+ def test_get_keys_predetermined_key_location(self, r):
+ commands_parser = CommandsParser(r)
+ args1 = ['GET', 'foo']
+ args2 = ['OBJECT', 'encoding', 'foo']
+ args3 = ['MGET', 'foo', 'bar', 'foobar']
+ assert commands_parser.get_keys(r, *args1) == ['foo']
+ assert commands_parser.get_keys(r, *args2) == ['foo']
+ assert commands_parser.get_keys(r, *args3) == ['foo', 'bar', 'foobar']
+
+ @pytest.mark.filterwarnings("ignore:ResponseError")
+ def test_get_moveable_keys(self, r):
+ commands_parser = CommandsParser(r)
+ args1 = ['EVAL', 'return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}', 2, 'key1',
+ 'key2', 'first', 'second']
+ args2 = ['XREAD', 'COUNT', 2, b'STREAMS', 'mystream', 'writers', 0, 0]
+ args3 = ['ZUNIONSTORE', 'out', 2, 'zset1', 'zset2', 'WEIGHTS', 2, 3]
+ args4 = ['GEORADIUS', 'Sicily', 15, 37, 200, 'km', 'WITHCOORD',
+ b'STORE', 'out']
+ args5 = ['MEMORY USAGE', 'foo']
+ args6 = ['MIGRATE', '192.168.1.34', 6379, "", 0, 5000, b'KEYS',
+ 'key1', 'key2', 'key3']
+ args7 = ['MIGRATE', '192.168.1.34', 6379, "key1", 0, 5000]
+ args8 = ['STRALGO', 'LCS', 'STRINGS', 'string_a', 'string_b']
+ args9 = ['STRALGO', 'LCS', 'KEYS', 'key1', 'key2']
+
+        assert sorted(commands_parser.get_keys(
+            r, *args1)) == ['key1', 'key2']
+        assert sorted(commands_parser.get_keys(
+            r, *args2)) == ['mystream', 'writers']
+        assert sorted(commands_parser.get_keys(
+            r, *args3)) == ['out', 'zset1', 'zset2']
+        assert sorted(commands_parser.get_keys(
+            r, *args4)) == ['Sicily', 'out']
+        assert sorted(commands_parser.get_keys(r, *args5)) == ['foo']
+        assert sorted(commands_parser.get_keys(
+            r, *args6)) == ['key1', 'key2', 'key3']
+        assert sorted(commands_parser.get_keys(r, *args7)) == ['key1']
+        assert commands_parser.get_keys(r, *args8) is None
+        assert sorted(commands_parser.get_keys(
+            r, *args9)) == ['key1', 'key2']
+
+ def test_get_pubsub_keys(self, r):
+ commands_parser = CommandsParser(r)
+ args1 = ['PUBLISH', 'foo', 'bar']
+ args2 = ['PUBSUB NUMSUB', 'foo1', 'foo2', 'foo3']
+ args3 = ['PUBSUB channels', '*']
+ args4 = ['SUBSCRIBE', 'foo1', 'foo2', 'foo3']
+ assert commands_parser.get_keys(r, *args1) == ['foo']
+ assert commands_parser.get_keys(r, *args2) == ['foo1', 'foo2', 'foo3']
+ assert commands_parser.get_keys(r, *args3) == ['*']
+ assert commands_parser.get_keys(r, *args4) == ['foo1', 'foo2', 'foo3']
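For commands like EVAL, whose key positions COMMAND flags as movable, the keys must be pulled from the arguments themselves; EVAL's numkeys argument says how many of the following arguments are keys. A minimal sketch of that rule (the eval_keys helper is hypothetical, not the CommandsParser API):

    def eval_keys(args):
        # args = ['EVAL', script, numkeys, key1, ..., keyN, arg1, ...]
        numkeys = int(args[2])
        return args[3:3 + numkeys]

    assert eval_keys(['EVAL', 'return 1', 2, 'key1', 'key2', 'first']) == \
        ['key1', 'key2']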
diff --git a/tests/test_commands.py b/tests/test_commands.py
index b732702..ab7de05 100644
--- a/tests/test_commands.py
+++ b/tests/test_commands.py
@@ -35,7 +35,7 @@ def slowlog(request, r):
def redis_server_time(client):
seconds, milliseconds = client.time()
- timestamp = float('%s.%s' % (seconds, milliseconds))
+ timestamp = float(f'{seconds}.{milliseconds}')
return datetime.datetime.fromtimestamp(timestamp)
@@ -47,6 +47,7 @@ def get_stream_message(client, stream, message_id):
# RESPONSE CALLBACKS
+@pytest.mark.onlynoncluster
class TestResponseCallbacks:
"Tests for the response callback system"
@@ -68,18 +69,21 @@ class TestRedisCommands:
r['a']
# SERVER INFORMATION
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt("6.0.0")
def test_acl_cat_no_category(self, r):
categories = r.acl_cat()
assert isinstance(categories, list)
assert 'read' in categories
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt("6.0.0")
def test_acl_cat_with_category(self, r):
commands = r.acl_cat('read')
assert isinstance(commands, list)
assert 'get' in commands
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt("6.0.0")
@skip_if_redis_enterprise
def test_acl_deluser(self, r, request):
@@ -95,7 +99,7 @@ class TestRedisCommands:
assert r.acl_deluser(username) == 1
# now, a group of users
- users = ['bogususer_%d' % r for r in range(0, 5)]
+ users = [f'bogususer_{r}' for r in range(0, 5)]
for u in users:
r.acl_setuser(u, enabled=False, reset=True)
assert r.acl_deluser(*users) > 1
@@ -105,6 +109,7 @@ class TestRedisCommands:
assert r.acl_getuser(users[3]) is None
assert r.acl_getuser(users[4]) is None
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt("6.0.0")
@skip_if_redis_enterprise
def test_acl_genpass(self, r):
@@ -119,6 +124,7 @@ class TestRedisCommands:
r.acl_genpass(555)
assert isinstance(password, str)
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt("6.0.0")
@skip_if_redis_enterprise
def test_acl_getuser_setuser(self, r, request):
@@ -156,11 +162,11 @@ class TestRedisCommands:
commands=['+get', '+mget', '-hset'],
keys=['cache:*', 'objects:*'])
acl = r.acl_getuser(username)
- assert set(acl['categories']) == set(['-@all', '+@set', '+@hash'])
- assert set(acl['commands']) == set(['+get', '+mget', '-hset'])
+ assert set(acl['categories']) == {'-@all', '+@set', '+@hash'}
+ assert set(acl['commands']) == {'+get', '+mget', '-hset'}
assert acl['enabled'] is True
assert 'on' in acl['flags']
- assert set(acl['keys']) == set([b'cache:*', b'objects:*'])
+ assert set(acl['keys']) == {b'cache:*', b'objects:*'}
assert len(acl['passwords']) == 2
# test reset=False keeps existing ACL and applies new ACL on top
@@ -175,11 +181,11 @@ class TestRedisCommands:
commands=['+mget'],
keys=['objects:*'])
acl = r.acl_getuser(username)
- assert set(acl['categories']) == set(['-@all', '+@set', '+@hash'])
- assert set(acl['commands']) == set(['+get', '+mget'])
+ assert set(acl['categories']) == {'-@all', '+@set', '+@hash'}
+ assert set(acl['commands']) == {'+get', '+mget'}
assert acl['enabled'] is True
assert 'on' in acl['flags']
- assert set(acl['keys']) == set([b'cache:*', b'objects:*'])
+ assert set(acl['keys']) == {b'cache:*', b'objects:*'}
assert len(acl['passwords']) == 2
# test removal of passwords
@@ -207,12 +213,14 @@ class TestRedisCommands:
hashed_passwords=['-' + hashed_password])
assert len(r.acl_getuser(username)['passwords']) == 1
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt("6.0.0")
def test_acl_help(self, r):
res = r.acl_help()
assert isinstance(res, list)
assert len(res) != 0
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt("6.0.0")
@skip_if_redis_enterprise
def test_acl_list(self, r, request):
@@ -226,6 +234,7 @@ class TestRedisCommands:
users = r.acl_list()
assert len(users) == 2
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt("6.0.0")
@skip_if_redis_enterprise
def test_acl_log(self, r, request):
@@ -262,6 +271,7 @@ class TestRedisCommands:
assert 'client-info' in r.acl_log(count=1)[0]
assert r.acl_log_reset()
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt("6.0.0")
@skip_if_redis_enterprise
def test_acl_setuser_categories_without_prefix_fails(self, r, request):
@@ -274,6 +284,7 @@ class TestRedisCommands:
with pytest.raises(exceptions.DataError):
r.acl_setuser(username, categories=['list'])
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt("6.0.0")
@skip_if_redis_enterprise
def test_acl_setuser_commands_without_prefix_fails(self, r, request):
@@ -286,6 +297,7 @@ class TestRedisCommands:
with pytest.raises(exceptions.DataError):
r.acl_setuser(username, commands=['get'])
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt("6.0.0")
@skip_if_redis_enterprise
def test_acl_setuser_add_passwords_and_nopass_fails(self, r, request):
@@ -298,28 +310,33 @@ class TestRedisCommands:
with pytest.raises(exceptions.DataError):
r.acl_setuser(username, passwords='+mypass', nopass=True)
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt("6.0.0")
def test_acl_users(self, r):
users = r.acl_users()
assert isinstance(users, list)
assert len(users) > 0
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt("6.0.0")
def test_acl_whoami(self, r):
username = r.acl_whoami()
assert isinstance(username, str)
+ @pytest.mark.onlynoncluster
def test_client_list(self, r):
clients = r.client_list()
assert isinstance(clients[0], dict)
assert 'addr' in clients[0]
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('6.2.0')
def test_client_info(self, r):
info = r.client_info()
assert isinstance(info, dict)
assert 'addr' in info
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('5.0.0')
def test_client_list_types_not_replica(self, r):
with pytest.raises(exceptions.RedisError):
@@ -333,6 +350,7 @@ class TestRedisCommands:
clients = r.client_list(_type='replica')
assert isinstance(clients, list)
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('6.2.0')
def test_client_list_client_id(self, r, request):
clients = r.client_list()
@@ -347,16 +365,19 @@ class TestRedisCommands:
clients_listed = r.client_list(client_id=clients[:-1])
assert len(clients_listed) > 1
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('5.0.0')
def test_client_id(self, r):
assert r.client_id() > 0
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('6.2.0')
def test_client_trackinginfo(self, r):
res = r.client_trackinginfo()
assert len(res) > 2
assert 'prefixes' in res
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('5.0.0')
def test_client_unblock(self, r):
myid = r.client_id()
@@ -364,15 +385,18 @@ class TestRedisCommands:
assert not r.client_unblock(myid, error=True)
assert not r.client_unblock(myid, error=False)
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('2.6.9')
def test_client_getname(self, r):
assert r.client_getname() is None
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('2.6.9')
def test_client_setname(self, r):
assert r.client_setname('redis_py_test')
assert r.client_getname() == 'redis_py_test'
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('2.6.9')
def test_client_kill(self, r, r2):
r.client_setname('redis-py-c1')
@@ -381,8 +405,7 @@ class TestRedisCommands:
if client.get('name') in ['redis-py-c1', 'redis-py-c2']]
assert len(clients) == 2
- clients_by_name = dict([(client.get('name'), client)
- for client in clients])
+ clients_by_name = {client.get('name'): client for client in clients}
client_addr = clients_by_name['redis-py-c2'].get('addr')
assert r.client_kill(client_addr) is True
@@ -406,6 +429,7 @@ class TestRedisCommands:
with pytest.raises(exceptions.DataError):
r.client_kill_filter(_type="caster")
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('2.8.12')
def test_client_kill_filter_by_id(self, r, r2):
r.client_setname('redis-py-c1')
@@ -414,8 +438,7 @@ class TestRedisCommands:
if client.get('name') in ['redis-py-c1', 'redis-py-c2']]
assert len(clients) == 2
- clients_by_name = dict([(client.get('name'), client)
- for client in clients])
+ clients_by_name = {client.get('name'): client for client in clients}
client_2_id = clients_by_name['redis-py-c2'].get('id')
resp = r.client_kill_filter(_id=client_2_id)
@@ -426,6 +449,7 @@ class TestRedisCommands:
assert len(clients) == 1
assert clients[0].get('name') == 'redis-py-c1'
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('2.8.12')
def test_client_kill_filter_by_addr(self, r, r2):
r.client_setname('redis-py-c1')
@@ -434,8 +458,7 @@ class TestRedisCommands:
if client.get('name') in ['redis-py-c1', 'redis-py-c2']]
assert len(clients) == 2
- clients_by_name = dict([(client.get('name'), client)
- for client in clients])
+ clients_by_name = {client.get('name'): client for client in clients}
client_2_addr = clients_by_name['redis-py-c2'].get('addr')
resp = r.client_kill_filter(addr=client_2_addr)
@@ -461,8 +484,7 @@ class TestRedisCommands:
if client.get('name') in ['redis-py-c1', 'redis-py-c2']]
assert len(clients) == 2
- clients_by_name = dict([(client.get('name'), client)
- for client in clients])
+ clients_by_name = {client.get('name'): client for client in clients}
client_2_addr = clients_by_name['redis-py-c2'].get('laddr')
assert r.client_kill_filter(laddr=client_2_addr)
@@ -481,6 +503,7 @@ class TestRedisCommands:
assert c['user'] != killuser
r.acl_deluser(killuser)
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('2.9.50')
@skip_if_redis_enterprise
def test_client_pause(self, r):
@@ -489,11 +512,13 @@ class TestRedisCommands:
with pytest.raises(exceptions.RedisError):
r.client_pause(timeout='not an integer')
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('6.2.0')
@skip_if_redis_enterprise
def test_client_unpause(self, r):
assert r.client_unpause() == b'OK'
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('3.2.0')
def test_client_reply(self, r, r_timeout):
assert r_timeout.client_reply('ON') == b'OK'
@@ -507,6 +532,7 @@ class TestRedisCommands:
# validate it was set
assert r.get('foo') == b'bar'
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('6.0.0')
@skip_if_redis_enterprise
def test_client_getredir(self, r):
@@ -519,6 +545,7 @@ class TestRedisCommands:
# # assert 'maxmemory' in data
# assert data['maxmemory'].isdigit()
+ @pytest.mark.onlynoncluster
@skip_if_redis_enterprise
def test_config_resetstat(self, r):
r.ping()
@@ -535,14 +562,17 @@ class TestRedisCommands:
assert r.config_set('timeout', 0)
assert r.config_get()['timeout'] == '0'
+ @pytest.mark.onlynoncluster
def test_dbsize(self, r):
r['a'] = 'foo'
r['b'] = 'bar'
assert r.dbsize() == 2
+ @pytest.mark.onlynoncluster
def test_echo(self, r):
assert r.echo('foo bar') == b'foo bar'
+ @pytest.mark.onlynoncluster
def test_info(self, r):
r['a'] = 'foo'
r['b'] = 'bar'
@@ -551,10 +581,12 @@ class TestRedisCommands:
assert 'arch_bits' in info.keys()
assert 'redis_version' in info.keys()
+ @pytest.mark.onlynoncluster
@skip_if_redis_enterprise
def test_lastsave(self, r):
assert isinstance(r.lastsave(), datetime.datetime)
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('5.0.0')
def test_lolwut(self, r):
lolwut = r.lolwut().decode('utf-8')
@@ -573,6 +605,7 @@ class TestRedisCommands:
def test_ping(self, r):
assert r.ping()
+ @pytest.mark.onlynoncluster
def test_quit(self, r):
assert r.quit()
@@ -619,6 +652,7 @@ class TestRedisCommands:
# Complexity info stored as fourth item in list
response.insert(3, COMPLEXITY_STATEMENT)
return r.response_callbacks[command_name](responses, **options)
+
r.parse_response = parse_response
# test
@@ -632,6 +666,7 @@ class TestRedisCommands:
# tear down monkeypatch
r.parse_response = old_parse_response
+ @pytest.mark.onlynoncluster
def test_slowlog_get_limit(self, r, slowlog):
assert r.slowlog_reset()
r.get('foo')
@@ -640,6 +675,7 @@ class TestRedisCommands:
# only one command, based on the number we passed to slowlog_get()
assert len(slowlog) == 1
+ @pytest.mark.onlynoncluster
def test_slowlog_length(self, r, slowlog):
r.get('foo')
assert isinstance(r.slowlog_len(), int)
@@ -683,12 +719,14 @@ class TestRedisCommands:
assert r.bitcount('a', -2, -1) == 2
assert r.bitcount('a', 1, 1) == 1
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('2.6.0')
def test_bitop_not_empty_string(self, r):
r['a'] = ''
r.bitop('not', 'r', 'a')
assert r.get('r') is None
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('2.6.0')
def test_bitop_not(self, r):
test_str = b'\xAA\x00\xFF\x55'
@@ -697,6 +735,7 @@ class TestRedisCommands:
r.bitop('not', 'r', 'a')
assert int(binascii.hexlify(r['r']), 16) == correct
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('2.6.0')
def test_bitop_not_in_place(self, r):
test_str = b'\xAA\x00\xFF\x55'
@@ -705,6 +744,7 @@ class TestRedisCommands:
r.bitop('not', 'a', 'a')
assert int(binascii.hexlify(r['a']), 16) == correct
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('2.6.0')
def test_bitop_single_string(self, r):
test_str = b'\x01\x02\xFF'
@@ -716,6 +756,7 @@ class TestRedisCommands:
assert r['res2'] == test_str
assert r['res3'] == test_str
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('2.6.0')
def test_bitop_string_operands(self, r):
r['a'] = b'\x01\x02\xFF\xFF'
@@ -727,6 +768,7 @@ class TestRedisCommands:
assert int(binascii.hexlify(r['res2']), 16) == 0x0102FFFF
assert int(binascii.hexlify(r['res3']), 16) == 0x000000FF
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('2.8.7')
def test_bitpos(self, r):
key = 'key:bitpos'
@@ -749,6 +791,7 @@ class TestRedisCommands:
with pytest.raises(exceptions.RedisError):
r.bitpos(key, 7) == 12
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('6.2.0')
def test_copy(self, r):
assert r.copy("a", "b") == 0
@@ -757,6 +800,7 @@ class TestRedisCommands:
assert r.get("a") == b"foo"
assert r.get("b") == b"foo"
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('6.2.0')
def test_copy_and_replace(self, r):
r.set("a", "foo1")
@@ -764,6 +808,7 @@ class TestRedisCommands:
assert r.copy("a", "b") == 0
assert r.copy("a", "b", replace=True) == 1
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('6.2.0')
def test_copy_to_another_database(self, request):
r0 = _get_client(redis.Redis, request, db=0)
@@ -979,6 +1024,7 @@ class TestRedisCommands:
assert r.incrbyfloat('a', 1.1) == 2.1
assert float(r['a']) == float(2.1)
+ @pytest.mark.onlynoncluster
def test_keys(self, r):
assert r.keys() == []
keys_with_underscores = {b'test_a', b'test_b'}
@@ -988,6 +1034,7 @@ class TestRedisCommands:
assert set(r.keys(pattern='test_*')) == keys_with_underscores
assert set(r.keys(pattern='test*')) == keys
+ @pytest.mark.onlynoncluster
def test_mget(self, r):
assert r.mget([]) == []
assert r.mget(['a', 'b']) == [None, None]
@@ -996,24 +1043,28 @@ class TestRedisCommands:
r['c'] = '3'
assert r.mget('a', 'other', 'b', 'c') == [b'1', None, b'2', b'3']
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('6.2.0')
def test_lmove(self, r):
r.rpush('a', 'one', 'two', 'three', 'four')
assert r.lmove('a', 'b')
assert r.lmove('a', 'b', 'right', 'left')
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('6.2.0')
def test_blmove(self, r):
r.rpush('a', 'one', 'two', 'three', 'four')
assert r.blmove('a', 'b', 5)
assert r.blmove('a', 'b', 1, 'RIGHT', 'LEFT')
+ @pytest.mark.onlynoncluster
def test_mset(self, r):
d = {'a': b'1', 'b': b'2', 'c': b'3'}
assert r.mset(d)
for k, v in d.items():
assert r[k] == v
+ @pytest.mark.onlynoncluster
def test_msetnx(self, r):
d = {'a': b'1', 'b': b'2', 'c': b'3'}
assert r.msetnx(d)
@@ -1092,18 +1143,21 @@ class TestRedisCommands:
# with duplications
assert len(r.hrandfield('key', -10)) == 10
+ @pytest.mark.onlynoncluster
def test_randomkey(self, r):
assert r.randomkey() is None
for key in ('a', 'b', 'c'):
r[key] = 1
assert r.randomkey() in (b'a', b'b', b'c')
+ @pytest.mark.onlynoncluster
def test_rename(self, r):
r['a'] = '1'
assert r.rename('a', 'b')
assert r.get('a') is None
assert r['b'] == b'1'
+ @pytest.mark.onlynoncluster
def test_renamenx(self, r):
r['a'] = '1'
r['b'] = '2'
@@ -1209,8 +1263,8 @@ class TestRedisCommands:
@skip_if_server_version_lt('6.0.0')
def test_stralgo_lcs(self, r):
- key1 = 'key1'
- key2 = 'key2'
+ key1 = '{foo}key1'
+ key2 = '{foo}key2'
value1 = 'ohmytext'
value2 = 'mynewtext'
res = 'mytext'
@@ -1300,6 +1354,7 @@ class TestRedisCommands:
assert r.type('a') == b'zset'
# LIST COMMANDS
+ @pytest.mark.onlynoncluster
def test_blpop(self, r):
r.rpush('a', '1', '2')
r.rpush('b', '3', '4')
@@ -1311,6 +1366,7 @@ class TestRedisCommands:
r.rpush('c', '1')
assert r.blpop('c', timeout=1) == (b'c', b'1')
+ @pytest.mark.onlynoncluster
def test_brpop(self, r):
r.rpush('a', '1', '2')
r.rpush('b', '3', '4')
@@ -1322,6 +1378,7 @@ class TestRedisCommands:
r.rpush('c', '1')
assert r.brpop('c', timeout=1) == (b'c', b'1')
+ @pytest.mark.onlynoncluster
def test_brpoplpush(self, r):
r.rpush('a', '1', '2')
r.rpush('b', '3', '4')
@@ -1331,6 +1388,7 @@ class TestRedisCommands:
assert r.lrange('a', 0, -1) == []
assert r.lrange('b', 0, -1) == [b'1', b'2', b'3', b'4']
+ @pytest.mark.onlynoncluster
def test_brpoplpush_empty_string(self, r):
r.rpush('a', '')
assert r.brpoplpush('a', 'b') == b''
@@ -1434,6 +1492,7 @@ class TestRedisCommands:
assert r.rpop('a') is None
assert r.rpop('a', 3) is None
+ @pytest.mark.onlynoncluster
def test_rpoplpush(self, r):
r.rpush('a', 'a1', 'a2', 'a3')
r.rpush('b', 'b1', 'b2', 'b3')
@@ -1487,6 +1546,7 @@ class TestRedisCommands:
assert r.lrange('a', 0, -1) == [b'1', b'2', b'3', b'4']
# SCAN COMMANDS
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('2.8.0')
def test_scan(self, r):
r.set('a', 1)
@@ -1498,6 +1558,7 @@ class TestRedisCommands:
_, keys = r.scan(match='a')
assert set(keys) == {b'a'}
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt("6.0.0")
def test_scan_type(self, r):
r.sadd('a-set', 1)
@@ -1506,6 +1567,7 @@ class TestRedisCommands:
_, keys = r.scan(match='a*', _type='SET')
assert set(keys) == {b'a-set'}
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('2.8.0')
def test_scan_iter(self, r):
r.set('a', 1)
@@ -1577,12 +1639,14 @@ class TestRedisCommands:
r.sadd('a', '1', '2', '3')
assert r.scard('a') == 3
+ @pytest.mark.onlynoncluster
def test_sdiff(self, r):
r.sadd('a', '1', '2', '3')
assert r.sdiff('a', 'b') == {b'1', b'2', b'3'}
r.sadd('b', '2', '3')
assert r.sdiff('a', 'b') == {b'1'}
+ @pytest.mark.onlynoncluster
def test_sdiffstore(self, r):
r.sadd('a', '1', '2', '3')
assert r.sdiffstore('c', 'a', 'b') == 3
@@ -1591,12 +1655,14 @@ class TestRedisCommands:
assert r.sdiffstore('c', 'a', 'b') == 1
assert r.smembers('c') == {b'1'}
+ @pytest.mark.onlynoncluster
def test_sinter(self, r):
r.sadd('a', '1', '2', '3')
assert r.sinter('a', 'b') == set()
r.sadd('b', '2', '3')
assert r.sinter('a', 'b') == {b'2', b'3'}
+ @pytest.mark.onlynoncluster
def test_sinterstore(self, r):
r.sadd('a', '1', '2', '3')
assert r.sinterstore('c', 'a', 'b') == 0
@@ -1623,6 +1689,7 @@ class TestRedisCommands:
assert r.smismember('a', '1', '4', '2', '3') == result_list
assert r.smismember('a', ['1', '4', '2', '3']) == result_list
+ @pytest.mark.onlynoncluster
def test_smove(self, r):
r.sadd('a', 'a1', 'a2')
r.sadd('b', 'b1', 'b2')
@@ -1668,11 +1735,13 @@ class TestRedisCommands:
assert r.srem('a', '2', '4') == 2
assert r.smembers('a') == {b'1', b'3'}
+ @pytest.mark.onlynoncluster
def test_sunion(self, r):
r.sadd('a', '1', '2')
r.sadd('b', '2', '3')
assert r.sunion('a', 'b') == {b'1', b'2', b'3'}
+ @pytest.mark.onlynoncluster
def test_sunionstore(self, r):
r.sadd('a', '1', '2')
r.sadd('b', '2', '3')
@@ -1684,6 +1753,7 @@ class TestRedisCommands:
with pytest.raises(NotImplementedError):
r.debug_segfault()
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('3.2.0')
def test_script_debug(self, r):
with pytest.raises(NotImplementedError):
@@ -1740,11 +1810,11 @@ class TestRedisCommands:
def test_zadd_gt_lt(self, r):
for i in range(1, 20):
- r.zadd('a', {'a%s' % i: i})
+ r.zadd('a', {f'a{i}': i})
assert r.zadd('a', {'a20': 5}, gt=3) == 1
for i in range(1, 20):
- r.zadd('a', {'a%s' % i: i})
+ r.zadd('a', {f'a{i}': i})
assert r.zadd('a', {'a2': 5}, lt=1) == 0
# cannot use both nx and xx options
@@ -1765,6 +1835,7 @@ class TestRedisCommands:
assert r.zcount('a', 1, '(' + str(2)) == 1
assert r.zcount('a', 10, 20) == 0
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('6.2.0')
def test_zdiff(self, r):
r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3})
@@ -1772,6 +1843,7 @@ class TestRedisCommands:
assert r.zdiff(['a', 'b']) == [b'a3']
assert r.zdiff(['a', 'b'], withscores=True) == [b'a3', b'3']
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('6.2.0')
def test_zdiffstore(self, r):
r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3})
@@ -1793,6 +1865,7 @@ class TestRedisCommands:
assert r.zlexcount('a', '-', '+') == 7
assert r.zlexcount('a', '[b', '[f') == 5
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('6.2.0')
def test_zinter(self, r):
r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 1})
@@ -1815,6 +1888,7 @@ class TestRedisCommands:
assert r.zinter({'a': 1, 'b': 2, 'c': 3}, withscores=True) \
== [(b'a3', 20), (b'a1', 23)]
+ @pytest.mark.onlynoncluster
def test_zinterstore_sum(self, r):
r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1})
r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2})
@@ -1823,6 +1897,7 @@ class TestRedisCommands:
assert r.zrange('d', 0, -1, withscores=True) == \
[(b'a3', 8), (b'a1', 9)]
+ @pytest.mark.onlynoncluster
def test_zinterstore_max(self, r):
r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1})
r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2})
@@ -1831,6 +1906,7 @@ class TestRedisCommands:
assert r.zrange('d', 0, -1, withscores=True) == \
[(b'a3', 5), (b'a1', 6)]
+ @pytest.mark.onlynoncluster
def test_zinterstore_min(self, r):
r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3})
r.zadd('b', {'a1': 2, 'a2': 3, 'a3': 5})
@@ -1839,6 +1915,7 @@ class TestRedisCommands:
assert r.zrange('d', 0, -1, withscores=True) == \
[(b'a1', 1), (b'a3', 3)]
+ @pytest.mark.onlynoncluster
def test_zinterstore_with_weight(self, r):
r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1})
r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2})
@@ -1877,6 +1954,7 @@ class TestRedisCommands:
# with duplications
assert len(r.zrandmember('a', -10)) == 10
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('4.9.0')
def test_bzpopmax(self, r):
r.zadd('a', {'a1': 1, 'a2': 2})
@@ -1889,6 +1967,7 @@ class TestRedisCommands:
r.zadd('c', {'c1': 100})
assert r.bzpopmax('c', timeout=1) == (b'c', b'c1', 100)
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('4.9.0')
def test_bzpopmin(self, r):
r.zadd('a', {'a1': 1, 'a2': 2})
@@ -1958,6 +2037,7 @@ class TestRedisCommands:
# rev
assert r.zrange('a', 0, 1, desc=True) == [b'a5', b'a4']
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('6.2.0')
def test_zrangestore(self, r):
r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3})
@@ -2095,6 +2175,7 @@ class TestRedisCommands:
assert r.zscore('a', 'a2') == 2.0
assert r.zscore('a', 'a4') is None
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('6.2.0')
def test_zunion(self, r):
r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1})
@@ -2115,6 +2196,7 @@ class TestRedisCommands:
assert r.zunion({'a': 1, 'b': 2, 'c': 3}, withscores=True)\
== [(b'a2', 5), (b'a4', 12), (b'a3', 20), (b'a1', 23)]
+ @pytest.mark.onlynoncluster
def test_zunionstore_sum(self, r):
r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1})
r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2})
@@ -2123,6 +2205,7 @@ class TestRedisCommands:
assert r.zrange('d', 0, -1, withscores=True) == \
[(b'a2', 3), (b'a4', 4), (b'a3', 8), (b'a1', 9)]
+ @pytest.mark.onlynoncluster
def test_zunionstore_max(self, r):
r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1})
r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2})
@@ -2131,6 +2214,7 @@ class TestRedisCommands:
assert r.zrange('d', 0, -1, withscores=True) == \
[(b'a2', 2), (b'a4', 4), (b'a3', 5), (b'a1', 6)]
+ @pytest.mark.onlynoncluster
def test_zunionstore_min(self, r):
r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3})
r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 4})
@@ -2139,6 +2223,7 @@ class TestRedisCommands:
assert r.zrange('d', 0, -1, withscores=True) == \
[(b'a1', 1), (b'a2', 2), (b'a3', 3), (b'a4', 4)]
+ @pytest.mark.onlynoncluster
def test_zunionstore_with_weight(self, r):
r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1})
r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2})
@@ -2166,6 +2251,7 @@ class TestRedisCommands:
assert r.pfadd('a', *members) == 0
assert r.pfcount('a') == len(members)
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('2.8.9')
def test_pfcount(self, r):
members = {b'1', b'2', b'3'}
@@ -2176,6 +2262,7 @@ class TestRedisCommands:
assert r.pfcount('b') == len(members_b)
assert r.pfcount('a', 'b') == len(members_b.union(members))
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('2.8.9')
def test_pfmerge(self, r):
mema = {b'1', b'2', b'3'}
@@ -2270,8 +2357,9 @@ class TestRedisCommands:
assert r.hmget('a', 'a', 'b', 'c') == [b'1', b'2', b'3']
def test_hmset(self, r):
- warning_message = (r'^Redis\.hmset\(\) is deprecated\. '
- r'Use Redis\.hset\(\) instead\.$')
+ redis_class = type(r).__name__
+ warning_message = (r'^{0}\.hmset\(\) is deprecated\. '
+ r'Use {0}\.hset\(\) instead\.$'.format(redis_class))
h = {b'a': b'1', b'b': b'2', b'c': b'3'}
with pytest.warns(DeprecationWarning, match=warning_message):
assert r.hmset('a', h)
@@ -2306,6 +2394,7 @@ class TestRedisCommands:
r.rpush('a', '3', '2', '1', '4')
assert r.sort('a', start=1, num=2) == [b'2', b'3']
+ @pytest.mark.onlynoncluster
def test_sort_by(self, r):
r['score:1'] = 8
r['score:2'] = 3
@@ -2313,6 +2402,7 @@ class TestRedisCommands:
r.rpush('a', '3', '2', '1')
assert r.sort('a', by='score:*') == [b'2', b'3', b'1']
+ @pytest.mark.onlynoncluster
def test_sort_get(self, r):
r['user:1'] = 'u1'
r['user:2'] = 'u2'
@@ -2320,6 +2410,7 @@ class TestRedisCommands:
r.rpush('a', '2', '3', '1')
assert r.sort('a', get='user:*') == [b'u1', b'u2', b'u3']
+ @pytest.mark.onlynoncluster
def test_sort_get_multi(self, r):
r['user:1'] = 'u1'
r['user:2'] = 'u2'
@@ -2328,6 +2419,7 @@ class TestRedisCommands:
assert r.sort('a', get=('user:*', '#')) == \
[b'u1', b'1', b'u2', b'2', b'u3', b'3']
+ @pytest.mark.onlynoncluster
def test_sort_get_groups_two(self, r):
r['user:1'] = 'u1'
r['user:2'] = 'u2'
@@ -2336,6 +2428,7 @@ class TestRedisCommands:
assert r.sort('a', get=('user:*', '#'), groups=True) == \
[(b'u1', b'1'), (b'u2', b'2'), (b'u3', b'3')]
+ @pytest.mark.onlynoncluster
def test_sort_groups_string_get(self, r):
r['user:1'] = 'u1'
r['user:2'] = 'u2'
@@ -2344,6 +2437,7 @@ class TestRedisCommands:
with pytest.raises(exceptions.DataError):
r.sort('a', get='user:*', groups=True)
+ @pytest.mark.onlynoncluster
def test_sort_groups_just_one_get(self, r):
r['user:1'] = 'u1'
r['user:2'] = 'u2'
@@ -2360,6 +2454,7 @@ class TestRedisCommands:
with pytest.raises(exceptions.DataError):
r.sort('a', groups=True)
+ @pytest.mark.onlynoncluster
def test_sort_groups_three_gets(self, r):
r['user:1'] = 'u1'
r['user:2'] = 'u2'
@@ -2384,11 +2479,13 @@ class TestRedisCommands:
assert r.sort('a', alpha=True) == \
[b'a', b'b', b'c', b'd', b'e']
+ @pytest.mark.onlynoncluster
def test_sort_store(self, r):
r.rpush('a', '2', '3', '1')
assert r.sort('a', store='sorted_values') == 3
assert r.lrange('sorted_values', 0, -1) == [b'1', b'2', b'3']
+ @pytest.mark.onlynoncluster
def test_sort_all_options(self, r):
r['user:1:username'] = 'zeus'
r['user:2:username'] = 'titan'
@@ -2421,66 +2518,84 @@ class TestRedisCommands:
r.execute_command('SADD', 'issue#924', 1)
r.execute_command('SORT', 'issue#924')
+ @pytest.mark.onlynoncluster
def test_cluster_addslots(self, mock_cluster_resp_ok):
assert mock_cluster_resp_ok.cluster('ADDSLOTS', 1) is True
+ @pytest.mark.onlynoncluster
def test_cluster_count_failure_reports(self, mock_cluster_resp_int):
assert isinstance(mock_cluster_resp_int.cluster(
'COUNT-FAILURE-REPORTS', 'node'), int)
+ @pytest.mark.onlynoncluster
def test_cluster_countkeysinslot(self, mock_cluster_resp_int):
assert isinstance(mock_cluster_resp_int.cluster(
'COUNTKEYSINSLOT', 2), int)
+ @pytest.mark.onlynoncluster
def test_cluster_delslots(self, mock_cluster_resp_ok):
assert mock_cluster_resp_ok.cluster('DELSLOTS', 1) is True
+ @pytest.mark.onlynoncluster
def test_cluster_failover(self, mock_cluster_resp_ok):
assert mock_cluster_resp_ok.cluster('FAILOVER', 1) is True
+ @pytest.mark.onlynoncluster
def test_cluster_forget(self, mock_cluster_resp_ok):
assert mock_cluster_resp_ok.cluster('FORGET', 1) is True
+ @pytest.mark.onlynoncluster
def test_cluster_info(self, mock_cluster_resp_info):
assert isinstance(mock_cluster_resp_info.cluster('info'), dict)
+ @pytest.mark.onlynoncluster
def test_cluster_keyslot(self, mock_cluster_resp_int):
assert isinstance(mock_cluster_resp_int.cluster(
'keyslot', 'asdf'), int)
+ @pytest.mark.onlynoncluster
def test_cluster_meet(self, mock_cluster_resp_ok):
assert mock_cluster_resp_ok.cluster('meet', 'ip', 'port', 1) is True
+ @pytest.mark.onlynoncluster
def test_cluster_nodes(self, mock_cluster_resp_nodes):
assert isinstance(mock_cluster_resp_nodes.cluster('nodes'), dict)
+ @pytest.mark.onlynoncluster
def test_cluster_replicate(self, mock_cluster_resp_ok):
assert mock_cluster_resp_ok.cluster('replicate', 'nodeid') is True
+ @pytest.mark.onlynoncluster
def test_cluster_reset(self, mock_cluster_resp_ok):
assert mock_cluster_resp_ok.cluster('reset', 'hard') is True
+ @pytest.mark.onlynoncluster
def test_cluster_saveconfig(self, mock_cluster_resp_ok):
assert mock_cluster_resp_ok.cluster('saveconfig') is True
+ @pytest.mark.onlynoncluster
def test_cluster_setslot(self, mock_cluster_resp_ok):
assert mock_cluster_resp_ok.cluster('setslot', 1,
'IMPORTING', 'nodeid') is True
+ @pytest.mark.onlynoncluster
def test_cluster_slaves(self, mock_cluster_resp_slaves):
assert isinstance(mock_cluster_resp_slaves.cluster(
'slaves', 'nodeid'), dict)
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('3.0.0')
@skip_if_redis_enterprise
def test_readwrite(self, r):
assert r.readwrite()
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('3.0.0')
def test_readonly_invalid_cluster_state(self, r):
with pytest.raises(exceptions.RedisError):
r.readonly()
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('3.0.0')
def test_readonly(self, mock_cluster_resp_ok):
assert mock_cluster_resp_ok.readonly() is True
@@ -2707,6 +2822,7 @@ class TestRedisCommands:
with pytest.raises(exceptions.DataError):
assert r.geosearch('barcelona', member='place3', radius=100, any=1)
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('6.2.0')
def test_geosearchstore(self, r):
values = (2.1909389952632, 41.433791470673, 'place1') + \
@@ -2717,6 +2833,7 @@ class TestRedisCommands:
longitude=2.191, latitude=41.433, radius=1000)
assert r.zrange('places_barcelona', 0, -1) == [b'place1']
+ @pytest.mark.onlynoncluster
@skip_unless_arch_bits(64)
@skip_if_server_version_lt('6.2.0')
def test_geosearchstore_dist(self, r):
@@ -2808,6 +2925,7 @@ class TestRedisCommands:
assert r.georadius('barcelona', 2.191, 41.433, 3000, sort='DESC') == \
[b'place2', b'place1']
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('3.2.0')
def test_georadius_store(self, r):
values = (2.1909389952632, 41.433791470673, 'place1') + \
@@ -2817,6 +2935,7 @@ class TestRedisCommands:
r.georadius('barcelona', 2.191, 41.433, 1000, store='places_barcelona')
assert r.zrange('places_barcelona', 0, -1) == [b'place1']
+ @pytest.mark.onlynoncluster
@skip_unless_arch_bits(64)
@skip_if_server_version_lt('3.2.0')
def test_georadius_store_dist(self, r):
@@ -3664,6 +3783,7 @@ class TestRedisCommands:
r.set('foo', 'bar')
assert isinstance(r.memory_usage('foo'), int)
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('4.0.0')
@skip_if_redis_enterprise
def test_module_list(self, r):
@@ -3677,14 +3797,25 @@ class TestRedisCommands:
assert isinstance(res, int)
assert res >= 100
+ @pytest.mark.onlynoncluster
+ @skip_if_server_version_lt('2.8.13')
+ def test_command_getkeys(self, r):
+ res = r.command_getkeys('MSET', 'a', 'b', 'c', 'd', 'e', 'f')
+ assert res == ['a', 'c', 'e']
+ res = r.command_getkeys('EVAL', '"not consulted"',
+ '3', 'key1', 'key2', 'key3',
+ 'arg1', 'arg2', 'arg3', 'argN')
+ assert res == ['key1', 'key2', 'key3']
+
@skip_if_server_version_lt('2.8.13')
def test_command(self, r):
res = r.command()
assert len(res) >= 100
- cmds = [c[0].decode() for c in res]
+ cmds = list(res.keys())
assert 'set' in cmds
assert 'get' in cmds
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('4.0.0')
@skip_if_redis_enterprise
def test_module(self, r):
@@ -3740,6 +3871,7 @@ class TestRedisCommands:
assert r.restore(key, 0, dumpdata, frequency=5)
assert r.get(key) == b'blee!'
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('5.0.0')
@skip_if_redis_enterprise
def test_replicaof(self, r):
@@ -3748,6 +3880,7 @@ class TestRedisCommands:
assert r.replicaof("NO", "ONE")
+@pytest.mark.onlynoncluster
class TestBinarySave:
def test_binary_get_set(self, r):
diff --git a/tests/test_connection.py b/tests/test_connection.py
index 7c44768..0071aca 100644
--- a/tests/test_connection.py
+++ b/tests/test_connection.py
@@ -8,13 +8,14 @@ from .conftest import skip_if_server_version_lt
@pytest.mark.skipif(HIREDIS_AVAILABLE, reason='PythonParser only')
+@pytest.mark.onlynoncluster
def test_invalid_response(r):
raw = b'x'
parser = r.connection._parser
with mock.patch.object(parser._buffer, 'readline', return_value=raw):
with pytest.raises(InvalidResponse) as cm:
parser.read_response()
- assert str(cm.value) == 'Protocol Error: %r' % raw
+ assert str(cm.value) == f'Protocol Error: {raw!r}'
@skip_if_server_version_lt('4.0.0')
diff --git a/tests/test_connection_pool.py b/tests/test_connection_pool.py
index 521f520..288d43d 100644
--- a/tests/test_connection_pool.py
+++ b/tests/test_connection_pool.py
@@ -484,6 +484,7 @@ class TestConnection:
assert len(pool._available_connections) == 1
assert not pool._available_connections[0]._sock
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('2.8.8')
@skip_if_redis_enterprise
def test_busy_loading_disconnects_socket(self, r):
@@ -495,6 +496,7 @@ class TestConnection:
r.execute_command('DEBUG', 'ERROR', 'LOADING fake message')
assert not r.connection._sock
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('2.8.8')
@skip_if_redis_enterprise
def test_busy_loading_from_pipeline_immediate_command(self, r):
@@ -511,6 +513,7 @@ class TestConnection:
assert len(pool._available_connections) == 1
assert not pool._available_connections[0]._sock
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('2.8.8')
@skip_if_redis_enterprise
def test_busy_loading_from_pipeline(self, r):
@@ -571,6 +574,7 @@ class TestConnection:
r.execute_command('DEBUG', 'ERROR', 'ERR invalid password')
+@pytest.mark.onlynoncluster
class TestMultiConnectionClient:
@pytest.fixture()
def r(self, request):
@@ -584,6 +588,7 @@ class TestMultiConnectionClient:
assert r.get('a') == b'123'
+@pytest.mark.onlynoncluster
class TestHealthCheck:
interval = 60
diff --git a/tests/test_helpers.py b/tests/test_helpers.py
index 467e00c..402eccf 100644
--- a/tests/test_helpers.py
+++ b/tests/test_helpers.py
@@ -5,7 +5,8 @@ from redis.commands.helpers import (
nativestr,
parse_to_list,
quote_string,
- random_string
+ random_string,
+ parse_to_dict
)
@@ -19,11 +20,34 @@ def test_list_or_args():
def test_parse_to_list():
+ assert parse_to_list(None) == []
r = ["hello", b"my name", "45", "555.55", "is simon!", None]
assert parse_to_list(r) == \
["hello", "my name", 45, 555.55, "is simon!", None]
+def test_parse_to_dict():
+ assert parse_to_dict(None) == {}
+ r = [['Some number', '1.0345'],
+ ['Some string', 'hello'],
+ ['Child iterators',
+ ['Time', '0.2089', 'Counter', 3, 'Child iterators',
+ ['Type', 'bar', 'Time', '0.0729', 'Counter', 3],
+ ['Type', 'barbar', 'Time', '0.058', 'Counter', 3]]]]
+ assert parse_to_dict(r) == {
+ 'Child iterators': {
+ 'Child iterators': [
+ {'Counter': 3.0, 'Time': 0.0729, 'Type': 'bar'},
+ {'Counter': 3.0, 'Time': 0.058, 'Type': 'barbar'}
+ ],
+ 'Counter': 3.0,
+ 'Time': 0.2089
+ },
+ 'Some number': 1.0345,
+ 'Some string': 'hello'
+ }
+
+
def test_nativestr():
assert nativestr('teststr') == 'teststr'
assert nativestr(b'teststr') == 'teststr'
diff --git a/tests/test_json.py b/tests/test_json.py
index abc5776..187bfe2 100644
--- a/tests/test_json.py
+++ b/tests/test_json.py
@@ -275,7 +275,6 @@ def test_objlen(client):
assert len(obj) == client.json().objlen("obj")
-@pytest.mark.pipeline
@pytest.mark.redismod
def test_json_commands_in_pipeline(client):
p = client.json().pipeline()
@@ -290,8 +289,9 @@ def test_json_commands_in_pipeline(client):
client.flushdb()
p = client.json().pipeline()
d = {"hello": "world", "oh": "snap"}
- p.jsonset("foo", Path.rootPath(), d)
- p.jsonget("foo")
+ with pytest.deprecated_call():
+ p.jsonset("foo", Path.rootPath(), d)
+ p.jsonget("foo")
p.exists("notarealkey")
p.delete("foo")
assert [True, d, 0, 1] == p.execute()
@@ -463,14 +463,18 @@ def test_numby_commands_dollar(client):
client.json().set("doc1", "$", {"a": "b", "b": [
{"a": 2}, {"a": 5.0}, {"a": "c"}]})
- assert client.json().nummultby("doc1", "$..a", 2) == \
- [None, 4, 10, None]
- assert client.json().nummultby("doc1", "$..a", 2.5) == \
- [None, 10.0, 25.0, None]
+ # test list
+ with pytest.deprecated_call():
+ assert client.json().nummultby("doc1", "$..a", 2) == \
+ [None, 4, 10, None]
+ assert client.json().nummultby("doc1", "$..a", 2.5) == \
+ [None, 10.0, 25.0, None]
+
# Test single
- assert client.json().nummultby("doc1", "$.b[1].a", 2) == [50.0]
- assert client.json().nummultby("doc1", "$.b[2].a", 2) == [None]
- assert client.json().nummultby("doc1", "$.b[1].a", 3) == [150.0]
+ with pytest.deprecated_call():
+ assert client.json().nummultby("doc1", "$.b[1].a", 2) == [50.0]
+ assert client.json().nummultby("doc1", "$.b[2].a", 2) == [None]
+ assert client.json().nummultby("doc1", "$.b[1].a", 3) == [150.0]
# test missing keys
with pytest.raises(exceptions.ResponseError):
@@ -485,7 +489,9 @@ def test_numby_commands_dollar(client):
# Test legacy NUMMULTBY
client.json().set("doc1", "$", {"a": "b", "b": [
{"a": 2}, {"a": 5.0}, {"a": "c"}]})
- client.json().nummultby("doc1", ".b[0].a", 3) == 6
+
+ with pytest.deprecated_call():
+        assert client.json().nummultby("doc1", ".b[0].a", 3) == 6
@pytest.mark.redismod
@@ -824,9 +830,11 @@ def test_objkeys_dollar(client):
# Test missing key
assert client.json().objkeys("non_existing_doc", "..a") is None
- # Test missing key
+ # Test non existing doc
with pytest.raises(exceptions.ResponseError):
- client.json().objkeys("doc1", "$.nowhere")
+ assert client.json().objkeys("non_existing_doc", "$..a") == []
+
+ assert client.json().objkeys("doc1", "$..nowhere") == []
@pytest.mark.redismod
@@ -845,12 +853,11 @@ def test_objlen_dollar(client):
# Test single
assert client.json().objlen("doc1", "$.nested1.a") == [2]
- # Test missing key
- assert client.json().objlen("non_existing_doc", "$..a") is None
-
- # Test missing path
+    # Test missing key and missing path
with pytest.raises(exceptions.ResponseError):
- client.json().objlen("doc1", "$.nowhere")
+ client.json().objlen("non_existing_doc", "$..a")
+
+ assert client.json().objlen("doc1", "$.nowhere") == []
# Test legacy
assert client.json().objlen("doc1", ".*.a") == 2
@@ -862,8 +869,8 @@ def test_objlen_dollar(client):
assert client.json().objlen("non_existing_doc", "..a") is None
# Test missing path
- with pytest.raises(exceptions.ResponseError):
- client.json().objlen("doc1", ".nowhere")
+    # a missing legacy path no longer raises ResponseError
+ client.json().objlen("doc1", ".nowhere")
@pytest.mark.redismod
@@ -1143,11 +1150,11 @@ def test_resp_dollar(client):
]
# Test missing path
- with pytest.raises(exceptions.ResponseError):
- client.json().resp("doc1", "$.nowhere")
+ client.json().resp("doc1", "$.nowhere")
# Test missing key
- assert client.json().resp("non_existing_doc", "$..a") is None
+    # no exception is raised for a non-existing doc
+ client.json().resp("non_existing_doc", "$..a")
@pytest.mark.redismod
@@ -1391,6 +1398,7 @@ def test_arrindex_dollar(client):
"None") == 0
+@pytest.mark.redismod
def test_decoders_and_unstring():
assert unstring("4") == 4
assert unstring("45.55") == 45.55
diff --git a/tests/test_lock.py b/tests/test_lock.py
index fa76385..66148ed 100644
--- a/tests/test_lock.py
+++ b/tests/test_lock.py
@@ -7,6 +7,7 @@ from redis.lock import Lock
from .conftest import _get_client
+@pytest.mark.onlynoncluster
class TestLock:
@pytest.fixture()
def r_decoded(self, request):
@@ -220,6 +221,7 @@ class TestLock:
lock.reacquire()
+@pytest.mark.onlynoncluster
class TestLockClassSelection:
def test_lock_class_argument(self, r):
class MyLock:
diff --git a/tests/test_monitor.py b/tests/test_monitor.py
index a8a535b..6c3ea33 100644
--- a/tests/test_monitor.py
+++ b/tests/test_monitor.py
@@ -1,3 +1,4 @@
+import pytest
from .conftest import (
skip_if_redis_enterprise,
skip_ifnot_redis_enterprise,
@@ -5,6 +6,7 @@ from .conftest import (
)
+@pytest.mark.onlynoncluster
class TestMonitor:
def test_wait_command_not_found(self, r):
"Make sure the wait_for_command func works when command is not found"
diff --git a/tests/test_multiprocessing.py b/tests/test_multiprocessing.py
index d0feef1..5968b2b 100644
--- a/tests/test_multiprocessing.py
+++ b/tests/test_multiprocessing.py
@@ -89,9 +89,7 @@ class TestMultiprocessing:
A child will create its own connections when using a pool created
by a parent.
"""
- pool = ConnectionPool.from_url('redis://{}:{}'.format(master_host[0],
- master_host[1],
- ),
+ pool = ConnectionPool.from_url(f'redis://{master_host[0]}:{master_host[1]}',
max_connections=max_connections)
conn = pool.get_connection('ping')
@@ -126,8 +124,7 @@ class TestMultiprocessing:
A child process that uses the same pool as its parent isn't affected
when the parent disconnects all connections within the pool.
"""
- pool = ConnectionPool.from_url('redis://{}:{}'.format(master_host[0],
- master_host[1]),
+ pool = ConnectionPool.from_url(f'redis://{master_host[0]}:{master_host[1]}',
max_connections=max_connections)
conn = pool.get_connection('ping')
diff --git a/tests/test_pipeline.py b/tests/test_pipeline.py
index 08bd40b..a87ed71 100644
--- a/tests/test_pipeline.py
+++ b/tests/test_pipeline.py
@@ -59,6 +59,7 @@ class TestPipeline:
assert r['b'] == b'b1'
assert r['c'] == b'c1'
+ @pytest.mark.onlynoncluster
def test_pipeline_no_transaction_watch(self, r):
r['a'] = 0
@@ -70,6 +71,7 @@ class TestPipeline:
pipe.set('a', int(a) + 1)
assert pipe.execute() == [True]
+ @pytest.mark.onlynoncluster
def test_pipeline_no_transaction_watch_failure(self, r):
r['a'] = 0
@@ -129,6 +131,7 @@ class TestPipeline:
assert pipe.set('z', 'zzz').execute() == [True]
assert r['z'] == b'zzz'
+ @pytest.mark.onlynoncluster
def test_transaction_with_empty_error_command(self, r):
"""
Commands with custom EMPTY_ERROR functionality return their default
@@ -143,6 +146,7 @@ class TestPipeline:
assert result[1] == []
assert result[2]
+ @pytest.mark.onlynoncluster
def test_pipeline_with_empty_error_command(self, r):
"""
Commands with custom EMPTY_ERROR functionality return their default
@@ -171,6 +175,7 @@ class TestPipeline:
assert pipe.set('z', 'zzz').execute() == [True]
assert r['z'] == b'zzz'
+ @pytest.mark.onlynoncluster
def test_parse_error_raised_transaction(self, r):
with r.pipeline() as pipe:
pipe.multi()
@@ -186,6 +191,7 @@ class TestPipeline:
assert pipe.set('z', 'zzz').execute() == [True]
assert r['z'] == b'zzz'
+ @pytest.mark.onlynoncluster
def test_watch_succeed(self, r):
r['a'] = 1
r['b'] = 2
@@ -203,6 +209,7 @@ class TestPipeline:
assert pipe.execute() == [True]
assert not pipe.watching
+ @pytest.mark.onlynoncluster
def test_watch_failure(self, r):
r['a'] = 1
r['b'] = 2
@@ -217,6 +224,7 @@ class TestPipeline:
assert not pipe.watching
+ @pytest.mark.onlynoncluster
def test_watch_failure_in_empty_transaction(self, r):
r['a'] = 1
r['b'] = 2
@@ -230,6 +238,7 @@ class TestPipeline:
assert not pipe.watching
+ @pytest.mark.onlynoncluster
def test_unwatch(self, r):
r['a'] = 1
r['b'] = 2
@@ -242,6 +251,7 @@ class TestPipeline:
pipe.get('a')
assert pipe.execute() == [b'1']
+ @pytest.mark.onlynoncluster
def test_watch_exec_no_unwatch(self, r):
r['a'] = 1
r['b'] = 2
@@ -262,6 +272,7 @@ class TestPipeline:
unwatch_command = wait_for_command(r, m, 'UNWATCH')
assert unwatch_command is None, "should not send UNWATCH"
+ @pytest.mark.onlynoncluster
def test_watch_reset_unwatch(self, r):
r['a'] = 1
@@ -276,6 +287,7 @@ class TestPipeline:
assert unwatch_command is not None
assert unwatch_command['command'] == 'UNWATCH'
+ @pytest.mark.onlynoncluster
def test_transaction_callable(self, r):
r['a'] = 1
r['b'] = 2
@@ -300,6 +312,7 @@ class TestPipeline:
assert result == [True]
assert r['c'] == b'4'
+ @pytest.mark.onlynoncluster
def test_transaction_callable_returns_value_from_callable(self, r):
def callback(pipe):
# No need to do anything here since we only want the return value
@@ -332,7 +345,7 @@ class TestPipeline:
with pytest.raises(redis.ResponseError) as ex:
pipe.execute()
- expected = 'Command # 1 (LLEN %s) of pipeline caused error: ' % key
+ expected = f'Command # 1 (LLEN {key}) of pipeline caused error: '
assert str(ex.value).startswith(expected)
assert r[key] == b'1'
@@ -354,6 +367,7 @@ class TestPipeline:
assert pipe == pipe2
assert response == [True, [0, 0, 15, 15, 14], b'1']
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('2.0.0')
def test_pipeline_discard(self, r):
diff --git a/tests/test_pubsub.py b/tests/test_pubsub.py
index e242459..b019bae 100644
--- a/tests/test_pubsub.py
+++ b/tests/test_pubsub.py
@@ -55,7 +55,7 @@ def make_subscribe_test_data(pubsub, type):
'unsub_func': pubsub.punsubscribe,
'keys': ['f*', 'b*', 'uni' + chr(4456) + '*']
}
- assert False, 'invalid subscribe type: %s' % type
+ assert False, f'invalid subscribe type: {type}'
class TestPubSubSubscribeUnsubscribe:
@@ -123,6 +123,7 @@ class TestPubSubSubscribeUnsubscribe:
kwargs = make_subscribe_test_data(r.pubsub(), 'channel')
self._test_resubscribe_on_reconnection(**kwargs)
+ @pytest.mark.onlynoncluster
def test_resubscribe_to_patterns_on_reconnection(self, r):
kwargs = make_subscribe_test_data(r.pubsub(), 'pattern')
self._test_resubscribe_on_reconnection(**kwargs)
@@ -177,6 +178,7 @@ class TestPubSubSubscribeUnsubscribe:
kwargs = make_subscribe_test_data(r.pubsub(), 'channel')
self._test_subscribed_property(**kwargs)
+ @pytest.mark.onlynoncluster
def test_subscribe_property_with_patterns(self, r):
kwargs = make_subscribe_test_data(r.pubsub(), 'pattern')
self._test_subscribed_property(**kwargs)
@@ -220,6 +222,7 @@ class TestPubSubSubscribeUnsubscribe:
kwargs = make_subscribe_test_data(r.pubsub(), 'channel')
self._test_sub_unsub_resub(**kwargs)
+ @pytest.mark.onlynoncluster
def test_sub_unsub_resub_patterns(self, r):
kwargs = make_subscribe_test_data(r.pubsub(), 'pattern')
self._test_sub_unsub_resub(**kwargs)
@@ -307,6 +310,7 @@ class TestPubSubMessages:
assert wait_for_message(p) is None
assert self.message == make_message('message', 'foo', 'test message')
+ @pytest.mark.onlynoncluster
def test_pattern_message_handler(self, r):
p = r.pubsub(ignore_subscribe_messages=True)
p.psubscribe(**{'f*': self.message_handler})
@@ -326,6 +330,9 @@ class TestPubSubMessages:
assert wait_for_message(p) is None
assert self.message == make_message('message', channel, 'test message')
+ @pytest.mark.onlynoncluster
+ # see: https://redis-py-cluster.readthedocs.io/en/stable/pubsub.html
+ # #known-limitations-with-pubsub
def test_unicode_pattern_message_handler(self, r):
p = r.pubsub(ignore_subscribe_messages=True)
pattern = 'uni' + chr(4456) + '*'
@@ -401,6 +408,7 @@ class TestPubSubAutoDecoding:
self.channel,
self.data)
+ @pytest.mark.onlynoncluster
def test_pattern_publish(self, r):
p = r.pubsub()
p.psubscribe(self.pattern)
@@ -473,6 +481,7 @@ class TestPubSubRedisDown:
class TestPubSubSubcommands:
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('2.8.0')
def test_pubsub_channels(self, r):
p = r.pubsub()
@@ -482,6 +491,7 @@ class TestPubSubSubcommands:
expected = [b'bar', b'baz', b'foo', b'quux']
assert all([channel in r.pubsub_channels() for channel in expected])
+ @pytest.mark.onlynoncluster
@skip_if_server_version_lt('2.8.0')
def test_pubsub_numsub(self, r):
p1 = r.pubsub()
@@ -497,7 +507,7 @@ class TestPubSubSubcommands:
assert wait_for_message(p3)['type'] == 'subscribe'
channels = [(b'foo', 1), (b'bar', 2), (b'baz', 3)]
- assert channels == r.pubsub_numsub('foo', 'bar', 'baz')
+ assert r.pubsub_numsub('foo', 'bar', 'baz') == channels
@skip_if_server_version_lt('2.8.0')
def test_pubsub_numpat(self, r):
@@ -529,6 +539,7 @@ class TestPubSubPings:
pattern=None)
+@pytest.mark.onlynoncluster
class TestPubSubConnectionKilled:
@skip_if_server_version_lt('3.0.0')
diff --git a/tests/test_scripting.py b/tests/test_scripting.py
index 352f3ba..7614b12 100644
--- a/tests/test_scripting.py
+++ b/tests/test_scripting.py
@@ -22,6 +22,7 @@ return "hello " .. name
"""
+@pytest.mark.onlynoncluster
class TestScripting:
@pytest.fixture(autouse=True)
def reset_scripts(self, r):
diff --git a/tests/test_search.py b/tests/test_search.py
index d1fc75f..c7b570c 100644
--- a/tests/test_search.py
+++ b/tests/test_search.py
@@ -82,8 +82,8 @@ def createIndex(client, num_docs=100, definition=None):
try:
client.create_index(
(TextField("play", weight=5.0),
- TextField("txt"),
- NumericField("chapter")),
+ TextField("txt"),
+ NumericField("chapter")),
definition=definition,
)
except redis.ResponseError:
@@ -99,7 +99,7 @@ def createIndex(client, num_docs=100, definition=None):
play, chapter, _, text = \
line[1], line[2], line[4], line[5]
- key = "{}:{}".format(play, chapter).lower()
+ key = f"{play}:{chapter}".lower()
d = chapters.setdefault(key, {})
d["play"] = play
d["txt"] = d.get("txt", "") + " " + text
@@ -320,8 +320,8 @@ def test_stopwords(client):
def test_filters(client):
client.ft().create_index(
(TextField("txt"),
- NumericField("num"),
- GeoField("loc"))
+ NumericField("num"),
+ GeoField("loc"))
)
client.ft().add_document(
"doc1",
@@ -379,7 +379,7 @@ def test_payloads_with_no_content(client):
def test_sort_by(client):
client.ft().create_index(
(TextField("txt"),
- NumericField("num", sortable=True))
+ NumericField("num", sortable=True))
)
client.ft().add_document("doc1", txt="foo bar", num=1)
client.ft().add_document("doc2", txt="foo baz", num=2)
@@ -424,7 +424,7 @@ def test_example(client):
# Creating the index definition and schema
client.ft().create_index(
(TextField("title", weight=5.0),
- TextField("body"))
+ TextField("body"))
)
# Indexing a document
@@ -552,8 +552,8 @@ def test_no_index(client):
def test_partial(client):
client.ft().create_index(
(TextField("f1"),
- TextField("f2"),
- TextField("f3"))
+ TextField("f2"),
+ TextField("f3"))
)
client.ft().add_document("doc1", f1="f1_val", f2="f2_val")
client.ft().add_document("doc2", f1="f1_val", f2="f2_val")
@@ -574,8 +574,8 @@ def test_partial(client):
def test_no_create(client):
client.ft().create_index(
(TextField("f1"),
- TextField("f2"),
- TextField("f3"))
+ TextField("f2"),
+ TextField("f3"))
)
client.ft().add_document("doc1", f1="f1_val", f2="f2_val")
client.ft().add_document("doc2", f1="f1_val", f2="f2_val")
@@ -604,8 +604,8 @@ def test_no_create(client):
def test_explain(client):
client.ft().create_index(
(TextField("f1"),
- TextField("f2"),
- TextField("f3"))
+ TextField("f2"),
+ TextField("f3"))
)
res = client.ft().explain("@f3:f3_val @f2:f2_val @f1:f1_val")
assert res
@@ -629,8 +629,8 @@ def test_summarize(client):
doc = sorted(client.ft().search(q).docs)[0]
assert "<b>Henry</b> IV" == doc.play
assert (
- "ACT I SCENE I. London. The palace. Enter <b>KING</b> <b>HENRY</b>, LORD JOHN OF LANCASTER, the EARL of WESTMORELAND, SIR... " # noqa
- == doc.txt
+ "ACT I SCENE I. London. The palace. Enter <b>KING</b> <b>HENRY</b>, LORD JOHN OF LANCASTER, the EARL of WESTMORELAND, SIR... " # noqa
+ == doc.txt
)
q = Query("king henry").paging(0, 1).summarize().highlight()
@@ -638,8 +638,8 @@ def test_summarize(client):
doc = sorted(client.ft().search(q).docs)[0]
assert "<b>Henry</b> ... " == doc.play
assert (
- "ACT I SCENE I. London. The palace. Enter <b>KING</b> <b>HENRY</b>, LORD JOHN OF LANCASTER, the EARL of WESTMORELAND, SIR... " # noqa
- == doc.txt
+ "ACT I SCENE I. London. The palace. Enter <b>KING</b> <b>HENRY</b>, LORD JOHN OF LANCASTER, the EARL of WESTMORELAND, SIR... " # noqa
+ == doc.txt
)
@@ -812,10 +812,10 @@ def test_spell_check(client):
res = client.ft().spellcheck("lorm", include="dict")
assert len(res["lorm"]) == 3
assert (
- res["lorm"][0]["suggestion"],
- res["lorm"][1]["suggestion"],
- res["lorm"][2]["suggestion"],
- ) == ("lorem", "lore", "lorm")
+ res["lorm"][0]["suggestion"],
+ res["lorm"][1]["suggestion"],
+ res["lorm"][2]["suggestion"],
+ ) == ("lorem", "lore", "lorm")
assert (res["lorm"][0]["score"], res["lorm"][1]["score"]) == ("0.5", "0")
# test spellcheck exclude
@@ -861,7 +861,7 @@ def test_phonetic_matcher(client):
res = client.ft().search(Query("Jon"))
assert 2 == len(res.docs)
- assert ["John", "Jon"] == sorted([d.name for d in res.docs])
+ assert ["John", "Jon"] == sorted(d.name for d in res.docs)
@pytest.mark.redismod
@@ -873,7 +873,7 @@ def test_scorer(client):
)
client.ft().add_document(
"doc2",
- description="Quick alice was beginning to get very tired of sitting by her quick sister on the bank, and of having nothing to do.", # noqa
+ description="Quick alice was beginning to get very tired of sitting by her quick sister on the bank, and of having nothing to do.", # noqa
)
# default scorer is TFIDF
@@ -930,7 +930,7 @@ def test_config(client):
@pytest.mark.redismod
-def test_aggregations(client):
+def test_aggregations_groupby(client):
# Creating the index definition and schema
client.ft().create_index(
(
@@ -967,36 +967,242 @@ def test_aggregations(client):
req = aggregations.AggregateRequest("redis").group_by(
"@parent",
reducers.count(),
+ )
+
+ res = client.ft().aggregate(req).rows[0]
+ assert res[1] == "redis"
+ assert res[3] == "3"
+
+ req = aggregations.AggregateRequest("redis").group_by(
+ "@parent",
reducers.count_distinct("@title"),
+ )
+
+ res = client.ft().aggregate(req).rows[0]
+ assert res[1] == "redis"
+ assert res[3] == "3"
+
+ req = aggregations.AggregateRequest("redis").group_by(
+ "@parent",
reducers.count_distinctish("@title"),
+ )
+
+ res = client.ft().aggregate(req).rows[0]
+ assert res[1] == "redis"
+ assert res[3] == "3"
+
+ req = aggregations.AggregateRequest("redis").group_by(
+ "@parent",
reducers.sum("@random_num"),
+ )
+
+ res = client.ft().aggregate(req).rows[0]
+ assert res[1] == "redis"
+ assert res[3] == "21" # 10+8+3
+
+ req = aggregations.AggregateRequest("redis").group_by(
+ "@parent",
reducers.min("@random_num"),
+ )
+
+ res = client.ft().aggregate(req).rows[0]
+ assert res[1] == "redis"
+ assert res[3] == "3" # min(10,8,3)
+
+ req = aggregations.AggregateRequest("redis").group_by(
+ "@parent",
reducers.max("@random_num"),
+ )
+
+ res = client.ft().aggregate(req).rows[0]
+ assert res[1] == "redis"
+ assert res[3] == "10" # max(10,8,3)
+
+ req = aggregations.AggregateRequest("redis").group_by(
+ "@parent",
reducers.avg("@random_num"),
+ )
+
+ res = client.ft().aggregate(req).rows[0]
+ assert res[1] == "redis"
+ assert res[3] == "7" # (10+3+8)/3
+
+ req = aggregations.AggregateRequest("redis").group_by(
+ "@parent",
reducers.stddev("random_num"),
+ )
+
+ res = client.ft().aggregate(req).rows[0]
+ assert res[1] == "redis"
+ assert res[3] == "3.60555127546"
+
+ req = aggregations.AggregateRequest("redis").group_by(
+ "@parent",
reducers.quantile("@random_num", 0.5),
+ )
+
+ res = client.ft().aggregate(req).rows[0]
+ assert res[1] == "redis"
+ assert res[3] == "10"
+
+ req = aggregations.AggregateRequest("redis").group_by(
+ "@parent",
reducers.tolist("@title"),
- reducers.first_value("@title"),
- reducers.random_sample("@title", 2),
)
+ res = client.ft().aggregate(req).rows[0]
+ assert res[1] == "redis"
+ assert res[3] == ["RediSearch", "RedisAI", "RedisJson"]
+
+ req = aggregations.AggregateRequest("redis").group_by(
+ "@parent",
+ reducers.first_value("@title").alias("first"),
+ )
+
+ res = client.ft().aggregate(req).rows[0]
+ assert res == ['parent', 'redis', 'first', 'RediSearch']
+
+ req = aggregations.AggregateRequest("redis").group_by(
+ "@parent",
+ reducers.random_sample("@title", 2).alias("random"),
+ )
+
+ res = client.ft().aggregate(req).rows[0]
+ assert res[1] == "redis"
+ assert res[2] == "random"
+ assert len(res[3]) == 2
+ assert res[3][0] in ["RediSearch", "RedisAI", "RedisJson"]
+
+
+@pytest.mark.redismod
+def test_aggregations_sort_by_and_limit(client):
+ client.ft().create_index(
+ (
+ TextField("t1"),
+ TextField("t2"),
+ )
+ )
+
+ client.ft().client.hset("doc1", mapping={'t1': 'a', 't2': 'b'})
+ client.ft().client.hset("doc2", mapping={'t1': 'b', 't2': 'a'})
+
+ # test sort_by using SortDirection
+ req = aggregations.AggregateRequest("*") \
+ .sort_by(aggregations.Asc("@t2"), aggregations.Desc("@t1"))
+ res = client.ft().aggregate(req)
+ assert res.rows[0] == ['t2', 'a', 't1', 'b']
+ assert res.rows[1] == ['t2', 'b', 't1', 'a']
+
+ # test sort_by without SortDirection
+ req = aggregations.AggregateRequest("*") \
+ .sort_by("@t1")
+ res = client.ft().aggregate(req)
+ assert res.rows[0] == ['t1', 'a']
+ assert res.rows[1] == ['t1', 'b']
+
+ # test sort_by with max
+ req = aggregations.AggregateRequest("*") \
+ .sort_by("@t1", max=1)
+ res = client.ft().aggregate(req)
+ assert len(res.rows) == 1
+
+ # test limit
+ req = aggregations.AggregateRequest("*") \
+ .sort_by("@t1").limit(1, 1)
+ res = client.ft().aggregate(req)
+ assert len(res.rows) == 1
+ assert res.rows[0] == ['t1', 'b']
+
+
+@pytest.mark.redismod
+def test_aggregations_load(client):
+ client.ft().create_index(
+ (
+ TextField("t1"),
+ TextField("t2"),
+ )
+ )
+
+ client.ft().client.hset("doc1", mapping={'t1': 'hello', 't2': 'world'})
+
+ # load t1
+ req = aggregations.AggregateRequest("*").load("t1")
+ res = client.ft().aggregate(req)
+ assert res.rows[0] == ['t1', 'hello']
+
+ # load t2
+ req = aggregations.AggregateRequest("*").load("t2")
+ res = client.ft().aggregate(req)
+ assert res.rows[0] == ['t2', 'world']
+
+
+@pytest.mark.redismod
+def test_aggregations_apply(client):
+ client.ft().create_index(
+ (
+ TextField("PrimaryKey", sortable=True),
+ NumericField("CreatedDateTimeUTC", sortable=True),
+ )
+ )
+
+ client.ft().client.hset(
+ "doc1",
+ mapping={
+ 'PrimaryKey': '9::362330',
+ 'CreatedDateTimeUTC': '637387878524969984'
+ }
+ )
+ client.ft().client.hset(
+ "doc2",
+ mapping={
+ 'PrimaryKey': '9::362329',
+ 'CreatedDateTimeUTC': '637387875859270016'
+ }
+ )
+
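+    # APPLY projects a computed field per row: each timestamp times 10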
+ req = aggregations.AggregateRequest("*") \
+ .apply(CreatedDateTimeUTC='@CreatedDateTimeUTC * 10')
res = client.ft().aggregate(req)
+ assert res.rows[0] == ['CreatedDateTimeUTC', '6373878785249699840']
+ assert res.rows[1] == ['CreatedDateTimeUTC', '6373878758592700416']
- res = res.rows[0]
- assert len(res) == 26
- assert "redis" == res[1]
- assert "3" == res[3]
- assert "3" == res[5]
- assert "3" == res[7]
- assert "21" == res[9]
- assert "3" == res[11]
- assert "10" == res[13]
- assert "7" == res[15]
- assert "3.60555127546" == res[17]
- assert "10" == res[19]
- assert ["RediSearch", "RedisAI", "RedisJson"] == res[21]
- assert "RediSearch" == res[23]
- assert 2 == len(res[25])
+
+@pytest.mark.redismod
+def test_aggregations_filter(client):
+ client.ft().create_index(
+ (
+ TextField("name", sortable=True),
+ NumericField("age", sortable=True),
+ )
+ )
+
+ client.ft().client.hset(
+ "doc1",
+ mapping={
+ 'name': 'bar',
+ 'age': '25'
+ }
+ )
+ client.ft().client.hset(
+ "doc2",
+ mapping={
+ 'name': 'foo',
+ 'age': '19'
+ }
+ )
+
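+    # FILTER keeps only the rows matching the boolean expression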
+ req = aggregations.AggregateRequest("*") \
+ .filter("@name=='foo' && @age < 20")
+ res = client.ft().aggregate(req)
+ assert len(res.rows) == 1
+ assert res.rows[0] == ['name', 'foo', 'age', '19']
+
+ req = aggregations.AggregateRequest("*") \
+ .filter("@age > 15").sort_by("@age")
+ res = client.ft().aggregate(req)
+ assert len(res.rows) == 2
+ assert res.rows[0] == ['age', '19']
+ assert res.rows[1] == ['age', '25']
@pytest.mark.redismod
@@ -1020,25 +1226,25 @@ def test_index_definition(client):
)
assert [
- "ON",
- "JSON",
- "PREFIX",
- 2,
- "hset:",
- "henry",
- "FILTER",
- "@f1==32",
- "LANGUAGE_FIELD",
- "play",
- "LANGUAGE",
- "English",
- "SCORE_FIELD",
- "chapter",
- "SCORE",
- 0.5,
- "PAYLOAD_FIELD",
- "txt",
- ] == definition.args
+ "ON",
+ "JSON",
+ "PREFIX",
+ 2,
+ "hset:",
+ "henry",
+ "FILTER",
+ "@f1==32",
+ "LANGUAGE_FIELD",
+ "play",
+ "LANGUAGE",
+ "English",
+ "SCORE_FIELD",
+ "chapter",
+ "SCORE",
+ 0.5,
+ "PAYLOAD_FIELD",
+ "txt",
+ ] == definition.args
createIndex(client.ft(), num_docs=500, definition=definition)
@@ -1313,3 +1519,46 @@ def test_json_with_jsonpath(client):
assert res.docs[0].id == "doc:1"
with pytest.raises(Exception):
res.docs[0].name_unsupported
+
+
+@pytest.mark.redismod
+def test_profile(client):
+ client.ft().create_index((TextField('t'),))
+ client.ft().client.hset('1', 't', 'hello')
+ client.ft().client.hset('2', 't', 'world')
+
+ # check using Query
+ q = Query('hello|world').no_content()
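+    # profile() returns the command result together with execution details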
+ res, det = client.ft().profile(q)
+ assert det['Iterators profile']['Counter'] == 2.0
+ assert len(det['Iterators profile']['Child iterators']) == 2
+ assert det['Iterators profile']['Type'] == 'UNION'
+ assert det['Parsing time'] < 0.3
+ assert len(res.docs) == 2 # check also the search result
+
+ # check using AggregateRequest
+ req = aggregations.AggregateRequest("*").load("t")\
+ .apply(prefix="startswith(@t, 'hel')")
+ res, det = client.ft().profile(req)
+ assert det['Iterators profile']['Counter'] == 2.0
+ assert det['Iterators profile']['Type'] == 'WILDCARD'
+ assert det['Parsing time'] < 0.3
+ assert len(res.rows) == 2 # check also the search result
+
+
+@pytest.mark.redismod
+def test_profile_limited(client):
+ client.ft().create_index((TextField('t'),))
+ client.ft().client.hset('1', 't', 'hello')
+ client.ft().client.hset('2', 't', 'hell')
+ client.ft().client.hset('3', 't', 'help')
+ client.ft().client.hset('4', 't', 'helowa')
+
+ q = Query('%hell% hel*')
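+    # limited=True replaces each union's child-iterator list with a summary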
+ res, det = client.ft().profile(q, limited=True)
+ assert det['Iterators profile']['Child iterators'][0]['Child iterators'] \
+ == 'The number of iterators in the union is 3'
+ assert det['Iterators profile']['Child iterators'][1]['Child iterators'] \
+ == 'The number of iterators in the union is 4'
+ assert det['Iterators profile']['Type'] == 'INTERSECT'
+ assert len(res.docs) == 3 # check also the search result
diff --git a/tests/test_sentinel.py b/tests/test_sentinel.py
index 7f3ff0a..9377d5b 100644
--- a/tests/test_sentinel.py
+++ b/tests/test_sentinel.py
@@ -81,16 +81,19 @@ def sentinel(request, cluster):
return Sentinel([('foo', 26379), ('bar', 26379)])
+@pytest.mark.onlynoncluster
def test_discover_master(sentinel, master_ip):
address = sentinel.discover_master('mymaster')
assert address == (master_ip, 6379)
+@pytest.mark.onlynoncluster
def test_discover_master_error(sentinel):
with pytest.raises(MasterNotFoundError):
sentinel.discover_master('xxx')
+@pytest.mark.onlynoncluster
def test_discover_master_sentinel_down(cluster, sentinel, master_ip):
# Put first sentinel 'foo' down
cluster.nodes_down.add(('foo', 26379))
@@ -100,6 +103,7 @@ def test_discover_master_sentinel_down(cluster, sentinel, master_ip):
assert sentinel.sentinels[0].id == ('bar', 26379)
+@pytest.mark.onlynoncluster
def test_discover_master_sentinel_timeout(cluster, sentinel, master_ip):
# Put first sentinel 'foo' down
cluster.nodes_timeout.add(('foo', 26379))
@@ -109,6 +113,7 @@ def test_discover_master_sentinel_timeout(cluster, sentinel, master_ip):
assert sentinel.sentinels[0].id == ('bar', 26379)
+@pytest.mark.onlynoncluster
def test_master_min_other_sentinels(cluster, master_ip):
sentinel = Sentinel([('foo', 26379)], min_other_sentinels=1)
# min_other_sentinels
@@ -119,18 +124,21 @@ def test_master_min_other_sentinels(cluster, master_ip):
assert address == (master_ip, 6379)
+@pytest.mark.onlynoncluster
def test_master_odown(cluster, sentinel):
cluster.master['is_odown'] = True
with pytest.raises(MasterNotFoundError):
sentinel.discover_master('mymaster')
+@pytest.mark.onlynoncluster
def test_master_sdown(cluster, sentinel):
cluster.master['is_sdown'] = True
with pytest.raises(MasterNotFoundError):
sentinel.discover_master('mymaster')
+@pytest.mark.onlynoncluster
def test_discover_slaves(cluster, sentinel):
assert sentinel.discover_slaves('mymaster') == []
@@ -165,6 +173,7 @@ def test_discover_slaves(cluster, sentinel):
('slave0', 1234), ('slave1', 1234)]
+@pytest.mark.onlynoncluster
def test_master_for(cluster, sentinel, master_ip):
master = sentinel.master_for('mymaster', db=9)
assert master.ping()
@@ -175,6 +184,7 @@ def test_master_for(cluster, sentinel, master_ip):
assert master.ping()
+@pytest.mark.onlynoncluster
def test_slave_for(cluster, sentinel):
cluster.slaves = [
{'ip': '127.0.0.1', 'port': 6379,
@@ -184,6 +194,7 @@ def test_slave_for(cluster, sentinel):
assert slave.ping()
+@pytest.mark.onlynoncluster
def test_slave_for_slave_not_found_error(cluster, sentinel):
cluster.master['is_odown'] = True
slave = sentinel.slave_for('mymaster', db=9)
@@ -191,6 +202,7 @@ def test_slave_for_slave_not_found_error(cluster, sentinel):
slave.ping()
+@pytest.mark.onlynoncluster
def test_slave_round_robin(cluster, sentinel, master_ip):
cluster.slaves = [
{'ip': 'slave0', 'port': 6379, 'is_odown': False, 'is_sdown': False},
@@ -206,14 +218,17 @@ def test_slave_round_robin(cluster, sentinel, master_ip):
next(rotator)
+@pytest.mark.onlynoncluster
def test_ckquorum(cluster, sentinel):
assert sentinel.sentinel_ckquorum("mymaster")
+@pytest.mark.onlynoncluster
def test_flushconfig(cluster, sentinel):
assert sentinel.sentinel_flushconfig()
+@pytest.mark.onlynoncluster
def test_reset(cluster, sentinel):
cluster.master['is_odown'] = True
assert sentinel.sentinel_reset('mymaster')
diff --git a/tests/test_timeseries.py b/tests/test_timeseries.py
index 99c6083..0743357 100644
--- a/tests/test_timeseries.py
+++ b/tests/test_timeseries.py
@@ -31,7 +31,7 @@ def test_create(client):
def test_create_duplicate_policy(client):
# Test for duplicate policy
for duplicate_policy in ["block", "last", "first", "min", "max"]:
- ts_name = "time-serie-ooo-{0}".format(duplicate_policy)
+ ts_name = f"time-serie-ooo-{duplicate_policy}"
assert client.ts().create(ts_name, duplicate_policy=duplicate_policy)
info = client.ts().info(ts_name)
assert duplicate_policy == info.duplicate_policy
@@ -565,7 +565,6 @@ def test_query_index(client):
@pytest.mark.redismod
-@pytest.mark.pipeline
def test_pipeline(client):
pipeline = client.ts().pipeline()
pipeline.create("with_pipeline")
diff --git a/tox.ini b/tox.ini
index 6d4c658..f710bba 100644
--- a/tox.ini
+++ b/tox.ini
@@ -2,11 +2,13 @@
addopts = -s
markers =
redismod: run only the redis module tests
+ onlycluster: marks tests to be run only with cluster mode redis
+ onlynoncluster: marks tests to be run only with standalone redis
[tox]
minversion = 3.2.0
requires = tox-docker
-envlist = {py35,py36,py37,py38,py39,pypy3}-{plain,hiredis},linters
+envlist = {standalone,cluster}-{plain,hiredis}-{py35,py36,py37,py38,py39,pypy3},linters,docs
[docker:master]
name = master
@@ -74,6 +76,21 @@ image = redisfab/lots-of-pythons
volumes =
bind:rw:{toxinidir}:/data
+[docker:redis_cluster]
+name = redis_cluster
+image = redisfab/redis-py-cluster:6.2.6-buster
+ports =
+ 16379:16379/tcp
+ 16380:16380/tcp
+ 16381:16381/tcp
+ 16382:16382/tcp
+ 16383:16383/tcp
+ 16384:16384/tcp
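+# health check succeeds only when every cluster port accepts a TCP connection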
+healthcheck_cmd = python -c "import socket,sys;sys.exit(0 if all(socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect_ex(('127.0.0.1', port)) == 0 for port in range(16379, 16385)) else 1)"
+volumes =
+ bind:rw:{toxinidir}/docker/cluster/redis.conf:/redis.conf
+
+
[testenv]
deps =
-r {toxinidir}/requirements.txt
@@ -84,11 +101,15 @@ docker =
sentinel_1
sentinel_2
sentinel_3
+ redis_cluster
redismod
extras =
hiredis: hiredis
+setenv =
+    CLUSTER_URL = redis://localhost:16379/0
commands =
- pytest --cov=./ --cov-report=xml -W always {posargs}
+ standalone: pytest --cov=./ --cov-report=xml:coverage_redis.xml -W always -m 'not onlycluster' {posargs}
+ cluster: pytest --cov=./ --cov-report=xml:coverage_cluster.xml -W always -m 'not onlynoncluster and not redismod' --redis-url={env:CLUSTER_URL:} {posargs}
[testenv:devenv]
skipsdist = true
@@ -100,16 +121,18 @@ docker =
sentinel_1
sentinel_2
sentinel_3
+ redis_cluster
redismod
lots-of-pythons
commands = /usr/bin/echo
[testenv:linters]
deps_files = dev_requirements.txt
-docker=
+docker =
commands =
- flake8
+ flake8 --max-line-length=88
vulture redis whitelist.py --min-confidence 80
+ flynt --fail-on-change --dry-run .
skipsdist = true
skip_install = true
@@ -119,6 +142,13 @@ basepython = pypy3
[testenv:pypy3-hiredis]
basepython = pypy3
+[testenv:docs]
+deps_files = docs/requirements.txt
+docker =
+changedir = {toxinidir}/docs
+allowlist_externals = make
+commands = make html
+
[flake8]
exclude =
*.egg-info,